/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			121
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"November 2, 2011"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
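/* TG3_TX_RING_SIZE is a power of two, so the ring index can advance
 * with a mask instead of a modulo, as the comment above describes.
 */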
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

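/* Write a register, then read it back to flush the posted PCI write. */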
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

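/* TX mailbox writes: chips flagged with TXD_MBOX_HWBUG need the value
 * written twice, and hosts that may reorder writes need a read back to
 * flush the write to the chip.
 */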
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
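		/* fall through */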
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
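		/* fall through */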
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  Similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  that re-enables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

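/* Issue an MII read frame to the PHY and poll MI_COM_BUSY until the
 * frame completes, giving up after PHY_BUSY_LOOPS iterations.
 */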
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

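/* Configure the MAC side of the MAC/PHY interface (LED modes and, for
 * RGMII, in-band status signalling) to match the attached PHY.
 */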
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
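	/* The loop below polls in 8 usec steps, so convert usecs to steps. */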
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

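/* Poll until the boot firmware signals that its initialization is done. */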
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}

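/* Map the requested flow control mode to the MII pause advertisement
 * bits for a 1000BASE-T (copper) link.
 */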
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

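/* Resolve the negotiated TX/RX flow control from the local and
 * link-partner 1000BASE-X pause advertisements.
 */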
static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
1913 	}
1914 
1915 	phy_start(phydev);
1916 
1917 	phy_start_aneg(phydev);
1918 }
1919 
1920 static void tg3_phy_stop(struct tg3 *tp)
1921 {
1922 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1923 		return;
1924 
1925 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1926 }
1927 
1928 static void tg3_phy_fini(struct tg3 *tp)
1929 {
1930 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1931 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1932 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1933 	}
1934 }
1935 
1936 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1937 {
1938 	int err;
1939 	u32 val;
1940 
1941 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1942 		return 0;
1943 
1944 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1945 		/* Cannot do read-modify-write on 5401 */
1946 		err = tg3_phy_auxctl_write(tp,
1947 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1948 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1949 					   0x4c20);
1950 		goto done;
1951 	}
1952 
1953 	err = tg3_phy_auxctl_read(tp,
1954 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1955 	if (err)
1956 		return err;
1957 
1958 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1959 	err = tg3_phy_auxctl_write(tp,
1960 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1961 
1962 done:
1963 	return err;
1964 }
1965 
1966 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1967 {
1968 	u32 phytest;
1969 
1970 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1971 		u32 phy;
1972 
1973 		tg3_writephy(tp, MII_TG3_FET_TEST,
1974 			     phytest | MII_TG3_FET_SHADOW_EN);
1975 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1976 			if (enable)
1977 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978 			else
1979 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1980 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1981 		}
1982 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1983 	}
1984 }
1985 
1986 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1987 {
1988 	u32 reg;
1989 
1990 	if (!tg3_flag(tp, 5705_PLUS) ||
1991 	    (tg3_flag(tp, 5717_PLUS) &&
1992 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1993 		return;
1994 
1995 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1996 		tg3_phy_fet_toggle_apd(tp, enable);
1997 		return;
1998 	}
1999 
2000 	reg = MII_TG3_MISC_SHDW_WREN |
2001 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2002 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2003 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2004 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2005 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2006 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2007 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2008 
	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);

2012 	reg = MII_TG3_MISC_SHDW_WREN |
2013 	      MII_TG3_MISC_SHDW_APD_SEL |
2014 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2015 	if (enable)
2016 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2017 
2018 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2019 }
2020 
2021 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2022 {
2023 	u32 phy;
2024 
2025 	if (!tg3_flag(tp, 5705_PLUS) ||
2026 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2027 		return;
2028 
2029 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2030 		u32 ephy;
2031 
2032 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2033 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2034 
2035 			tg3_writephy(tp, MII_TG3_FET_TEST,
2036 				     ephy | MII_TG3_FET_SHADOW_EN);
2037 			if (!tg3_readphy(tp, reg, &phy)) {
2038 				if (enable)
2039 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2040 				else
2041 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2042 				tg3_writephy(tp, reg, phy);
2043 			}
2044 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2045 		}
2046 	} else {
2047 		int ret;
2048 
2049 		ret = tg3_phy_auxctl_read(tp,
2050 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2051 		if (!ret) {
2052 			if (enable)
2053 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2054 			else
2055 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2056 			tg3_phy_auxctl_write(tp,
2057 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2058 		}
2059 	}
2060 }
2061 
2062 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2063 {
2064 	int ret;
2065 	u32 val;
2066 
2067 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2068 		return;
2069 
2070 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2071 	if (!ret)
2072 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2073 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2074 }
2075 
2076 static void tg3_phy_apply_otp(struct tg3 *tp)
2077 {
2078 	u32 otp, phy;
2079 
2080 	if (!tp->phy_otp)
2081 		return;
2082 
2083 	otp = tp->phy_otp;
2084 
2085 	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2086 		return;
2087 
2088 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2089 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2090 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2091 
2092 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2093 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2094 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2095 
2096 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2097 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2098 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2099 
2100 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2101 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2102 
2103 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2104 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2105 
2106 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2107 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2108 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2109 
2110 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2111 }
2112 
2113 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2114 {
2115 	u32 val;
2116 
2117 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2118 		return;
2119 
2120 	tp->setlpicnt = 0;
2121 
2122 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2123 	    current_link_up == 1 &&
2124 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2125 	    (tp->link_config.active_speed == SPEED_100 ||
2126 	     tp->link_config.active_speed == SPEED_1000)) {
2127 		u32 eeectl;
2128 
2129 		if (tp->link_config.active_speed == SPEED_1000)
2130 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2131 		else
2132 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2133 
2134 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2135 
2136 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2137 				  TG3_CL45_D7_EEERES_STAT, &val);
2138 
2139 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2140 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2141 			tp->setlpicnt = 2;
2142 	}
2143 
2144 	if (!tp->setlpicnt) {
2145 		if (current_link_up == 1 &&
2146 		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2147 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2148 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2149 		}
2150 
2151 		val = tr32(TG3_CPMU_EEE_MODE);
2152 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2153 	}
2154 }
2155 
2156 static void tg3_phy_eee_enable(struct tg3 *tp)
2157 {
2158 	u32 val;
2159 
2160 	if (tp->link_config.active_speed == SPEED_1000 &&
2161 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2162 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2163 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2164 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2165 		val = MII_TG3_DSP_TAP26_ALNOKO |
2166 		      MII_TG3_DSP_TAP26_RMRXSTO;
2167 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2168 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2169 	}
2170 
2171 	val = tr32(TG3_CPMU_EEE_MODE);
2172 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2173 }
2174 
2175 static int tg3_wait_macro_done(struct tg3 *tp)
2176 {
2177 	int limit = 100;
2178 
2179 	while (limit--) {
2180 		u32 tmp32;
2181 
2182 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2183 			if ((tmp32 & 0x1000) == 0)
2184 				break;
2185 		}
2186 	}
2187 	if (limit < 0)
2188 		return -EBUSY;
2189 
2190 	return 0;
2191 }
2192 
2193 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2194 {
2195 	static const u32 test_pat[4][6] = {
2196 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2197 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2198 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2199 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2200 	};
2201 	int chan;
2202 
2203 	for (chan = 0; chan < 4; chan++) {
2204 		int i;
2205 
2206 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2207 			     (chan * 0x2000) | 0x0200);
2208 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2209 
2210 		for (i = 0; i < 6; i++)
2211 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2212 				     test_pat[chan][i]);
2213 
2214 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2215 		if (tg3_wait_macro_done(tp)) {
2216 			*resetp = 1;
2217 			return -EBUSY;
2218 		}
2219 
2220 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2221 			     (chan * 0x2000) | 0x0200);
2222 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2223 		if (tg3_wait_macro_done(tp)) {
2224 			*resetp = 1;
2225 			return -EBUSY;
2226 		}
2227 
2228 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2229 		if (tg3_wait_macro_done(tp)) {
2230 			*resetp = 1;
2231 			return -EBUSY;
2232 		}
2233 
2234 		for (i = 0; i < 6; i += 2) {
2235 			u32 low, high;
2236 
2237 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2238 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2239 			    tg3_wait_macro_done(tp)) {
2240 				*resetp = 1;
2241 				return -EBUSY;
2242 			}
2243 			low &= 0x7fff;
2244 			high &= 0x000f;
2245 			if (low != test_pat[chan][i] ||
2246 			    high != test_pat[chan][i+1]) {
2247 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2248 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2249 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2250 
2251 				return -EBUSY;
2252 			}
2253 		}
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2260 {
2261 	int chan;
2262 
2263 	for (chan = 0; chan < 4; chan++) {
2264 		int i;
2265 
2266 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2267 			     (chan * 0x2000) | 0x0200);
2268 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2269 		for (i = 0; i < 6; i++)
2270 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2271 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2272 		if (tg3_wait_macro_done(tp))
2273 			return -EBUSY;
2274 	}
2275 
2276 	return 0;
2277 }
2278 
2279 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2280 {
2281 	u32 reg32, phy9_orig;
2282 	int retries, do_phy_reset, err;
2283 
2284 	retries = 10;
2285 	do_phy_reset = 1;
2286 	do {
2287 		if (do_phy_reset) {
2288 			err = tg3_bmcr_reset(tp);
2289 			if (err)
2290 				return err;
2291 			do_phy_reset = 0;
2292 		}
2293 
2294 		/* Disable transmitter and interrupt.  */
2295 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2296 			continue;
2297 
2298 		reg32 |= 0x3000;
2299 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2300 
2301 		/* Set full-duplex, 1000 mbps.  */
2302 		tg3_writephy(tp, MII_BMCR,
2303 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2304 
2305 		/* Set to master mode.  */
2306 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2307 			continue;
2308 
2309 		tg3_writephy(tp, MII_CTRL1000,
2310 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2311 
2312 		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2313 		if (err)
2314 			return err;
2315 
2316 		/* Block the PHY control access.  */
2317 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2318 
2319 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2320 		if (!err)
2321 			break;
2322 	} while (--retries);
2323 
2324 	err = tg3_phy_reset_chanpat(tp);
2325 	if (err)
2326 		return err;
2327 
2328 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2329 
2330 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2331 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2332 
2333 	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2334 
2335 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2336 
2337 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2338 		reg32 &= ~0x3000;
2339 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2340 	} else if (!err)
2341 		err = -EBUSY;
2342 
2343 	return err;
2344 }
2345 
/* This will reset the tigon3 PHY unconditionally and reapply the
 * chip-specific workarounds.
 */
2349 static int tg3_phy_reset(struct tg3 *tp)
2350 {
2351 	u32 val, cpmuctrl;
2352 	int err;
2353 
2354 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2355 		val = tr32(GRC_MISC_CFG);
2356 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2357 		udelay(40);
2358 	}
2359 	err  = tg3_readphy(tp, MII_BMSR, &val);
2360 	err |= tg3_readphy(tp, MII_BMSR, &val);
2361 	if (err != 0)
2362 		return -EBUSY;
2363 
2364 	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2365 		netif_carrier_off(tp->dev);
2366 		tg3_link_report(tp);
2367 	}
2368 
2369 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2370 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2371 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2372 		err = tg3_phy_reset_5703_4_5(tp);
2373 		if (err)
2374 			return err;
2375 		goto out;
2376 	}
2377 
2378 	cpmuctrl = 0;
2379 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2380 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2381 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2382 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2383 			tw32(TG3_CPMU_CTRL,
2384 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2385 	}
2386 
2387 	err = tg3_bmcr_reset(tp);
2388 	if (err)
2389 		return err;
2390 
2391 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2392 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2393 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2394 
2395 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2396 	}
2397 
2398 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2399 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2400 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2401 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2402 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2403 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2404 			udelay(40);
2405 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2406 		}
2407 	}
2408 
2409 	if (tg3_flag(tp, 5717_PLUS) &&
2410 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2411 		return 0;
2412 
2413 	tg3_phy_apply_otp(tp);
2414 
2415 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2416 		tg3_phy_toggle_apd(tp, true);
2417 	else
2418 		tg3_phy_toggle_apd(tp, false);
2419 
2420 out:
2421 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2422 	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2424 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2425 		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2426 	}
2427 
2428 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2429 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2430 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2431 	}
2432 
2433 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2434 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2436 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2437 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2438 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439 		}
2440 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2441 		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2442 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2443 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2444 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2445 				tg3_writephy(tp, MII_TG3_TEST1,
2446 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2447 			} else
2448 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2449 
2450 			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2451 		}
2452 	}
2453 
	/* Set the extended packet length bit (bit 14) on all chips
	 * that support jumbo frames.
	 */
2456 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2457 		/* Cannot do read-modify-write on 5401 */
2458 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2459 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460 		/* Set bit 14 with read-modify-write to preserve other bits */
2461 		err = tg3_phy_auxctl_read(tp,
2462 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2463 		if (!err)
2464 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2465 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2466 	}
2467 
2468 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2469 	 * jumbo frames transmission.
2470 	 */
2471 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2472 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2473 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2474 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2475 	}
2476 
2477 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2478 		/* adjust output voltage */
2479 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2480 	}
2481 
2482 	tg3_phy_toggle_automdix(tp, 1);
2483 	tg3_phy_set_wirespeed(tp);
2484 	return 0;
2485 }
2486 
2487 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2488 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2489 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2490 					  TG3_GPIO_MSG_NEED_VAUX)
2491 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2492 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2493 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2494 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2495 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2496 
2497 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2498 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2499 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2500 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2501 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2502 
2503 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2504 {
2505 	u32 status, shift;
2506 
2507 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2508 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2509 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2510 	else
2511 		status = tr32(TG3_CPMU_DRV_STATUS);
2512 
2513 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2514 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2515 	status |= (newstat << shift);
2516 
2517 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2518 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2519 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2520 	else
2521 		tw32(TG3_CPMU_DRV_STATUS, status);
2522 
2523 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2524 }
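
/* Layout of the status word juggled above: each PCI function owns a
 * 4-bit slice starting at bit TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn,
 * and each slice carries the TG3_GPIO_MSG_DRVR_PRES and
 * TG3_GPIO_MSG_NEED_VAUX bits.  tg3_set_function_status() rewrites
 * only the caller's slice and returns the whole word, so a function
 * can see what its siblings have requested.
 */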
2525 
2526 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2527 {
2528 	if (!tg3_flag(tp, IS_NIC))
2529 		return 0;
2530 
2531 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2532 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2533 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2534 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2535 			return -EIO;
2536 
2537 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2538 
2539 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2540 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2541 
2542 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2543 	} else {
2544 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2545 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2552 {
2553 	u32 grc_local_ctrl;
2554 
2555 	if (!tg3_flag(tp, IS_NIC) ||
2556 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2557 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2558 		return;
2559 
2560 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2561 
2562 	tw32_wait_f(GRC_LOCAL_CTRL,
2563 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2564 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2565 
2566 	tw32_wait_f(GRC_LOCAL_CTRL,
2567 		    grc_local_ctrl,
2568 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2569 
2570 	tw32_wait_f(GRC_LOCAL_CTRL,
2571 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2572 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2573 }
2574 
2575 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2576 {
2577 	if (!tg3_flag(tp, IS_NIC))
2578 		return;
2579 
2580 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2581 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2582 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2583 			    (GRC_LCLCTRL_GPIO_OE0 |
2584 			     GRC_LCLCTRL_GPIO_OE1 |
2585 			     GRC_LCLCTRL_GPIO_OE2 |
2586 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2587 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2588 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2589 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2590 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2591 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2592 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2593 				     GRC_LCLCTRL_GPIO_OE1 |
2594 				     GRC_LCLCTRL_GPIO_OE2 |
2595 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2596 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2597 				     tp->grc_local_ctrl;
2598 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2599 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2600 
2601 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2602 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2603 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2604 
2605 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2606 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2607 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2608 	} else {
2609 		u32 no_gpio2;
2610 		u32 grc_local_ctrl = 0;
2611 
		/* Workaround to keep the NIC from drawing excess current. */
2613 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2614 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2615 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2616 				    grc_local_ctrl,
2617 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2618 		}
2619 
2620 		/* On 5753 and variants, GPIO2 cannot be used. */
2621 		no_gpio2 = tp->nic_sram_data_cfg &
2622 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2623 
2624 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2625 				  GRC_LCLCTRL_GPIO_OE1 |
2626 				  GRC_LCLCTRL_GPIO_OE2 |
2627 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2628 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2629 		if (no_gpio2) {
2630 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2631 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2632 		}
2633 		tw32_wait_f(GRC_LOCAL_CTRL,
2634 			    tp->grc_local_ctrl | grc_local_ctrl,
2635 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2636 
2637 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2638 
2639 		tw32_wait_f(GRC_LOCAL_CTRL,
2640 			    tp->grc_local_ctrl | grc_local_ctrl,
2641 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2642 
2643 		if (!no_gpio2) {
2644 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2645 			tw32_wait_f(GRC_LOCAL_CTRL,
2646 				    tp->grc_local_ctrl | grc_local_ctrl,
2647 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2648 		}
2649 	}
2650 }
2651 
2652 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2653 {
2654 	u32 msg = 0;
2655 
2656 	/* Serialize power state transitions */
2657 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2658 		return;
2659 
2660 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2661 		msg = TG3_GPIO_MSG_NEED_VAUX;
2662 
2663 	msg = tg3_set_function_status(tp, msg);
2664 
2665 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2666 		goto done;
2667 
2668 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2669 		tg3_pwrsrc_switch_to_vaux(tp);
2670 	else
2671 		tg3_pwrsrc_die_with_vmain(tp);
2672 
2673 done:
2674 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2675 }
2676 
2677 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2678 {
2679 	bool need_vaux = false;
2680 
	/* The GPIOs are repurposed on the 57765; leave them alone. */
2682 	if (!tg3_flag(tp, IS_NIC) ||
2683 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2684 		return;
2685 
2686 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2687 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2688 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2689 		tg3_frob_aux_power_5717(tp, include_wol ?
2690 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2691 		return;
2692 	}
2693 
2694 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2695 		struct net_device *dev_peer;
2696 
2697 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2698 
2699 		/* remove_one() may have been run on the peer. */
2700 		if (dev_peer) {
2701 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2702 
2703 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2704 				return;
2705 
2706 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2707 			    tg3_flag(tp_peer, ENABLE_ASF))
2708 				need_vaux = true;
2709 		}
2710 	}
2711 
2712 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2713 	    tg3_flag(tp, ENABLE_ASF))
2714 		need_vaux = true;
2715 
2716 	if (need_vaux)
2717 		tg3_pwrsrc_switch_to_vaux(tp);
2718 	else
2719 		tg3_pwrsrc_die_with_vmain(tp);
2720 }
2721 
2722 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2723 {
2724 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2725 		return 1;
2726 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2727 		if (speed != SPEED_10)
2728 			return 1;
2729 	} else if (speed == SPEED_10)
2730 		return 1;
2731 
2732 	return 0;
2733 }
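
/* Summary of the rules above: MAC_MODE_LINK_POLARITY is warranted
 * unconditionally in LED_CTRL_MODE_PHY_2, at any speed other than
 * 10 Mbps on BCM5411 PHYs, and only at 10 Mbps otherwise (see the
 * WOL setup in tg3_power_down_prepare() below for a caller).
 */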
2734 
2735 static int tg3_setup_phy(struct tg3 *, int);
2736 static int tg3_halt_cpu(struct tg3 *, u32);
2737 
2738 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2739 {
2740 	u32 val;
2741 
2742 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2743 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2744 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2745 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2746 
2747 			sg_dig_ctrl |=
2748 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2749 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2750 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2751 		}
2752 		return;
2753 	}
2754 
2755 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2756 		tg3_bmcr_reset(tp);
2757 		val = tr32(GRC_MISC_CFG);
2758 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2759 		udelay(40);
2760 		return;
2761 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2762 		u32 phytest;
2763 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2764 			u32 phy;
2765 
2766 			tg3_writephy(tp, MII_ADVERTISE, 0);
2767 			tg3_writephy(tp, MII_BMCR,
2768 				     BMCR_ANENABLE | BMCR_ANRESTART);
2769 
2770 			tg3_writephy(tp, MII_TG3_FET_TEST,
2771 				     phytest | MII_TG3_FET_SHADOW_EN);
2772 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2773 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2774 				tg3_writephy(tp,
2775 					     MII_TG3_FET_SHDW_AUXMODE4,
2776 					     phy);
2777 			}
2778 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2779 		}
2780 		return;
2781 	} else if (do_low_power) {
2782 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2783 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2784 
2785 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2786 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2787 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2788 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2789 	}
2790 
2791 	/* The PHY should not be powered down on some chips because
2792 	 * of bugs.
2793 	 */
2794 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2795 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2796 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2797 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2798 		return;
2799 
2800 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2801 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2802 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2803 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2804 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2805 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2806 	}
2807 
2808 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2809 }
2810 
2811 /* tp->lock is held. */
2812 static int tg3_nvram_lock(struct tg3 *tp)
2813 {
2814 	if (tg3_flag(tp, NVRAM)) {
2815 		int i;
2816 
2817 		if (tp->nvram_lock_cnt == 0) {
2818 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2819 			for (i = 0; i < 8000; i++) {
2820 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2821 					break;
2822 				udelay(20);
2823 			}
2824 			if (i == 8000) {
2825 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2826 				return -ENODEV;
2827 			}
2828 		}
2829 		tp->nvram_lock_cnt++;
2830 	}
2831 	return 0;
2832 }
2833 
2834 /* tp->lock is held. */
2835 static void tg3_nvram_unlock(struct tg3 *tp)
2836 {
2837 	if (tg3_flag(tp, NVRAM)) {
2838 		if (tp->nvram_lock_cnt > 0)
2839 			tp->nvram_lock_cnt--;
2840 		if (tp->nvram_lock_cnt == 0)
2841 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2842 	}
2843 }
2844 
2845 /* tp->lock is held. */
2846 static void tg3_enable_nvram_access(struct tg3 *tp)
2847 {
2848 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2849 		u32 nvaccess = tr32(NVRAM_ACCESS);
2850 
2851 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2852 	}
2853 }
2854 
2855 /* tp->lock is held. */
2856 static void tg3_disable_nvram_access(struct tg3 *tp)
2857 {
2858 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2859 		u32 nvaccess = tr32(NVRAM_ACCESS);
2860 
2861 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2862 	}
2863 }
2864 
2865 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2866 					u32 offset, u32 *val)
2867 {
2868 	u32 tmp;
2869 	int i;
2870 
2871 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2872 		return -EINVAL;
2873 
2874 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2875 					EEPROM_ADDR_DEVID_MASK |
2876 					EEPROM_ADDR_READ);
2877 	tw32(GRC_EEPROM_ADDR,
2878 	     tmp |
2879 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2880 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2881 	      EEPROM_ADDR_ADDR_MASK) |
2882 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2883 
2884 	for (i = 0; i < 1000; i++) {
2885 		tmp = tr32(GRC_EEPROM_ADDR);
2886 
2887 		if (tmp & EEPROM_ADDR_COMPLETE)
2888 			break;
2889 		msleep(1);
2890 	}
2891 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2892 		return -EBUSY;
2893 
2894 	tmp = tr32(GRC_EEPROM_DATA);
2895 
2896 	/*
2897 	 * The data will always be opposite the native endian
2898 	 * format.  Perform a blind byteswap to compensate.
2899 	 */
2900 	*val = swab32(tmp);
2901 
2902 	return 0;
2903 }
2904 
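/* Polling budget for tg3_nvram_exec_cmd() below: up to 10000 polls
 * of udelay(10) bounds a single NVRAM command at roughly 100 ms
 * before giving up with -EBUSY.
 */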
2905 #define NVRAM_CMD_TIMEOUT 10000
2906 
2907 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2908 {
2909 	int i;
2910 
2911 	tw32(NVRAM_CMD, nvram_cmd);
2912 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2913 		udelay(10);
2914 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2915 			udelay(10);
2916 			break;
2917 		}
2918 	}
2919 
2920 	if (i == NVRAM_CMD_TIMEOUT)
2921 		return -EBUSY;
2922 
2923 	return 0;
2924 }
2925 
2926 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2927 {
2928 	if (tg3_flag(tp, NVRAM) &&
2929 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2930 	    tg3_flag(tp, FLASH) &&
2931 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2933 
2934 		addr = ((addr / tp->nvram_pagesize) <<
2935 			ATMEL_AT45DB0X1B_PAGE_POS) +
2936 		       (addr % tp->nvram_pagesize);
2937 
2938 	return addr;
2939 }
2940 
2941 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2942 {
2943 	if (tg3_flag(tp, NVRAM) &&
2944 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2945 	    tg3_flag(tp, FLASH) &&
2946 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2947 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2948 
2949 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2950 			tp->nvram_pagesize) +
2951 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2952 
2953 	return addr;
2954 }
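
/* Worked example for the two translations above, assuming the
 * 264-byte-page Atmel AT45DB0X1B parts they target: linear offset
 * 1000 falls in page 1000 / 264 = 3 at byte 1000 % 264 = 208, so the
 * physical address is (3 << ATMEL_AT45DB0X1B_PAGE_POS) + 208.
 * tg3_nvram_logical_addr() inverts that mapping exactly.
 */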
2955 
2956 /* NOTE: Data read in from NVRAM is byteswapped according to
2957  * the byteswapping settings for all other register accesses.
2958  * tg3 devices are BE devices, so on a BE machine, the data
2959  * returned will be exactly as it is seen in NVRAM.  On a LE
2960  * machine, the 32-bit value will be byteswapped.
2961  */
2962 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2963 {
2964 	int ret;
2965 
2966 	if (!tg3_flag(tp, NVRAM))
2967 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2968 
2969 	offset = tg3_nvram_phys_addr(tp, offset);
2970 
2971 	if (offset > NVRAM_ADDR_MSK)
2972 		return -EINVAL;
2973 
2974 	ret = tg3_nvram_lock(tp);
2975 	if (ret)
2976 		return ret;
2977 
2978 	tg3_enable_nvram_access(tp);
2979 
2980 	tw32(NVRAM_ADDR, offset);
2981 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2982 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2983 
2984 	if (ret == 0)
2985 		*val = tr32(NVRAM_RDDATA);
2986 
2987 	tg3_disable_nvram_access(tp);
2988 
2989 	tg3_nvram_unlock(tp);
2990 
2991 	return ret;
2992 }
2993 
2994 /* Ensures NVRAM data is in bytestream format. */
2995 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2996 {
2997 	u32 v;
2998 	int res = tg3_nvram_read(tp, offset, &v);
2999 	if (!res)
3000 		*val = cpu_to_be32(v);
3001 	return res;
3002 }
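
/* A minimal usage sketch (not compiled into the driver): peeking at
 * one bytestream-ordered NVRAM word.  The offset is illustrative.
 */
#if 0
static void tg3_example_nvram_peek(struct tg3 *tp)
{
	__be32 word;

	if (!tg3_nvram_read_be32(tp, 0x0, &word))
		netdev_info(tp->dev, "NVRAM word 0: %08x\n",
			    be32_to_cpu(word));
}
#endif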
3003 
3004 #define RX_CPU_SCRATCH_BASE	0x30000
3005 #define RX_CPU_SCRATCH_SIZE	0x04000
3006 #define TX_CPU_SCRATCH_BASE	0x34000
3007 #define TX_CPU_SCRATCH_SIZE	0x04000
3008 
3009 /* tp->lock is held. */
3010 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3011 {
3012 	int i;
3013 
3014 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3015 
3016 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3017 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3018 
3019 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3020 		return 0;
3021 	}
3022 	if (offset == RX_CPU_BASE) {
3023 		for (i = 0; i < 10000; i++) {
3024 			tw32(offset + CPU_STATE, 0xffffffff);
3025 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3026 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3027 				break;
3028 		}
3029 
3030 		tw32(offset + CPU_STATE, 0xffffffff);
3031 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3032 		udelay(10);
3033 	} else {
3034 		for (i = 0; i < 10000; i++) {
3035 			tw32(offset + CPU_STATE, 0xffffffff);
3036 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3037 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3038 				break;
3039 		}
3040 	}
3041 
3042 	if (i >= 10000) {
3043 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3044 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3045 		return -ENODEV;
3046 	}
3047 
3048 	/* Clear firmware's nvram arbitration. */
3049 	if (tg3_flag(tp, NVRAM))
3050 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3051 	return 0;
3052 }
3053 
3054 struct fw_info {
3055 	unsigned int fw_base;
3056 	unsigned int fw_len;
3057 	const __be32 *fw_data;
3058 };
3059 
3060 /* tp->lock is held. */
3061 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3062 				 u32 cpu_scratch_base, int cpu_scratch_size,
3063 				 struct fw_info *info)
3064 {
3065 	int err, lock_err, i;
3066 	void (*write_op)(struct tg3 *, u32, u32);
3067 
3068 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3069 		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3071 			   __func__);
3072 		return -EINVAL;
3073 	}
3074 
3075 	if (tg3_flag(tp, 5705_PLUS))
3076 		write_op = tg3_write_mem;
3077 	else
3078 		write_op = tg3_write_indirect_reg32;
3079 
	/* The bootcode may still be loading at this point.  Acquire the
	 * NVRAM lock before halting the CPU.
	 */
3083 	lock_err = tg3_nvram_lock(tp);
3084 	err = tg3_halt_cpu(tp, cpu_base);
3085 	if (!lock_err)
3086 		tg3_nvram_unlock(tp);
3087 	if (err)
3088 		goto out;
3089 
3090 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3091 		write_op(tp, cpu_scratch_base + i, 0);
3092 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3093 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3094 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3095 		write_op(tp, (cpu_scratch_base +
3096 			      (info->fw_base & 0xffff) +
3097 			      (i * sizeof(u32))),
3098 			      be32_to_cpu(info->fw_data[i]));
3099 
3100 	err = 0;
3101 
3102 out:
3103 	return err;
3104 }
3105 
3106 /* tp->lock is held. */
3107 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3108 {
3109 	struct fw_info info;
3110 	const __be32 *fw_data;
3111 	int err, i;
3112 
3113 	fw_data = (void *)tp->fw->data;
3114 
	/* The firmware blob starts with version numbers, followed by the
	 * start address and length.  The length set here is the complete
	 * image length: end_address_of_bss - start_address_of_text.  The
	 * remainder is the blob to be loaded contiguously from the start
	 * address.
	 */
3120 
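	/* Header layout as consumed below (derived from this code, not
	 * a formal spec): three 32-bit big-endian words (version, start
	 * address in fw_data[1], image length in fw_data[2]), then the
	 * image itself from fw_data[3] onward.  The length used here is
	 * recomputed from the file size rather than trusted from the
	 * header.
	 */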
3121 	info.fw_base = be32_to_cpu(fw_data[1]);
3122 	info.fw_len = tp->fw->size - 12;
3123 	info.fw_data = &fw_data[3];
3124 
3125 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3126 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3127 				    &info);
3128 	if (err)
3129 		return err;
3130 
3131 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3132 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3133 				    &info);
3134 	if (err)
3135 		return err;
3136 
3137 	/* Now startup only the RX cpu. */
3138 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3139 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3140 
3141 	for (i = 0; i < 5; i++) {
3142 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3143 			break;
3144 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3145 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3146 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3147 		udelay(1000);
3148 	}
3149 	if (i >= 5) {
		netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3153 		return -ENODEV;
3154 	}
3155 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3156 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3157 
3158 	return 0;
3159 }
3160 
3161 /* tp->lock is held. */
3162 static int tg3_load_tso_firmware(struct tg3 *tp)
3163 {
3164 	struct fw_info info;
3165 	const __be32 *fw_data;
3166 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3167 	int err, i;
3168 
3169 	if (tg3_flag(tp, HW_TSO_1) ||
3170 	    tg3_flag(tp, HW_TSO_2) ||
3171 	    tg3_flag(tp, HW_TSO_3))
3172 		return 0;
3173 
3174 	fw_data = (void *)tp->fw->data;
3175 
	/* Same firmware blob layout as in tg3_load_5701_a0_firmware_fix():
	 * version numbers, then start address and length, with the
	 * complete-image length (end_address_of_bss -
	 * start_address_of_text) set here and the remainder loaded
	 * contiguously from the start address.
	 */
3181 
3182 	info.fw_base = be32_to_cpu(fw_data[1]);
3183 	cpu_scratch_size = tp->fw_len;
3184 	info.fw_len = tp->fw->size - 12;
3185 	info.fw_data = &fw_data[3];
3186 
3187 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3188 		cpu_base = RX_CPU_BASE;
3189 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3190 	} else {
3191 		cpu_base = TX_CPU_BASE;
3192 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3193 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3194 	}
3195 
3196 	err = tg3_load_firmware_cpu(tp, cpu_base,
3197 				    cpu_scratch_base, cpu_scratch_size,
3198 				    &info);
3199 	if (err)
3200 		return err;
3201 
3202 	/* Now startup the cpu. */
3203 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3204 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3205 
3206 	for (i = 0; i < 5; i++) {
3207 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
3208 			break;
3209 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3210 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3211 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3212 		udelay(1000);
3213 	}
3214 	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s failed to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3218 		return -ENODEV;
3219 	}
3220 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3221 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3222 	return 0;
3223 }
3224 
3225 
3226 /* tp->lock is held. */
3227 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3228 {
3229 	u32 addr_high, addr_low;
3230 	int i;
3231 
3232 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3233 		     tp->dev->dev_addr[1]);
3234 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3235 		    (tp->dev->dev_addr[3] << 16) |
3236 		    (tp->dev->dev_addr[4] <<  8) |
3237 		    (tp->dev->dev_addr[5] <<  0));
3238 	for (i = 0; i < 4; i++) {
3239 		if (i == 1 && skip_mac_1)
3240 			continue;
3241 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3242 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3243 	}
3244 
3245 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3246 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3247 		for (i = 0; i < 12; i++) {
3248 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3249 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3250 		}
3251 	}
3252 
3253 	addr_high = (tp->dev->dev_addr[0] +
3254 		     tp->dev->dev_addr[1] +
3255 		     tp->dev->dev_addr[2] +
3256 		     tp->dev->dev_addr[3] +
3257 		     tp->dev->dev_addr[4] +
3258 		     tp->dev->dev_addr[5]) &
3259 		TX_BACKOFF_SEED_MASK;
3260 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3261 }
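
/* E.g. for a hypothetical station address 00:10:18:aa:bb:cc, the loop
 * above writes addr_high = 0x00000010 and addr_low = 0x18aabbcc, and
 * the backoff seed becomes the byte sum
 * (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK.
 */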
3262 
3263 static void tg3_enable_register_access(struct tg3 *tp)
3264 {
3265 	/*
3266 	 * Make sure register accesses (indirect or otherwise) will function
3267 	 * correctly.
3268 	 */
3269 	pci_write_config_dword(tp->pdev,
3270 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3271 }
3272 
3273 static int tg3_power_up(struct tg3 *tp)
3274 {
3275 	int err;
3276 
3277 	tg3_enable_register_access(tp);
3278 
3279 	err = pci_set_power_state(tp->pdev, PCI_D0);
3280 	if (!err) {
3281 		/* Switch out of Vaux if it is a NIC */
3282 		tg3_pwrsrc_switch_to_vmain(tp);
3283 	} else {
3284 		netdev_err(tp->dev, "Transition to D0 failed\n");
3285 	}
3286 
3287 	return err;
3288 }
3289 
3290 static int tg3_power_down_prepare(struct tg3 *tp)
3291 {
3292 	u32 misc_host_ctrl;
3293 	bool device_should_wake, do_low_power;
3294 
3295 	tg3_enable_register_access(tp);
3296 
3297 	/* Restore the CLKREQ setting. */
3298 	if (tg3_flag(tp, CLKREQ_BUG)) {
3299 		u16 lnkctl;
3300 
3301 		pci_read_config_word(tp->pdev,
3302 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3303 				     &lnkctl);
3304 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3305 		pci_write_config_word(tp->pdev,
3306 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3307 				      lnkctl);
3308 	}
3309 
3310 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3311 	tw32(TG3PCI_MISC_HOST_CTRL,
3312 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3313 
3314 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3315 			     tg3_flag(tp, WOL_ENABLE);
3316 
3317 	if (tg3_flag(tp, USE_PHYLIB)) {
3318 		do_low_power = false;
3319 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3320 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3321 			struct phy_device *phydev;
3322 			u32 phyid, advertising;
3323 
3324 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3325 
3326 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3327 
3328 			tp->link_config.orig_speed = phydev->speed;
3329 			tp->link_config.orig_duplex = phydev->duplex;
3330 			tp->link_config.orig_autoneg = phydev->autoneg;
3331 			tp->link_config.orig_advertising = phydev->advertising;
3332 
3333 			advertising = ADVERTISED_TP |
3334 				      ADVERTISED_Pause |
3335 				      ADVERTISED_Autoneg |
3336 				      ADVERTISED_10baseT_Half;
3337 
3338 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3339 				if (tg3_flag(tp, WOL_SPEED_100MB))
3340 					advertising |=
3341 						ADVERTISED_100baseT_Half |
3342 						ADVERTISED_100baseT_Full |
3343 						ADVERTISED_10baseT_Full;
3344 				else
3345 					advertising |= ADVERTISED_10baseT_Full;
3346 			}
3347 
3348 			phydev->advertising = advertising;
3349 
3350 			phy_start_aneg(phydev);
3351 
3352 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3353 			if (phyid != PHY_ID_BCMAC131) {
3354 				phyid &= PHY_BCM_OUI_MASK;
3355 				if (phyid == PHY_BCM_OUI_1 ||
3356 				    phyid == PHY_BCM_OUI_2 ||
3357 				    phyid == PHY_BCM_OUI_3)
3358 					do_low_power = true;
3359 			}
3360 		}
3361 	} else {
3362 		do_low_power = true;
3363 
3364 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3365 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3366 			tp->link_config.orig_speed = tp->link_config.speed;
3367 			tp->link_config.orig_duplex = tp->link_config.duplex;
3368 			tp->link_config.orig_autoneg = tp->link_config.autoneg;
3369 		}
3370 
3371 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3372 			tp->link_config.speed = SPEED_10;
3373 			tp->link_config.duplex = DUPLEX_HALF;
3374 			tp->link_config.autoneg = AUTONEG_ENABLE;
3375 			tg3_setup_phy(tp, 0);
3376 		}
3377 	}
3378 
3379 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3380 		u32 val;
3381 
3382 		val = tr32(GRC_VCPU_EXT_CTRL);
3383 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3384 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3385 		int i;
3386 		u32 val;
3387 
3388 		for (i = 0; i < 200; i++) {
3389 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3390 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3391 				break;
3392 			msleep(1);
3393 		}
3394 	}
3395 	if (tg3_flag(tp, WOL_CAP))
3396 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3397 						     WOL_DRV_STATE_SHUTDOWN |
3398 						     WOL_DRV_WOL |
3399 						     WOL_SET_MAGIC_PKT);
3400 
3401 	if (device_should_wake) {
3402 		u32 mac_mode;
3403 
3404 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3405 			if (do_low_power &&
3406 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3407 				tg3_phy_auxctl_write(tp,
3408 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3409 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3410 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3411 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3412 				udelay(40);
3413 			}
3414 
3415 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3416 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3417 			else
3418 				mac_mode = MAC_MODE_PORT_MODE_MII;
3419 
3420 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3421 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3422 			    ASIC_REV_5700) {
3423 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3424 					     SPEED_100 : SPEED_10;
3425 				if (tg3_5700_link_polarity(tp, speed))
3426 					mac_mode |= MAC_MODE_LINK_POLARITY;
3427 				else
3428 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3429 			}
3430 		} else {
3431 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3432 		}
3433 
3434 		if (!tg3_flag(tp, 5750_PLUS))
3435 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3436 
3437 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3438 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3439 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3440 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3441 
3442 		if (tg3_flag(tp, ENABLE_APE))
3443 			mac_mode |= MAC_MODE_APE_TX_EN |
3444 				    MAC_MODE_APE_RX_EN |
3445 				    MAC_MODE_TDE_ENABLE;
3446 
3447 		tw32_f(MAC_MODE, mac_mode);
3448 		udelay(100);
3449 
3450 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3451 		udelay(10);
3452 	}
3453 
3454 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3455 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3456 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3457 		u32 base_val;
3458 
3459 		base_val = tp->pci_clock_ctrl;
3460 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3461 			     CLOCK_CTRL_TXCLK_DISABLE);
3462 
3463 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3464 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3465 	} else if (tg3_flag(tp, 5780_CLASS) ||
3466 		   tg3_flag(tp, CPMU_PRESENT) ||
3467 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3468 		/* do nothing */
3469 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3470 		u32 newbits1, newbits2;
3471 
3472 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3475 				    CLOCK_CTRL_TXCLK_DISABLE |
3476 				    CLOCK_CTRL_ALTCLK);
3477 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3478 		} else if (tg3_flag(tp, 5705_PLUS)) {
3479 			newbits1 = CLOCK_CTRL_625_CORE;
3480 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3481 		} else {
3482 			newbits1 = CLOCK_CTRL_ALTCLK;
3483 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3484 		}
3485 
3486 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3487 			    40);
3488 
3489 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3490 			    40);
3491 
3492 		if (!tg3_flag(tp, 5705_PLUS)) {
3493 			u32 newbits3;
3494 
3495 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3496 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3497 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3498 					    CLOCK_CTRL_TXCLK_DISABLE |
3499 					    CLOCK_CTRL_44MHZ_CORE);
3500 			} else {
3501 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3502 			}
3503 
3504 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3505 				    tp->pci_clock_ctrl | newbits3, 40);
3506 		}
3507 	}
3508 
3509 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3510 		tg3_power_down_phy(tp, do_low_power);
3511 
3512 	tg3_frob_aux_power(tp, true);
3513 
3514 	/* Workaround for unstable PLL clock */
3515 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3516 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3517 		u32 val = tr32(0x7d00);
3518 
3519 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3520 		tw32(0x7d00, val);
3521 		if (!tg3_flag(tp, ENABLE_ASF)) {
3522 			int err;
3523 
3524 			err = tg3_nvram_lock(tp);
3525 			tg3_halt_cpu(tp, RX_CPU_BASE);
3526 			if (!err)
3527 				tg3_nvram_unlock(tp);
3528 		}
3529 	}
3530 
3531 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3532 
3533 	return 0;
3534 }
3535 
3536 static void tg3_power_down(struct tg3 *tp)
3537 {
3538 	tg3_power_down_prepare(tp);
3539 
3540 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3541 	pci_set_power_state(tp->pdev, PCI_D3hot);
3542 }
3543 
3544 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3545 {
3546 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3547 	case MII_TG3_AUX_STAT_10HALF:
3548 		*speed = SPEED_10;
3549 		*duplex = DUPLEX_HALF;
3550 		break;
3551 
3552 	case MII_TG3_AUX_STAT_10FULL:
3553 		*speed = SPEED_10;
3554 		*duplex = DUPLEX_FULL;
3555 		break;
3556 
3557 	case MII_TG3_AUX_STAT_100HALF:
3558 		*speed = SPEED_100;
3559 		*duplex = DUPLEX_HALF;
3560 		break;
3561 
3562 	case MII_TG3_AUX_STAT_100FULL:
3563 		*speed = SPEED_100;
3564 		*duplex = DUPLEX_FULL;
3565 		break;
3566 
3567 	case MII_TG3_AUX_STAT_1000HALF:
3568 		*speed = SPEED_1000;
3569 		*duplex = DUPLEX_HALF;
3570 		break;
3571 
3572 	case MII_TG3_AUX_STAT_1000FULL:
3573 		*speed = SPEED_1000;
3574 		*duplex = DUPLEX_FULL;
3575 		break;
3576 
3577 	default:
3578 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3579 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3580 				 SPEED_10;
3581 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3582 				  DUPLEX_HALF;
3583 			break;
3584 		}
3585 		*speed = SPEED_INVALID;
3586 		*duplex = DUPLEX_INVALID;
3587 		break;
3588 	}
3589 }
3590 
3591 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3592 {
3593 	int err = 0;
3594 	u32 val, new_adv;
3595 
3596 	new_adv = ADVERTISE_CSMA;
3597 	if (advertise & ADVERTISED_10baseT_Half)
3598 		new_adv |= ADVERTISE_10HALF;
3599 	if (advertise & ADVERTISED_10baseT_Full)
3600 		new_adv |= ADVERTISE_10FULL;
3601 	if (advertise & ADVERTISED_100baseT_Half)
3602 		new_adv |= ADVERTISE_100HALF;
3603 	if (advertise & ADVERTISED_100baseT_Full)
3604 		new_adv |= ADVERTISE_100FULL;
3605 
3606 	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3607 
3608 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3609 	if (err)
3610 		goto done;
3611 
3612 	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3613 		goto done;
3614 
3615 	new_adv = 0;
3616 	if (advertise & ADVERTISED_1000baseT_Half)
3617 		new_adv |= ADVERTISE_1000HALF;
3618 	if (advertise & ADVERTISED_1000baseT_Full)
3619 		new_adv |= ADVERTISE_1000FULL;
3620 
3621 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3622 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3623 		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3624 
3625 	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3626 	if (err)
3627 		goto done;
3628 
3629 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3630 		goto done;
3631 
3632 	tw32(TG3_CPMU_EEE_MODE,
3633 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3634 
3635 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3636 	if (!err) {
3637 		u32 err2;
3638 
3639 		val = 0;
3640 		/* Advertise 100-BaseTX EEE ability */
3641 		if (advertise & ADVERTISED_100baseT_Full)
3642 			val |= MDIO_AN_EEE_ADV_100TX;
3643 		/* Advertise 1000-BaseT EEE ability */
3644 		if (advertise & ADVERTISED_1000baseT_Full)
3645 			val |= MDIO_AN_EEE_ADV_1000T;
3646 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3647 		if (err)
3648 			val = 0;
3649 
3650 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3651 		case ASIC_REV_5717:
3652 		case ASIC_REV_57765:
3653 		case ASIC_REV_5719:
3654 			/* If we advertised any eee advertisements above... */
3655 			if (val)
3656 				val = MII_TG3_DSP_TAP26_ALNOKO |
3657 				      MII_TG3_DSP_TAP26_RMRXSTO |
3658 				      MII_TG3_DSP_TAP26_OPCSINPT;
3659 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3660 			/* Fall through */
3661 		case ASIC_REV_5720:
3662 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3663 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3664 						 MII_TG3_DSP_CH34TP2_HIBW01);
3665 		}
3666 
3667 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3668 		if (!err)
3669 			err = err2;
3670 	}
3671 
3672 done:
3673 	return err;
3674 }
3675 
3676 static void tg3_phy_copper_begin(struct tg3 *tp)
3677 {
3678 	u32 new_adv;
3679 	int i;
3680 
3681 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3682 		new_adv = ADVERTISED_10baseT_Half |
3683 			  ADVERTISED_10baseT_Full;
3684 		if (tg3_flag(tp, WOL_SPEED_100MB))
3685 			new_adv |= ADVERTISED_100baseT_Half |
3686 				   ADVERTISED_100baseT_Full;
3687 
3688 		tg3_phy_autoneg_cfg(tp, new_adv,
3689 				    FLOW_CTRL_TX | FLOW_CTRL_RX);
3690 	} else if (tp->link_config.speed == SPEED_INVALID) {
3691 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3692 			tp->link_config.advertising &=
3693 				~(ADVERTISED_1000baseT_Half |
3694 				  ADVERTISED_1000baseT_Full);
3695 
3696 		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3697 				    tp->link_config.flowctrl);
3698 	} else {
3699 		/* Asking for a specific link mode. */
3700 		if (tp->link_config.speed == SPEED_1000) {
3701 			if (tp->link_config.duplex == DUPLEX_FULL)
3702 				new_adv = ADVERTISED_1000baseT_Full;
3703 			else
3704 				new_adv = ADVERTISED_1000baseT_Half;
3705 		} else if (tp->link_config.speed == SPEED_100) {
3706 			if (tp->link_config.duplex == DUPLEX_FULL)
3707 				new_adv = ADVERTISED_100baseT_Full;
3708 			else
3709 				new_adv = ADVERTISED_100baseT_Half;
3710 		} else {
3711 			if (tp->link_config.duplex == DUPLEX_FULL)
3712 				new_adv = ADVERTISED_10baseT_Full;
3713 			else
3714 				new_adv = ADVERTISED_10baseT_Half;
3715 		}
3716 
3717 		tg3_phy_autoneg_cfg(tp, new_adv,
3718 				    tp->link_config.flowctrl);
3719 	}
3720 
3721 	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3722 	    tp->link_config.speed != SPEED_INVALID) {
3723 		u32 bmcr, orig_bmcr;
3724 
3725 		tp->link_config.active_speed = tp->link_config.speed;
3726 		tp->link_config.active_duplex = tp->link_config.duplex;
3727 
3728 		bmcr = 0;
3729 		switch (tp->link_config.speed) {
3730 		default:
3731 		case SPEED_10:
3732 			break;
3733 
3734 		case SPEED_100:
3735 			bmcr |= BMCR_SPEED100;
3736 			break;
3737 
3738 		case SPEED_1000:
3739 			bmcr |= BMCR_SPEED1000;
3740 			break;
3741 		}
3742 
3743 		if (tp->link_config.duplex == DUPLEX_FULL)
3744 			bmcr |= BMCR_FULLDPLX;
3745 
3746 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3747 		    (bmcr != orig_bmcr)) {
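			/* Drop the link by placing the PHY in loopback,
			 * then wait up to ~15 msec for link-down before
			 * programming the forced mode.
			 */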
3748 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3749 			for (i = 0; i < 1500; i++) {
3750 				u32 tmp;
3751 
3752 				udelay(10);
3753 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3754 				    tg3_readphy(tp, MII_BMSR, &tmp))
3755 					continue;
3756 				if (!(tmp & BMSR_LSTATUS)) {
3757 					udelay(40);
3758 					break;
3759 				}
3760 			}
3761 			tg3_writephy(tp, MII_BMCR, bmcr);
3762 			udelay(40);
3763 		}
3764 	} else {
3765 		tg3_writephy(tp, MII_BMCR,
3766 			     BMCR_ANENABLE | BMCR_ANRESTART);
3767 	}
3768 }
3769 
3770 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3771 {
3772 	int err;
3773 
3774 	/* Turn off tap power management and set the
3775 	 * extended packet length bit. */
3776 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3777 
3778 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3779 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3780 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3781 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3782 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3783 
3784 	udelay(40);
3785 
3786 	return err;
3787 }
3788 
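/* Return 1 if the PHY advertisement registers match every link mode
 * requested in @mask, 0 on a mismatch or register read failure.
 */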
3789 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3790 {
3791 	u32 adv_reg, all_mask = 0;
3792 
3793 	if (mask & ADVERTISED_10baseT_Half)
3794 		all_mask |= ADVERTISE_10HALF;
3795 	if (mask & ADVERTISED_10baseT_Full)
3796 		all_mask |= ADVERTISE_10FULL;
3797 	if (mask & ADVERTISED_100baseT_Half)
3798 		all_mask |= ADVERTISE_100HALF;
3799 	if (mask & ADVERTISED_100baseT_Full)
3800 		all_mask |= ADVERTISE_100FULL;
3801 
3802 	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3803 		return 0;
3804 
3805 	if ((adv_reg & ADVERTISE_ALL) != all_mask)
3806 		return 0;
3807 
3808 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3809 		u32 tg3_ctrl;
3810 
3811 		all_mask = 0;
3812 		if (mask & ADVERTISED_1000baseT_Half)
3813 			all_mask |= ADVERTISE_1000HALF;
3814 		if (mask & ADVERTISED_1000baseT_Full)
3815 			all_mask |= ADVERTISE_1000FULL;
3816 
3817 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3818 			return 0;
3819 
3820 		tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3821 		if (tg3_ctrl != all_mask)
3822 			return 0;
3823 	}
3824 
3825 	return 1;
3826 }
3827 
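/* Check that the advertised pause bits match the requested flow
 * control.  Returns 0 when a full-duplex link needs to be
 * renegotiated because of a pause mismatch, 1 otherwise.
 */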
3828 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3829 {
3830 	u32 curadv, reqadv;
3831 
3832 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3833 		return 1;
3834 
3835 	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3836 	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3837 
3838 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
3839 		if (curadv != reqadv)
3840 			return 0;
3841 
3842 		if (tg3_flag(tp, PAUSE_AUTONEG))
3843 			tg3_readphy(tp, MII_LPA, rmtadv);
3844 	} else {
3845 		/* Reprogram the advertisement register, even if it
3846 		 * does not affect the current link.  If the link
3847 		 * gets renegotiated in the future, we can save an
3848 		 * additional renegotiation cycle by advertising
3849 		 * it correctly in the first place.
3850 		 */
3851 		if (curadv != reqadv) {
3852 			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
3853 				     ADVERTISE_PAUSE_ASYM);
3854 			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3855 		}
3856 	}
3857 
3858 	return 1;
3859 }
3860 
3861 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3862 {
3863 	int current_link_up;
3864 	u32 bmsr, val;
3865 	u32 lcl_adv, rmt_adv;
3866 	u16 current_speed;
3867 	u8 current_duplex;
3868 	int i, err;
3869 
3870 	tw32(MAC_EVENT, 0);
3871 
3872 	tw32_f(MAC_STATUS,
3873 	     (MAC_STATUS_SYNC_CHANGED |
3874 	      MAC_STATUS_CFG_CHANGED |
3875 	      MAC_STATUS_MI_COMPLETION |
3876 	      MAC_STATUS_LNKSTATE_CHANGED));
3877 	udelay(40);
3878 
3879 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3880 		tw32_f(MAC_MI_MODE,
3881 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3882 		udelay(80);
3883 	}
3884 
3885 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3886 
3887 	/* Some third-party PHYs need to be reset on link going
3888 	 * down.
3889 	 */
3890 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3891 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3892 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3893 	    netif_carrier_ok(tp->dev)) {
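		/* BMSR link status is latched low; read it twice so the
		 * second read reflects the current link state.
		 */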
3894 		tg3_readphy(tp, MII_BMSR, &bmsr);
3895 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3896 		    !(bmsr & BMSR_LSTATUS))
3897 			force_reset = 1;
3898 	}
3899 	if (force_reset)
3900 		tg3_phy_reset(tp);
3901 
3902 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3903 		tg3_readphy(tp, MII_BMSR, &bmsr);
3904 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3905 		    !tg3_flag(tp, INIT_COMPLETE))
3906 			bmsr = 0;
3907 
3908 		if (!(bmsr & BMSR_LSTATUS)) {
3909 			err = tg3_init_5401phy_dsp(tp);
3910 			if (err)
3911 				return err;
3912 
3913 			tg3_readphy(tp, MII_BMSR, &bmsr);
3914 			for (i = 0; i < 1000; i++) {
3915 				udelay(10);
3916 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3917 				    (bmsr & BMSR_LSTATUS)) {
3918 					udelay(40);
3919 					break;
3920 				}
3921 			}
3922 
3923 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3924 			    TG3_PHY_REV_BCM5401_B0 &&
3925 			    !(bmsr & BMSR_LSTATUS) &&
3926 			    tp->link_config.active_speed == SPEED_1000) {
3927 				err = tg3_phy_reset(tp);
3928 				if (!err)
3929 					err = tg3_init_5401phy_dsp(tp);
3930 				if (err)
3931 					return err;
3932 			}
3933 		}
3934 	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3935 		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3936 		/* 5701 {A0,B0} CRC bug workaround */
3937 		tg3_writephy(tp, 0x15, 0x0a75);
3938 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3939 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3940 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3941 	}
3942 
3943 	/* Clear pending interrupts... */
3944 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
3945 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
3946 
3947 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3948 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3949 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3950 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
3951 
3952 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3953 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3954 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3955 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3956 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3957 		else
3958 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3959 	}
3960 
3961 	current_link_up = 0;
3962 	current_speed = SPEED_INVALID;
3963 	current_duplex = DUPLEX_INVALID;
3964 
3965 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3966 		err = tg3_phy_auxctl_read(tp,
3967 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3968 					  &val);
3969 		if (!err && !(val & (1 << 10))) {
3970 			tg3_phy_auxctl_write(tp,
3971 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3972 					     val | (1 << 10));
3973 			goto relink;
3974 		}
3975 	}
3976 
3977 	bmsr = 0;
3978 	for (i = 0; i < 100; i++) {
3979 		tg3_readphy(tp, MII_BMSR, &bmsr);
3980 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3981 		    (bmsr & BMSR_LSTATUS))
3982 			break;
3983 		udelay(40);
3984 	}
3985 
3986 	if (bmsr & BMSR_LSTATUS) {
3987 		u32 aux_stat, bmcr;
3988 
3989 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3990 		for (i = 0; i < 2000; i++) {
3991 			udelay(10);
3992 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3993 			    aux_stat)
3994 				break;
3995 		}
3996 
3997 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3998 					     &current_speed,
3999 					     &current_duplex);
4000 
4001 		bmcr = 0;
4002 		for (i = 0; i < 200; i++) {
4003 			tg3_readphy(tp, MII_BMCR, &bmcr);
4004 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4005 				continue;
4006 			if (bmcr && bmcr != 0x7fff)
4007 				break;
4008 			udelay(10);
4009 		}
4010 
4011 		lcl_adv = 0;
4012 		rmt_adv = 0;
4013 
4014 		tp->link_config.active_speed = current_speed;
4015 		tp->link_config.active_duplex = current_duplex;
4016 
4017 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4018 			if ((bmcr & BMCR_ANENABLE) &&
4019 			    tg3_copper_is_advertising_all(tp,
4020 						tp->link_config.advertising)) {
4021 				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4022 							      &rmt_adv))
4023 					current_link_up = 1;
4024 			}
4025 		} else {
4026 			if (!(bmcr & BMCR_ANENABLE) &&
4027 			    tp->link_config.speed == current_speed &&
4028 			    tp->link_config.duplex == current_duplex &&
4029 			    tp->link_config.flowctrl ==
4030 			    tp->link_config.active_flowctrl) {
4031 				current_link_up = 1;
4032 			}
4033 		}
4034 
4035 		if (current_link_up == 1 &&
4036 		    tp->link_config.active_duplex == DUPLEX_FULL)
4037 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4038 	}
4039 
4040 relink:
4041 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4042 		tg3_phy_copper_begin(tp);
4043 
4044 		tg3_readphy(tp, MII_BMSR, &bmsr);
4045 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4046 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4047 			current_link_up = 1;
4048 	}
4049 
4050 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4051 	if (current_link_up == 1) {
4052 		if (tp->link_config.active_speed == SPEED_100 ||
4053 		    tp->link_config.active_speed == SPEED_10)
4054 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4055 		else
4056 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4057 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4058 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4059 	else
4060 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4061 
4062 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4063 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4064 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4065 
4066 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4067 		if (current_link_up == 1 &&
4068 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4069 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4070 		else
4071 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4072 	}
4073 
4074 	/* ??? Without this setting Netgear GA302T PHY does not
4075 	 * ??? send/receive packets...
4076 	 */
4077 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4078 	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4079 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4080 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4081 		udelay(80);
4082 	}
4083 
4084 	tw32_f(MAC_MODE, tp->mac_mode);
4085 	udelay(40);
4086 
4087 	tg3_phy_eee_adjust(tp, current_link_up);
4088 
4089 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4090 		/* Polled via timer. */
4091 		tw32_f(MAC_EVENT, 0);
4092 	} else {
4093 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4094 	}
4095 	udelay(40);
4096 
4097 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4098 	    current_link_up == 1 &&
4099 	    tp->link_config.active_speed == SPEED_1000 &&
4100 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4101 		udelay(120);
4102 		tw32_f(MAC_STATUS,
4103 		     (MAC_STATUS_SYNC_CHANGED |
4104 		      MAC_STATUS_CFG_CHANGED));
4105 		udelay(40);
4106 		tg3_write_mem(tp,
4107 			      NIC_SRAM_FIRMWARE_MBOX,
4108 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4109 	}
4110 
4111 	/* Prevent send BD corruption: keep PCIe CLKREQ disabled at 10/100. */
4112 	if (tg3_flag(tp, CLKREQ_BUG)) {
4113 		u16 oldlnkctl, newlnkctl;
4114 
4115 		pci_read_config_word(tp->pdev,
4116 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4117 				     &oldlnkctl);
4118 		if (tp->link_config.active_speed == SPEED_100 ||
4119 		    tp->link_config.active_speed == SPEED_10)
4120 			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4121 		else
4122 			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4123 		if (newlnkctl != oldlnkctl)
4124 			pci_write_config_word(tp->pdev,
4125 					      pci_pcie_cap(tp->pdev) +
4126 					      PCI_EXP_LNKCTL, newlnkctl);
4127 	}
4128 
4129 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4130 		if (current_link_up)
4131 			netif_carrier_on(tp->dev);
4132 		else
4133 			netif_carrier_off(tp->dev);
4134 		tg3_link_report(tp);
4135 	}
4136 
4137 	return 0;
4138 }
4139 
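/* Software implementation of the 1000BASE-X (IEEE 802.3 clause 37)
 * auto-negotiation state machine, used when the hardware cannot
 * negotiate the fiber link itself.
 */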
4140 struct tg3_fiber_aneginfo {
4141 	int state;
4142 #define ANEG_STATE_UNKNOWN		0
4143 #define ANEG_STATE_AN_ENABLE		1
4144 #define ANEG_STATE_RESTART_INIT		2
4145 #define ANEG_STATE_RESTART		3
4146 #define ANEG_STATE_DISABLE_LINK_OK	4
4147 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4148 #define ANEG_STATE_ABILITY_DETECT	6
4149 #define ANEG_STATE_ACK_DETECT_INIT	7
4150 #define ANEG_STATE_ACK_DETECT		8
4151 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4152 #define ANEG_STATE_COMPLETE_ACK		10
4153 #define ANEG_STATE_IDLE_DETECT_INIT	11
4154 #define ANEG_STATE_IDLE_DETECT		12
4155 #define ANEG_STATE_LINK_OK		13
4156 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4157 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4158 
4159 	u32 flags;
4160 #define MR_AN_ENABLE		0x00000001
4161 #define MR_RESTART_AN		0x00000002
4162 #define MR_AN_COMPLETE		0x00000004
4163 #define MR_PAGE_RX		0x00000008
4164 #define MR_NP_LOADED		0x00000010
4165 #define MR_TOGGLE_TX		0x00000020
4166 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4167 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4168 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4169 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4170 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4171 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4172 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4173 #define MR_TOGGLE_RX		0x00002000
4174 #define MR_NP_RX		0x00004000
4175 
4176 #define MR_LINK_OK		0x80000000
4177 
4178 	unsigned long link_time, cur_time;
4179 
4180 	u32 ability_match_cfg;
4181 	int ability_match_count;
4182 
4183 	char ability_match, idle_match, ack_match;
4184 
4185 	u32 txconfig, rxconfig;
4186 #define ANEG_CFG_NP		0x00000080
4187 #define ANEG_CFG_ACK		0x00000040
4188 #define ANEG_CFG_RF2		0x00000020
4189 #define ANEG_CFG_RF1		0x00000010
4190 #define ANEG_CFG_PS2		0x00000001
4191 #define ANEG_CFG_PS1		0x00008000
4192 #define ANEG_CFG_HD		0x00004000
4193 #define ANEG_CFG_FD		0x00002000
4194 #define ANEG_CFG_INVAL		0x00001f06
4195 
4196 };
4197 #define ANEG_OK		0
4198 #define ANEG_DONE	1
4199 #define ANEG_TIMER_ENAB	2
4200 #define ANEG_FAILED	-1
4201 
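/* State-machine ticks (roughly 1 usec each, see fiber_autoneg) that
 * the link must remain stable before the state machine advances.
 */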
4202 #define ANEG_STATE_SETTLE_TIME	10000
4203 
4204 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4205 				   struct tg3_fiber_aneginfo *ap)
4206 {
4207 	u16 flowctrl;
4208 	unsigned long delta;
4209 	u32 rx_cfg_reg;
4210 	int ret;
4211 
4212 	if (ap->state == ANEG_STATE_UNKNOWN) {
4213 		ap->rxconfig = 0;
4214 		ap->link_time = 0;
4215 		ap->cur_time = 0;
4216 		ap->ability_match_cfg = 0;
4217 		ap->ability_match_count = 0;
4218 		ap->ability_match = 0;
4219 		ap->idle_match = 0;
4220 		ap->ack_match = 0;
4221 	}
4222 	ap->cur_time++;
4223 
4224 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4225 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4226 
4227 		if (rx_cfg_reg != ap->ability_match_cfg) {
4228 			ap->ability_match_cfg = rx_cfg_reg;
4229 			ap->ability_match = 0;
4230 			ap->ability_match_count = 0;
4231 		} else {
4232 			if (++ap->ability_match_count > 1) {
4233 				ap->ability_match = 1;
4234 				ap->ability_match_cfg = rx_cfg_reg;
4235 			}
4236 		}
4237 		if (rx_cfg_reg & ANEG_CFG_ACK)
4238 			ap->ack_match = 1;
4239 		else
4240 			ap->ack_match = 0;
4241 
4242 		ap->idle_match = 0;
4243 	} else {
4244 		ap->idle_match = 1;
4245 		ap->ability_match_cfg = 0;
4246 		ap->ability_match_count = 0;
4247 		ap->ability_match = 0;
4248 		ap->ack_match = 0;
4249 
4250 		rx_cfg_reg = 0;
4251 	}
4252 
4253 	ap->rxconfig = rx_cfg_reg;
4254 	ret = ANEG_OK;
4255 
4256 	switch (ap->state) {
4257 	case ANEG_STATE_UNKNOWN:
4258 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4259 			ap->state = ANEG_STATE_AN_ENABLE;
4260 
4261 		/* fallthru */
4262 	case ANEG_STATE_AN_ENABLE:
4263 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4264 		if (ap->flags & MR_AN_ENABLE) {
4265 			ap->link_time = 0;
4266 			ap->cur_time = 0;
4267 			ap->ability_match_cfg = 0;
4268 			ap->ability_match_count = 0;
4269 			ap->ability_match = 0;
4270 			ap->idle_match = 0;
4271 			ap->ack_match = 0;
4272 
4273 			ap->state = ANEG_STATE_RESTART_INIT;
4274 		} else {
4275 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4276 		}
4277 		break;
4278 
4279 	case ANEG_STATE_RESTART_INIT:
4280 		ap->link_time = ap->cur_time;
4281 		ap->flags &= ~(MR_NP_LOADED);
4282 		ap->txconfig = 0;
4283 		tw32(MAC_TX_AUTO_NEG, 0);
4284 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4285 		tw32_f(MAC_MODE, tp->mac_mode);
4286 		udelay(40);
4287 
4288 		ret = ANEG_TIMER_ENAB;
4289 		ap->state = ANEG_STATE_RESTART;
4290 
4291 		/* fallthru */
4292 	case ANEG_STATE_RESTART:
4293 		delta = ap->cur_time - ap->link_time;
4294 		if (delta > ANEG_STATE_SETTLE_TIME)
4295 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4296 		else
4297 			ret = ANEG_TIMER_ENAB;
4298 		break;
4299 
4300 	case ANEG_STATE_DISABLE_LINK_OK:
4301 		ret = ANEG_DONE;
4302 		break;
4303 
4304 	case ANEG_STATE_ABILITY_DETECT_INIT:
4305 		ap->flags &= ~(MR_TOGGLE_TX);
4306 		ap->txconfig = ANEG_CFG_FD;
4307 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4308 		if (flowctrl & ADVERTISE_1000XPAUSE)
4309 			ap->txconfig |= ANEG_CFG_PS1;
4310 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4311 			ap->txconfig |= ANEG_CFG_PS2;
4312 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4313 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4314 		tw32_f(MAC_MODE, tp->mac_mode);
4315 		udelay(40);
4316 
4317 		ap->state = ANEG_STATE_ABILITY_DETECT;
4318 		break;
4319 
4320 	case ANEG_STATE_ABILITY_DETECT:
4321 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4322 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4323 		break;
4324 
4325 	case ANEG_STATE_ACK_DETECT_INIT:
4326 		ap->txconfig |= ANEG_CFG_ACK;
4327 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4328 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4329 		tw32_f(MAC_MODE, tp->mac_mode);
4330 		udelay(40);
4331 
4332 		ap->state = ANEG_STATE_ACK_DETECT;
4333 
4334 		/* fallthru */
4335 	case ANEG_STATE_ACK_DETECT:
4336 		if (ap->ack_match != 0) {
4337 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4338 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4339 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4340 			} else {
4341 				ap->state = ANEG_STATE_AN_ENABLE;
4342 			}
4343 		} else if (ap->ability_match != 0 &&
4344 			   ap->rxconfig == 0) {
4345 			ap->state = ANEG_STATE_AN_ENABLE;
4346 		}
4347 		break;
4348 
4349 	case ANEG_STATE_COMPLETE_ACK_INIT:
4350 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4351 			ret = ANEG_FAILED;
4352 			break;
4353 		}
4354 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4355 			       MR_LP_ADV_HALF_DUPLEX |
4356 			       MR_LP_ADV_SYM_PAUSE |
4357 			       MR_LP_ADV_ASYM_PAUSE |
4358 			       MR_LP_ADV_REMOTE_FAULT1 |
4359 			       MR_LP_ADV_REMOTE_FAULT2 |
4360 			       MR_LP_ADV_NEXT_PAGE |
4361 			       MR_TOGGLE_RX |
4362 			       MR_NP_RX);
4363 		if (ap->rxconfig & ANEG_CFG_FD)
4364 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4365 		if (ap->rxconfig & ANEG_CFG_HD)
4366 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4367 		if (ap->rxconfig & ANEG_CFG_PS1)
4368 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4369 		if (ap->rxconfig & ANEG_CFG_PS2)
4370 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4371 		if (ap->rxconfig & ANEG_CFG_RF1)
4372 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4373 		if (ap->rxconfig & ANEG_CFG_RF2)
4374 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4375 		if (ap->rxconfig & ANEG_CFG_NP)
4376 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4377 
4378 		ap->link_time = ap->cur_time;
4379 
4380 		ap->flags ^= (MR_TOGGLE_TX);
4381 		if (ap->rxconfig & 0x0008)
4382 			ap->flags |= MR_TOGGLE_RX;
4383 		if (ap->rxconfig & ANEG_CFG_NP)
4384 			ap->flags |= MR_NP_RX;
4385 		ap->flags |= MR_PAGE_RX;
4386 
4387 		ap->state = ANEG_STATE_COMPLETE_ACK;
4388 		ret = ANEG_TIMER_ENAB;
4389 		break;
4390 
4391 	case ANEG_STATE_COMPLETE_ACK:
4392 		if (ap->ability_match != 0 &&
4393 		    ap->rxconfig == 0) {
4394 			ap->state = ANEG_STATE_AN_ENABLE;
4395 			break;
4396 		}
4397 		delta = ap->cur_time - ap->link_time;
4398 		if (delta > ANEG_STATE_SETTLE_TIME) {
4399 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4400 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4401 			} else {
4402 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4403 				    !(ap->flags & MR_NP_RX)) {
4404 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4405 				} else {
4406 					ret = ANEG_FAILED;
4407 				}
4408 			}
4409 		}
4410 		break;
4411 
4412 	case ANEG_STATE_IDLE_DETECT_INIT:
4413 		ap->link_time = ap->cur_time;
4414 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4415 		tw32_f(MAC_MODE, tp->mac_mode);
4416 		udelay(40);
4417 
4418 		ap->state = ANEG_STATE_IDLE_DETECT;
4419 		ret = ANEG_TIMER_ENAB;
4420 		break;
4421 
4422 	case ANEG_STATE_IDLE_DETECT:
4423 		if (ap->ability_match != 0 &&
4424 		    ap->rxconfig == 0) {
4425 			ap->state = ANEG_STATE_AN_ENABLE;
4426 			break;
4427 		}
4428 		delta = ap->cur_time - ap->link_time;
4429 		if (delta > ANEG_STATE_SETTLE_TIME) {
4430 			/* XXX another gem from the Broadcom driver :( */
4431 			ap->state = ANEG_STATE_LINK_OK;
4432 		}
4433 		break;
4434 
4435 	case ANEG_STATE_LINK_OK:
4436 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4437 		ret = ANEG_DONE;
4438 		break;
4439 
4440 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4441 		/* ??? unimplemented */
4442 		break;
4443 
4444 	case ANEG_STATE_NEXT_PAGE_WAIT:
4445 		/* ??? unimplemented */
4446 		break;
4447 
4448 	default:
4449 		ret = ANEG_FAILED;
4450 		break;
4451 	}
4452 
4453 	return ret;
4454 }
4455 
4456 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4457 {
4458 	int res = 0;
4459 	struct tg3_fiber_aneginfo aninfo;
4460 	int status = ANEG_FAILED;
4461 	unsigned int tick;
4462 	u32 tmp;
4463 
4464 	tw32_f(MAC_TX_AUTO_NEG, 0);
4465 
4466 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4467 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4468 	udelay(40);
4469 
4470 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4471 	udelay(40);
4472 
4473 	memset(&aninfo, 0, sizeof(aninfo));
4474 	aninfo.flags |= MR_AN_ENABLE;
4475 	aninfo.state = ANEG_STATE_UNKNOWN;
4476 	aninfo.cur_time = 0;
4477 	tick = 0;
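	/* Crank the state machine in ~1 usec steps, giving up after
	 * roughly 195 msec without a resolution.
	 */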
4478 	while (++tick < 195000) {
4479 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
4480 		if (status == ANEG_DONE || status == ANEG_FAILED)
4481 			break;
4482 
4483 		udelay(1);
4484 	}
4485 
4486 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4487 	tw32_f(MAC_MODE, tp->mac_mode);
4488 	udelay(40);
4489 
4490 	*txflags = aninfo.txconfig;
4491 	*rxflags = aninfo.flags;
4492 
4493 	if (status == ANEG_DONE &&
4494 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4495 			     MR_LP_ADV_FULL_DUPLEX)))
4496 		res = 1;
4497 
4498 	return res;
4499 }
4500 
4501 static void tg3_init_bcm8002(struct tg3 *tp)
4502 {
4503 	u32 mac_status = tr32(MAC_STATUS);
4504 	int i;
4505 
4506 	/* Reset when initializing for the first time or when we have a link. */
4507 	if (tg3_flag(tp, INIT_COMPLETE) &&
4508 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
4509 		return;
4510 
4511 	/* Set PLL lock range. */
4512 	tg3_writephy(tp, 0x16, 0x8007);
4513 
4514 	/* SW reset */
4515 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4516 
4517 	/* Wait for reset to complete. */
4518 	/* XXX schedule_timeout() ... */
4519 	for (i = 0; i < 500; i++)
4520 		udelay(10);
4521 
4522 	/* Config mode; select PMA/Ch 1 regs. */
4523 	tg3_writephy(tp, 0x10, 0x8411);
4524 
4525 	/* Enable auto-lock and comdet, select txclk for tx. */
4526 	tg3_writephy(tp, 0x11, 0x0a10);
4527 
4528 	tg3_writephy(tp, 0x18, 0x00a0);
4529 	tg3_writephy(tp, 0x16, 0x41ff);
4530 
4531 	/* Assert and deassert POR. */
4532 	tg3_writephy(tp, 0x13, 0x0400);
4533 	udelay(40);
4534 	tg3_writephy(tp, 0x13, 0x0000);
4535 
4536 	tg3_writephy(tp, 0x11, 0x0a50);
4537 	udelay(40);
4538 	tg3_writephy(tp, 0x11, 0x0a10);
4539 
4540 	/* Wait for signal to stabilize */
4541 	/* XXX schedule_timeout() ... */
4542 	for (i = 0; i < 15000; i++)
4543 		udelay(10);
4544 
4545 	/* Deselect the channel register so we can read the PHYID
4546 	 * later.
4547 	 */
4548 	tg3_writephy(tp, 0x10, 0x8011);
4549 }
4550 
4551 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
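/* Fiber link setup via the SG-DIG hardware autoneg block.  Returns
 * nonzero when the link is up.
 */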
4552 {
4553 	u16 flowctrl;
4554 	u32 sg_dig_ctrl, sg_dig_status;
4555 	u32 serdes_cfg, expected_sg_dig_ctrl;
4556 	int workaround, port_a;
4557 	int current_link_up;
4558 
4559 	serdes_cfg = 0;
4560 	expected_sg_dig_ctrl = 0;
4561 	workaround = 0;
4562 	port_a = 1;
4563 	current_link_up = 0;
4564 
4565 	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4566 	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4567 		workaround = 1;
4568 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4569 			port_a = 0;
4570 
4571 		/* Preserve bits 0-11,13,14 for signal pre-emphasis
4572 		 * and bits 20-23 for the voltage regulator. */
4573 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4574 	}
4575 
4576 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
4577 
4578 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4579 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4580 			if (workaround) {
4581 				u32 val = serdes_cfg;
4582 
4583 				if (port_a)
4584 					val |= 0xc010000;
4585 				else
4586 					val |= 0x4010000;
4587 				tw32_f(MAC_SERDES_CFG, val);
4588 			}
4589 
4590 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4591 		}
4592 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
4593 			tg3_setup_flow_control(tp, 0, 0);
4594 			current_link_up = 1;
4595 		}
4596 		goto out;
4597 	}
4598 
4599 	/* Want auto-negotiation.  */
4600 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4601 
4602 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4603 	if (flowctrl & ADVERTISE_1000XPAUSE)
4604 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4605 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4606 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4607 
4608 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4609 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4610 		    tp->serdes_counter &&
4611 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
4612 				    MAC_STATUS_RCVD_CFG)) ==
4613 		     MAC_STATUS_PCS_SYNCED)) {
4614 			tp->serdes_counter--;
4615 			current_link_up = 1;
4616 			goto out;
4617 		}
4618 restart_autoneg:
4619 		if (workaround)
4620 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4621 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4622 		udelay(5);
4623 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4624 
4625 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4626 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4627 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4628 				 MAC_STATUS_SIGNAL_DET)) {
4629 		sg_dig_status = tr32(SG_DIG_STATUS);
4630 		mac_status = tr32(MAC_STATUS);
4631 
4632 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4633 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
4634 			u32 local_adv = 0, remote_adv = 0;
4635 
4636 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4637 				local_adv |= ADVERTISE_1000XPAUSE;
4638 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4639 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4640 
4641 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4642 				remote_adv |= LPA_1000XPAUSE;
4643 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4644 				remote_adv |= LPA_1000XPAUSE_ASYM;
4645 
4646 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4647 			current_link_up = 1;
4648 			tp->serdes_counter = 0;
4649 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4650 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4651 			if (tp->serdes_counter)
4652 				tp->serdes_counter--;
4653 			else {
4654 				if (workaround) {
4655 					u32 val = serdes_cfg;
4656 
4657 					if (port_a)
4658 						val |= 0xc010000;
4659 					else
4660 						val |= 0x4010000;
4661 
4662 					tw32_f(MAC_SERDES_CFG, val);
4663 				}
4664 
4665 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4666 				udelay(40);
4667 
4668 				/* Link parallel detection - link is up
4669 				 * only if we have PCS_SYNC and are not
4670 				 * receiving config code words. */
4671 				mac_status = tr32(MAC_STATUS);
4672 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4673 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
4674 					tg3_setup_flow_control(tp, 0, 0);
4675 					current_link_up = 1;
4676 					tp->phy_flags |=
4677 						TG3_PHYFLG_PARALLEL_DETECT;
4678 					tp->serdes_counter =
4679 						SERDES_PARALLEL_DET_TIMEOUT;
4680 				} else
4681 					goto restart_autoneg;
4682 			}
4683 		}
4684 	} else {
4685 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4686 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4687 	}
4688 
4689 out:
4690 	return current_link_up;
4691 }
4692 
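/* Fiber link setup using the software autoneg state machine, or a
 * forced 1000FD link when autoneg is disabled.  Returns nonzero when
 * the link is up.
 */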
4693 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4694 {
4695 	int current_link_up = 0;
4696 
4697 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4698 		goto out;
4699 
4700 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4701 		u32 txflags, rxflags;
4702 		int i;
4703 
4704 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
4705 			u32 local_adv = 0, remote_adv = 0;
4706 
4707 			if (txflags & ANEG_CFG_PS1)
4708 				local_adv |= ADVERTISE_1000XPAUSE;
4709 			if (txflags & ANEG_CFG_PS2)
4710 				local_adv |= ADVERTISE_1000XPSE_ASYM;
4711 
4712 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
4713 				remote_adv |= LPA_1000XPAUSE;
4714 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4715 				remote_adv |= LPA_1000XPAUSE_ASYM;
4716 
4717 			tg3_setup_flow_control(tp, local_adv, remote_adv);
4718 
4719 			current_link_up = 1;
4720 		}
4721 		for (i = 0; i < 30; i++) {
4722 			udelay(20);
4723 			tw32_f(MAC_STATUS,
4724 			       (MAC_STATUS_SYNC_CHANGED |
4725 				MAC_STATUS_CFG_CHANGED));
4726 			udelay(40);
4727 			if ((tr32(MAC_STATUS) &
4728 			     (MAC_STATUS_SYNC_CHANGED |
4729 			      MAC_STATUS_CFG_CHANGED)) == 0)
4730 				break;
4731 		}
4732 
4733 		mac_status = tr32(MAC_STATUS);
4734 		if (current_link_up == 0 &&
4735 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
4736 		    !(mac_status & MAC_STATUS_RCVD_CFG))
4737 			current_link_up = 1;
4738 	} else {
4739 		tg3_setup_flow_control(tp, 0, 0);
4740 
4741 		/* Forcing 1000FD link up. */
4742 		current_link_up = 1;
4743 
4744 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4745 		udelay(40);
4746 
4747 		tw32_f(MAC_MODE, tp->mac_mode);
4748 		udelay(40);
4749 	}
4750 
4751 out:
4752 	return current_link_up;
4753 }
4754 
4755 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4756 {
4757 	u32 orig_pause_cfg;
4758 	u16 orig_active_speed;
4759 	u8 orig_active_duplex;
4760 	u32 mac_status;
4761 	int current_link_up;
4762 	int i;
4763 
4764 	orig_pause_cfg = tp->link_config.active_flowctrl;
4765 	orig_active_speed = tp->link_config.active_speed;
4766 	orig_active_duplex = tp->link_config.active_duplex;
4767 
4768 	if (!tg3_flag(tp, HW_AUTONEG) &&
4769 	    netif_carrier_ok(tp->dev) &&
4770 	    tg3_flag(tp, INIT_COMPLETE)) {
4771 		mac_status = tr32(MAC_STATUS);
4772 		mac_status &= (MAC_STATUS_PCS_SYNCED |
4773 			       MAC_STATUS_SIGNAL_DET |
4774 			       MAC_STATUS_CFG_CHANGED |
4775 			       MAC_STATUS_RCVD_CFG);
4776 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
4777 				   MAC_STATUS_SIGNAL_DET)) {
4778 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4779 					    MAC_STATUS_CFG_CHANGED));
4780 			return 0;
4781 		}
4782 	}
4783 
4784 	tw32_f(MAC_TX_AUTO_NEG, 0);
4785 
4786 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4787 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4788 	tw32_f(MAC_MODE, tp->mac_mode);
4789 	udelay(40);
4790 
4791 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
4792 		tg3_init_bcm8002(tp);
4793 
4794 	/* Enable link change events even when polling the serdes.  */
4795 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4796 	udelay(40);
4797 
4798 	current_link_up = 0;
4799 	mac_status = tr32(MAC_STATUS);
4800 
4801 	if (tg3_flag(tp, HW_AUTONEG))
4802 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4803 	else
4804 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4805 
4806 	tp->napi[0].hw_status->status =
4807 		(SD_STATUS_UPDATED |
4808 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4809 
4810 	for (i = 0; i < 100; i++) {
4811 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4812 				    MAC_STATUS_CFG_CHANGED));
4813 		udelay(5);
4814 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4815 					 MAC_STATUS_CFG_CHANGED |
4816 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4817 			break;
4818 	}
4819 
4820 	mac_status = tr32(MAC_STATUS);
4821 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4822 		current_link_up = 0;
4823 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4824 		    tp->serdes_counter == 0) {
4825 			tw32_f(MAC_MODE, (tp->mac_mode |
4826 					  MAC_MODE_SEND_CONFIGS));
4827 			udelay(1);
4828 			tw32_f(MAC_MODE, tp->mac_mode);
4829 		}
4830 	}
4831 
4832 	if (current_link_up == 1) {
4833 		tp->link_config.active_speed = SPEED_1000;
4834 		tp->link_config.active_duplex = DUPLEX_FULL;
4835 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4836 				    LED_CTRL_LNKLED_OVERRIDE |
4837 				    LED_CTRL_1000MBPS_ON));
4838 	} else {
4839 		tp->link_config.active_speed = SPEED_INVALID;
4840 		tp->link_config.active_duplex = DUPLEX_INVALID;
4841 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4842 				    LED_CTRL_LNKLED_OVERRIDE |
4843 				    LED_CTRL_TRAFFIC_OVERRIDE));
4844 	}
4845 
4846 	if (current_link_up != netif_carrier_ok(tp->dev)) {
4847 		if (current_link_up)
4848 			netif_carrier_on(tp->dev);
4849 		else
4850 			netif_carrier_off(tp->dev);
4851 		tg3_link_report(tp);
4852 	} else {
4853 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
4854 		if (orig_pause_cfg != now_pause_cfg ||
4855 		    orig_active_speed != tp->link_config.active_speed ||
4856 		    orig_active_duplex != tp->link_config.active_duplex)
4857 			tg3_link_report(tp);
4858 	}
4859 
4860 	return 0;
4861 }
4862 
4863 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4864 {
4865 	int current_link_up, err = 0;
4866 	u32 bmsr, bmcr;
4867 	u16 current_speed;
4868 	u8 current_duplex;
4869 	u32 local_adv, remote_adv;
4870 
4871 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4872 	tw32_f(MAC_MODE, tp->mac_mode);
4873 	udelay(40);
4874 
4875 	tw32(MAC_EVENT, 0);
4876 
4877 	tw32_f(MAC_STATUS,
4878 	     (MAC_STATUS_SYNC_CHANGED |
4879 	      MAC_STATUS_CFG_CHANGED |
4880 	      MAC_STATUS_MI_COMPLETION |
4881 	      MAC_STATUS_LNKSTATE_CHANGED));
4882 	udelay(40);
4883 
4884 	if (force_reset)
4885 		tg3_phy_reset(tp);
4886 
4887 	current_link_up = 0;
4888 	current_speed = SPEED_INVALID;
4889 	current_duplex = DUPLEX_INVALID;
4890 
4891 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4892 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4893 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4894 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4895 			bmsr |= BMSR_LSTATUS;
4896 		else
4897 			bmsr &= ~BMSR_LSTATUS;
4898 	}
4899 
4900 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4901 
4902 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4903 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4904 		/* do nothing, just check for link up at the end */
4905 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4906 		u32 adv, new_adv;
4907 
4908 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4909 		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4910 				  ADVERTISE_1000XPAUSE |
4911 				  ADVERTISE_1000XPSE_ASYM |
4912 				  ADVERTISE_SLCT);
4913 
4914 		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4915 
4916 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4917 			new_adv |= ADVERTISE_1000XHALF;
4918 		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4919 			new_adv |= ADVERTISE_1000XFULL;
4920 
4921 		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4922 			tg3_writephy(tp, MII_ADVERTISE, new_adv);
4923 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4924 			tg3_writephy(tp, MII_BMCR, bmcr);
4925 
4926 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4927 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4928 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4929 
4930 			return err;
4931 		}
4932 	} else {
4933 		u32 new_bmcr;
4934 
4935 		bmcr &= ~BMCR_SPEED1000;
4936 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4937 
4938 		if (tp->link_config.duplex == DUPLEX_FULL)
4939 			new_bmcr |= BMCR_FULLDPLX;
4940 
4941 		if (new_bmcr != bmcr) {
4942 			/* BMCR_SPEED1000 is a reserved bit that needs
4943 			 * to be set on write.
4944 			 */
4945 			new_bmcr |= BMCR_SPEED1000;
4946 
4947 			/* Force a linkdown */
4948 			if (netif_carrier_ok(tp->dev)) {
4949 				u32 adv;
4950 
4951 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4952 				adv &= ~(ADVERTISE_1000XFULL |
4953 					 ADVERTISE_1000XHALF |
4954 					 ADVERTISE_SLCT);
4955 				tg3_writephy(tp, MII_ADVERTISE, adv);
4956 				tg3_writephy(tp, MII_BMCR, bmcr |
4957 							   BMCR_ANRESTART |
4958 							   BMCR_ANENABLE);
4959 				udelay(10);
4960 				netif_carrier_off(tp->dev);
4961 			}
4962 			tg3_writephy(tp, MII_BMCR, new_bmcr);
4963 			bmcr = new_bmcr;
4964 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4965 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4966 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4967 			    ASIC_REV_5714) {
4968 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4969 					bmsr |= BMSR_LSTATUS;
4970 				else
4971 					bmsr &= ~BMSR_LSTATUS;
4972 			}
4973 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4974 		}
4975 	}
4976 
4977 	if (bmsr & BMSR_LSTATUS) {
4978 		current_speed = SPEED_1000;
4979 		current_link_up = 1;
4980 		if (bmcr & BMCR_FULLDPLX)
4981 			current_duplex = DUPLEX_FULL;
4982 		else
4983 			current_duplex = DUPLEX_HALF;
4984 
4985 		local_adv = 0;
4986 		remote_adv = 0;
4987 
4988 		if (bmcr & BMCR_ANENABLE) {
4989 			u32 common;
4990 
4991 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4992 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4993 			common = local_adv & remote_adv;
4994 			if (common & (ADVERTISE_1000XHALF |
4995 				      ADVERTISE_1000XFULL)) {
4996 				if (common & ADVERTISE_1000XFULL)
4997 					current_duplex = DUPLEX_FULL;
4998 				else
4999 					current_duplex = DUPLEX_HALF;
5000 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5001 				/* Link is up via parallel detect */
5002 			} else {
5003 				current_link_up = 0;
5004 			}
5005 		}
5006 	}
5007 
5008 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5009 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5010 
5011 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5013 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014 
5015 	tw32_f(MAC_MODE, tp->mac_mode);
5016 	udelay(40);
5017 
5018 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5019 
5020 	tp->link_config.active_speed = current_speed;
5021 	tp->link_config.active_duplex = current_duplex;
5022 
5023 	if (current_link_up != netif_carrier_ok(tp->dev)) {
5024 		if (current_link_up)
5025 			netif_carrier_on(tp->dev);
5026 		else {
5027 			netif_carrier_off(tp->dev);
5028 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5029 		}
5030 		tg3_link_report(tp);
5031 	}
5032 	return err;
5033 }
5034 
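/* Poll for a non-autonegotiating link partner: force the link up via
 * parallel detection when we see signal but no config code words, and
 * re-enable autoneg once config code words reappear.
 */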
5035 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5036 {
5037 	if (tp->serdes_counter) {
5038 		/* Give autoneg time to complete. */
5039 		tp->serdes_counter--;
5040 		return;
5041 	}
5042 
5043 	if (!netif_carrier_ok(tp->dev) &&
5044 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5045 		u32 bmcr;
5046 
5047 		tg3_readphy(tp, MII_BMCR, &bmcr);
5048 		if (bmcr & BMCR_ANENABLE) {
5049 			u32 phy1, phy2;
5050 
5051 			/* Select shadow register 0x1f */
5052 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5053 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5054 
5055 			/* Select expansion interrupt status register */
5056 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5057 					 MII_TG3_DSP_EXP1_INT_STAT);
5058 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5059 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5060 
5061 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5062 				/* We have signal detect and are not
5063 				 * receiving config code words; the link
5064 				 * is up by parallel detection.
5065 				 */
5066 
5067 				bmcr &= ~BMCR_ANENABLE;
5068 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5069 				tg3_writephy(tp, MII_BMCR, bmcr);
5070 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5071 			}
5072 		}
5073 	} else if (netif_carrier_ok(tp->dev) &&
5074 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5075 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5076 		u32 phy2;
5077 
5078 		/* Select expansion interrupt status register */
5079 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5080 				 MII_TG3_DSP_EXP1_INT_STAT);
5081 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5082 		if (phy2 & 0x20) {
5083 			u32 bmcr;
5084 
5085 			/* Config code words received, turn on autoneg. */
5086 			tg3_readphy(tp, MII_BMCR, &bmcr);
5087 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5088 
5089 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5090 
5091 		}
5092 	}
5093 }
5094 
5095 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5096 {
5097 	u32 val;
5098 	int err;
5099 
5100 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5101 		err = tg3_setup_fiber_phy(tp, force_reset);
5102 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5103 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5104 	else
5105 		err = tg3_setup_copper_phy(tp, force_reset);
5106 
5107 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5108 		u32 scale;
5109 
5110 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5111 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5112 			scale = 65;
5113 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5114 			scale = 6;
5115 		else
5116 			scale = 12;
5117 
5118 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5119 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5120 		tw32(GRC_MISC_CFG, val);
5121 	}
5122 
5123 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5124 	      (6 << TX_LENGTHS_IPG_SHIFT);
5125 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5126 		val |= tr32(MAC_TX_LENGTHS) &
5127 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5128 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5129 
5130 	if (tp->link_config.active_speed == SPEED_1000 &&
5131 	    tp->link_config.active_duplex == DUPLEX_HALF)
5132 		tw32(MAC_TX_LENGTHS, val |
5133 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5134 	else
5135 		tw32(MAC_TX_LENGTHS, val |
5136 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5137 
5138 	if (!tg3_flag(tp, 5705_PLUS)) {
5139 		if (netif_carrier_ok(tp->dev)) {
5140 			tw32(HOSTCC_STAT_COAL_TICKS,
5141 			     tp->coal.stats_block_coalesce_usecs);
5142 		} else {
5143 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5144 		}
5145 	}
5146 
5147 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5148 		val = tr32(PCIE_PWR_MGMT_THRESH);
5149 		if (!netif_carrier_ok(tp->dev))
5150 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5151 			      tp->pwrmgmt_thresh;
5152 		else
5153 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5154 		tw32(PCIE_PWR_MGMT_THRESH, val);
5155 	}
5156 
5157 	return err;
5158 }
5159 
5160 static inline int tg3_irq_sync(struct tg3 *tp)
5161 {
5162 	return tp->irq_sync;
5163 }
5164 
5165 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5166 {
5167 	int i;
5168 
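	/* Offset the destination so each register value lands at the
	 * position matching its register offset in the dump buffer.
	 */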
5169 	dst = (u32 *)((u8 *)dst + off);
5170 	for (i = 0; i < len; i += sizeof(u32))
5171 		*dst++ = tr32(off + i);
5172 }
5173 
5174 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5175 {
5176 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5177 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5178 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5179 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5180 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5181 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5182 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5183 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5184 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5185 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5186 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5187 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5188 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5189 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5190 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5191 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5192 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5193 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5194 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5195 
5196 	if (tg3_flag(tp, SUPPORT_MSIX))
5197 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5198 
5199 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5200 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5201 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5202 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5203 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5204 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5205 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5206 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5207 
5208 	if (!tg3_flag(tp, 5705_PLUS)) {
5209 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5210 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5211 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5212 	}
5213 
5214 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5215 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5216 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5217 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5218 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5219 
5220 	if (tg3_flag(tp, NVRAM))
5221 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5222 }
5223 
5224 static void tg3_dump_state(struct tg3 *tp)
5225 {
5226 	int i;
5227 	u32 *regs;
5228 
5229 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5230 	if (!regs) {
5231 		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5232 		return;
5233 	}
5234 
5235 	if (tg3_flag(tp, PCI_EXPRESS)) {
5236 		/* Read up to but not including private PCI registers */
5237 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5238 			regs[i / sizeof(u32)] = tr32(i);
5239 	} else
5240 		tg3_dump_legacy_regs(tp, regs);
5241 
5242 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5243 		if (!regs[i + 0] && !regs[i + 1] &&
5244 		    !regs[i + 2] && !regs[i + 3])
5245 			continue;
5246 
5247 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5248 			   i * 4,
5249 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5250 	}
5251 
5252 	kfree(regs);
5253 
5254 	for (i = 0; i < tp->irq_cnt; i++) {
5255 		struct tg3_napi *tnapi = &tp->napi[i];
5256 
5257 		/* SW status block */
5258 		netdev_err(tp->dev,
5259 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5260 			   i,
5261 			   tnapi->hw_status->status,
5262 			   tnapi->hw_status->status_tag,
5263 			   tnapi->hw_status->rx_jumbo_consumer,
5264 			   tnapi->hw_status->rx_consumer,
5265 			   tnapi->hw_status->rx_mini_consumer,
5266 			   tnapi->hw_status->idx[0].rx_producer,
5267 			   tnapi->hw_status->idx[0].tx_consumer);
5268 
5269 		netdev_err(tp->dev,
5270 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5271 			   i,
5272 			   tnapi->last_tag, tnapi->last_irq_tag,
5273 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5274 			   tnapi->rx_rcb_ptr,
5275 			   tnapi->prodring.rx_std_prod_idx,
5276 			   tnapi->prodring.rx_std_cons_idx,
5277 			   tnapi->prodring.rx_jmb_prod_idx,
5278 			   tnapi->prodring.rx_jmb_cons_idx);
5279 	}
5280 }
5281 
5282 /* This is called whenever we suspect that the system chipset is re-
5283  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5284  * is bogus tx completions. We try to recover by setting the
5285  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5286  * in the workqueue.
5287  */
5288 static void tg3_tx_recover(struct tg3 *tp)
5289 {
5290 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5291 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
5292 
5293 	netdev_warn(tp->dev,
5294 		    "The system may be re-ordering memory-mapped I/O "
5295 		    "cycles to the network device, attempting to recover. "
5296 		    "Please report the problem to the driver maintainer "
5297 		    "and include system chipset information.\n");
5298 
5299 	spin_lock(&tp->lock);
5300 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
5301 	spin_unlock(&tp->lock);
5302 }
5303 
5304 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5305 {
5306 	/* Tell compiler to fetch tx indices from memory. */
5307 	barrier();
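	/* The ring size is a power of two, so the mask handles
	 * producer/consumer index wraparound.
	 */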
5308 	return tnapi->tx_pending -
5309 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5310 }
5311 
5312 /* Tigon3 never reports partial packet sends.  So we do not
5313  * need special logic to handle SKBs that have not had all
5314  * of their frags sent yet, like SunGEM does.
5315  */
5316 static void tg3_tx(struct tg3_napi *tnapi)
5317 {
5318 	struct tg3 *tp = tnapi->tp;
5319 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5320 	u32 sw_idx = tnapi->tx_cons;
5321 	struct netdev_queue *txq;
5322 	int index = tnapi - tp->napi;
5323 
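	/* With TSS, vector 0 carries no tx ring, so the netdev tx queue
	 * index is the napi index minus one.
	 */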
5324 	if (tg3_flag(tp, ENABLE_TSS))
5325 		index--;
5326 
5327 	txq = netdev_get_tx_queue(tp->dev, index);
5328 
5329 	while (sw_idx != hw_idx) {
5330 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5331 		struct sk_buff *skb = ri->skb;
5332 		int i, tx_bug = 0;
5333 
5334 		if (unlikely(skb == NULL)) {
5335 			tg3_tx_recover(tp);
5336 			return;
5337 		}
5338 
5339 		pci_unmap_single(tp->pdev,
5340 				 dma_unmap_addr(ri, mapping),
5341 				 skb_headlen(skb),
5342 				 PCI_DMA_TODEVICE);
5343 
5344 		ri->skb = NULL;
5345 
5346 		while (ri->fragmented) {
5347 			ri->fragmented = false;
5348 			sw_idx = NEXT_TX(sw_idx);
5349 			ri = &tnapi->tx_buffers[sw_idx];
5350 		}
5351 
5352 		sw_idx = NEXT_TX(sw_idx);
5353 
5354 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5355 			ri = &tnapi->tx_buffers[sw_idx];
5356 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5357 				tx_bug = 1;
5358 
5359 			pci_unmap_page(tp->pdev,
5360 				       dma_unmap_addr(ri, mapping),
5361 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
5362 				       PCI_DMA_TODEVICE);
5363 
5364 			while (ri->fragmented) {
5365 				ri->fragmented = false;
5366 				sw_idx = NEXT_TX(sw_idx);
5367 				ri = &tnapi->tx_buffers[sw_idx];
5368 			}
5369 
5370 			sw_idx = NEXT_TX(sw_idx);
5371 		}
5372 
5373 		dev_kfree_skb(skb);
5374 
5375 		if (unlikely(tx_bug)) {
5376 			tg3_tx_recover(tp);
5377 			return;
5378 		}
5379 	}
5380 
5381 	tnapi->tx_cons = sw_idx;
5382 
5383 	/* Need to make the tx_cons update visible to tg3_start_xmit()
5384 	 * before checking for netif_queue_stopped().  Without the
5385 	 * memory barrier, there is a small possibility that tg3_start_xmit()
5386 	 * will miss it and cause the queue to be stopped forever.
5387 	 */
5388 	smp_mb();
5389 
5390 	if (unlikely(netif_tx_queue_stopped(txq) &&
5391 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5392 		__netif_tx_lock(txq, smp_processor_id());
5393 		if (netif_tx_queue_stopped(txq) &&
5394 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5395 			netif_tx_wake_queue(txq);
5396 		__netif_tx_unlock(txq);
5397 	}
5398 }
5399 
5400 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5401 {
5402 	if (!ri->skb)
5403 		return;
5404 
5405 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5406 			 map_sz, PCI_DMA_FROMDEVICE);
5407 	dev_kfree_skb_any(ri->skb);
5408 	ri->skb = NULL;
5409 }
5410 
5411 /* Returns size of skb allocated or < 0 on error.
5412  *
5413  * We only need to fill in the address because the other members
5414  * of the RX descriptor are invariant, see tg3_init_rings.
5415  *
5416  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5417  * posting buffers we only dirty the first cache line of the RX
5418  * descriptor (containing the address).  Whereas for the RX status
5419  * buffers the cpu only reads the last cacheline of the RX descriptor
5420  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5421  */
5422 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5423 			    u32 opaque_key, u32 dest_idx_unmasked)
5424 {
5425 	struct tg3_rx_buffer_desc *desc;
5426 	struct ring_info *map;
5427 	struct sk_buff *skb;
5428 	dma_addr_t mapping;
5429 	int skb_size, dest_idx;
5430 
5431 	switch (opaque_key) {
5432 	case RXD_OPAQUE_RING_STD:
5433 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5434 		desc = &tpr->rx_std[dest_idx];
5435 		map = &tpr->rx_std_buffers[dest_idx];
5436 		skb_size = tp->rx_pkt_map_sz;
5437 		break;
5438 
5439 	case RXD_OPAQUE_RING_JUMBO:
5440 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5441 		desc = &tpr->rx_jmb[dest_idx].std;
5442 		map = &tpr->rx_jmb_buffers[dest_idx];
5443 		skb_size = TG3_RX_JMB_MAP_SZ;
5444 		break;
5445 
5446 	default:
5447 		return -EINVAL;
5448 	}
5449 
5450 	/* Do not overwrite any of the map or rp information
5451 	 * until we are sure we can commit to a new buffer.
5452 	 *
5453 	 * Callers depend upon this behavior and assume that
5454 	 * we leave everything unchanged if we fail.
5455 	 */
5456 	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5457 	if (skb == NULL)
5458 		return -ENOMEM;
5459 
5460 	skb_reserve(skb, TG3_RX_OFFSET(tp));
5461 
5462 	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5463 				 PCI_DMA_FROMDEVICE);
5464 	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5465 		dev_kfree_skb(skb);
5466 		return -EIO;
5467 	}
5468 
5469 	map->skb = skb;
5470 	dma_unmap_addr_set(map, mapping, mapping);
5471 
5472 	desc->addr_hi = ((u64)mapping >> 32);
5473 	desc->addr_lo = ((u64)mapping & 0xffffffff);
5474 
5475 	return skb_size;
5476 }
5477 
5478 /* We only need to move over in the address because the other
5479  * members of the RX descriptor are invariant.  See notes above
5480  * tg3_alloc_rx_skb for full details.
5481  */
5482 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5483 			   struct tg3_rx_prodring_set *dpr,
5484 			   u32 opaque_key, int src_idx,
5485 			   u32 dest_idx_unmasked)
5486 {
5487 	struct tg3 *tp = tnapi->tp;
5488 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5489 	struct ring_info *src_map, *dest_map;
5490 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5491 	int dest_idx;
5492 
5493 	switch (opaque_key) {
5494 	case RXD_OPAQUE_RING_STD:
5495 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5496 		dest_desc = &dpr->rx_std[dest_idx];
5497 		dest_map = &dpr->rx_std_buffers[dest_idx];
5498 		src_desc = &spr->rx_std[src_idx];
5499 		src_map = &spr->rx_std_buffers[src_idx];
5500 		break;
5501 
5502 	case RXD_OPAQUE_RING_JUMBO:
5503 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5504 		dest_desc = &dpr->rx_jmb[dest_idx].std;
5505 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
5506 		src_desc = &spr->rx_jmb[src_idx].std;
5507 		src_map = &spr->rx_jmb_buffers[src_idx];
5508 		break;
5509 
5510 	default:
5511 		return;
5512 	}
5513 
5514 	dest_map->skb = src_map->skb;
5515 	dma_unmap_addr_set(dest_map, mapping,
5516 			   dma_unmap_addr(src_map, mapping));
5517 	dest_desc->addr_hi = src_desc->addr_hi;
5518 	dest_desc->addr_lo = src_desc->addr_lo;
5519 
5520 	/* Ensure that the update to the skb happens after the physical
5521 	 * addresses have been transferred to the new BD location.
5522 	 */
5523 	smp_wmb();
5524 
5525 	src_map->skb = NULL;
5526 }
5527 
5528 /* The RX ring scheme is composed of multiple rings which post fresh
5529  * buffers to the chip, and one special ring the chip uses to report
5530  * status back to the host.
5531  *
5532  * The special ring reports the status of received packets to the
5533  * host.  The chip does not write into the original descriptor the
5534  * RX buffer was obtained from.  The chip simply takes the original
5535  * descriptor as provided by the host, updates the status and length
5536  * field, then writes this into the next status ring entry.
5537  *
5538  * Each ring the host uses to post buffers to the chip is described
5539  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5540  * it is first placed into the on-chip ram.  When the packet's length
5541  * is known, it walks down the TG3_BDINFO entries to select the ring.
5542  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5543  * whose MAXLEN covers the new packet's length is chosen.
5544  *
5545  * The "separate ring for rx status" scheme may sound strange, but it makes
5546  * sense from a cache coherency perspective.  If only the host writes
5547  * to the buffer post rings, and only the chip writes to the rx status
5548  * rings, then cache lines never move beyond shared-modified state.
5549  * If both the host and chip were to write into the same ring, cache line
5550  * eviction could occur since both entities want it in an exclusive state.
5551  */
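/* Illustrative example (actual MAXLEN values depend on configuration):
 * with a standard ring sized for ~1.5K frames and a jumbo ring sized for
 * ~9K frames, a 4000-byte packet would not fit the standard TG3_BDINFO
 * entry and would be given a buffer from the jumbo ring instead.
 */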
5552 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5553 {
5554 	struct tg3 *tp = tnapi->tp;
5555 	u32 work_mask, rx_std_posted = 0;
5556 	u32 std_prod_idx, jmb_prod_idx;
5557 	u32 sw_idx = tnapi->rx_rcb_ptr;
5558 	u16 hw_idx;
5559 	int received;
5560 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5561 
5562 	hw_idx = *(tnapi->rx_rcb_prod_idx);
5563 	/*
5564 	 * We need to order the read of hw_idx and the read of
5565 	 * the opaque cookie.
5566 	 */
5567 	rmb();
5568 	work_mask = 0;
5569 	received = 0;
5570 	std_prod_idx = tpr->rx_std_prod_idx;
5571 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
5572 	while (sw_idx != hw_idx && budget > 0) {
5573 		struct ring_info *ri;
5574 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5575 		unsigned int len;
5576 		struct sk_buff *skb;
5577 		dma_addr_t dma_addr;
5578 		u32 opaque_key, desc_idx, *post_ptr;
5579 
5580 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5581 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5582 		if (opaque_key == RXD_OPAQUE_RING_STD) {
5583 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5584 			dma_addr = dma_unmap_addr(ri, mapping);
5585 			skb = ri->skb;
5586 			post_ptr = &std_prod_idx;
5587 			rx_std_posted++;
5588 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5589 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5590 			dma_addr = dma_unmap_addr(ri, mapping);
5591 			skb = ri->skb;
5592 			post_ptr = &jmb_prod_idx;
5593 		} else
5594 			goto next_pkt_nopost;
5595 
5596 		work_mask |= opaque_key;
5597 
5598 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5599 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5600 		drop_it:
5601 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5602 				       desc_idx, *post_ptr);
5603 		drop_it_no_recycle:
5604 			/* The card keeps track of the other statistics. */
5605 			tp->rx_dropped++;
5606 			goto next_pkt;
5607 		}
5608 
5609 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5610 		      ETH_FCS_LEN;
5611 
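		/* Copy-break: packets larger than the copy threshold keep
		 * their DMA buffer (a fresh one is posted in its place),
		 * while small packets are copied into a new skb so the
		 * original buffer can be recycled back onto the ring.
		 */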
5612 		if (len > TG3_RX_COPY_THRESH(tp)) {
5613 			int skb_size;
5614 
5615 			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5616 						    *post_ptr);
5617 			if (skb_size < 0)
5618 				goto drop_it;
5619 
5620 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
5621 					 PCI_DMA_FROMDEVICE);
5622 
5623 			/* Ensure that the update to the skb happens
5624 			 * after the usage of the old DMA mapping.
5625 			 */
5626 			smp_wmb();
5627 
5628 			ri->skb = NULL;
5629 
5630 			skb_put(skb, len);
5631 		} else {
5632 			struct sk_buff *copy_skb;
5633 
5634 			tg3_recycle_rx(tnapi, tpr, opaque_key,
5635 				       desc_idx, *post_ptr);
5636 
5637 			copy_skb = netdev_alloc_skb(tp->dev, len +
5638 						    TG3_RAW_IP_ALIGN);
5639 			if (copy_skb == NULL)
5640 				goto drop_it_no_recycle;
5641 
5642 			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5643 			skb_put(copy_skb, len);
5644 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5645 			skb_copy_from_linear_data(skb, copy_skb->data, len);
5646 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5647 
5648 			/* We'll reuse the original ring buffer. */
5649 			skb = copy_skb;
5650 		}
5651 
5652 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
5653 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5654 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5655 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
5656 			skb->ip_summed = CHECKSUM_UNNECESSARY;
5657 		else
5658 			skb_checksum_none_assert(skb);
5659 
5660 		skb->protocol = eth_type_trans(skb, tp->dev);
5661 
5662 		if (len > (tp->dev->mtu + ETH_HLEN) &&
5663 		    skb->protocol != htons(ETH_P_8021Q)) {
5664 			dev_kfree_skb(skb);
5665 			goto drop_it_no_recycle;
5666 		}
5667 
5668 		if (desc->type_flags & RXD_FLAG_VLAN &&
5669 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5670 			__vlan_hwaccel_put_tag(skb,
5671 					       desc->err_vlan & RXD_VLAN_MASK);
5672 
5673 		napi_gro_receive(&tnapi->napi, skb);
5674 
5675 		received++;
5676 		budget--;
5677 
5678 next_pkt:
5679 		(*post_ptr)++;
5680 
5681 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5682 			tpr->rx_std_prod_idx = std_prod_idx &
5683 					       tp->rx_std_ring_mask;
5684 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5685 				     tpr->rx_std_prod_idx);
5686 			work_mask &= ~RXD_OPAQUE_RING_STD;
5687 			rx_std_posted = 0;
5688 		}
5689 next_pkt_nopost:
5690 		sw_idx++;
5691 		sw_idx &= tp->rx_ret_ring_mask;
5692 
5693 		/* Refresh hw_idx to see if there is new work */
5694 		if (sw_idx == hw_idx) {
5695 			hw_idx = *(tnapi->rx_rcb_prod_idx);
5696 			rmb();
5697 		}
5698 	}
5699 
5700 	/* ACK the status ring. */
5701 	tnapi->rx_rcb_ptr = sw_idx;
5702 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
5703 
5704 	/* Refill RX ring(s). */
5705 	if (!tg3_flag(tp, ENABLE_RSS)) {
5706 		if (work_mask & RXD_OPAQUE_RING_STD) {
5707 			tpr->rx_std_prod_idx = std_prod_idx &
5708 					       tp->rx_std_ring_mask;
5709 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5710 				     tpr->rx_std_prod_idx);
5711 		}
5712 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5713 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
5714 					       tp->rx_jmb_ring_mask;
5715 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5716 				     tpr->rx_jmb_prod_idx);
5717 		}
5718 		mmiowb();
5719 	} else if (work_mask) {
5720 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5721 		 * updated before the producer indices can be updated.
5722 		 */
5723 		smp_wmb();
5724 
5725 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5726 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5727 
5728 		if (tnapi != &tp->napi[1])
5729 			napi_schedule(&tp->napi[1].napi);
5730 	}
5731 
5732 	return received;
5733 }
5734 
5735 static void tg3_poll_link(struct tg3 *tp)
5736 {
5737 	/* handle link change and other phy events */
5738 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5739 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5740 
5741 		if (sblk->status & SD_STATUS_LINK_CHG) {
5742 			sblk->status = SD_STATUS_UPDATED |
5743 				       (sblk->status & ~SD_STATUS_LINK_CHG);
5744 			spin_lock(&tp->lock);
5745 			if (tg3_flag(tp, USE_PHYLIB)) {
5746 				tw32_f(MAC_STATUS,
5747 				     (MAC_STATUS_SYNC_CHANGED |
5748 				      MAC_STATUS_CFG_CHANGED |
5749 				      MAC_STATUS_MI_COMPLETION |
5750 				      MAC_STATUS_LNKSTATE_CHANGED));
5751 				udelay(40);
5752 			} else
5753 				tg3_setup_phy(tp, 0);
5754 			spin_unlock(&tp->lock);
5755 		}
5756 	}
5757 }
5758 
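/* Move freshly posted rx buffers from a per-vector producer ring (spr)
 * to the producer ring that is actually handed to the hardware (dpr).
 * Buffer bookkeeping and descriptor addresses are copied, and the
 * consumer/producer indices advanced.  Returns -ENOSPC if some
 * destination slots were still occupied.
 */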
5759 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5760 				struct tg3_rx_prodring_set *dpr,
5761 				struct tg3_rx_prodring_set *spr)
5762 {
5763 	u32 si, di, cpycnt, src_prod_idx;
5764 	int i, err = 0;
5765 
5766 	while (1) {
5767 		src_prod_idx = spr->rx_std_prod_idx;
5768 
5769 		/* Make sure updates to the rx_std_buffers[] entries and the
5770 		 * standard producer index are seen in the correct order.
5771 		 */
5772 		smp_rmb();
5773 
5774 		if (spr->rx_std_cons_idx == src_prod_idx)
5775 			break;
5776 
5777 		if (spr->rx_std_cons_idx < src_prod_idx)
5778 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5779 		else
5780 			cpycnt = tp->rx_std_ring_mask + 1 -
5781 				 spr->rx_std_cons_idx;
5782 
5783 		cpycnt = min(cpycnt,
5784 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5785 
5786 		si = spr->rx_std_cons_idx;
5787 		di = dpr->rx_std_prod_idx;
5788 
5789 		for (i = di; i < di + cpycnt; i++) {
5790 			if (dpr->rx_std_buffers[i].skb) {
5791 				cpycnt = i - di;
5792 				err = -ENOSPC;
5793 				break;
5794 			}
5795 		}
5796 
5797 		if (!cpycnt)
5798 			break;
5799 
5800 		/* Ensure that updates to the rx_std_buffers ring and the
5801 		 * shadowed hardware producer ring from tg3_recycle_skb() are
5802 		 * ordered correctly WRT the skb check above.
5803 		 */
5804 		smp_rmb();
5805 
5806 		memcpy(&dpr->rx_std_buffers[di],
5807 		       &spr->rx_std_buffers[si],
5808 		       cpycnt * sizeof(struct ring_info));
5809 
5810 		for (i = 0; i < cpycnt; i++, di++, si++) {
5811 			struct tg3_rx_buffer_desc *sbd, *dbd;
5812 			sbd = &spr->rx_std[si];
5813 			dbd = &dpr->rx_std[di];
5814 			dbd->addr_hi = sbd->addr_hi;
5815 			dbd->addr_lo = sbd->addr_lo;
5816 		}
5817 
5818 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5819 				       tp->rx_std_ring_mask;
5820 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5821 				       tp->rx_std_ring_mask;
5822 	}
5823 
5824 	while (1) {
5825 		src_prod_idx = spr->rx_jmb_prod_idx;
5826 
5827 		/* Make sure updates to the rx_jmb_buffers[] entries and
5828 		 * the jumbo producer index are seen in the correct order.
5829 		 */
5830 		smp_rmb();
5831 
5832 		if (spr->rx_jmb_cons_idx == src_prod_idx)
5833 			break;
5834 
5835 		if (spr->rx_jmb_cons_idx < src_prod_idx)
5836 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5837 		else
5838 			cpycnt = tp->rx_jmb_ring_mask + 1 -
5839 				 spr->rx_jmb_cons_idx;
5840 
5841 		cpycnt = min(cpycnt,
5842 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5843 
5844 		si = spr->rx_jmb_cons_idx;
5845 		di = dpr->rx_jmb_prod_idx;
5846 
5847 		for (i = di; i < di + cpycnt; i++) {
5848 			if (dpr->rx_jmb_buffers[i].skb) {
5849 				cpycnt = i - di;
5850 				err = -ENOSPC;
5851 				break;
5852 			}
5853 		}
5854 
5855 		if (!cpycnt)
5856 			break;
5857 
5858 		/* Ensure that updates to the rx_jmb_buffers ring and the
5859 		 * shadowed hardware producer ring from tg3_recycle_skb() are
5860 		 * ordered correctly WRT the skb check above.
5861 		 */
5862 		smp_rmb();
5863 
5864 		memcpy(&dpr->rx_jmb_buffers[di],
5865 		       &spr->rx_jmb_buffers[si],
5866 		       cpycnt * sizeof(struct ring_info));
5867 
5868 		for (i = 0; i < cpycnt; i++, di++, si++) {
5869 			struct tg3_rx_buffer_desc *sbd, *dbd;
5870 			sbd = &spr->rx_jmb[si].std;
5871 			dbd = &dpr->rx_jmb[di].std;
5872 			dbd->addr_hi = sbd->addr_hi;
5873 			dbd->addr_lo = sbd->addr_lo;
5874 		}
5875 
5876 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5877 				       tp->rx_jmb_ring_mask;
5878 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5879 				       tp->rx_jmb_ring_mask;
5880 	}
5881 
5882 	return err;
5883 }
5884 
5885 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5886 {
5887 	struct tg3 *tp = tnapi->tp;
5888 
5889 	/* run TX completion thread */
5890 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5891 		tg3_tx(tnapi);
5892 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5893 			return work_done;
5894 	}
5895 
5896 	/* run RX thread, within the bounds set by NAPI.
5897 	 * All RX "locking" is done by ensuring outside
5898 	 * code synchronizes with tg3->napi.poll()
5899 	 */
5900 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5901 		work_done += tg3_rx(tnapi, budget - work_done);
5902 
5903 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5904 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5905 		int i, err = 0;
5906 		u32 std_prod_idx = dpr->rx_std_prod_idx;
5907 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5908 
5909 		for (i = 1; i < tp->irq_cnt; i++)
5910 			err |= tg3_rx_prodring_xfer(tp, dpr,
5911 						    &tp->napi[i].prodring);
5912 
5913 		wmb();
5914 
5915 		if (std_prod_idx != dpr->rx_std_prod_idx)
5916 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5917 				     dpr->rx_std_prod_idx);
5918 
5919 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5920 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5921 				     dpr->rx_jmb_prod_idx);
5922 
5923 		mmiowb();
5924 
5925 		if (err)
5926 			tw32_f(HOSTCC_MODE, tp->coal_now);
5927 	}
5928 
5929 	return work_done;
5930 }
5931 
5932 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5933 {
5934 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5935 		schedule_work(&tp->reset_task);
5936 }
5937 
5938 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5939 {
5940 	cancel_work_sync(&tp->reset_task);
5941 	tg3_flag_clear(tp, RESET_TASK_PENDING);
5942 }
5943 
5944 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5945 {
5946 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5947 	struct tg3 *tp = tnapi->tp;
5948 	int work_done = 0;
5949 	struct tg3_hw_status *sblk = tnapi->hw_status;
5950 
5951 	while (1) {
5952 		work_done = tg3_poll_work(tnapi, work_done, budget);
5953 
5954 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5955 			goto tx_recovery;
5956 
5957 		if (unlikely(work_done >= budget))
5958 			break;
5959 
5960 		/* tnapi->last_tag is written to the interrupt mailbox below
5961 		 * to tell the hw how much work has been processed, so we
5962 		 * must read it before checking for more work.
5963 		 */
5964 		tnapi->last_tag = sblk->status_tag;
5965 		tnapi->last_irq_tag = tnapi->last_tag;
5966 		rmb();
5967 
5968 		/* check for RX/TX work to do */
5969 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5970 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5971 			napi_complete(napi);
5972 			/* Reenable interrupts. */
5973 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5974 			mmiowb();
5975 			break;
5976 		}
5977 	}
5978 
5979 	return work_done;
5980 
5981 tx_recovery:
5982 	/* work_done is guaranteed to be less than budget. */
5983 	napi_complete(napi);
5984 	tg3_reset_task_schedule(tp);
5985 	return work_done;
5986 }
5987 
5988 static void tg3_process_error(struct tg3 *tp)
5989 {
5990 	u32 val;
5991 	bool real_error = false;
5992 
5993 	if (tg3_flag(tp, ERROR_PROCESSED))
5994 		return;
5995 
5996 	/* Check Flow Attention register */
5997 	val = tr32(HOSTCC_FLOW_ATTN);
5998 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5999 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6000 		real_error = true;
6001 	}
6002 
6003 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6004 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6005 		real_error = true;
6006 	}
6007 
6008 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6009 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6010 		real_error = true;
6011 	}
6012 
6013 	if (!real_error)
6014 		return;
6015 
6016 	tg3_dump_state(tp);
6017 
6018 	tg3_flag_set(tp, ERROR_PROCESSED);
6019 	tg3_reset_task_schedule(tp);
6020 }
6021 
6022 static int tg3_poll(struct napi_struct *napi, int budget)
6023 {
6024 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6025 	struct tg3 *tp = tnapi->tp;
6026 	int work_done = 0;
6027 	struct tg3_hw_status *sblk = tnapi->hw_status;
6028 
6029 	while (1) {
6030 		if (sblk->status & SD_STATUS_ERROR)
6031 			tg3_process_error(tp);
6032 
6033 		tg3_poll_link(tp);
6034 
6035 		work_done = tg3_poll_work(tnapi, work_done, budget);
6036 
6037 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6038 			goto tx_recovery;
6039 
6040 		if (unlikely(work_done >= budget))
6041 			break;
6042 
6043 		if (tg3_flag(tp, TAGGED_STATUS)) {
6044 			/* tnapi->last_tag is used in tg3_int_reenable() below
6045 			 * to tell the hw how much work has been processed,
6046 			 * so we must read it before checking for more work.
6047 			 */
6048 			tnapi->last_tag = sblk->status_tag;
6049 			tnapi->last_irq_tag = tnapi->last_tag;
6050 			rmb();
6051 		} else
6052 			sblk->status &= ~SD_STATUS_UPDATED;
6053 
6054 		if (likely(!tg3_has_work(tnapi))) {
6055 			napi_complete(napi);
6056 			tg3_int_reenable(tnapi);
6057 			break;
6058 		}
6059 	}
6060 
6061 	return work_done;
6062 
6063 tx_recovery:
6064 	/* work_done is guaranteed to be less than budget. */
6065 	napi_complete(napi);
6066 	tg3_reset_task_schedule(tp);
6067 	return work_done;
6068 }
6069 
6070 static void tg3_napi_disable(struct tg3 *tp)
6071 {
6072 	int i;
6073 
6074 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6075 		napi_disable(&tp->napi[i].napi);
6076 }
6077 
6078 static void tg3_napi_enable(struct tg3 *tp)
6079 {
6080 	int i;
6081 
6082 	for (i = 0; i < tp->irq_cnt; i++)
6083 		napi_enable(&tp->napi[i].napi);
6084 }
6085 
6086 static void tg3_napi_init(struct tg3 *tp)
6087 {
6088 	int i;
6089 
6090 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6091 	for (i = 1; i < tp->irq_cnt; i++)
6092 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6093 }
6094 
6095 static void tg3_napi_fini(struct tg3 *tp)
6096 {
6097 	int i;
6098 
6099 	for (i = 0; i < tp->irq_cnt; i++)
6100 		netif_napi_del(&tp->napi[i].napi);
6101 }
6102 
6103 static inline void tg3_netif_stop(struct tg3 *tp)
6104 {
6105 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6106 	tg3_napi_disable(tp);
6107 	netif_tx_disable(tp->dev);
6108 }
6109 
6110 static inline void tg3_netif_start(struct tg3 *tp)
6111 {
6112 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6113 	 * appropriate so long as all callers are assured to
6114 	 * have free tx slots (such as after tg3_init_hw)
6115 	 */
6116 	netif_tx_wake_all_queues(tp->dev);
6117 
6118 	tg3_napi_enable(tp);
6119 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6120 	tg3_enable_ints(tp);
6121 }
6122 
6123 static void tg3_irq_quiesce(struct tg3 *tp)
6124 {
6125 	int i;
6126 
6127 	BUG_ON(tp->irq_sync);
6128 
6129 	tp->irq_sync = 1;
6130 	smp_mb();
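	/* Order the irq_sync store against everything the IRQ handlers do:
	 * handlers observe it via tg3_irq_sync() and bail out, so after the
	 * synchronize_irq() loop below no handler can schedule NAPI.
	 */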
6131 
6132 	for (i = 0; i < tp->irq_cnt; i++)
6133 		synchronize_irq(tp->napi[i].irq_vec);
6134 }
6135 
6136 /* Fully shut down all tg3 driver activity elsewhere in the system.
6137  * If irq_sync is non-zero, the IRQ handlers must be synchronized as
6138  * well.  Most of the time this is not necessary, except when
6139  * shutting down the device.
6140  */
6141 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6142 {
6143 	spin_lock_bh(&tp->lock);
6144 	if (irq_sync)
6145 		tg3_irq_quiesce(tp);
6146 }
6147 
6148 static inline void tg3_full_unlock(struct tg3 *tp)
6149 {
6150 	spin_unlock_bh(&tp->lock);
6151 }
6152 
6153 /* One-shot MSI handler - the chip automatically disables the interrupt
6154  * after sending the MSI, so the driver doesn't have to do it.
6155  */
6156 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6157 {
6158 	struct tg3_napi *tnapi = dev_id;
6159 	struct tg3 *tp = tnapi->tp;
6160 
6161 	prefetch(tnapi->hw_status);
6162 	if (tnapi->rx_rcb)
6163 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6164 
6165 	if (likely(!tg3_irq_sync(tp)))
6166 		napi_schedule(&tnapi->napi);
6167 
6168 	return IRQ_HANDLED;
6169 }
6170 
6171 /* MSI ISR - No need to check for interrupt sharing and no need to
6172  * flush status block and interrupt mailbox. PCI ordering rules
6173  * guarantee that MSI will arrive after the status block.
6174  */
6175 static irqreturn_t tg3_msi(int irq, void *dev_id)
6176 {
6177 	struct tg3_napi *tnapi = dev_id;
6178 	struct tg3 *tp = tnapi->tp;
6179 
6180 	prefetch(tnapi->hw_status);
6181 	if (tnapi->rx_rcb)
6182 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6183 	/*
6184 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6185 	 * chip-internal interrupt pending events.
6186 	 * Writing non-zero to intr-mbox-0 additionally tells the
6187 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6188 	 * event coalescing.
6189 	 */
6190 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
6191 	if (likely(!tg3_irq_sync(tp)))
6192 		napi_schedule(&tnapi->napi);
6193 
6194 	return IRQ_RETVAL(1);
6195 }
6196 
6197 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6198 {
6199 	struct tg3_napi *tnapi = dev_id;
6200 	struct tg3 *tp = tnapi->tp;
6201 	struct tg3_hw_status *sblk = tnapi->hw_status;
6202 	unsigned int handled = 1;
6203 
6204 	/* In INTx mode, the interrupt can arrive at the CPU before the
6205 	 * status block that was posted prior to it lands in host memory.
6206 	 * Reading the PCI State register will confirm whether the
6207 	 * interrupt is ours and will flush the status block.
6208 	 */
6209 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6210 		if (tg3_flag(tp, CHIP_RESETTING) ||
6211 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6212 			handled = 0;
6213 			goto out;
6214 		}
6215 	}
6216 
6217 	/*
6218 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6219 	 * chip-internal interrupt pending events.
6220 	 * Writing non-zero to intr-mbox-0 additionally tells the
6221 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6222 	 * event coalescing.
6223 	 *
6224 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6225 	 * spurious interrupts.  The flush impacts performance but
6226 	 * excessive spurious interrupts can be worse in some cases.
6227 	 */
6228 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6229 	if (tg3_irq_sync(tp))
6230 		goto out;
6231 	sblk->status &= ~SD_STATUS_UPDATED;
6232 	if (likely(tg3_has_work(tnapi))) {
6233 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6234 		napi_schedule(&tnapi->napi);
6235 	} else {
6236 		/* No work, shared interrupt perhaps?  re-enable
6237 		 * interrupts, and flush that PCI write
6238 		 */
6239 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6240 			       0x00000000);
6241 	}
6242 out:
6243 	return IRQ_RETVAL(handled);
6244 }
6245 
6246 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6247 {
6248 	struct tg3_napi *tnapi = dev_id;
6249 	struct tg3 *tp = tnapi->tp;
6250 	struct tg3_hw_status *sblk = tnapi->hw_status;
6251 	unsigned int handled = 1;
6252 
6253 	/* In INTx mode, the interrupt can arrive at the CPU before the
6254 	 * status block that was posted prior to it lands in host memory.
6255 	 * Reading the PCI State register will confirm whether the
6256 	 * interrupt is ours and will flush the status block.
6257 	 */
6258 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6259 		if (tg3_flag(tp, CHIP_RESETTING) ||
6260 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6261 			handled = 0;
6262 			goto out;
6263 		}
6264 	}
6265 
6266 	/*
6267 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
6268 	 * chip-internal interrupt pending events.
6269 	 * Writing non-zero to intr-mbox-0 additionally tells the
6270 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
6271 	 * event coalescing.
6272 	 *
6273 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
6274 	 * spurious interrupts.  The flush impacts performance but
6275 	 * excessive spurious interrupts can be worse in some cases.
6276 	 */
6277 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6278 
6279 	/*
6280 	 * In a shared interrupt configuration, sometimes other devices'
6281 	 * interrupts will scream.  We record the current status tag here
6282 	 * so that the above check can report that the screaming interrupts
6283 	 * are unhandled.  Eventually they will be silenced.
6284 	 */
6285 	tnapi->last_irq_tag = sblk->status_tag;
6286 
6287 	if (tg3_irq_sync(tp))
6288 		goto out;
6289 
6290 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6291 
6292 	napi_schedule(&tnapi->napi);
6293 
6294 out:
6295 	return IRQ_RETVAL(handled);
6296 }
6297 
6298 /* ISR for interrupt test */
6299 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6300 {
6301 	struct tg3_napi *tnapi = dev_id;
6302 	struct tg3 *tp = tnapi->tp;
6303 	struct tg3_hw_status *sblk = tnapi->hw_status;
6304 
6305 	if ((sblk->status & SD_STATUS_UPDATED) ||
6306 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6307 		tg3_disable_ints(tp);
6308 		return IRQ_RETVAL(1);
6309 	}
6310 	return IRQ_RETVAL(0);
6311 }
6312 
6313 static int tg3_init_hw(struct tg3 *, int);
6314 static int tg3_halt(struct tg3 *, int, int);
6315 
6316 /* Restart hardware after configuration changes, self-test, etc.
6317  * Invoked with tp->lock held.
6318  */
6319 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6320 	__releases(tp->lock)
6321 	__acquires(tp->lock)
6322 {
6323 	int err;
6324 
6325 	err = tg3_init_hw(tp, reset_phy);
6326 	if (err) {
6327 		netdev_err(tp->dev,
6328 			   "Failed to re-initialize device, aborting\n");
6329 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6330 		tg3_full_unlock(tp);
6331 		del_timer_sync(&tp->timer);
6332 		tp->irq_sync = 0;
6333 		tg3_napi_enable(tp);
6334 		dev_close(tp->dev);
6335 		tg3_full_lock(tp, 0);
6336 	}
6337 	return err;
6338 }
6339 
6340 #ifdef CONFIG_NET_POLL_CONTROLLER
6341 static void tg3_poll_controller(struct net_device *dev)
6342 {
6343 	int i;
6344 	struct tg3 *tp = netdev_priv(dev);
6345 
6346 	for (i = 0; i < tp->irq_cnt; i++)
6347 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6348 }
6349 #endif
6350 
6351 static void tg3_reset_task(struct work_struct *work)
6352 {
6353 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
6354 	int err;
6355 
6356 	tg3_full_lock(tp, 0);
6357 
6358 	if (!netif_running(tp->dev)) {
6359 		tg3_flag_clear(tp, RESET_TASK_PENDING);
6360 		tg3_full_unlock(tp);
6361 		return;
6362 	}
6363 
6364 	tg3_full_unlock(tp);
6365 
6366 	tg3_phy_stop(tp);
6367 
6368 	tg3_netif_stop(tp);
6369 
6370 	tg3_full_lock(tp, 1);
6371 
6372 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6373 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
6374 		tp->write32_rx_mbox = tg3_write_flush_reg32;
6375 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
6376 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6377 	}
6378 
6379 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6380 	err = tg3_init_hw(tp, 1);
6381 	if (err)
6382 		goto out;
6383 
6384 	tg3_netif_start(tp);
6385 
6386 out:
6387 	tg3_full_unlock(tp);
6388 
6389 	if (!err)
6390 		tg3_phy_start(tp);
6391 
6392 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6393 }
6394 
6395 static void tg3_tx_timeout(struct net_device *dev)
6396 {
6397 	struct tg3 *tp = netdev_priv(dev);
6398 
6399 	if (netif_msg_tx_err(tp)) {
6400 		netdev_err(dev, "transmit timed out, resetting\n");
6401 		tg3_dump_state(tp);
6402 	}
6403 
6404 	tg3_reset_task_schedule(tp);
6405 }
6406 
6407 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6408 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6409 {
6410 	u32 base = (u32) mapping & 0xffffffff;
6411 
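	/* The second clause detects base + len + 8 wrapping past a 32-bit
	 * boundary.  The first clause is a cheap pre-filter; the constant
	 * leaves 0xffffffff - 0xffffdcc0 = 0x233f (~9K) of headroom, which
	 * appears to assume no single mapping handed to this test is
	 * larger than about 9KB.
	 */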
6412 	return (base > 0xffffdcc0) && (base + len + 8 < base);
6413 }
6414 
6415 /* Test for DMA addresses > 40-bit */
6416 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6417 					  int len)
6418 {
6419 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6420 	if (tg3_flag(tp, 40BIT_DMA_BUG))
6421 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
6422 	return 0;
6423 #else
6424 	return 0;
6425 #endif
6426 }
6427 
6428 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6429 				 dma_addr_t mapping, u32 len, u32 flags,
6430 				 u32 mss, u32 vlan)
6431 {
6432 	txbd->addr_hi = ((u64) mapping >> 32);
6433 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
6434 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6435 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6436 }
6437 
6438 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6439 			    dma_addr_t map, u32 len, u32 flags,
6440 			    u32 mss, u32 vlan)
6441 {
6442 	struct tg3 *tp = tnapi->tp;
6443 	bool hwbug = false;
6444 
6445 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6446 		hwbug = true;
6447 
6448 	if (tg3_4g_overflow_test(map, len))
6449 		hwbug = true;
6450 
6451 	if (tg3_40bit_overflow_test(tp, map, len))
6452 		hwbug = true;
6453 
6454 	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6455 		u32 prvidx = *entry;
6456 		u32 tmp_flag = flags & ~TXD_FLAG_END;
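		/* Long fragments are carved into TG3_TX_BD_DMA_MAX sized
		 * BDs.  If the final remainder would be 8 bytes or less
		 * (the short-DMA bug), the last full-size chunk is halved
		 * so both pieces stay legal; e.g. assuming a 4K max BD, a
		 * 4100-byte fragment becomes 2048 + 2052 rather than
		 * 4096 + 4.
		 */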
6457 		while (len > TG3_TX_BD_DMA_MAX && *budget) {
6458 			u32 frag_len = TG3_TX_BD_DMA_MAX;
6459 			len -= TG3_TX_BD_DMA_MAX;
6460 
6461 			/* Avoid the 8-byte DMA problem */
6462 			if (len <= 8) {
6463 				len += TG3_TX_BD_DMA_MAX / 2;
6464 				frag_len = TG3_TX_BD_DMA_MAX / 2;
6465 			}
6466 
6467 			tnapi->tx_buffers[*entry].fragmented = true;
6468 
6469 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6470 				      frag_len, tmp_flag, mss, vlan);
6471 			*budget -= 1;
6472 			prvidx = *entry;
6473 			*entry = NEXT_TX(*entry);
6474 
6475 			map += frag_len;
6476 		}
6477 
6478 		if (len) {
6479 			if (*budget) {
6480 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6481 					      len, flags, mss, vlan);
6482 				*budget -= 1;
6483 				*entry = NEXT_TX(*entry);
6484 			} else {
6485 				hwbug = true;
6486 				tnapi->tx_buffers[prvidx].fragmented = false;
6487 			}
6488 		}
6489 	} else {
6490 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6491 			      len, flags, mss, vlan);
6492 		*entry = NEXT_TX(*entry);
6493 	}
6494 
6495 	return hwbug;
6496 }
6497 
6498 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6499 {
6500 	int i;
6501 	struct sk_buff *skb;
6502 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6503 
6504 	skb = txb->skb;
6505 	txb->skb = NULL;
6506 
6507 	pci_unmap_single(tnapi->tp->pdev,
6508 			 dma_unmap_addr(txb, mapping),
6509 			 skb_headlen(skb),
6510 			 PCI_DMA_TODEVICE);
6511 
6512 	while (txb->fragmented) {
6513 		txb->fragmented = false;
6514 		entry = NEXT_TX(entry);
6515 		txb = &tnapi->tx_buffers[entry];
6516 	}
6517 
6518 	for (i = 0; i <= last; i++) {
6519 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6520 
6521 		entry = NEXT_TX(entry);
6522 		txb = &tnapi->tx_buffers[entry];
6523 
6524 		pci_unmap_page(tnapi->tp->pdev,
6525 			       dma_unmap_addr(txb, mapping),
6526 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
6527 
6528 		while (txb->fragmented) {
6529 			txb->fragmented = false;
6530 			entry = NEXT_TX(entry);
6531 			txb = &tnapi->tx_buffers[entry];
6532 		}
6533 	}
6534 }
6535 
6536 /* Workaround 4GB and 40-bit hardware DMA bugs. */
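/* The strategy: copy the offending skb into a freshly allocated linear
 * buffer, whose new mapping will very likely avoid the problematic
 * address range; on 5701 the copy is also realigned to a 4-byte boundary.
 */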
6537 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6538 				       struct sk_buff **pskb,
6539 				       u32 *entry, u32 *budget,
6540 				       u32 base_flags, u32 mss, u32 vlan)
6541 {
6542 	struct tg3 *tp = tnapi->tp;
6543 	struct sk_buff *new_skb, *skb = *pskb;
6544 	dma_addr_t new_addr = 0;
6545 	int ret = 0;
6546 
6547 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6548 		new_skb = skb_copy(skb, GFP_ATOMIC);
6549 	else {
6550 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
6551 
6552 		new_skb = skb_copy_expand(skb,
6553 					  skb_headroom(skb) + more_headroom,
6554 					  skb_tailroom(skb), GFP_ATOMIC);
6555 	}
6556 
6557 	if (!new_skb) {
6558 		ret = -1;
6559 	} else {
6560 		/* New SKB is guaranteed to be linear. */
6561 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6562 					  PCI_DMA_TODEVICE);
6563 		/* Make sure the mapping succeeded */
6564 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6565 			dev_kfree_skb(new_skb);
6566 			ret = -1;
6567 		} else {
6568 			u32 save_entry = *entry;
6569 
6570 			base_flags |= TXD_FLAG_END;
6571 
6572 			tnapi->tx_buffers[*entry].skb = new_skb;
6573 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6574 					   mapping, new_addr);
6575 
6576 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6577 					    new_skb->len, base_flags,
6578 					    mss, vlan)) {
6579 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
6580 				dev_kfree_skb(new_skb);
6581 				ret = -1;
6582 			}
6583 		}
6584 	}
6585 
6586 	dev_kfree_skb(skb);
6587 	*pskb = new_skb;
6588 	return ret;
6589 }
6590 
6591 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6592 
6593 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6594  * TSO header is greater than 80 bytes.
6595  */
6596 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6597 {
6598 	struct sk_buff *segs, *nskb;
6599 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
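	/* The factor of three is a conservative, not exact, guess at how
	 * many tx BDs each resulting segment may consume.
	 */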
6600 
6601 	/* Estimate the number of fragments in the worst case */
6602 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6603 		netif_stop_queue(tp->dev);
6604 
6605 		/* netif_tx_stop_queue() must be done before checking
6606 		 * tx index in tg3_tx_avail() below, because in
6607 		 * tg3_tx(), we update tx index before checking for
6608 		 * netif_tx_queue_stopped().
6609 		 */
6610 		smp_mb();
6611 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6612 			return NETDEV_TX_BUSY;
6613 
6614 		netif_wake_queue(tp->dev);
6615 	}
6616 
6617 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6618 	if (IS_ERR(segs))
6619 		goto tg3_tso_bug_end;
6620 
6621 	do {
6622 		nskb = segs;
6623 		segs = segs->next;
6624 		nskb->next = NULL;
6625 		tg3_start_xmit(nskb, tp->dev);
6626 	} while (segs);
6627 
6628 tg3_tso_bug_end:
6629 	dev_kfree_skb(skb);
6630 
6631 	return NETDEV_TX_OK;
6632 }
6633 
6634 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6635  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6636  */
6637 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6638 {
6639 	struct tg3 *tp = netdev_priv(dev);
6640 	u32 len, entry, base_flags, mss, vlan = 0;
6641 	u32 budget;
6642 	int i = -1, would_hit_hwbug;
6643 	dma_addr_t mapping;
6644 	struct tg3_napi *tnapi;
6645 	struct netdev_queue *txq;
6646 	unsigned int last;
6647 
6648 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6649 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6650 	if (tg3_flag(tp, ENABLE_TSS))
6651 		tnapi++;
6652 
6653 	budget = tg3_tx_avail(tnapi);
6654 
6655 	/* We are running in BH disabled context with netif_tx_lock
6656 	 * and TX reclaim runs via tp->napi.poll inside of a software
6657 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
6658 	 * no IRQ context deadlocks to worry about either.  Rejoice!
6659 	 */
6660 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6661 		if (!netif_tx_queue_stopped(txq)) {
6662 			netif_tx_stop_queue(txq);
6663 
6664 			/* This is a hard error, log it. */
6665 			netdev_err(dev,
6666 				   "BUG! Tx Ring full when queue awake!\n");
6667 		}
6668 		return NETDEV_TX_BUSY;
6669 	}
6670 
6671 	entry = tnapi->tx_prod;
6672 	base_flags = 0;
6673 	if (skb->ip_summed == CHECKSUM_PARTIAL)
6674 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
6675 
6676 	mss = skb_shinfo(skb)->gso_size;
6677 	if (mss) {
6678 		struct iphdr *iph;
6679 		u32 tcp_opt_len, hdr_len;
6680 
6681 		if (skb_header_cloned(skb) &&
6682 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6683 			goto drop;
6684 
6685 		iph = ip_hdr(skb);
6686 		tcp_opt_len = tcp_optlen(skb);
6687 
6688 		if (skb_is_gso_v6(skb)) {
6689 			hdr_len = skb_headlen(skb) - ETH_HLEN;
6690 		} else {
6691 			u32 ip_tcp_len;
6692 
6693 			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6694 			hdr_len = ip_tcp_len + tcp_opt_len;
6695 
6696 			iph->check = 0;
6697 			iph->tot_len = htons(mss + hdr_len);
6698 		}
6699 
6700 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6701 		    tg3_flag(tp, TSO_BUG))
6702 			return tg3_tso_bug(tp, skb);
6703 
6704 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6705 			       TXD_FLAG_CPU_POST_DMA);
6706 
6707 		if (tg3_flag(tp, HW_TSO_1) ||
6708 		    tg3_flag(tp, HW_TSO_2) ||
6709 		    tg3_flag(tp, HW_TSO_3)) {
6710 			tcp_hdr(skb)->check = 0;
6711 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6712 		} else
6713 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6714 								 iph->daddr, 0,
6715 								 IPPROTO_TCP,
6716 								 0);
6717 
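		/* The hardware TSO variants want the header length encoded
		 * into the mss field and/or base_flags; the bit layout
		 * differs per chip generation, as the cases below show.
		 */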
6718 		if (tg3_flag(tp, HW_TSO_3)) {
6719 			mss |= (hdr_len & 0xc) << 12;
6720 			if (hdr_len & 0x10)
6721 				base_flags |= 0x00000010;
6722 			base_flags |= (hdr_len & 0x3e0) << 5;
6723 		} else if (tg3_flag(tp, HW_TSO_2))
6724 			mss |= hdr_len << 9;
6725 		else if (tg3_flag(tp, HW_TSO_1) ||
6726 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6727 			if (tcp_opt_len || iph->ihl > 5) {
6728 				int tsflags;
6729 
6730 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6731 				mss |= (tsflags << 11);
6732 			}
6733 		} else {
6734 			if (tcp_opt_len || iph->ihl > 5) {
6735 				int tsflags;
6736 
6737 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6738 				base_flags |= tsflags << 12;
6739 			}
6740 		}
6741 	}
6742 
6743 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6744 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
6745 		base_flags |= TXD_FLAG_JMB_PKT;
6746 
6747 	if (vlan_tx_tag_present(skb)) {
6748 		base_flags |= TXD_FLAG_VLAN;
6749 		vlan = vlan_tx_tag_get(skb);
6750 	}
6751 
6752 	len = skb_headlen(skb);
6753 
6754 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6755 	if (pci_dma_mapping_error(tp->pdev, mapping))
6756 		goto drop;
6757 
6759 	tnapi->tx_buffers[entry].skb = skb;
6760 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6761 
6762 	would_hit_hwbug = 0;
6763 
6764 	if (tg3_flag(tp, 5701_DMA_BUG))
6765 		would_hit_hwbug = 1;
6766 
6767 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6768 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6769 			    mss, vlan)) {
6770 		would_hit_hwbug = 1;
6771 	/* Now loop through additional data fragments, and queue them. */
6772 	} else if (skb_shinfo(skb)->nr_frags > 0) {
6773 		u32 tmp_mss = mss;
6774 
6775 		if (!tg3_flag(tp, HW_TSO_1) &&
6776 		    !tg3_flag(tp, HW_TSO_2) &&
6777 		    !tg3_flag(tp, HW_TSO_3))
6778 			tmp_mss = 0;
6779 
6780 		last = skb_shinfo(skb)->nr_frags - 1;
6781 		for (i = 0; i <= last; i++) {
6782 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6783 
6784 			len = skb_frag_size(frag);
6785 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6786 						   len, DMA_TO_DEVICE);
6787 
6788 			tnapi->tx_buffers[entry].skb = NULL;
6789 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6790 					   mapping);
6791 			if (dma_mapping_error(&tp->pdev->dev, mapping))
6792 				goto dma_error;
6793 
6794 			if (!budget ||
6795 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6796 					    len, base_flags |
6797 					    ((i == last) ? TXD_FLAG_END : 0),
6798 					    tmp_mss, vlan)) {
6799 				would_hit_hwbug = 1;
6800 				break;
6801 			}
6802 		}
6803 	}
6804 
6805 	if (would_hit_hwbug) {
6806 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6807 
6808 		/* If the workaround fails due to memory/mapping
6809 		 * failure, silently drop this packet.
6810 		 */
6811 		entry = tnapi->tx_prod;
6812 		budget = tg3_tx_avail(tnapi);
6813 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6814 						base_flags, mss, vlan))
6815 			goto drop_nofree;
6816 	}
6817 
6818 	skb_tx_timestamp(skb);
6819 
6820 	/* Packets are ready, update Tx producer idx local and on card. */
6821 	tw32_tx_mbox(tnapi->prodmbox, entry);
6822 
6823 	tnapi->tx_prod = entry;
6824 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6825 		netif_tx_stop_queue(txq);
6826 
6827 		/* netif_tx_stop_queue() must be done before checking
6828 		 * tx index in tg3_tx_avail() below, because in
6829 		 * tg3_tx(), we update tx index before checking for
6830 		 * netif_tx_queue_stopped().
6831 		 */
6832 		smp_mb();
6833 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6834 			netif_tx_wake_queue(txq);
6835 	}
6836 
6837 	mmiowb();
6838 	return NETDEV_TX_OK;
6839 
6840 dma_error:
6841 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6842 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6843 drop:
6844 	dev_kfree_skb(skb);
6845 drop_nofree:
6846 	tp->tx_dropped++;
6847 	return NETDEV_TX_OK;
6848 }
6849 
6850 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6851 {
6852 	if (enable) {
6853 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6854 				  MAC_MODE_PORT_MODE_MASK);
6855 
6856 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6857 
6858 		if (!tg3_flag(tp, 5705_PLUS))
6859 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6860 
6861 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6862 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6863 		else
6864 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6865 	} else {
6866 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6867 
6868 		if (tg3_flag(tp, 5705_PLUS) ||
6869 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6870 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6871 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6872 	}
6873 
6874 	tw32(MAC_MODE, tp->mac_mode);
6875 	udelay(40);
6876 }
6877 
6878 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6879 {
6880 	u32 val, bmcr, mac_mode, ptest = 0;
6881 
6882 	tg3_phy_toggle_apd(tp, false);
6883 	tg3_phy_toggle_automdix(tp, 0);
6884 
6885 	if (extlpbk && tg3_phy_set_extloopbk(tp))
6886 		return -EIO;
6887 
6888 	bmcr = BMCR_FULLDPLX;
6889 	switch (speed) {
6890 	case SPEED_10:
6891 		break;
6892 	case SPEED_100:
6893 		bmcr |= BMCR_SPEED100;
6894 		break;
6895 	case SPEED_1000:
6896 	default:
6897 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6898 			speed = SPEED_100;
6899 			bmcr |= BMCR_SPEED100;
6900 		} else {
6901 			speed = SPEED_1000;
6902 			bmcr |= BMCR_SPEED1000;
6903 		}
6904 	}
6905 
6906 	if (extlpbk) {
6907 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6908 			tg3_readphy(tp, MII_CTRL1000, &val);
6909 			val |= CTL1000_AS_MASTER |
6910 			       CTL1000_ENABLE_MASTER;
6911 			tg3_writephy(tp, MII_CTRL1000, val);
6912 		} else {
6913 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6914 				MII_TG3_FET_PTEST_TRIM_2;
6915 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6916 		}
6917 	} else
6918 		bmcr |= BMCR_LOOPBACK;
6919 
6920 	tg3_writephy(tp, MII_BMCR, bmcr);
6921 
6922 	/* The write needs to be flushed for the FETs */
6923 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6924 		tg3_readphy(tp, MII_BMCR, &bmcr);
6925 
6926 	udelay(40);
6927 
6928 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6929 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6930 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6931 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
6932 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
6933 
6934 		/* The write needs to be flushed for the AC131 */
6935 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6936 	}
6937 
6938 	/* Reset to prevent losing 1st rx packet intermittently */
6939 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6940 	    tg3_flag(tp, 5780_CLASS)) {
6941 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6942 		udelay(10);
6943 		tw32_f(MAC_RX_MODE, tp->rx_mode);
6944 	}
6945 
6946 	mac_mode = tp->mac_mode &
6947 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6948 	if (speed == SPEED_1000)
6949 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
6950 	else
6951 		mac_mode |= MAC_MODE_PORT_MODE_MII;
6952 
6953 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6954 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6955 
6956 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
6957 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
6958 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6959 			mac_mode |= MAC_MODE_LINK_POLARITY;
6960 
6961 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
6962 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6963 	}
6964 
6965 	tw32(MAC_MODE, mac_mode);
6966 	udelay(40);
6967 
6968 	return 0;
6969 }
6970 
6971 static void tg3_set_loopback(struct net_device *dev, u32 features)
6972 {
6973 	struct tg3 *tp = netdev_priv(dev);
6974 
6975 	if (features & NETIF_F_LOOPBACK) {
6976 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6977 			return;
6978 
6979 		spin_lock_bh(&tp->lock);
6980 		tg3_mac_loopback(tp, true);
6981 		netif_carrier_on(tp->dev);
6982 		spin_unlock_bh(&tp->lock);
6983 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6984 	} else {
6985 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6986 			return;
6987 
6988 		spin_lock_bh(&tp->lock);
6989 		tg3_mac_loopback(tp, false);
6990 		/* Force link status check */
6991 		tg3_setup_phy(tp, 1);
6992 		spin_unlock_bh(&tp->lock);
6993 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6994 	}
6995 }
6996 
6997 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6998 {
6999 	struct tg3 *tp = netdev_priv(dev);
7000 
7001 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7002 		features &= ~NETIF_F_ALL_TSO;
7003 
7004 	return features;
7005 }
7006 
7007 static int tg3_set_features(struct net_device *dev, u32 features)
7008 {
7009 	u32 changed = dev->features ^ features;
7010 
7011 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7012 		tg3_set_loopback(dev, features);
7013 
7014 	return 0;
7015 }
7016 
7017 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7018 			       int new_mtu)
7019 {
7020 	dev->mtu = new_mtu;
7021 
7022 	if (new_mtu > ETH_DATA_LEN) {
7023 		if (tg3_flag(tp, 5780_CLASS)) {
7024 			netdev_update_features(dev);
7025 			tg3_flag_clear(tp, TSO_CAPABLE);
7026 		} else {
7027 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
7028 		}
7029 	} else {
7030 		if (tg3_flag(tp, 5780_CLASS)) {
7031 			tg3_flag_set(tp, TSO_CAPABLE);
7032 			netdev_update_features(dev);
7033 		}
7034 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7035 	}
7036 }
7037 
7038 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7039 {
7040 	struct tg3 *tp = netdev_priv(dev);
7041 	int err;
7042 
7043 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7044 		return -EINVAL;
7045 
7046 	if (!netif_running(dev)) {
7047 		/* We'll just catch it later when the
7048 		 * device is brought up.
7049 		 */
7050 		tg3_set_mtu(dev, tp, new_mtu);
7051 		return 0;
7052 	}
7053 
7054 	tg3_phy_stop(tp);
7055 
7056 	tg3_netif_stop(tp);
7057 
7058 	tg3_full_lock(tp, 1);
7059 
7060 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7061 
7062 	tg3_set_mtu(dev, tp, new_mtu);
7063 
7064 	err = tg3_restart_hw(tp, 0);
7065 
7066 	if (!err)
7067 		tg3_netif_start(tp);
7068 
7069 	tg3_full_unlock(tp);
7070 
7071 	if (!err)
7072 		tg3_phy_start(tp);
7073 
7074 	return err;
7075 }
7076 
7077 static void tg3_rx_prodring_free(struct tg3 *tp,
7078 				 struct tg3_rx_prodring_set *tpr)
7079 {
7080 	int i;
7081 
7082 	if (tpr != &tp->napi[0].prodring) {
7083 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7084 		     i = (i + 1) & tp->rx_std_ring_mask)
7085 			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7086 					tp->rx_pkt_map_sz);
7087 
7088 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7089 			for (i = tpr->rx_jmb_cons_idx;
7090 			     i != tpr->rx_jmb_prod_idx;
7091 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7092 				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7093 						TG3_RX_JMB_MAP_SZ);
7094 			}
7095 		}
7096 
7097 		return;
7098 	}
7099 
7100 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7101 		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7102 				tp->rx_pkt_map_sz);
7103 
7104 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7105 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7106 			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7107 					TG3_RX_JMB_MAP_SZ);
7108 	}
7109 }
7110 
7111 /* Initialize rx rings for packet processing.
7112  *
7113  * The chip has been shut down and the driver detached from
7114  * the networking stack, so no interrupts or new tx packets will
7115  * end up in the driver.  tp->{tx,}lock are held and thus
7116  * we may not sleep.
7117  */
7118 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7119 				 struct tg3_rx_prodring_set *tpr)
7120 {
7121 	u32 i, rx_pkt_dma_sz;
7122 
7123 	tpr->rx_std_cons_idx = 0;
7124 	tpr->rx_std_prod_idx = 0;
7125 	tpr->rx_jmb_cons_idx = 0;
7126 	tpr->rx_jmb_prod_idx = 0;
7127 
7128 	if (tpr != &tp->napi[0].prodring) {
7129 		memset(&tpr->rx_std_buffers[0], 0,
7130 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7131 		if (tpr->rx_jmb_buffers)
7132 			memset(&tpr->rx_jmb_buffers[0], 0,
7133 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7134 		goto done;
7135 	}
7136 
7137 	/* Zero out all descriptors. */
7138 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7139 
7140 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7141 	if (tg3_flag(tp, 5780_CLASS) &&
7142 	    tp->dev->mtu > ETH_DATA_LEN)
7143 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7144 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7145 
7146 	/* Initialize invariants of the rings; we only set this
7147 	 * stuff once.  This works because the card does not
7148 	 * write into the rx buffer posting rings.
7149 	 */
7150 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7151 		struct tg3_rx_buffer_desc *rxd;
7152 
7153 		rxd = &tpr->rx_std[i];
7154 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7155 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7156 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7157 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7158 	}
7159 
7160 	/* Now allocate fresh SKBs for each rx ring. */
7161 	for (i = 0; i < tp->rx_pending; i++) {
7162 		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7163 			netdev_warn(tp->dev,
7164 				    "Using a smaller RX standard ring. Only "
7165 				    "%d out of %d buffers were allocated "
7166 				    "successfully\n", i, tp->rx_pending);
7167 			if (i == 0)
7168 				goto initfail;
7169 			tp->rx_pending = i;
7170 			break;
7171 		}
7172 	}
7173 
7174 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7175 		goto done;
7176 
7177 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7178 
7179 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7180 		goto done;
7181 
7182 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7183 		struct tg3_rx_buffer_desc *rxd;
7184 
7185 		rxd = &tpr->rx_jmb[i].std;
7186 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7187 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7188 				  RXD_FLAG_JUMBO;
7189 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7190 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7191 	}
7192 
7193 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7194 		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7195 			netdev_warn(tp->dev,
7196 				    "Using a smaller RX jumbo ring. Only %d "
7197 				    "out of %d buffers were allocated "
7198 				    "successfully\n", i, tp->rx_jumbo_pending);
7199 			if (i == 0)
7200 				goto initfail;
7201 			tp->rx_jumbo_pending = i;
7202 			break;
7203 		}
7204 	}
7205 
7206 done:
7207 	return 0;
7208 
7209 initfail:
7210 	tg3_rx_prodring_free(tp, tpr);
7211 	return -ENOMEM;
7212 }
7213 
7214 static void tg3_rx_prodring_fini(struct tg3 *tp,
7215 				 struct tg3_rx_prodring_set *tpr)
7216 {
7217 	kfree(tpr->rx_std_buffers);
7218 	tpr->rx_std_buffers = NULL;
7219 	kfree(tpr->rx_jmb_buffers);
7220 	tpr->rx_jmb_buffers = NULL;
7221 	if (tpr->rx_std) {
7222 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7223 				  tpr->rx_std, tpr->rx_std_mapping);
7224 		tpr->rx_std = NULL;
7225 	}
7226 	if (tpr->rx_jmb) {
7227 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7228 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7229 		tpr->rx_jmb = NULL;
7230 	}
7231 }
7232 
7233 static int tg3_rx_prodring_init(struct tg3 *tp,
7234 				struct tg3_rx_prodring_set *tpr)
7235 {
7236 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7237 				      GFP_KERNEL);
7238 	if (!tpr->rx_std_buffers)
7239 		return -ENOMEM;
7240 
7241 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7242 					 TG3_RX_STD_RING_BYTES(tp),
7243 					 &tpr->rx_std_mapping,
7244 					 GFP_KERNEL);
7245 	if (!tpr->rx_std)
7246 		goto err_out;
7247 
7248 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7249 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7250 					      GFP_KERNEL);
7251 		if (!tpr->rx_jmb_buffers)
7252 			goto err_out;
7253 
7254 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7255 						 TG3_RX_JMB_RING_BYTES(tp),
7256 						 &tpr->rx_jmb_mapping,
7257 						 GFP_KERNEL);
7258 		if (!tpr->rx_jmb)
7259 			goto err_out;
7260 	}
7261 
7262 	return 0;
7263 
7264 err_out:
7265 	tg3_rx_prodring_fini(tp, tpr);
7266 	return -ENOMEM;
7267 }
7268 
7269 /* Free up pending packets in all rx/tx rings.
7270  *
7271  * The chip has been shut down and the driver detached from
7272  * the networking, so no interrupts or new tx packets will
7273  * the networking stack, so no interrupts or new tx packets will
7274  * in an interrupt context and thus may sleep.
7275  */
7276 static void tg3_free_rings(struct tg3 *tp)
7277 {
7278 	int i, j;
7279 
7280 	for (j = 0; j < tp->irq_cnt; j++) {
7281 		struct tg3_napi *tnapi = &tp->napi[j];
7282 
7283 		tg3_rx_prodring_free(tp, &tnapi->prodring);
7284 
7285 		if (!tnapi->tx_buffers)
7286 			continue;
7287 
7288 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7289 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7290 
7291 			if (!skb)
7292 				continue;
7293 
7294 			tg3_tx_skb_unmap(tnapi, i,
7295 					 skb_shinfo(skb)->nr_frags - 1);
7296 
7297 			dev_kfree_skb_any(skb);
7298 		}
7299 	}
7300 }
7301 
7302 /* Initialize tx/rx rings for packet processing.
7303  *
7304  * The chip has been shut down and the driver detached from
7305  * the networking, so no interrupts or new tx packets will
7306  * the networking stack, so no interrupts or new tx packets will
7307  * we may not sleep.
7308  */
7309 static int tg3_init_rings(struct tg3 *tp)
7310 {
7311 	int i;
7312 
7313 	/* Free up all the SKBs. */
7314 	tg3_free_rings(tp);
7315 
7316 	for (i = 0; i < tp->irq_cnt; i++) {
7317 		struct tg3_napi *tnapi = &tp->napi[i];
7318 
7319 		tnapi->last_tag = 0;
7320 		tnapi->last_irq_tag = 0;
7321 		tnapi->hw_status->status = 0;
7322 		tnapi->hw_status->status_tag = 0;
7323 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7324 
7325 		tnapi->tx_prod = 0;
7326 		tnapi->tx_cons = 0;
7327 		if (tnapi->tx_ring)
7328 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7329 
7330 		tnapi->rx_rcb_ptr = 0;
7331 		if (tnapi->rx_rcb)
7332 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7333 
7334 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7335 			tg3_free_rings(tp);
7336 			return -ENOMEM;
7337 		}
7338 	}
7339 
7340 	return 0;
7341 }
7342 
7343 /*
7344  * Must not be invoked with interrupt sources disabled and
7345  * the hardware shut down.
7346  */
7347 static void tg3_free_consistent(struct tg3 *tp)
7348 {
7349 	int i;
7350 
7351 	for (i = 0; i < tp->irq_cnt; i++) {
7352 		struct tg3_napi *tnapi = &tp->napi[i];
7353 
7354 		if (tnapi->tx_ring) {
7355 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7356 				tnapi->tx_ring, tnapi->tx_desc_mapping);
7357 			tnapi->tx_ring = NULL;
7358 		}
7359 
7360 		kfree(tnapi->tx_buffers);
7361 		tnapi->tx_buffers = NULL;
7362 
7363 		if (tnapi->rx_rcb) {
7364 			dma_free_coherent(&tp->pdev->dev,
7365 					  TG3_RX_RCB_RING_BYTES(tp),
7366 					  tnapi->rx_rcb,
7367 					  tnapi->rx_rcb_mapping);
7368 			tnapi->rx_rcb = NULL;
7369 		}
7370 
7371 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
7372 
7373 		if (tnapi->hw_status) {
7374 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7375 					  tnapi->hw_status,
7376 					  tnapi->status_mapping);
7377 			tnapi->hw_status = NULL;
7378 		}
7379 	}
7380 
7381 	if (tp->hw_stats) {
7382 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7383 				  tp->hw_stats, tp->stats_mapping);
7384 		tp->hw_stats = NULL;
7385 	}
7386 }
7387 
7388 /*
7389  * Must only be invoked with interrupt sources disabled and
7390  * the hardware shut down.  Can sleep.
7391  */
7392 static int tg3_alloc_consistent(struct tg3 *tp)
7393 {
7394 	int i;
7395 
7396 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7397 					  sizeof(struct tg3_hw_stats),
7398 					  &tp->stats_mapping,
7399 					  GFP_KERNEL);
7400 	if (!tp->hw_stats)
7401 		goto err_out;
7402 
7403 	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7404 
7405 	for (i = 0; i < tp->irq_cnt; i++) {
7406 		struct tg3_napi *tnapi = &tp->napi[i];
7407 		struct tg3_hw_status *sblk;
7408 
7409 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7410 						      TG3_HW_STATUS_SIZE,
7411 						      &tnapi->status_mapping,
7412 						      GFP_KERNEL);
7413 		if (!tnapi->hw_status)
7414 			goto err_out;
7415 
7416 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7417 		sblk = tnapi->hw_status;
7418 
7419 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7420 			goto err_out;
7421 
7422 		/* If multivector TSS is enabled, vector 0 does not handle
7423 		 * tx interrupts.  Don't allocate any resources for it.
7424 		 */
7425 		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7426 		    (i && tg3_flag(tp, ENABLE_TSS))) {
7427 			tnapi->tx_buffers = kzalloc(
7428 					       sizeof(struct tg3_tx_ring_info) *
7429 					       TG3_TX_RING_SIZE, GFP_KERNEL);
7430 			if (!tnapi->tx_buffers)
7431 				goto err_out;
7432 
7433 			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7434 							    TG3_TX_RING_BYTES,
7435 							&tnapi->tx_desc_mapping,
7436 							    GFP_KERNEL);
7437 			if (!tnapi->tx_ring)
7438 				goto err_out;
7439 		}
7440 
7441 		/*
7442 		 * When RSS is enabled, the status block format changes
7443 		 * slightly.  The "rx_jumbo_consumer", "reserved",
7444 		 * and "rx_mini_consumer" members get mapped to the
7445 		 * other three rx return ring producer indexes.
7446 		 */
7447 		switch (i) {
7448 		default:
7449 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7450 			break;
7451 		case 2:
7452 			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7453 			break;
7454 		case 3:
7455 			tnapi->rx_rcb_prod_idx = &sblk->reserved;
7456 			break;
7457 		case 4:
7458 			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7459 			break;
7460 		}
7461 
7462 		/*
7463 		 * If multivector RSS is enabled, vector 0 does not handle
7464 		 * rx or tx interrupts.  Don't allocate any resources for it.
7465 		 */
7466 		if (!i && tg3_flag(tp, ENABLE_RSS))
7467 			continue;
7468 
7469 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7470 						   TG3_RX_RCB_RING_BYTES(tp),
7471 						   &tnapi->rx_rcb_mapping,
7472 						   GFP_KERNEL);
7473 		if (!tnapi->rx_rcb)
7474 			goto err_out;
7475 
7476 		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7477 	}
7478 
7479 	return 0;
7480 
7481 err_out:
7482 	tg3_free_consistent(tp);
7483 	return -ENOMEM;
7484 }
7485 
7486 #define MAX_WAIT_CNT 1000
7487 
7488 /* To stop a block, clear the enable bit and poll until it
7489  * clears.  tp->lock is held.
7490  */
7491 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7492 {
7493 	unsigned int i;
7494 	u32 val;
7495 
7496 	if (tg3_flag(tp, 5705_PLUS)) {
7497 		switch (ofs) {
7498 		case RCVLSC_MODE:
7499 		case DMAC_MODE:
7500 		case MBFREE_MODE:
7501 		case BUFMGR_MODE:
7502 		case MEMARB_MODE:
7503 			/* We can't enable/disable these bits on the
7504 			 * 5705/5750, so just report success.
7505 			 */
7506 			return 0;
7507 
7508 		default:
7509 			break;
7510 		}
7511 	}
7512 
7513 	val = tr32(ofs);
7514 	val &= ~enable_bit;
7515 	tw32_f(ofs, val);
7516 
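	/* Poll for the enable bit to clear: up to MAX_WAIT_CNT iterations
	 * of 100 usec each, i.e. roughly 100 msec in total.
	 */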
7517 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7518 		udelay(100);
7519 		val = tr32(ofs);
7520 		if ((val & enable_bit) == 0)
7521 			break;
7522 	}
7523 
7524 	if (i == MAX_WAIT_CNT && !silent) {
7525 		dev_err(&tp->pdev->dev,
7526 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7527 			ofs, enable_bit);
7528 		return -ENODEV;
7529 	}
7530 
7531 	return 0;
7532 }
7533 
7534 /* tp->lock is held. */
7535 static int tg3_abort_hw(struct tg3 *tp, int silent)
7536 {
7537 	int i, err;
7538 
7539 	tg3_disable_ints(tp);
7540 
7541 	tp->rx_mode &= ~RX_MODE_ENABLE;
7542 	tw32_f(MAC_RX_MODE, tp->rx_mode);
7543 	udelay(10);
7544 
7545 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7546 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7547 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7548 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7549 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7550 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7551 
7552 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7553 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7554 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7555 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7556 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7557 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7558 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7559 
7560 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7561 	tw32_f(MAC_MODE, tp->mac_mode);
7562 	udelay(40);
7563 
7564 	tp->tx_mode &= ~TX_MODE_ENABLE;
7565 	tw32_f(MAC_TX_MODE, tp->tx_mode);
7566 
7567 	for (i = 0; i < MAX_WAIT_CNT; i++) {
7568 		udelay(100);
7569 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7570 			break;
7571 	}
7572 	if (i >= MAX_WAIT_CNT) {
7573 		dev_err(&tp->pdev->dev,
7574 			"%s timed out, TX_MODE_ENABLE will not clear "
7575 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7576 		err |= -ENODEV;
7577 	}
7578 
7579 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7580 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7581 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7582 
7583 	tw32(FTQ_RESET, 0xffffffff);
7584 	tw32(FTQ_RESET, 0x00000000);
7585 
7586 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7587 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7588 
7589 	for (i = 0; i < tp->irq_cnt; i++) {
7590 		struct tg3_napi *tnapi = &tp->napi[i];
7591 		if (tnapi->hw_status)
7592 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7593 	}
7594 	if (tp->hw_stats)
7595 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7596 
7597 	return err;
7598 }
7599 
7600 /* Save PCI command register before chip reset */
7601 static void tg3_save_pci_state(struct tg3 *tp)
7602 {
7603 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7604 }
7605 
7606 /* Restore PCI state after chip reset */
7607 static void tg3_restore_pci_state(struct tg3 *tp)
7608 {
7609 	u32 val;
7610 
7611 	/* Re-enable indirect register accesses. */
7612 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7613 			       tp->misc_host_ctrl);
7614 
7615 	/* Set MAX PCI retry to zero. */
7616 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7617 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7618 	    tg3_flag(tp, PCIX_MODE))
7619 		val |= PCISTATE_RETRY_SAME_DMA;
7620 	/* Allow reads and writes to the APE register and memory space. */
7621 	if (tg3_flag(tp, ENABLE_APE))
7622 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7623 		       PCISTATE_ALLOW_APE_SHMEM_WR |
7624 		       PCISTATE_ALLOW_APE_PSPACE_WR;
7625 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7626 
7627 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7628 
7629 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7630 		if (tg3_flag(tp, PCI_EXPRESS))
7631 			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7632 		else {
7633 			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7634 					      tp->pci_cacheline_sz);
7635 			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7636 					      tp->pci_lat_timer);
7637 		}
7638 	}
7639 
7640 	/* Make sure PCI-X relaxed ordering bit is clear. */
7641 	if (tg3_flag(tp, PCIX_MODE)) {
7642 		u16 pcix_cmd;
7643 
7644 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7645 				     &pcix_cmd);
7646 		pcix_cmd &= ~PCI_X_CMD_ERO;
7647 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7648 				      pcix_cmd);
7649 	}
7650 
7651 	if (tg3_flag(tp, 5780_CLASS)) {
7652 
7653 		/* Chip reset on 5780 will reset MSI enable bit,
7654 		 * so we need to restore it.
7655 		 */
7656 		if (tg3_flag(tp, USING_MSI)) {
7657 			u16 ctrl;
7658 
7659 			pci_read_config_word(tp->pdev,
7660 					     tp->msi_cap + PCI_MSI_FLAGS,
7661 					     &ctrl);
7662 			pci_write_config_word(tp->pdev,
7663 					      tp->msi_cap + PCI_MSI_FLAGS,
7664 					      ctrl | PCI_MSI_FLAGS_ENABLE);
7665 			val = tr32(MSGINT_MODE);
7666 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7667 		}
7668 	}
7669 }
7670 
7671 /* tp->lock is held. */
7672 static int tg3_chip_reset(struct tg3 *tp)
7673 {
7674 	u32 val;
7675 	void (*write_op)(struct tg3 *, u32, u32);
7676 	int i, err;
7677 
7678 	tg3_nvram_lock(tp);
7679 
7680 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7681 
7682 	/* No matching tg3_nvram_unlock() after this because
7683 	 * chip reset below will undo the nvram lock.
7684 	 */
7685 	tp->nvram_lock_cnt = 0;
7686 
7687 	/* GRC_MISC_CFG core clock reset will clear the memory
7688 	 * enable bit in PCI register 4 and the MSI enable bit
7689 	 * on some chips, so we save relevant registers here.
7690 	 */
7691 	tg3_save_pci_state(tp);
7692 
7693 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7694 	    tg3_flag(tp, 5755_PLUS))
7695 		tw32(GRC_FASTBOOT_PC, 0);
7696 
7697 	/*
7698 	 * We must avoid the readl() that normally takes place.
7699 	 * It can lock up machines, cause machine checks, and other
7700 	 * fun things.  So temporarily disable the 5701
7701 	 * hardware workaround while we do the reset.
7702 	 */
7703 	write_op = tp->write32;
7704 	if (write_op == tg3_write_flush_reg32)
7705 		tp->write32 = tg3_write32;
7706 
7707 	/* Prevent the irq handler from reading or writing PCI registers
7708 	 * during chip reset when the memory enable bit in the PCI command
7709 	 * register may be cleared.  The chip does not generate interrupts
7710 	 * at this time, but the irq handler may still be called due to irq
7711 	 * sharing or irqpoll.
7712 	 */
7713 	tg3_flag_set(tp, CHIP_RESETTING);
7714 	for (i = 0; i < tp->irq_cnt; i++) {
7715 		struct tg3_napi *tnapi = &tp->napi[i];
7716 		if (tnapi->hw_status) {
7717 			tnapi->hw_status->status = 0;
7718 			tnapi->hw_status->status_tag = 0;
7719 		}
7720 		tnapi->last_tag = 0;
7721 		tnapi->last_irq_tag = 0;
7722 	}
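	/* Ensure the zeroed status blocks and the CHIP_RESETTING flag set
	 * above are visible to an interrupt handler that may still run
	 * (via irq sharing or irqpoll) before we synchronize with it.
	 */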
7723 	smp_mb();
7724 
7725 	for (i = 0; i < tp->irq_cnt; i++)
7726 		synchronize_irq(tp->napi[i].irq_vec);
7727 
7728 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7729 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7730 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7731 	}
7732 
7733 	/* do the reset */
7734 	val = GRC_MISC_CFG_CORECLK_RESET;
7735 
7736 	if (tg3_flag(tp, PCI_EXPRESS)) {
7737 		/* Force PCIe 1.0a mode */
7738 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7739 		    !tg3_flag(tp, 57765_PLUS) &&
7740 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
7741 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7742 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7743 
7744 		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7745 			tw32(GRC_MISC_CFG, (1 << 29));
7746 			val |= (1 << 29);
7747 		}
7748 	}
7749 
7750 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7751 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7752 		tw32(GRC_VCPU_EXT_CTRL,
7753 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7754 	}
7755 
7756 	/* Manage gphy power for all CPMU-absent PCIe devices. */
7757 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7758 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7759 
7760 	tw32(GRC_MISC_CFG, val);
7761 
7762 	/* restore 5701 hardware bug workaround write method */
7763 	tp->write32 = write_op;
7764 
7765 	/* Unfortunately, we have to delay before the PCI read back.
7766 	 * Some 575X chips will not even respond to a PCI cfg access
7767 	 * when the reset command is given to the chip.
7768 	 *
7769 	 * How do these hardware designers expect things to work
7770 	 * properly if the PCI write is posted for a long period
7771 	 * of time?  It is always necessary to have some method by
7772 	 * which a register read back can occur to push the write
7773 	 * out which does the reset.
7774 	 *
7775 	 * For most tg3 variants the trick below has been working.
7776 	 * Ho hum...
7777 	 */
7778 	udelay(120);
7779 
7780 	/* Flush PCI posted writes.  The normal MMIO registers
7781 	 * are inaccessible at this time so this is the only
7782 	 * way to do this reliably (actually, this is no longer
7783 	 * the case, see above).  I tried to use indirect
7784 	 * register read/write but this upset some 5701 variants.
7785 	 */
7786 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7787 
7788 	udelay(120);
7789 
7790 	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7791 		u16 val16;
7792 
7793 		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7794 			int i;
7795 			u32 cfg_val;
7796 
7797 			/* Wait up to 500 msec for link training to complete. */
7798 			for (i = 0; i < 5000; i++)
7799 				udelay(100);
7800 
7801 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7802 			pci_write_config_dword(tp->pdev, 0xc4,
7803 					       cfg_val | (1 << 15));
7804 		}
7805 
7806 		/* Clear the "no snoop" and "relaxed ordering" bits. */
7807 		pci_read_config_word(tp->pdev,
7808 				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7809 				     &val16);
7810 		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7811 			   PCI_EXP_DEVCTL_NOSNOOP_EN);
7812 		/*
7813 		 * Older PCIe devices only support the 128 byte
7814 		 * MPS setting.  Enforce the restriction.
7815 		 */
7816 		if (!tg3_flag(tp, CPMU_PRESENT))
7817 			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7818 		pci_write_config_word(tp->pdev,
7819 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7820 				      val16);
7821 
7822 		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7823 
7824 		/* Clear error status */
7825 		pci_write_config_word(tp->pdev,
7826 				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7827 				      PCI_EXP_DEVSTA_CED |
7828 				      PCI_EXP_DEVSTA_NFED |
7829 				      PCI_EXP_DEVSTA_FED |
7830 				      PCI_EXP_DEVSTA_URD);
7831 	}
7832 
7833 	tg3_restore_pci_state(tp);
7834 
7835 	tg3_flag_clear(tp, CHIP_RESETTING);
7836 	tg3_flag_clear(tp, ERROR_PROCESSED);
7837 
7838 	val = 0;
7839 	if (tg3_flag(tp, 5780_CLASS))
7840 		val = tr32(MEMARB_MODE);
7841 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7842 
7843 	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7844 		tg3_stop_fw(tp);
7845 		tw32(0x5000, 0x400);
7846 	}
7847 
7848 	tw32(GRC_MODE, tp->grc_mode);
7849 
7850 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7851 		val = tr32(0xc4);
7852 
7853 		tw32(0xc4, val | (1 << 15));
7854 	}
7855 
7856 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7857 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7858 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7859 		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7860 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7861 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7862 	}
7863 
7864 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7865 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7866 		val = tp->mac_mode;
7867 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7868 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7869 		val = tp->mac_mode;
7870 	} else
7871 		val = 0;
7872 
7873 	tw32_f(MAC_MODE, val);
7874 	udelay(40);
7875 
7876 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7877 
7878 	err = tg3_poll_fw(tp);
7879 	if (err)
7880 		return err;
7881 
7882 	tg3_mdio_start(tp);
7883 
7884 	if (tg3_flag(tp, PCI_EXPRESS) &&
7885 	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7886 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7887 	    !tg3_flag(tp, 57765_PLUS)) {
7888 		val = tr32(0x7c00);
7889 
7890 		tw32(0x7c00, val | (1 << 25));
7891 	}
7892 
7893 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7894 		val = tr32(TG3_CPMU_CLCK_ORIDE);
7895 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7896 	}
7897 
7898 	/* Reprobe ASF enable state.  */
7899 	tg3_flag_clear(tp, ENABLE_ASF);
7900 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7901 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7902 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7903 		u32 nic_cfg;
7904 
7905 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7906 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7907 			tg3_flag_set(tp, ENABLE_ASF);
7908 			tp->last_event_jiffies = jiffies;
7909 			if (tg3_flag(tp, 5750_PLUS))
7910 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7911 		}
7912 	}
7913 
7914 	return 0;
7915 }
7916 
7917 /* tp->lock is held. */
7918 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7919 {
7920 	int err;
7921 
7922 	tg3_stop_fw(tp);
7923 
7924 	tg3_write_sig_pre_reset(tp, kind);
7925 
7926 	tg3_abort_hw(tp, silent);
7927 	err = tg3_chip_reset(tp);
7928 
7929 	__tg3_set_mac_addr(tp, 0);
7930 
7931 	tg3_write_sig_legacy(tp, kind);
7932 	tg3_write_sig_post_reset(tp, kind);
7933 
7934 	if (err)
7935 		return err;
7936 
7937 	return 0;
7938 }
7939 
7940 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7941 {
7942 	struct tg3 *tp = netdev_priv(dev);
7943 	struct sockaddr *addr = p;
7944 	int err = 0, skip_mac_1 = 0;
7945 
7946 	if (!is_valid_ether_addr(addr->sa_data))
7947 		return -EINVAL;
7948 
7949 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7950 
7951 	if (!netif_running(dev))
7952 		return 0;
7953 
7954 	if (tg3_flag(tp, ENABLE_ASF)) {
7955 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7956 
7957 		addr0_high = tr32(MAC_ADDR_0_HIGH);
7958 		addr0_low = tr32(MAC_ADDR_0_LOW);
7959 		addr1_high = tr32(MAC_ADDR_1_HIGH);
7960 		addr1_low = tr32(MAC_ADDR_1_LOW);
7961 
7962 		/* Skip MAC addr 1 if the ASF firmware is using it. */
7963 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7964 		    !(addr1_high == 0 && addr1_low == 0))
7965 			skip_mac_1 = 1;
7966 	}
7967 	spin_lock_bh(&tp->lock);
7968 	__tg3_set_mac_addr(tp, skip_mac_1);
7969 	spin_unlock_bh(&tp->lock);
7970 
7971 	return err;
7972 }
7973 
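/* Write one TG3_BDINFO ring control block into NIC SRAM: the 64-bit host
 * DMA address of the ring, the maxlen/flags word, and (on pre-5705 chips)
 * the NIC-local address of the descriptors.
 */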
7974 /* tp->lock is held. */
7975 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7976 			   dma_addr_t mapping, u32 maxlen_flags,
7977 			   u32 nic_addr)
7978 {
7979 	tg3_write_mem(tp,
7980 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7981 		      ((u64) mapping >> 32));
7982 	tg3_write_mem(tp,
7983 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7984 		      ((u64) mapping & 0xffffffff));
7985 	tg3_write_mem(tp,
7986 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7987 		       maxlen_flags);
7988 
7989 	if (!tg3_flag(tp, 5705_PLUS))
7990 		tg3_write_mem(tp,
7991 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7992 			      nic_addr);
7993 }
7994 
7995 static void __tg3_set_rx_mode(struct net_device *);
7996 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7997 {
7998 	int i;
7999 
8000 	if (!tg3_flag(tp, ENABLE_TSS)) {
8001 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8002 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8003 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8004 	} else {
8005 		tw32(HOSTCC_TXCOL_TICKS, 0);
8006 		tw32(HOSTCC_TXMAX_FRAMES, 0);
8007 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8008 	}
8009 
8010 	if (!tg3_flag(tp, ENABLE_RSS)) {
8011 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8012 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8013 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8014 	} else {
8015 		tw32(HOSTCC_RXCOL_TICKS, 0);
8016 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8017 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8018 	}
8019 
8020 	if (!tg3_flag(tp, 5705_PLUS)) {
8021 		u32 val = ec->stats_block_coalesce_usecs;
8022 
8023 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8024 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8025 
8026 		if (!netif_carrier_ok(tp->dev))
8027 			val = 0;
8028 
8029 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8030 	}
8031 
8032 	for (i = 0; i < tp->irq_cnt - 1; i++) {
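	/* Each additional MSI-X vector has its own block of coalescing
	 * registers, spaced 0x18 bytes apart starting at the _VEC1 offsets.
	 */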
8033 		u32 reg;
8034 
8035 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8036 		tw32(reg, ec->rx_coalesce_usecs);
8037 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8038 		tw32(reg, ec->rx_max_coalesced_frames);
8039 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8040 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8041 
8042 		if (tg3_flag(tp, ENABLE_TSS)) {
8043 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8044 			tw32(reg, ec->tx_coalesce_usecs);
8045 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8046 			tw32(reg, ec->tx_max_coalesced_frames);
8047 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8048 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8049 		}
8050 	}
8051 
8052 	for (; i < tp->irq_max - 1; i++) {
8053 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8054 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8055 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8056 
8057 		if (tg3_flag(tp, ENABLE_TSS)) {
8058 			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8059 			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8060 			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8061 		}
8062 	}
8063 }
8064 
8065 /* tp->lock is held. */
8066 static void tg3_rings_reset(struct tg3 *tp)
8067 {
8068 	int i;
8069 	u32 stblk, txrcb, rxrcb, limit;
8070 	struct tg3_napi *tnapi = &tp->napi[0];
8071 
8072 	/* Disable all transmit rings but the first. */
8073 	if (!tg3_flag(tp, 5705_PLUS))
8074 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8075 	else if (tg3_flag(tp, 5717_PLUS))
8076 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8077 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8078 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8079 	else
8080 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8081 
8082 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8083 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8084 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8085 			      BDINFO_FLAGS_DISABLED);
8086 
8087 
8088 	/* Disable all receive return rings but the first. */
8089 	if (tg3_flag(tp, 5717_PLUS))
8090 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8091 	else if (!tg3_flag(tp, 5705_PLUS))
8092 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8093 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8094 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8095 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8096 	else
8097 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8098 
8099 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8100 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8101 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8102 			      BDINFO_FLAGS_DISABLED);
8103 
8104 	/* Disable interrupts */
8105 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8106 	tp->napi[0].chk_msi_cnt = 0;
8107 	tp->napi[0].last_rx_cons = 0;
8108 	tp->napi[0].last_tx_cons = 0;
8109 
8110 	/* Zero mailbox registers. */
8111 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8112 		for (i = 1; i < tp->irq_max; i++) {
8113 			tp->napi[i].tx_prod = 0;
8114 			tp->napi[i].tx_cons = 0;
8115 			if (tg3_flag(tp, ENABLE_TSS))
8116 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8117 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8118 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8119 			tp->napi[i].chk_msi_cnt = 0;
8120 			tp->napi[i].last_rx_cons = 0;
8121 			tp->napi[i].last_tx_cons = 0;
8122 		}
8123 		if (!tg3_flag(tp, ENABLE_TSS))
8124 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8125 	} else {
8126 		tp->napi[0].tx_prod = 0;
8127 		tp->napi[0].tx_cons = 0;
8128 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8129 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8130 	}
8131 
8132 	/* Make sure the NIC-based send BD rings are disabled. */
8133 	if (!tg3_flag(tp, 5705_PLUS)) {
8134 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8135 		for (i = 0; i < 16; i++)
8136 			tw32_tx_mbox(mbox + i * 8, 0);
8137 	}
8138 
8139 	txrcb = NIC_SRAM_SEND_RCB;
8140 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8141 
8142 	/* Clear status block in ram. */
8143 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8144 
8145 	/* Set status block DMA address */
8146 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8147 	     ((u64) tnapi->status_mapping >> 32));
8148 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8149 	     ((u64) tnapi->status_mapping & 0xffffffff));
8150 
8151 	if (tnapi->tx_ring) {
8152 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8153 			       (TG3_TX_RING_SIZE <<
8154 				BDINFO_FLAGS_MAXLEN_SHIFT),
8155 			       NIC_SRAM_TX_BUFFER_DESC);
8156 		txrcb += TG3_BDINFO_SIZE;
8157 	}
8158 
8159 	if (tnapi->rx_rcb) {
8160 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8161 			       (tp->rx_ret_ring_mask + 1) <<
8162 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8163 		rxrcb += TG3_BDINFO_SIZE;
8164 	}
8165 
8166 	stblk = HOSTCC_STATBLCK_RING1;
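	/* Status block addresses for vectors 1..n are programmed into
	 * consecutive 64-bit register pairs starting at
	 * HOSTCC_STATBLCK_RING1, hence the stblk += 8 stride below.
	 */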
8167 
8168 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8169 		u64 mapping = (u64)tnapi->status_mapping;
8170 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8171 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8172 
8173 		/* Clear status block in ram. */
8174 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8175 
8176 		if (tnapi->tx_ring) {
8177 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8178 				       (TG3_TX_RING_SIZE <<
8179 					BDINFO_FLAGS_MAXLEN_SHIFT),
8180 				       NIC_SRAM_TX_BUFFER_DESC);
8181 			txrcb += TG3_BDINFO_SIZE;
8182 		}
8183 
8184 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8185 			       ((tp->rx_ret_ring_mask + 1) <<
8186 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8187 
8188 		stblk += 8;
8189 		rxrcb += TG3_BDINFO_SIZE;
8190 	}
8191 }
8192 
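/* Program the RX buffer descriptor replenish thresholds.  The NIC refills
 * its on-chip BD cache once the number of cached standard/jumbo BDs falls
 * below these watermarks, so the values are capped at half the per-chip
 * BD cache size and scale with the configured ring sizes.
 */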
8193 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8194 {
8195 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8196 
8197 	if (!tg3_flag(tp, 5750_PLUS) ||
8198 	    tg3_flag(tp, 5780_CLASS) ||
8199 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8200 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8201 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8202 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8203 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8204 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8205 	else
8206 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8207 
8208 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8209 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8210 
8211 	val = min(nic_rep_thresh, host_rep_thresh);
8212 	tw32(RCVBDI_STD_THRESH, val);
8213 
8214 	if (tg3_flag(tp, 57765_PLUS))
8215 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8216 
8217 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8218 		return;
8219 
8220 	if (!tg3_flag(tp, 5705_PLUS))
8221 		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8222 	else
8223 		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8224 
8225 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8226 
8227 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
8228 	tw32(RCVBDI_JUMBO_THRESH, val);
8229 
8230 	if (tg3_flag(tp, 57765_PLUS))
8231 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8232 }
8233 
8234 /* tp->lock is held. */
8235 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8236 {
8237 	u32 val, rdmac_mode;
8238 	int i, err, limit;
8239 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8240 
8241 	tg3_disable_ints(tp);
8242 
8243 	tg3_stop_fw(tp);
8244 
8245 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8246 
8247 	if (tg3_flag(tp, INIT_COMPLETE))
8248 		tg3_abort_hw(tp, 1);
8249 
8250 	/* Enable MAC control of LPI */
8251 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8252 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8253 		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8254 		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
8255 
8256 		tw32_f(TG3_CPMU_EEE_CTRL,
8257 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8258 
8259 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8260 		      TG3_CPMU_EEEMD_LPI_IN_TX |
8261 		      TG3_CPMU_EEEMD_LPI_IN_RX |
8262 		      TG3_CPMU_EEEMD_EEE_ENABLE;
8263 
8264 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8265 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8266 
8267 		if (tg3_flag(tp, ENABLE_APE))
8268 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8269 
8270 		tw32_f(TG3_CPMU_EEE_MODE, val);
8271 
8272 		tw32_f(TG3_CPMU_EEE_DBTMR1,
8273 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8274 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8275 
8276 		tw32_f(TG3_CPMU_EEE_DBTMR2,
8277 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
8278 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8279 	}
8280 
8281 	if (reset_phy)
8282 		tg3_phy_reset(tp);
8283 
8284 	err = tg3_chip_reset(tp);
8285 	if (err)
8286 		return err;
8287 
8288 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8289 
8290 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8291 		val = tr32(TG3_CPMU_CTRL);
8292 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8293 		tw32(TG3_CPMU_CTRL, val);
8294 
8295 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8296 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8297 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8298 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8299 
8300 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8301 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8302 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
8303 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8304 
8305 		val = tr32(TG3_CPMU_HST_ACC);
8306 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
8307 		val |= CPMU_HST_ACC_MACCLK_6_25;
8308 		tw32(TG3_CPMU_HST_ACC, val);
8309 	}
8310 
8311 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8312 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8313 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8314 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
8315 		tw32(PCIE_PWR_MGMT_THRESH, val);
8316 
8317 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8318 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8319 
8320 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8321 
8322 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8323 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8324 	}
8325 
8326 	if (tg3_flag(tp, L1PLLPD_EN)) {
8327 		u32 grc_mode = tr32(GRC_MODE);
8328 
8329 		/* Access the lower 1K of PL PCIE block registers. */
8330 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8331 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8332 
8333 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8334 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8335 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8336 
8337 		tw32(GRC_MODE, grc_mode);
8338 	}
8339 
8340 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8341 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8342 			u32 grc_mode = tr32(GRC_MODE);
8343 
8344 			/* Access the lower 1K of PL PCIE block registers. */
8345 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8346 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8347 
8348 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8349 				   TG3_PCIE_PL_LO_PHYCTL5);
8350 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8351 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8352 
8353 			tw32(GRC_MODE, grc_mode);
8354 		}
8355 
8356 		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8357 			u32 grc_mode = tr32(GRC_MODE);
8358 
8359 			/* Access the lower 1K of DL PCIE block registers. */
8360 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8361 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8362 
8363 			val = tr32(TG3_PCIE_TLDLPL_PORT +
8364 				   TG3_PCIE_DL_LO_FTSMAX);
8365 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8366 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8367 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8368 
8369 			tw32(GRC_MODE, grc_mode);
8370 		}
8371 
8372 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8373 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8374 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
8375 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8376 	}
8377 
8378 	/* This works around an issue with Athlon chipsets on
8379 	 * B3 tigon3 silicon.  This bit has no effect on any
8380 	 * other revision.  But do not set this on PCI Express
8381 	 * chips and don't even touch the clocks if the CPMU is present.
8382 	 */
8383 	if (!tg3_flag(tp, CPMU_PRESENT)) {
8384 		if (!tg3_flag(tp, PCI_EXPRESS))
8385 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8386 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8387 	}
8388 
8389 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8390 	    tg3_flag(tp, PCIX_MODE)) {
8391 		val = tr32(TG3PCI_PCISTATE);
8392 		val |= PCISTATE_RETRY_SAME_DMA;
8393 		tw32(TG3PCI_PCISTATE, val);
8394 	}
8395 
8396 	if (tg3_flag(tp, ENABLE_APE)) {
8397 		/* Allow reads and writes to the
8398 		 * APE register and memory space.
8399 		 */
8400 		val = tr32(TG3PCI_PCISTATE);
8401 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8402 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8403 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8404 		tw32(TG3PCI_PCISTATE, val);
8405 	}
8406 
8407 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8408 		/* Enable some hw fixes.  */
8409 		val = tr32(TG3PCI_MSI_DATA);
8410 		val |= (1 << 26) | (1 << 28) | (1 << 29);
8411 		tw32(TG3PCI_MSI_DATA, val);
8412 	}
8413 
8414 	/* Descriptor ring init may make accesses to the
8415 	 * NIC SRAM area to set up the TX descriptors, so we
8416 	 * can only do this after the hardware has been
8417 	 * successfully reset.
8418 	 */
8419 	err = tg3_init_rings(tp);
8420 	if (err)
8421 		return err;
8422 
8423 	if (tg3_flag(tp, 57765_PLUS)) {
8424 		val = tr32(TG3PCI_DMA_RW_CTRL) &
8425 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8426 		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8427 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8428 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8429 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8430 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
8431 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8432 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8433 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8434 		/* This value is determined during the probe-time DMA
8435 		 * engine test, tg3_test_dma.
8436 		 */
8437 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8438 	}
8439 
8440 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8441 			  GRC_MODE_4X_NIC_SEND_RINGS |
8442 			  GRC_MODE_NO_TX_PHDR_CSUM |
8443 			  GRC_MODE_NO_RX_PHDR_CSUM);
8444 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8445 
8446 	/* Pseudo-header checksum is done by hardware logic and not
8447 	 * the offload processors, so make the chip do the pseudo-
8448 	 * header checksums on receive.  For transmit it is more
8449 	 * convenient to do the pseudo-header checksum in software,
8450 	 * as Linux already does that for us in all cases.
8451 	 */
8452 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8453 
8454 	tw32(GRC_MODE,
8455 	     tp->grc_mode |
8456 	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8457 
8458 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
8459 	val = tr32(GRC_MISC_CFG);
8460 	val &= ~0xff;
8461 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8462 	tw32(GRC_MISC_CFG, val);
8463 
8464 	/* Initialize MBUF/DESC pool. */
8465 	if (tg3_flag(tp, 5750_PLUS)) {
8466 		/* Do nothing.  */
8467 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8468 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8469 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8470 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8471 		else
8472 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8473 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8474 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8475 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
8476 		int fw_len;
8477 
8478 		fw_len = tp->fw_len;
8479 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
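		/* Round the firmware length up to a 128-byte boundary;
		 * the MBUF pool is carved out of SRAM immediately after
		 * the TSO firmware image.
		 */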
8480 		tw32(BUFMGR_MB_POOL_ADDR,
8481 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8482 		tw32(BUFMGR_MB_POOL_SIZE,
8483 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8484 	}
8485 
8486 	if (tp->dev->mtu <= ETH_DATA_LEN) {
8487 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8488 		     tp->bufmgr_config.mbuf_read_dma_low_water);
8489 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8490 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
8491 		tw32(BUFMGR_MB_HIGH_WATER,
8492 		     tp->bufmgr_config.mbuf_high_water);
8493 	} else {
8494 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
8495 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8496 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
8497 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8498 		tw32(BUFMGR_MB_HIGH_WATER,
8499 		     tp->bufmgr_config.mbuf_high_water_jumbo);
8500 	}
8501 	tw32(BUFMGR_DMA_LOW_WATER,
8502 	     tp->bufmgr_config.dma_low_water);
8503 	tw32(BUFMGR_DMA_HIGH_WATER,
8504 	     tp->bufmgr_config.dma_high_water);
8505 
8506 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8507 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8508 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8509 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8510 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8511 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8512 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8513 	tw32(BUFMGR_MODE, val);
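	/* Wait up to 20 msec (2000 polls of 10 usec) for the buffer
	 * manager to report itself enabled.
	 */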
8514 	for (i = 0; i < 2000; i++) {
8515 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8516 			break;
8517 		udelay(10);
8518 	}
8519 	if (i >= 2000) {
8520 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8521 		return -ENODEV;
8522 	}
8523 
8524 	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8525 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8526 
8527 	tg3_setup_rxbd_thresholds(tp);
8528 
8529 	/* Initialize TG3_BDINFO's at:
8530 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
8531 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
8532 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
8533 	 *
8534 	 * like so:
8535 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
8536 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
8537 	 *                              ring attribute flags
8538 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
8539 	 *
8540 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8541 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8542 	 *
8543 	 * The size of each ring is fixed in the firmware, but the location is
8544 	 * configurable.
8545 	 */
8546 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8547 	     ((u64) tpr->rx_std_mapping >> 32));
8548 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8549 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
8550 	if (!tg3_flag(tp, 5717_PLUS))
8551 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8552 		     NIC_SRAM_RX_BUFFER_DESC);
8553 
8554 	/* Disable the mini ring */
8555 	if (!tg3_flag(tp, 5705_PLUS))
8556 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8557 		     BDINFO_FLAGS_DISABLED);
8558 
8559 	/* Program the jumbo buffer descriptor ring control
8560 	 * blocks on those devices that have them.
8561 	 */
8562 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8563 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8564 
8565 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8566 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8567 			     ((u64) tpr->rx_jmb_mapping >> 32));
8568 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8569 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8570 			val = TG3_RX_JMB_RING_SIZE(tp) <<
8571 			      BDINFO_FLAGS_MAXLEN_SHIFT;
8572 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8573 			     val | BDINFO_FLAGS_USE_EXT_RECV);
8574 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8575 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8576 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8577 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8578 		} else {
8579 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8580 			     BDINFO_FLAGS_DISABLED);
8581 		}
8582 
8583 		if (tg3_flag(tp, 57765_PLUS)) {
8584 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8585 				val = TG3_RX_STD_MAX_SIZE_5700;
8586 			else
8587 				val = TG3_RX_STD_MAX_SIZE_5717;
8588 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8589 			val |= (TG3_RX_STD_DMA_SZ << 2);
8590 		} else
8591 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8592 	} else
8593 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8594 
8595 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8596 
8597 	tpr->rx_std_prod_idx = tp->rx_pending;
8598 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8599 
8600 	tpr->rx_jmb_prod_idx =
8601 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8602 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8603 
8604 	tg3_rings_reset(tp);
8605 
8606 	/* Initialize MAC address and backoff seed. */
8607 	__tg3_set_mac_addr(tp, 0);
8608 
8609 	/* MTU + ethernet header + FCS + optional VLAN tag */
8610 	tw32(MAC_RX_MTU_SIZE,
8611 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8612 
8613 	/* The slot time is changed by tg3_setup_phy if we
8614 	 * run at gigabit with half duplex.
8615 	 */
8616 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8617 	      (6 << TX_LENGTHS_IPG_SHIFT) |
8618 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8619 
8620 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8621 		val |= tr32(MAC_TX_LENGTHS) &
8622 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
8623 			TX_LENGTHS_CNT_DWN_VAL_MSK);
8624 
8625 	tw32(MAC_TX_LENGTHS, val);
8626 
8627 	/* Receive rules. */
8628 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8629 	tw32(RCVLPC_CONFIG, 0x0181);
8630 
8631 	/* Calculate the RDMAC_MODE setting early; we need it to determine
8632 	 * the RCVLPC_STATE_ENABLE mask.
8633 	 */
8634 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8635 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8636 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8637 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8638 		      RDMAC_MODE_LNGREAD_ENAB);
8639 
8640 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8641 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8642 
8643 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8644 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8645 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8646 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8647 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8648 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8649 
8650 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8651 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8652 		if (tg3_flag(tp, TSO_CAPABLE) &&
8653 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8654 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8655 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8656 			   !tg3_flag(tp, IS_5788)) {
8657 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8658 		}
8659 	}
8660 
8661 	if (tg3_flag(tp, PCI_EXPRESS))
8662 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8663 
8664 	if (tg3_flag(tp, HW_TSO_1) ||
8665 	    tg3_flag(tp, HW_TSO_2) ||
8666 	    tg3_flag(tp, HW_TSO_3))
8667 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8668 
8669 	if (tg3_flag(tp, 57765_PLUS) ||
8670 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8671 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8672 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8673 
8674 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8675 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8676 
8677 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8678 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8679 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8680 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8681 	    tg3_flag(tp, 57765_PLUS)) {
8682 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
8683 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8684 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8685 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8686 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8687 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8688 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8689 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8690 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8691 		}
8692 		tw32(TG3_RDMA_RSRVCTRL_REG,
8693 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8694 	}
8695 
8696 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8697 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8698 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8699 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8700 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8701 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8702 	}
8703 
8704 	/* Receive/send statistics. */
8705 	if (tg3_flag(tp, 5750_PLUS)) {
8706 		val = tr32(RCVLPC_STATS_ENABLE);
8707 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
8708 		tw32(RCVLPC_STATS_ENABLE, val);
8709 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8710 		   tg3_flag(tp, TSO_CAPABLE)) {
8711 		val = tr32(RCVLPC_STATS_ENABLE);
8712 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8713 		tw32(RCVLPC_STATS_ENABLE, val);
8714 	} else {
8715 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8716 	}
8717 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8718 	tw32(SNDDATAI_STATSENAB, 0xffffff);
8719 	tw32(SNDDATAI_STATSCTRL,
8720 	     (SNDDATAI_SCTRL_ENABLE |
8721 	      SNDDATAI_SCTRL_FASTUPD));
8722 
8723 	/* Set up the host coalescing engine. */
8724 	tw32(HOSTCC_MODE, 0);
8725 	for (i = 0; i < 2000; i++) {
8726 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8727 			break;
8728 		udelay(10);
8729 	}
8730 
8731 	__tg3_set_coalesce(tp, &tp->coal);
8732 
8733 	if (!tg3_flag(tp, 5705_PLUS)) {
8734 		/* Status/statistics block address.  See tg3_timer,
8735 		 * the tg3_periodic_fetch_stats call there, and
8736 		 * tg3_get_stats to see how this works for 5705/5750 chips.
8737 		 */
8738 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8739 		     ((u64) tp->stats_mapping >> 32));
8740 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8741 		     ((u64) tp->stats_mapping & 0xffffffff));
8742 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8743 
8744 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8745 
8746 		/* Clear statistics and status block memory areas */
8747 		for (i = NIC_SRAM_STATS_BLK;
8748 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8749 		     i += sizeof(u32)) {
8750 			tg3_write_mem(tp, i, 0);
8751 			udelay(40);
8752 		}
8753 	}
8754 
8755 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8756 
8757 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8758 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8759 	if (!tg3_flag(tp, 5705_PLUS))
8760 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8761 
8762 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8763 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8764 		/* Reset the RX MAC to avoid intermittently losing the 1st rx packet */
8765 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8766 		udelay(10);
8767 	}
8768 
8769 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8770 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8771 			MAC_MODE_FHDE_ENABLE;
8772 	if (tg3_flag(tp, ENABLE_APE))
8773 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8774 	if (!tg3_flag(tp, 5705_PLUS) &&
8775 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8776 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8777 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8778 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8779 	udelay(40);
8780 
8781 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8782 	 * If TG3_FLAG_IS_NIC is zero, we should read the
8783 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
8784 	 * whether used as inputs or outputs, are set by boot code after
8785 	 * reset.
8786 	 */
8787 	if (!tg3_flag(tp, IS_NIC)) {
8788 		u32 gpio_mask;
8789 
8790 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8791 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8792 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8793 
8794 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8795 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8796 				     GRC_LCLCTRL_GPIO_OUTPUT3;
8797 
8798 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8799 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8800 
8801 		tp->grc_local_ctrl &= ~gpio_mask;
8802 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8803 
8804 		/* GPIO1 must be driven high for EEPROM write protect */
8805 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
8806 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8807 					       GRC_LCLCTRL_GPIO_OUTPUT1);
8808 	}
8809 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8810 	udelay(100);
8811 
8812 	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8813 		val = tr32(MSGINT_MODE);
8814 		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8815 		if (!tg3_flag(tp, 1SHOT_MSI))
8816 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8817 		tw32(MSGINT_MODE, val);
8818 	}
8819 
8820 	if (!tg3_flag(tp, 5705_PLUS)) {
8821 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8822 		udelay(40);
8823 	}
8824 
8825 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8826 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8827 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8828 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8829 	       WDMAC_MODE_LNGREAD_ENAB);
8830 
8831 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8832 	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8833 		if (tg3_flag(tp, TSO_CAPABLE) &&
8834 		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8835 		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8836 			/* nothing */
8837 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8838 			   !tg3_flag(tp, IS_5788)) {
8839 			val |= WDMAC_MODE_RX_ACCEL;
8840 		}
8841 	}
8842 
8843 	/* Enable host coalescing bug fix */
8844 	if (tg3_flag(tp, 5755_PLUS))
8845 		val |= WDMAC_MODE_STATUS_TAG_FIX;
8846 
8847 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8848 		val |= WDMAC_MODE_BURST_ALL_DATA;
8849 
8850 	tw32_f(WDMAC_MODE, val);
8851 	udelay(40);
8852 
8853 	if (tg3_flag(tp, PCIX_MODE)) {
8854 		u16 pcix_cmd;
8855 
8856 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8857 				     &pcix_cmd);
8858 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8859 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8860 			pcix_cmd |= PCI_X_CMD_READ_2K;
8861 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8862 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8863 			pcix_cmd |= PCI_X_CMD_READ_2K;
8864 		}
8865 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8866 				      pcix_cmd);
8867 	}
8868 
8869 	tw32_f(RDMAC_MODE, rdmac_mode);
8870 	udelay(40);
8871 
8872 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8873 	if (!tg3_flag(tp, 5705_PLUS))
8874 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8875 
8876 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8877 		tw32(SNDDATAC_MODE,
8878 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8879 	else
8880 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8881 
8882 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8883 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8884 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8885 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
8886 		val |= RCVDBDI_MODE_LRG_RING_SZ;
8887 	tw32(RCVDBDI_MODE, val);
8888 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8889 	if (tg3_flag(tp, HW_TSO_1) ||
8890 	    tg3_flag(tp, HW_TSO_2) ||
8891 	    tg3_flag(tp, HW_TSO_3))
8892 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8893 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8894 	if (tg3_flag(tp, ENABLE_TSS))
8895 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
8896 	tw32(SNDBDI_MODE, val);
8897 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8898 
8899 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8900 		err = tg3_load_5701_a0_firmware_fix(tp);
8901 		if (err)
8902 			return err;
8903 	}
8904 
8905 	if (tg3_flag(tp, TSO_CAPABLE)) {
8906 		err = tg3_load_tso_firmware(tp);
8907 		if (err)
8908 			return err;
8909 	}
8910 
8911 	tp->tx_mode = TX_MODE_ENABLE;
8912 
8913 	if (tg3_flag(tp, 5755_PLUS) ||
8914 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8915 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8916 
8917 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8918 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8919 		tp->tx_mode &= ~val;
8920 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8921 	}
8922 
8923 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8924 	udelay(100);
8925 
8926 	if (tg3_flag(tp, ENABLE_RSS)) {
8927 		int i = 0;
8928 		u32 reg = MAC_RSS_INDIR_TBL_0;
8929 
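		/* Fill the RSS indirection table.  Each 32-bit register
		 * packs eight 4-bit queue indices.  With a single RX queue
		 * (irq_cnt == 2) the table is simply zeroed; otherwise
		 * entry i gets i % (tp->irq_cnt - 1), spreading flows
		 * round-robin across the RX return rings.
		 */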
8930 		if (tp->irq_cnt == 2) {
8931 			for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8932 				tw32(reg, 0x0);
8933 				reg += 4;
8934 			}
8935 		} else {
8936 			u32 val;
8937 
8938 			while (i < TG3_RSS_INDIR_TBL_SIZE) {
8939 				val = i % (tp->irq_cnt - 1);
8940 				i++;
8941 				for (; i % 8; i++) {
8942 					val <<= 4;
8943 					val |= (i % (tp->irq_cnt - 1));
8944 				}
8945 				tw32(reg, val);
8946 				reg += 4;
8947 			}
8948 		}
8949 
8950 		/* Set up the "secret" hash key. */
8951 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8952 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8953 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8954 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8955 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8956 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8957 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8958 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8959 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8960 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8961 	}
8962 
8963 	tp->rx_mode = RX_MODE_ENABLE;
8964 	if (tg3_flag(tp, 5755_PLUS))
8965 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8966 
8967 	if (tg3_flag(tp, ENABLE_RSS))
8968 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
8969 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
8970 			       RX_MODE_RSS_IPV6_HASH_EN |
8971 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
8972 			       RX_MODE_RSS_IPV4_HASH_EN |
8973 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
8974 
8975 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8976 	udelay(10);
8977 
8978 	tw32(MAC_LED_CTRL, tp->led_ctrl);
8979 
8980 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8981 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8982 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8983 		udelay(10);
8984 	}
8985 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8986 	udelay(10);
8987 
8988 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8989 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8990 			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8991 			/* Set drive transmission level to 1.2V, but only
8992 			 * if the signal pre-emphasis bit is not set. */
8993 			val = tr32(MAC_SERDES_CFG);
8994 			val &= 0xfffff000;
8995 			val |= 0x880;
8996 			tw32(MAC_SERDES_CFG, val);
8997 		}
8998 		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8999 			tw32(MAC_SERDES_CFG, 0x616000);
9000 	}
9001 
9002 	/* Prevent the chip from dropping frames when flow control
9003 	 * is enabled.
9004 	 */
9005 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9006 		val = 1;
9007 	else
9008 		val = 2;
9009 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9010 
9011 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9012 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9013 		/* Use hardware link auto-negotiation */
9014 		tg3_flag_set(tp, HW_AUTONEG);
9015 	}
9016 
9017 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9018 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9019 		u32 tmp;
9020 
9021 		tmp = tr32(SERDES_RX_CTRL);
9022 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9023 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9024 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9025 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9026 	}
9027 
9028 	if (!tg3_flag(tp, USE_PHYLIB)) {
9029 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9030 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9031 			tp->link_config.speed = tp->link_config.orig_speed;
9032 			tp->link_config.duplex = tp->link_config.orig_duplex;
9033 			tp->link_config.autoneg = tp->link_config.orig_autoneg;
9034 		}
9035 
9036 		err = tg3_setup_phy(tp, 0);
9037 		if (err)
9038 			return err;
9039 
9040 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9041 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9042 			u32 tmp;
9043 
9044 			/* Clear CRC stats. */
9045 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9046 				tg3_writephy(tp, MII_TG3_TEST1,
9047 					     tmp | MII_TG3_TEST1_CRC_EN);
9048 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9049 			}
9050 		}
9051 	}
9052 
9053 	__tg3_set_rx_mode(tp->dev);
9054 
9055 	/* Initialize receive rules. */
9056 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9057 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9058 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9059 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9060 
9061 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9062 		limit = 8;
9063 	else
9064 		limit = 16;
9065 	if (tg3_flag(tp, ENABLE_ASF))
9066 		limit -= 4;
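	/* Disable the unused receive rules.  The cases below
	 * intentionally fall through: entering the switch at `limit'
	 * clears every rule from limit - 1 on down.  Rules 0 and 1
	 * were programmed above; 2 and 3 are deliberately left alone.
	 */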
9067 	switch (limit) {
9068 	case 16:
9069 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9070 	case 15:
9071 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9072 	case 14:
9073 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9074 	case 13:
9075 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9076 	case 12:
9077 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9078 	case 11:
9079 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9080 	case 10:
9081 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9082 	case 9:
9083 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9084 	case 8:
9085 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9086 	case 7:
9087 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9088 	case 6:
9089 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9090 	case 5:
9091 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9092 	case 4:
9093 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9094 	case 3:
9095 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9096 	case 2:
9097 	case 1:
9098 
9099 	default:
9100 		break;
9101 	}
9102 
9103 	if (tg3_flag(tp, ENABLE_APE))
9104 		/* Write our heartbeat update interval to APE. */
9105 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9106 				APE_HOST_HEARTBEAT_INT_DISABLE);
9107 
9108 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9109 
9110 	return 0;
9111 }
9112 
9113 /* Called at device open time to get the chip ready for
9114  * packet processing.  Invoked with tp->lock held.
9115  */
9116 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9117 {
9118 	tg3_switch_clocks(tp);
9119 
9120 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9121 
9122 	return tg3_reset_hw(tp, reset_phy);
9123 }
9124 
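/* Accumulate a 32-bit hardware counter into a 64-bit software counter,
 * detecting wraparound of the low word and carrying into the high word.
 */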
9125 #define TG3_STAT_ADD32(PSTAT, REG) \
9126 do {	u32 __val = tr32(REG); \
9127 	(PSTAT)->low += __val; \
9128 	if ((PSTAT)->low < __val) \
9129 		(PSTAT)->high += 1; \
9130 } while (0)
9131 
9132 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9133 {
9134 	struct tg3_hw_stats *sp = tp->hw_stats;
9135 
9136 	if (!netif_carrier_ok(tp->dev))
9137 		return;
9138 
9139 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9140 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9141 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9142 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9143 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9144 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9145 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9146 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9147 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9148 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9149 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9150 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9151 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9152 
9153 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9154 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9155 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9156 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9157 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9158 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9159 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9160 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9161 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9162 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9163 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9164 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9165 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9166 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9167 
9168 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9169 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9170 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9171 	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9172 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9173 	} else {
9174 		u32 val = tr32(HOSTCC_FLOW_ATTN);
9175 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9176 		if (val) {
9177 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9178 			sp->rx_discards.low += val;
9179 			if (sp->rx_discards.low < val)
9180 				sp->rx_discards.high += 1;
9181 		}
9182 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9183 	}
9184 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9185 }
9186 
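/* Work around occasionally lost MSIs: if a NAPI context reports pending
 * work but its producer/consumer indices have not advanced since the
 * previous timer tick, assume the interrupt was lost and, after one
 * grace tick, invoke the MSI handler directly.
 */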
9187 static void tg3_chk_missed_msi(struct tg3 *tp)
9188 {
9189 	u32 i;
9190 
9191 	for (i = 0; i < tp->irq_cnt; i++) {
9192 		struct tg3_napi *tnapi = &tp->napi[i];
9193 
9194 		if (tg3_has_work(tnapi)) {
9195 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9196 			    tnapi->last_tx_cons == tnapi->tx_cons) {
9197 				if (tnapi->chk_msi_cnt < 1) {
9198 					tnapi->chk_msi_cnt++;
9199 					return;
9200 				}
9201 				tg3_msi(0, tnapi);
9202 			}
9203 		}
9204 		tnapi->chk_msi_cnt = 0;
9205 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9206 		tnapi->last_tx_cons = tnapi->tx_cons;
9207 	}
9208 }
9209 
9210 static void tg3_timer(unsigned long __opaque)
9211 {
9212 	struct tg3 *tp = (struct tg3 *) __opaque;
9213 
9214 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9215 		goto restart_timer;
9216 
9217 	spin_lock(&tp->lock);
9218 
9219 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9220 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9221 		tg3_chk_missed_msi(tp);
9222 
9223 	if (!tg3_flag(tp, TAGGED_STATUS)) {
9224 		/* All of this garbage is because, when using non-tagged
9225 		 * IRQ status, the mailbox/status_block protocol the chip
9226 		 * uses with the CPU is race prone.
9227 		 */
9228 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9229 			tw32(GRC_LOCAL_CTRL,
9230 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9231 		} else {
9232 			tw32(HOSTCC_MODE, tp->coalesce_mode |
9233 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9234 		}
9235 
9236 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9237 			spin_unlock(&tp->lock);
9238 			tg3_reset_task_schedule(tp);
9239 			goto restart_timer;
9240 		}
9241 	}
9242 
9243 	/* This part only runs once per second. */
9244 	if (!--tp->timer_counter) {
9245 		if (tg3_flag(tp, 5705_PLUS))
9246 			tg3_periodic_fetch_stats(tp);
9247 
9248 		if (tp->setlpicnt && !--tp->setlpicnt)
9249 			tg3_phy_eee_enable(tp);
9250 
9251 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
9252 			u32 mac_stat;
9253 			int phy_event;
9254 
9255 			mac_stat = tr32(MAC_STATUS);
9256 
9257 			phy_event = 0;
9258 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9259 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9260 					phy_event = 1;
9261 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9262 				phy_event = 1;
9263 
9264 			if (phy_event)
9265 				tg3_setup_phy(tp, 0);
9266 		} else if (tg3_flag(tp, POLL_SERDES)) {
9267 			u32 mac_stat = tr32(MAC_STATUS);
9268 			int need_setup = 0;
9269 
9270 			if (netif_carrier_ok(tp->dev) &&
9271 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9272 				need_setup = 1;
9273 			}
9274 			if (!netif_carrier_ok(tp->dev) &&
9275 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
9276 					 MAC_STATUS_SIGNAL_DET))) {
9277 				need_setup = 1;
9278 			}
9279 			if (need_setup) {
9280 				if (!tp->serdes_counter) {
9281 					tw32_f(MAC_MODE,
9282 					     (tp->mac_mode &
9283 					      ~MAC_MODE_PORT_MODE_MASK));
9284 					udelay(40);
9285 					tw32_f(MAC_MODE, tp->mac_mode);
9286 					udelay(40);
9287 				}
9288 				tg3_setup_phy(tp, 0);
9289 			}
9290 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9291 			   tg3_flag(tp, 5780_CLASS)) {
9292 			tg3_serdes_parallel_detect(tp);
9293 		}
9294 
9295 		tp->timer_counter = tp->timer_multiplier;
9296 	}
9297 
9298 	/* Heartbeat is only sent once every 2 seconds.
9299 	 *
9300 	 * The heartbeat is to tell the ASF firmware that the host
9301 	 * driver is still alive.  In the event that the OS crashes,
9302 	 * ASF needs to reset the hardware to free up the FIFO space
9303 	 * that may be filled with rx packets destined for the host.
9304 	 * If the FIFO is full, ASF will no longer function properly.
9305 	 *
9306 	 * Unintended resets have been reported on real-time kernels
9307 	 * where the timer doesn't run on time.  Netpoll will also have
9308 	 * the same problem.
9309 	 *
9310 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9311 	 * to check the ring condition when the heartbeat is expiring
9312 	 * before doing the reset.  This will prevent most unintended
9313 	 * resets.
9314 	 */
9315 	if (!--tp->asf_counter) {
9316 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9317 			tg3_wait_for_event_ack(tp);
9318 
9319 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9320 				      FWCMD_NICDRV_ALIVE3);
9321 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9322 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9323 				      TG3_FW_UPDATE_TIMEOUT_SEC);
9324 
9325 			tg3_generate_fw_event(tp);
9326 		}
9327 		tp->asf_counter = tp->asf_multiplier;
9328 	}
9329 
9330 	spin_unlock(&tp->lock);
9331 
9332 restart_timer:
9333 	tp->timer.expires = jiffies + tp->timer_offset;
9334 	add_timer(&tp->timer);
9335 }
9336 
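/* Pick the ISR for this vector: the MSI handler (one-shot flavor where
 * supported) for message-signaled interrupts, otherwise the legacy INTx
 * handler (tagged-status flavor where available), which must be
 * registered as shared.
 */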
9337 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9338 {
9339 	irq_handler_t fn;
9340 	unsigned long flags;
9341 	char *name;
9342 	struct tg3_napi *tnapi = &tp->napi[irq_num];
9343 
9344 	if (tp->irq_cnt == 1)
9345 		name = tp->dev->name;
9346 	else {
9347 		name = &tnapi->irq_lbl[0];
9348 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9349 		name[IFNAMSIZ-1] = 0;
9350 	}
9351 
9352 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9353 		fn = tg3_msi;
9354 		if (tg3_flag(tp, 1SHOT_MSI))
9355 			fn = tg3_msi_1shot;
9356 		flags = 0;
9357 	} else {
9358 		fn = tg3_interrupt;
9359 		if (tg3_flag(tp, TAGGED_STATUS))
9360 			fn = tg3_interrupt_tagged;
9361 		flags = IRQF_SHARED;
9362 	}
9363 
9364 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9365 }
9366 
9367 static int tg3_test_interrupt(struct tg3 *tp)
9368 {
9369 	struct tg3_napi *tnapi = &tp->napi[0];
9370 	struct net_device *dev = tp->dev;
9371 	int err, i, intr_ok = 0;
9372 	u32 val;
9373 
9374 	if (!netif_running(dev))
9375 		return -ENODEV;
9376 
9377 	tg3_disable_ints(tp);
9378 
9379 	free_irq(tnapi->irq_vec, tnapi);
9380 
9381 	/*
9382 	 * Turn off MSI one-shot mode.  Otherwise this test has no
9383 	 * observable way to know whether the interrupt was delivered.
9384 	 */
9385 	if (tg3_flag(tp, 57765_PLUS)) {
9386 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9387 		tw32(MSGINT_MODE, val);
9388 	}
9389 
9390 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
9391 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9392 	if (err)
9393 		return err;
9394 
9395 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9396 	tg3_enable_ints(tp);
9397 
9398 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9399 	       tnapi->coal_now);
9400 
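	/* Poll for up to ~50ms for evidence that the interrupt was
	 * delivered: either a non-zero interrupt mailbox or the PCI
	 * interrupt-mask bit in MISC_HOST_CTRL becoming set.
	 */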
9401 	for (i = 0; i < 5; i++) {
9402 		u32 int_mbox, misc_host_ctrl;
9403 
9404 		int_mbox = tr32_mailbox(tnapi->int_mbox);
9405 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9406 
9407 		if ((int_mbox != 0) ||
9408 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9409 			intr_ok = 1;
9410 			break;
9411 		}
9412 
9413 		if (tg3_flag(tp, 57765_PLUS) &&
9414 		    tnapi->hw_status->status_tag != tnapi->last_tag)
9415 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9416 
9417 		msleep(10);
9418 	}
9419 
9420 	tg3_disable_ints(tp);
9421 
9422 	free_irq(tnapi->irq_vec, tnapi);
9423 
9424 	err = tg3_request_irq(tp, 0);
9425 
9426 	if (err)
9427 		return err;
9428 
9429 	if (intr_ok) {
9430 		/* Re-enable MSI one-shot mode. */
9431 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9432 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9433 			tw32(MSGINT_MODE, val);
9434 		}
9435 		return 0;
9436 	}
9437 
9438 	return -EIO;
9439 }
9440 
9441 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9442  * INTx mode is successfully restored.
9443  */
9444 static int tg3_test_msi(struct tg3 *tp)
9445 {
9446 	int err;
9447 	u16 pci_cmd;
9448 
9449 	if (!tg3_flag(tp, USING_MSI))
9450 		return 0;
9451 
9452 	/* Turn off SERR reporting in case MSI terminates with Master
9453 	 * Abort.
9454 	 */
9455 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9456 	pci_write_config_word(tp->pdev, PCI_COMMAND,
9457 			      pci_cmd & ~PCI_COMMAND_SERR);
9458 
9459 	err = tg3_test_interrupt(tp);
9460 
9461 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9462 
9463 	if (!err)
9464 		return 0;
9465 
9466 	/* other failures */
9467 	if (err != -EIO)
9468 		return err;
9469 
9470 	/* MSI test failed, go back to INTx mode */
9471 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9472 		    "to INTx mode. Please report this failure to the PCI "
9473 		    "maintainer and include system chipset information\n");
9474 
9475 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9476 
9477 	pci_disable_msi(tp->pdev);
9478 
9479 	tg3_flag_clear(tp, USING_MSI);
9480 	tp->napi[0].irq_vec = tp->pdev->irq;
9481 
9482 	err = tg3_request_irq(tp, 0);
9483 	if (err)
9484 		return err;
9485 
9486 	/* Need to reset the chip because the MSI cycle may have terminated
9487 	 * with Master Abort.
9488 	 */
9489 	tg3_full_lock(tp, 1);
9490 
9491 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9492 	err = tg3_init_hw(tp, 1);
9493 
9494 	tg3_full_unlock(tp);
9495 
9496 	if (err)
9497 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9498 
9499 	return err;
9500 }
9501 
9502 static int tg3_request_firmware(struct tg3 *tp)
9503 {
9504 	const __be32 *fw_data;
9505 
9506 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9507 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9508 			   tp->fw_needed);
9509 		return -ENOENT;
9510 	}
9511 
9512 	fw_data = (void *)tp->fw->data;
9513 
9514 	/* Firmware blob starts with version numbers, followed by
9515 	 * start address and _full_ length including BSS sections
9516 	 * (which must be longer than the actual data, of course).
9517 	 */
9518 
9519 	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
9520 	if (tp->fw_len < (tp->fw->size - 12)) {
9521 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9522 			   tp->fw_len, tp->fw_needed);
9523 		release_firmware(tp->fw);
9524 		tp->fw = NULL;
9525 		return -EINVAL;
9526 	}
9527 
9528 	/* We no longer need firmware; we have it. */
9529 	tp->fw_needed = NULL;
9530 	return 0;
9531 }
9532 
9533 static bool tg3_enable_msix(struct tg3 *tp)
9534 {
9535 	int i, rc, cpus = num_online_cpus();
9536 	struct msix_entry msix_ent[tp->irq_max];
9537 
9538 	if (cpus == 1)
9539 		/* Just fall back to the simpler MSI mode. */
9540 		return false;
9541 
9542 	/*
9543 	 * We want as many rx rings enabled as there are cpus.
9544 	 * The first MSI-X vector only deals with link interrupts, etc.,
9545 	 * so we add one to the number of vectors we are requesting.
9546 	 */
9547 	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9548 
9549 	for (i = 0; i < tp->irq_max; i++) {
9550 		msix_ent[i].entry  = i;
9551 		msix_ent[i].vector = 0;
9552 	}
9553 
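	/* pci_enable_msix() returns a positive count when it can only
	 * provide fewer vectors than requested; retry with exactly that
	 * many before giving up.
	 */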
9554 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9555 	if (rc < 0) {
9556 		return false;
9557 	} else if (rc != 0) {
9558 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
9559 			return false;
9560 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9561 			      tp->irq_cnt, rc);
9562 		tp->irq_cnt = rc;
9563 	}
9564 
9565 	for (i = 0; i < tp->irq_max; i++)
9566 		tp->napi[i].irq_vec = msix_ent[i].vector;
9567 
9568 	netif_set_real_num_tx_queues(tp->dev, 1);
9569 	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9570 	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9571 		pci_disable_msix(tp->pdev);
9572 		return false;
9573 	}
9574 
9575 	if (tp->irq_cnt > 1) {
9576 		tg3_flag_set(tp, ENABLE_RSS);
9577 
9578 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9579 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9580 			tg3_flag_set(tp, ENABLE_TSS);
9581 			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9582 		}
9583 	}
9584 
9585 	return true;
9586 }
9587 
9588 static void tg3_ints_init(struct tg3 *tp)
9589 {
9590 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9591 	    !tg3_flag(tp, TAGGED_STATUS)) {
9592 		/* All MSI-supporting chips should support tagged
9593 		 * status.  Assert that this is the case.
9594 		 */
9595 		netdev_warn(tp->dev,
9596 			    "MSI without TAGGED_STATUS? Not using MSI\n");
9597 		goto defcfg;
9598 	}
9599 
9600 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9601 		tg3_flag_set(tp, USING_MSIX);
9602 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9603 		tg3_flag_set(tp, USING_MSI);
9604 
9605 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9606 		u32 msi_mode = tr32(MSGINT_MODE);
9607 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9608 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9609 		if (!tg3_flag(tp, 1SHOT_MSI))
9610 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9611 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9612 	}
9613 defcfg:
9614 	if (!tg3_flag(tp, USING_MSIX)) {
9615 		tp->irq_cnt = 1;
9616 		tp->napi[0].irq_vec = tp->pdev->irq;
9617 		netif_set_real_num_tx_queues(tp->dev, 1);
9618 		netif_set_real_num_rx_queues(tp->dev, 1);
9619 	}
9620 }
9621 
9622 static void tg3_ints_fini(struct tg3 *tp)
9623 {
9624 	if (tg3_flag(tp, USING_MSIX))
9625 		pci_disable_msix(tp->pdev);
9626 	else if (tg3_flag(tp, USING_MSI))
9627 		pci_disable_msi(tp->pdev);
9628 	tg3_flag_clear(tp, USING_MSI);
9629 	tg3_flag_clear(tp, USING_MSIX);
9630 	tg3_flag_clear(tp, ENABLE_RSS);
9631 	tg3_flag_clear(tp, ENABLE_TSS);
9632 }
9633 
9634 static int tg3_open(struct net_device *dev)
9635 {
9636 	struct tg3 *tp = netdev_priv(dev);
9637 	int i, err;
9638 
9639 	if (tp->fw_needed) {
9640 		err = tg3_request_firmware(tp);
9641 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9642 			if (err)
9643 				return err;
9644 		} else if (err) {
9645 			netdev_warn(tp->dev, "TSO capability disabled\n");
9646 			tg3_flag_clear(tp, TSO_CAPABLE);
9647 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
9648 			netdev_notice(tp->dev, "TSO capability restored\n");
9649 			tg3_flag_set(tp, TSO_CAPABLE);
9650 		}
9651 	}
9652 
9653 	netif_carrier_off(tp->dev);
9654 
9655 	err = tg3_power_up(tp);
9656 	if (err)
9657 		return err;
9658 
9659 	tg3_full_lock(tp, 0);
9660 
9661 	tg3_disable_ints(tp);
9662 	tg3_flag_clear(tp, INIT_COMPLETE);
9663 
9664 	tg3_full_unlock(tp);
9665 
9666 	/*
9667 	 * Set up interrupts first so we know how
9668 	 * many NAPI resources to allocate.
9669 	 */
9670 	tg3_ints_init(tp);
9671 
9672 	/* The placement of this call is tied
9673 	 * to the setup and use of Host TX descriptors.
9674 	 */
9675 	err = tg3_alloc_consistent(tp);
9676 	if (err)
9677 		goto err_out1;
9678 
9679 	tg3_napi_init(tp);
9680 
9681 	tg3_napi_enable(tp);
9682 
9683 	for (i = 0; i < tp->irq_cnt; i++) {
9684 		struct tg3_napi *tnapi = &tp->napi[i];
9685 		err = tg3_request_irq(tp, i);
9686 		if (err) {
9687 			for (i--; i >= 0; i--) {
9688 				tnapi = &tp->napi[i];
9689 				free_irq(tnapi->irq_vec, tnapi);
9690 			}
9691 			goto err_out2;
9692 		}
9693 	}
9694 
9695 	tg3_full_lock(tp, 0);
9696 
9697 	err = tg3_init_hw(tp, 1);
9698 	if (err) {
9699 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9700 		tg3_free_rings(tp);
9701 	} else {
9702 		if (tg3_flag(tp, TAGGED_STATUS) &&
9703 			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9704 			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9705 			tp->timer_offset = HZ;
9706 		else
9707 			tp->timer_offset = HZ / 10;
9708 
9709 		BUG_ON(tp->timer_offset > HZ);
9710 		tp->timer_counter = tp->timer_multiplier =
9711 			(HZ / tp->timer_offset);
9712 		tp->asf_counter = tp->asf_multiplier =
9713 			((HZ / tp->timer_offset) * 2);
9714 
9715 		init_timer(&tp->timer);
9716 		tp->timer.expires = jiffies + tp->timer_offset;
9717 		tp->timer.data = (unsigned long) tp;
9718 		tp->timer.function = tg3_timer;
9719 	}
9720 
9721 	tg3_full_unlock(tp);
9722 
9723 	if (err)
9724 		goto err_out3;
9725 
9726 	if (tg3_flag(tp, USING_MSI)) {
9727 		err = tg3_test_msi(tp);
9728 
9729 		if (err) {
9730 			tg3_full_lock(tp, 0);
9731 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9732 			tg3_free_rings(tp);
9733 			tg3_full_unlock(tp);
9734 
9735 			goto err_out2;
9736 		}
9737 
9738 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9739 			u32 val = tr32(PCIE_TRANSACTION_CFG);
9740 
9741 			tw32(PCIE_TRANSACTION_CFG,
9742 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
9743 		}
9744 	}
9745 
9746 	tg3_phy_start(tp);
9747 
9748 	tg3_full_lock(tp, 0);
9749 
9750 	add_timer(&tp->timer);
9751 	tg3_flag_set(tp, INIT_COMPLETE);
9752 	tg3_enable_ints(tp);
9753 
9754 	tg3_full_unlock(tp);
9755 
9756 	netif_tx_start_all_queues(dev);
9757 
9758 	/*
9759 	 * Reset the loopback feature if it was turned on while the device was
9760 	 * down; make sure that it is reapplied properly now.
9761 	 */
9762 	if (dev->features & NETIF_F_LOOPBACK)
9763 		tg3_set_loopback(dev, dev->features);
9764 
9765 	return 0;
9766 
9767 err_out3:
9768 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9769 		struct tg3_napi *tnapi = &tp->napi[i];
9770 		free_irq(tnapi->irq_vec, tnapi);
9771 	}
9772 
9773 err_out2:
9774 	tg3_napi_disable(tp);
9775 	tg3_napi_fini(tp);
9776 	tg3_free_consistent(tp);
9777 
9778 err_out1:
9779 	tg3_ints_fini(tp);
9780 	tg3_frob_aux_power(tp, false);
9781 	pci_set_power_state(tp->pdev, PCI_D3hot);
9782 	return err;
9783 }
9784 
9785 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9786 						 struct rtnl_link_stats64 *);
9787 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9788 
9789 static int tg3_close(struct net_device *dev)
9790 {
9791 	int i;
9792 	struct tg3 *tp = netdev_priv(dev);
9793 
9794 	tg3_napi_disable(tp);
9795 	tg3_reset_task_cancel(tp);
9796 
9797 	netif_tx_stop_all_queues(dev);
9798 
9799 	del_timer_sync(&tp->timer);
9800 
9801 	tg3_phy_stop(tp);
9802 
9803 	tg3_full_lock(tp, 1);
9804 
9805 	tg3_disable_ints(tp);
9806 
9807 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9808 	tg3_free_rings(tp);
9809 	tg3_flag_clear(tp, INIT_COMPLETE);
9810 
9811 	tg3_full_unlock(tp);
9812 
9813 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9814 		struct tg3_napi *tnapi = &tp->napi[i];
9815 		free_irq(tnapi->irq_vec, tnapi);
9816 	}
9817 
9818 	tg3_ints_fini(tp);
9819 
9820 	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9821 
9822 	memcpy(&tp->estats_prev, tg3_get_estats(tp),
9823 	       sizeof(tp->estats_prev));
9824 
9825 	tg3_napi_fini(tp);
9826 
9827 	tg3_free_consistent(tp);
9828 
9829 	tg3_power_down(tp);
9830 
9831 	netif_carrier_off(tp->dev);
9832 
9833 	return 0;
9834 }
9835 
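/* Combine the two 32-bit halves of a hardware statistics counter into a
 * single u64 value.
 */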
9836 static inline u64 get_stat64(tg3_stat64_t *val)
9837 {
9838 	return ((u64)val->high << 32) | ((u64)val->low);
9839 }
9840 
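/* On 5700/5701 copper devices the CRC error count is fetched from the
 * PHY's test registers and accumulated in software; all other devices
 * report it through the MAC's rx_fcs_errors hardware statistic.
 */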
9841 static u64 calc_crc_errors(struct tg3 *tp)
9842 {
9843 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9844 
9845 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9846 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9847 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9848 		u32 val;
9849 
9850 		spin_lock_bh(&tp->lock);
9851 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9852 			tg3_writephy(tp, MII_TG3_TEST1,
9853 				     val | MII_TG3_TEST1_CRC_EN);
9854 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9855 		} else
9856 			val = 0;
9857 		spin_unlock_bh(&tp->lock);
9858 
9859 		tp->phy_crc_errors += val;
9860 
9861 		return tp->phy_crc_errors;
9862 	}
9863 
9864 	return get_stat64(&hw_stats->rx_fcs_errors);
9865 }
9866 
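/* Ethtool stats are running totals: each counter is the snapshot saved
 * at the last close (tp->estats_prev) plus the current hardware count.
 */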
9867 #define ESTAT_ADD(member) \
9868 	estats->member =	old_estats->member + \
9869 				get_stat64(&hw_stats->member)
9870 
9871 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9872 {
9873 	struct tg3_ethtool_stats *estats = &tp->estats;
9874 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9875 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9876 
9877 	if (!hw_stats)
9878 		return old_estats;
9879 
9880 	ESTAT_ADD(rx_octets);
9881 	ESTAT_ADD(rx_fragments);
9882 	ESTAT_ADD(rx_ucast_packets);
9883 	ESTAT_ADD(rx_mcast_packets);
9884 	ESTAT_ADD(rx_bcast_packets);
9885 	ESTAT_ADD(rx_fcs_errors);
9886 	ESTAT_ADD(rx_align_errors);
9887 	ESTAT_ADD(rx_xon_pause_rcvd);
9888 	ESTAT_ADD(rx_xoff_pause_rcvd);
9889 	ESTAT_ADD(rx_mac_ctrl_rcvd);
9890 	ESTAT_ADD(rx_xoff_entered);
9891 	ESTAT_ADD(rx_frame_too_long_errors);
9892 	ESTAT_ADD(rx_jabbers);
9893 	ESTAT_ADD(rx_undersize_packets);
9894 	ESTAT_ADD(rx_in_length_errors);
9895 	ESTAT_ADD(rx_out_length_errors);
9896 	ESTAT_ADD(rx_64_or_less_octet_packets);
9897 	ESTAT_ADD(rx_65_to_127_octet_packets);
9898 	ESTAT_ADD(rx_128_to_255_octet_packets);
9899 	ESTAT_ADD(rx_256_to_511_octet_packets);
9900 	ESTAT_ADD(rx_512_to_1023_octet_packets);
9901 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
9902 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
9903 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
9904 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
9905 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
9906 
9907 	ESTAT_ADD(tx_octets);
9908 	ESTAT_ADD(tx_collisions);
9909 	ESTAT_ADD(tx_xon_sent);
9910 	ESTAT_ADD(tx_xoff_sent);
9911 	ESTAT_ADD(tx_flow_control);
9912 	ESTAT_ADD(tx_mac_errors);
9913 	ESTAT_ADD(tx_single_collisions);
9914 	ESTAT_ADD(tx_mult_collisions);
9915 	ESTAT_ADD(tx_deferred);
9916 	ESTAT_ADD(tx_excessive_collisions);
9917 	ESTAT_ADD(tx_late_collisions);
9918 	ESTAT_ADD(tx_collide_2times);
9919 	ESTAT_ADD(tx_collide_3times);
9920 	ESTAT_ADD(tx_collide_4times);
9921 	ESTAT_ADD(tx_collide_5times);
9922 	ESTAT_ADD(tx_collide_6times);
9923 	ESTAT_ADD(tx_collide_7times);
9924 	ESTAT_ADD(tx_collide_8times);
9925 	ESTAT_ADD(tx_collide_9times);
9926 	ESTAT_ADD(tx_collide_10times);
9927 	ESTAT_ADD(tx_collide_11times);
9928 	ESTAT_ADD(tx_collide_12times);
9929 	ESTAT_ADD(tx_collide_13times);
9930 	ESTAT_ADD(tx_collide_14times);
9931 	ESTAT_ADD(tx_collide_15times);
9932 	ESTAT_ADD(tx_ucast_packets);
9933 	ESTAT_ADD(tx_mcast_packets);
9934 	ESTAT_ADD(tx_bcast_packets);
9935 	ESTAT_ADD(tx_carrier_sense_errors);
9936 	ESTAT_ADD(tx_discards);
9937 	ESTAT_ADD(tx_errors);
9938 
9939 	ESTAT_ADD(dma_writeq_full);
9940 	ESTAT_ADD(dma_write_prioq_full);
9941 	ESTAT_ADD(rxbds_empty);
9942 	ESTAT_ADD(rx_discards);
9943 	ESTAT_ADD(rx_errors);
9944 	ESTAT_ADD(rx_threshold_hit);
9945 
9946 	ESTAT_ADD(dma_readq_full);
9947 	ESTAT_ADD(dma_read_prioq_full);
9948 	ESTAT_ADD(tx_comp_queue_full);
9949 
9950 	ESTAT_ADD(ring_set_send_prod_index);
9951 	ESTAT_ADD(ring_status_update);
9952 	ESTAT_ADD(nic_irqs);
9953 	ESTAT_ADD(nic_avoided_irqs);
9954 	ESTAT_ADD(nic_tx_threshold_hit);
9955 
9956 	ESTAT_ADD(mbuf_lwm_thresh_hit);
9957 
9958 	return estats;
9959 }
9960 
9961 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9962 						 struct rtnl_link_stats64 *stats)
9963 {
9964 	struct tg3 *tp = netdev_priv(dev);
9965 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9966 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9967 
9968 	if (!hw_stats)
9969 		return old_stats;
9970 
9971 	stats->rx_packets = old_stats->rx_packets +
9972 		get_stat64(&hw_stats->rx_ucast_packets) +
9973 		get_stat64(&hw_stats->rx_mcast_packets) +
9974 		get_stat64(&hw_stats->rx_bcast_packets);
9975 
9976 	stats->tx_packets = old_stats->tx_packets +
9977 		get_stat64(&hw_stats->tx_ucast_packets) +
9978 		get_stat64(&hw_stats->tx_mcast_packets) +
9979 		get_stat64(&hw_stats->tx_bcast_packets);
9980 
9981 	stats->rx_bytes = old_stats->rx_bytes +
9982 		get_stat64(&hw_stats->rx_octets);
9983 	stats->tx_bytes = old_stats->tx_bytes +
9984 		get_stat64(&hw_stats->tx_octets);
9985 
9986 	stats->rx_errors = old_stats->rx_errors +
9987 		get_stat64(&hw_stats->rx_errors);
9988 	stats->tx_errors = old_stats->tx_errors +
9989 		get_stat64(&hw_stats->tx_errors) +
9990 		get_stat64(&hw_stats->tx_mac_errors) +
9991 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
9992 		get_stat64(&hw_stats->tx_discards);
9993 
9994 	stats->multicast = old_stats->multicast +
9995 		get_stat64(&hw_stats->rx_mcast_packets);
9996 	stats->collisions = old_stats->collisions +
9997 		get_stat64(&hw_stats->tx_collisions);
9998 
9999 	stats->rx_length_errors = old_stats->rx_length_errors +
10000 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
10001 		get_stat64(&hw_stats->rx_undersize_packets);
10002 
10003 	stats->rx_over_errors = old_stats->rx_over_errors +
10004 		get_stat64(&hw_stats->rxbds_empty);
10005 	stats->rx_frame_errors = old_stats->rx_frame_errors +
10006 		get_stat64(&hw_stats->rx_align_errors);
10007 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10008 		get_stat64(&hw_stats->tx_discards);
10009 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10010 		get_stat64(&hw_stats->tx_carrier_sense_errors);
10011 
10012 	stats->rx_crc_errors = old_stats->rx_crc_errors +
10013 		calc_crc_errors(tp);
10014 
10015 	stats->rx_missed_errors = old_stats->rx_missed_errors +
10016 		get_stat64(&hw_stats->rx_discards);
10017 
10018 	stats->rx_dropped = tp->rx_dropped;
10019 	stats->tx_dropped = tp->tx_dropped;
10020 
10021 	return stats;
10022 }
10023 
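/* Bitwise CRC-32 (standard Ethernet polynomial, reflected form
 * 0xedb88320); used below to hash multicast addresses into the MAC hash
 * filter and to verify NVRAM checksums.
 */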
10024 static inline u32 calc_crc(unsigned char *buf, int len)
10025 {
10026 	u32 reg;
10027 	u32 tmp;
10028 	int j, k;
10029 
10030 	reg = 0xffffffff;
10031 
10032 	for (j = 0; j < len; j++) {
10033 		reg ^= buf[j];
10034 
10035 		for (k = 0; k < 8; k++) {
10036 			tmp = reg & 0x01;
10037 
10038 			reg >>= 1;
10039 
10040 			if (tmp)
10041 				reg ^= 0xedb88320;
10042 		}
10043 	}
10044 
10045 	return ~reg;
10046 }
10047 
10048 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10049 {
10050 	/* accept or reject all multicast frames */
10051 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10052 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10053 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10054 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10055 }
10056 
10057 static void __tg3_set_rx_mode(struct net_device *dev)
10058 {
10059 	struct tg3 *tp = netdev_priv(dev);
10060 	u32 rx_mode;
10061 
10062 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10063 				  RX_MODE_KEEP_VLAN_TAG);
10064 
10065 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10066 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10067 	 * flag clear.
10068 	 */
10069 	if (!tg3_flag(tp, ENABLE_ASF))
10070 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10071 #endif
10072 
10073 	if (dev->flags & IFF_PROMISC) {
10074 		/* Promiscuous mode. */
10075 		rx_mode |= RX_MODE_PROMISC;
10076 	} else if (dev->flags & IFF_ALLMULTI) {
10077 		/* Accept all multicast. */
10078 		tg3_set_multi(tp, 1);
10079 	} else if (netdev_mc_empty(dev)) {
10080 		/* Reject all multicast. */
10081 		tg3_set_multi(tp, 0);
10082 	} else {
10083 		/* Accept one or more multicast(s). */
10084 		struct netdev_hw_addr *ha;
10085 		u32 mc_filter[4] = { 0, };
10086 		u32 regidx;
10087 		u32 bit;
10088 		u32 crc;
10089 
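		/* Hash each address with CRC-32 and use the inverted low
		 * 7 bits to select one of the 128 filter bits: bits 6:5
		 * pick the hash register, bits 4:0 the bit within it.
		 */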
10090 		netdev_for_each_mc_addr(ha, dev) {
10091 			crc = calc_crc(ha->addr, ETH_ALEN);
10092 			bit = ~crc & 0x7f;
10093 			regidx = (bit & 0x60) >> 5;
10094 			bit &= 0x1f;
10095 			mc_filter[regidx] |= (1 << bit);
10096 		}
10097 
10098 		tw32(MAC_HASH_REG_0, mc_filter[0]);
10099 		tw32(MAC_HASH_REG_1, mc_filter[1]);
10100 		tw32(MAC_HASH_REG_2, mc_filter[2]);
10101 		tw32(MAC_HASH_REG_3, mc_filter[3]);
10102 	}
10103 
10104 	if (rx_mode != tp->rx_mode) {
10105 		tp->rx_mode = rx_mode;
10106 		tw32_f(MAC_RX_MODE, rx_mode);
10107 		udelay(10);
10108 	}
10109 }
10110 
10111 static void tg3_set_rx_mode(struct net_device *dev)
10112 {
10113 	struct tg3 *tp = netdev_priv(dev);
10114 
10115 	if (!netif_running(dev))
10116 		return;
10117 
10118 	tg3_full_lock(tp, 0);
10119 	__tg3_set_rx_mode(dev);
10120 	tg3_full_unlock(tp);
10121 }
10122 
10123 static int tg3_get_regs_len(struct net_device *dev)
10124 {
10125 	return TG3_REG_BLK_SIZE;
10126 }
10127 
10128 static void tg3_get_regs(struct net_device *dev,
10129 		struct ethtool_regs *regs, void *_p)
10130 {
10131 	struct tg3 *tp = netdev_priv(dev);
10132 
10133 	regs->version = 0;
10134 
10135 	memset(_p, 0, TG3_REG_BLK_SIZE);
10136 
10137 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10138 		return;
10139 
10140 	tg3_full_lock(tp, 0);
10141 
10142 	tg3_dump_legacy_regs(tp, (u32 *)_p);
10143 
10144 	tg3_full_unlock(tp);
10145 }
10146 
10147 static int tg3_get_eeprom_len(struct net_device *dev)
10148 {
10149 	struct tg3 *tp = netdev_priv(dev);
10150 
10151 	return tp->nvram_size;
10152 }
10153 
10154 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10155 {
10156 	struct tg3 *tp = netdev_priv(dev);
10157 	int ret;
10158 	u8  *pd;
10159 	u32 i, offset, len, b_offset, b_count;
10160 	__be32 val;
10161 
10162 	if (tg3_flag(tp, NO_NVRAM))
10163 		return -EINVAL;
10164 
10165 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10166 		return -EAGAIN;
10167 
10168 	offset = eeprom->offset;
10169 	len = eeprom->len;
10170 	eeprom->len = 0;
10171 
10172 	eeprom->magic = TG3_EEPROM_MAGIC;
10173 
10174 	if (offset & 3) {
10175 		/* adjustments to start on the required 4-byte boundary */
10176 		b_offset = offset & 3;
10177 		b_count = 4 - b_offset;
10178 		if (b_count > len) {
10179 			/* i.e. offset=1 len=2 */
10180 			b_count = len;
10181 		}
10182 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10183 		if (ret)
10184 			return ret;
10185 		memcpy(data, ((char *)&val) + b_offset, b_count);
10186 		len -= b_count;
10187 		offset += b_count;
10188 		eeprom->len += b_count;
10189 	}
10190 
10191 	/* read bytes up to the last 4-byte boundary */
10192 	pd = &data[eeprom->len];
10193 	for (i = 0; i < (len - (len & 3)); i += 4) {
10194 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
10195 		if (ret) {
10196 			eeprom->len += i;
10197 			return ret;
10198 		}
10199 		memcpy(pd + i, &val, 4);
10200 	}
10201 	eeprom->len += i;
10202 
10203 	if (len & 3) {
10204 		/* read the last bytes not ending on a 4-byte boundary */
10205 		pd = &data[eeprom->len];
10206 		b_count = len & 3;
10207 		b_offset = offset + len - b_count;
10208 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
10209 		if (ret)
10210 			return ret;
10211 		memcpy(pd, &val, b_count);
10212 		eeprom->len += b_count;
10213 	}
10214 	return 0;
10215 }
10216 
10217 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10218 
10219 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10220 {
10221 	struct tg3 *tp = netdev_priv(dev);
10222 	int ret;
10223 	u32 offset, len, b_offset, odd_len;
10224 	u8 *buf;
10225 	__be32 start, end;
10226 
10227 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10228 		return -EAGAIN;
10229 
10230 	if (tg3_flag(tp, NO_NVRAM) ||
10231 	    eeprom->magic != TG3_EEPROM_MAGIC)
10232 		return -EINVAL;
10233 
10234 	offset = eeprom->offset;
10235 	len = eeprom->len;
10236 
10237 	if ((b_offset = (offset & 3))) {
10238 		/* adjustments to start on the required 4-byte boundary */
10239 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10240 		if (ret)
10241 			return ret;
10242 		len += b_offset;
10243 		offset &= ~3;
10244 		if (len < 4)
10245 			len = 4;
10246 	}
10247 
10248 	odd_len = 0;
10249 	if (len & 3) {
10250 		/* adjustments to end on the required 4-byte boundary */
10251 		odd_len = 1;
10252 		len = (len + 3) & ~3;
10253 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10254 		if (ret)
10255 			return ret;
10256 	}
10257 
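	/* NVRAM is written in whole 4-byte words.  If either end of the
	 * request is unaligned, stage the data in a scratch buffer and
	 * merge in the boundary words read back above (read-modify-write).
	 */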
10258 	buf = data;
10259 	if (b_offset || odd_len) {
10260 		buf = kmalloc(len, GFP_KERNEL);
10261 		if (!buf)
10262 			return -ENOMEM;
10263 		if (b_offset)
10264 			memcpy(buf, &start, 4);
10265 		if (odd_len)
10266 			memcpy(buf+len-4, &end, 4);
10267 		memcpy(buf + b_offset, data, eeprom->len);
10268 	}
10269 
10270 	ret = tg3_nvram_write_block(tp, offset, len, buf);
10271 
10272 	if (buf != data)
10273 		kfree(buf);
10274 
10275 	return ret;
10276 }
10277 
10278 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10279 {
10280 	struct tg3 *tp = netdev_priv(dev);
10281 
10282 	if (tg3_flag(tp, USE_PHYLIB)) {
10283 		struct phy_device *phydev;
10284 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10285 			return -EAGAIN;
10286 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10287 		return phy_ethtool_gset(phydev, cmd);
10288 	}
10289 
10290 	cmd->supported = (SUPPORTED_Autoneg);
10291 
10292 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10293 		cmd->supported |= (SUPPORTED_1000baseT_Half |
10294 				   SUPPORTED_1000baseT_Full);
10295 
10296 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10297 		cmd->supported |= (SUPPORTED_100baseT_Half |
10298 				  SUPPORTED_100baseT_Full |
10299 				  SUPPORTED_10baseT_Half |
10300 				  SUPPORTED_10baseT_Full |
10301 				  SUPPORTED_TP);
10302 		cmd->port = PORT_TP;
10303 	} else {
10304 		cmd->supported |= SUPPORTED_FIBRE;
10305 		cmd->port = PORT_FIBRE;
10306 	}
10307 
10308 	cmd->advertising = tp->link_config.advertising;
10309 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
10310 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10311 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10312 				cmd->advertising |= ADVERTISED_Pause;
10313 			} else {
10314 				cmd->advertising |= ADVERTISED_Pause |
10315 						    ADVERTISED_Asym_Pause;
10316 			}
10317 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10318 			cmd->advertising |= ADVERTISED_Asym_Pause;
10319 		}
10320 	}
10321 	if (netif_running(dev)) {
10322 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10323 		cmd->duplex = tp->link_config.active_duplex;
10324 	} else {
10325 		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10326 		cmd->duplex = DUPLEX_INVALID;
10327 	}
10328 	cmd->phy_address = tp->phy_addr;
10329 	cmd->transceiver = XCVR_INTERNAL;
10330 	cmd->autoneg = tp->link_config.autoneg;
10331 	cmd->maxtxpkt = 0;
10332 	cmd->maxrxpkt = 0;
10333 	return 0;
10334 }
10335 
10336 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10337 {
10338 	struct tg3 *tp = netdev_priv(dev);
10339 	u32 speed = ethtool_cmd_speed(cmd);
10340 
10341 	if (tg3_flag(tp, USE_PHYLIB)) {
10342 		struct phy_device *phydev;
10343 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10344 			return -EAGAIN;
10345 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10346 		return phy_ethtool_sset(phydev, cmd);
10347 	}
10348 
10349 	if (cmd->autoneg != AUTONEG_ENABLE &&
10350 	    cmd->autoneg != AUTONEG_DISABLE)
10351 		return -EINVAL;
10352 
10353 	if (cmd->autoneg == AUTONEG_DISABLE &&
10354 	    cmd->duplex != DUPLEX_FULL &&
10355 	    cmd->duplex != DUPLEX_HALF)
10356 		return -EINVAL;
10357 
10358 	if (cmd->autoneg == AUTONEG_ENABLE) {
10359 		u32 mask = ADVERTISED_Autoneg |
10360 			   ADVERTISED_Pause |
10361 			   ADVERTISED_Asym_Pause;
10362 
10363 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10364 			mask |= ADVERTISED_1000baseT_Half |
10365 				ADVERTISED_1000baseT_Full;
10366 
10367 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10368 			mask |= ADVERTISED_100baseT_Half |
10369 				ADVERTISED_100baseT_Full |
10370 				ADVERTISED_10baseT_Half |
10371 				ADVERTISED_10baseT_Full |
10372 				ADVERTISED_TP;
10373 		else
10374 			mask |= ADVERTISED_FIBRE;
10375 
10376 		if (cmd->advertising & ~mask)
10377 			return -EINVAL;
10378 
10379 		mask &= (ADVERTISED_1000baseT_Half |
10380 			 ADVERTISED_1000baseT_Full |
10381 			 ADVERTISED_100baseT_Half |
10382 			 ADVERTISED_100baseT_Full |
10383 			 ADVERTISED_10baseT_Half |
10384 			 ADVERTISED_10baseT_Full);
10385 
10386 		cmd->advertising &= mask;
10387 	} else {
10388 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10389 			if (speed != SPEED_1000)
10390 				return -EINVAL;
10391 
10392 			if (cmd->duplex != DUPLEX_FULL)
10393 				return -EINVAL;
10394 		} else {
10395 			if (speed != SPEED_100 &&
10396 			    speed != SPEED_10)
10397 				return -EINVAL;
10398 		}
10399 	}
10400 
10401 	tg3_full_lock(tp, 0);
10402 
10403 	tp->link_config.autoneg = cmd->autoneg;
10404 	if (cmd->autoneg == AUTONEG_ENABLE) {
10405 		tp->link_config.advertising = (cmd->advertising |
10406 					      ADVERTISED_Autoneg);
10407 		tp->link_config.speed = SPEED_INVALID;
10408 		tp->link_config.duplex = DUPLEX_INVALID;
10409 	} else {
10410 		tp->link_config.advertising = 0;
10411 		tp->link_config.speed = speed;
10412 		tp->link_config.duplex = cmd->duplex;
10413 	}
10414 
10415 	tp->link_config.orig_speed = tp->link_config.speed;
10416 	tp->link_config.orig_duplex = tp->link_config.duplex;
10417 	tp->link_config.orig_autoneg = tp->link_config.autoneg;
10418 
10419 	if (netif_running(dev))
10420 		tg3_setup_phy(tp, 1);
10421 
10422 	tg3_full_unlock(tp);
10423 
10424 	return 0;
10425 }
10426 
10427 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10428 {
10429 	struct tg3 *tp = netdev_priv(dev);
10430 
10431 	strcpy(info->driver, DRV_MODULE_NAME);
10432 	strcpy(info->version, DRV_MODULE_VERSION);
10433 	strcpy(info->fw_version, tp->fw_ver);
10434 	strcpy(info->bus_info, pci_name(tp->pdev));
10435 }
10436 
10437 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10438 {
10439 	struct tg3 *tp = netdev_priv(dev);
10440 
10441 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10442 		wol->supported = WAKE_MAGIC;
10443 	else
10444 		wol->supported = 0;
10445 	wol->wolopts = 0;
10446 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10447 		wol->wolopts = WAKE_MAGIC;
10448 	memset(&wol->sopass, 0, sizeof(wol->sopass));
10449 }
10450 
10451 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10452 {
10453 	struct tg3 *tp = netdev_priv(dev);
10454 	struct device *dp = &tp->pdev->dev;
10455 
10456 	if (wol->wolopts & ~WAKE_MAGIC)
10457 		return -EINVAL;
10458 	if ((wol->wolopts & WAKE_MAGIC) &&
10459 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10460 		return -EINVAL;
10461 
10462 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10463 
10464 	spin_lock_bh(&tp->lock);
10465 	if (device_may_wakeup(dp))
10466 		tg3_flag_set(tp, WOL_ENABLE);
10467 	else
10468 		tg3_flag_clear(tp, WOL_ENABLE);
10469 	spin_unlock_bh(&tp->lock);
10470 
10471 	return 0;
10472 }
10473 
10474 static u32 tg3_get_msglevel(struct net_device *dev)
10475 {
10476 	struct tg3 *tp = netdev_priv(dev);
10477 	return tp->msg_enable;
10478 }
10479 
10480 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10481 {
10482 	struct tg3 *tp = netdev_priv(dev);
10483 	tp->msg_enable = value;
10484 }
10485 
10486 static int tg3_nway_reset(struct net_device *dev)
10487 {
10488 	struct tg3 *tp = netdev_priv(dev);
10489 	int r;
10490 
10491 	if (!netif_running(dev))
10492 		return -EAGAIN;
10493 
10494 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10495 		return -EINVAL;
10496 
10497 	if (tg3_flag(tp, USE_PHYLIB)) {
10498 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10499 			return -EAGAIN;
10500 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10501 	} else {
10502 		u32 bmcr;
10503 
10504 		spin_lock_bh(&tp->lock);
10505 		r = -EINVAL;
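		/* BMCR is deliberately read twice: the result of the
		 * first read is discarded and only the second, checked
		 * read is used.
		 */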
10506 		tg3_readphy(tp, MII_BMCR, &bmcr);
10507 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10508 		    ((bmcr & BMCR_ANENABLE) ||
10509 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10510 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10511 						   BMCR_ANENABLE);
10512 			r = 0;
10513 		}
10514 		spin_unlock_bh(&tp->lock);
10515 	}
10516 
10517 	return r;
10518 }
10519 
10520 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10521 {
10522 	struct tg3 *tp = netdev_priv(dev);
10523 
10524 	ering->rx_max_pending = tp->rx_std_ring_mask;
10525 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10526 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10527 	else
10528 		ering->rx_jumbo_max_pending = 0;
10529 
10530 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10531 
10532 	ering->rx_pending = tp->rx_pending;
10533 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
10534 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10535 	else
10536 		ering->rx_jumbo_pending = 0;
10537 
10538 	ering->tx_pending = tp->napi[0].tx_pending;
10539 }
10540 
10541 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10542 {
10543 	struct tg3 *tp = netdev_priv(dev);
10544 	int i, irq_sync = 0, err = 0;
10545 
10546 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10547 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10548 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10549 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
10550 	    (tg3_flag(tp, TSO_BUG) &&
10551 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10552 		return -EINVAL;
10553 
10554 	if (netif_running(dev)) {
10555 		tg3_phy_stop(tp);
10556 		tg3_netif_stop(tp);
10557 		irq_sync = 1;
10558 	}
10559 
10560 	tg3_full_lock(tp, irq_sync);
10561 
10562 	tp->rx_pending = ering->rx_pending;
10563 
10564 	if (tg3_flag(tp, MAX_RXPEND_64) &&
10565 	    tp->rx_pending > 63)
10566 		tp->rx_pending = 63;
10567 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10568 
10569 	for (i = 0; i < tp->irq_max; i++)
10570 		tp->napi[i].tx_pending = ering->tx_pending;
10571 
10572 	if (netif_running(dev)) {
10573 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10574 		err = tg3_restart_hw(tp, 1);
10575 		if (!err)
10576 			tg3_netif_start(tp);
10577 	}
10578 
10579 	tg3_full_unlock(tp);
10580 
10581 	if (irq_sync && !err)
10582 		tg3_phy_start(tp);
10583 
10584 	return err;
10585 }
10586 
10587 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10588 {
10589 	struct tg3 *tp = netdev_priv(dev);
10590 
10591 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10592 
10593 	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10594 		epause->rx_pause = 1;
10595 	else
10596 		epause->rx_pause = 0;
10597 
10598 	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10599 		epause->tx_pause = 1;
10600 	else
10601 		epause->tx_pause = 0;
10602 }
10603 
10604 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10605 {
10606 	struct tg3 *tp = netdev_priv(dev);
10607 	int err = 0;
10608 
10609 	if (tg3_flag(tp, USE_PHYLIB)) {
10610 		u32 newadv;
10611 		struct phy_device *phydev;
10612 
10613 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10614 
10615 		if (!(phydev->supported & SUPPORTED_Pause) ||
10616 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10617 		     (epause->rx_pause != epause->tx_pause)))
10618 			return -EINVAL;
10619 
10620 		tp->link_config.flowctrl = 0;
10621 		if (epause->rx_pause) {
10622 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10623 
10624 			if (epause->tx_pause) {
10625 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
10626 				newadv = ADVERTISED_Pause;
10627 			} else
10628 				newadv = ADVERTISED_Pause |
10629 					 ADVERTISED_Asym_Pause;
10630 		} else if (epause->tx_pause) {
10631 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10632 			newadv = ADVERTISED_Asym_Pause;
10633 		} else
10634 			newadv = 0;
10635 
10636 		if (epause->autoneg)
10637 			tg3_flag_set(tp, PAUSE_AUTONEG);
10638 		else
10639 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10640 
10641 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10642 			u32 oldadv = phydev->advertising &
10643 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10644 			if (oldadv != newadv) {
10645 				phydev->advertising &=
10646 					~(ADVERTISED_Pause |
10647 					  ADVERTISED_Asym_Pause);
10648 				phydev->advertising |= newadv;
10649 				if (phydev->autoneg) {
10650 					/*
10651 					 * Always renegotiate the link to
10652 					 * inform our link partner of our
10653 					 * flow control settings, even if the
10654 					 * flow control is forced.  Let
10655 					 * tg3_adjust_link() do the final
10656 					 * flow control setup.
10657 					 */
10658 					return phy_start_aneg(phydev);
10659 				}
10660 			}
10661 
10662 			if (!epause->autoneg)
10663 				tg3_setup_flow_control(tp, 0, 0);
10664 		} else {
10665 			tp->link_config.orig_advertising &=
10666 					~(ADVERTISED_Pause |
10667 					  ADVERTISED_Asym_Pause);
10668 			tp->link_config.orig_advertising |= newadv;
10669 		}
10670 	} else {
10671 		int irq_sync = 0;
10672 
10673 		if (netif_running(dev)) {
10674 			tg3_netif_stop(tp);
10675 			irq_sync = 1;
10676 		}
10677 
10678 		tg3_full_lock(tp, irq_sync);
10679 
10680 		if (epause->autoneg)
10681 			tg3_flag_set(tp, PAUSE_AUTONEG);
10682 		else
10683 			tg3_flag_clear(tp, PAUSE_AUTONEG);
10684 		if (epause->rx_pause)
10685 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
10686 		else
10687 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10688 		if (epause->tx_pause)
10689 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
10690 		else
10691 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10692 
10693 		if (netif_running(dev)) {
10694 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10695 			err = tg3_restart_hw(tp, 1);
10696 			if (!err)
10697 				tg3_netif_start(tp);
10698 		}
10699 
10700 		tg3_full_unlock(tp);
10701 	}
10702 
10703 	return err;
10704 }
10705 
10706 static int tg3_get_sset_count(struct net_device *dev, int sset)
10707 {
10708 	switch (sset) {
10709 	case ETH_SS_TEST:
10710 		return TG3_NUM_TEST;
10711 	case ETH_SS_STATS:
10712 		return TG3_NUM_STATS;
10713 	default:
10714 		return -EOPNOTSUPP;
10715 	}
10716 }
10717 
10718 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10719 {
10720 	switch (stringset) {
10721 	case ETH_SS_STATS:
10722 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10723 		break;
10724 	case ETH_SS_TEST:
10725 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10726 		break;
10727 	default:
10728 		WARN_ON(1);	/* we need a WARN() */
10729 		break;
10730 	}
10731 }
10732 
10733 static int tg3_set_phys_id(struct net_device *dev,
10734 			    enum ethtool_phys_id_state state)
10735 {
10736 	struct tg3 *tp = netdev_priv(dev);
10737 
10738 	if (!netif_running(tp->dev))
10739 		return -EAGAIN;
10740 
10741 	switch (state) {
10742 	case ETHTOOL_ID_ACTIVE:
10743 		return 1;	/* cycle on/off once per second */
10744 
10745 	case ETHTOOL_ID_ON:
10746 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10747 		     LED_CTRL_1000MBPS_ON |
10748 		     LED_CTRL_100MBPS_ON |
10749 		     LED_CTRL_10MBPS_ON |
10750 		     LED_CTRL_TRAFFIC_OVERRIDE |
10751 		     LED_CTRL_TRAFFIC_BLINK |
10752 		     LED_CTRL_TRAFFIC_LED);
10753 		break;
10754 
10755 	case ETHTOOL_ID_OFF:
10756 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10757 		     LED_CTRL_TRAFFIC_OVERRIDE);
10758 		break;
10759 
10760 	case ETHTOOL_ID_INACTIVE:
10761 		tw32(MAC_LED_CTRL, tp->led_ctrl);
10762 		break;
10763 	}
10764 
10765 	return 0;
10766 }
10767 
10768 static void tg3_get_ethtool_stats(struct net_device *dev,
10769 				   struct ethtool_stats *estats, u64 *tmp_stats)
10770 {
10771 	struct tg3 *tp = netdev_priv(dev);
10772 	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10773 }
10774 
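/* Locate and read the VPD block: on EEPROM-style NVRAM, prefer an
 * extended-VPD directory entry and fall back to the fixed VPD offset;
 * for other NVRAM types, read the VPD through PCI config space.
 */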
10775 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10776 {
10777 	int i;
10778 	__be32 *buf;
10779 	u32 offset = 0, len = 0;
10780 	u32 magic, val;
10781 
10782 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10783 		return NULL;
10784 
10785 	if (magic == TG3_EEPROM_MAGIC) {
10786 		for (offset = TG3_NVM_DIR_START;
10787 		     offset < TG3_NVM_DIR_END;
10788 		     offset += TG3_NVM_DIRENT_SIZE) {
10789 			if (tg3_nvram_read(tp, offset, &val))
10790 				return NULL;
10791 
10792 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10793 			    TG3_NVM_DIRTYPE_EXTVPD)
10794 				break;
10795 		}
10796 
10797 		if (offset != TG3_NVM_DIR_END) {
10798 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10799 			if (tg3_nvram_read(tp, offset + 4, &offset))
10800 				return NULL;
10801 
10802 			offset = tg3_nvram_logical_addr(tp, offset);
10803 		}
10804 	}
10805 
10806 	if (!offset || !len) {
10807 		offset = TG3_NVM_VPD_OFF;
10808 		len = TG3_NVM_VPD_LEN;
10809 	}
10810 
10811 	buf = kmalloc(len, GFP_KERNEL);
10812 	if (buf == NULL)
10813 		return NULL;
10814 
10815 	if (magic == TG3_EEPROM_MAGIC) {
10816 		for (i = 0; i < len; i += 4) {
10817 			/* The data is in little-endian format in NVRAM.
10818 			 * Use the big-endian read routines to preserve
10819 			 * the byte order as it exists in NVRAM.
10820 			 */
10821 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10822 				goto error;
10823 		}
10824 	} else {
10825 		u8 *ptr;
10826 		ssize_t cnt;
10827 		unsigned int pos = 0;
10828 
10829 		ptr = (u8 *)&buf[0];
10830 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10831 			cnt = pci_read_vpd(tp->pdev, pos,
10832 					   len - pos, ptr);
10833 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
10834 				cnt = 0;
10835 			else if (cnt < 0)
10836 				goto error;
10837 		}
10838 		if (pos != len)
10839 			goto error;
10840 	}
10841 
10842 	*vpdlen = len;
10843 
10844 	return buf;
10845 
10846 error:
10847 	kfree(buf);
10848 	return NULL;
10849 }
10850 
10851 #define NVRAM_TEST_SIZE 0x100
10852 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
10853 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
10854 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
10855 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
10856 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
10857 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
10858 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10859 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10860 
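/* ethtool NVRAM self-test.  Firmware selfboot images carry a simple
 * byte checksum, hardware selfboot images carry per-byte parity bits,
 * and legacy images are verified via the bootstrap CRC at 0x10, the
 * manufacturing-block CRC at 0xfc, and the VPD checksum keyword.
 */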
10861 static int tg3_test_nvram(struct tg3 *tp)
10862 {
10863 	u32 csum, magic, len;
10864 	__be32 *buf;
10865 	int i, j, k, err = 0, size;
10866 
10867 	if (tg3_flag(tp, NO_NVRAM))
10868 		return 0;
10869 
10870 	if (tg3_nvram_read(tp, 0, &magic) != 0)
10871 		return -EIO;
10872 
10873 	if (magic == TG3_EEPROM_MAGIC)
10874 		size = NVRAM_TEST_SIZE;
10875 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10876 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10877 		    TG3_EEPROM_SB_FORMAT_1) {
10878 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10879 			case TG3_EEPROM_SB_REVISION_0:
10880 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10881 				break;
10882 			case TG3_EEPROM_SB_REVISION_2:
10883 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10884 				break;
10885 			case TG3_EEPROM_SB_REVISION_3:
10886 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10887 				break;
10888 			case TG3_EEPROM_SB_REVISION_4:
10889 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10890 				break;
10891 			case TG3_EEPROM_SB_REVISION_5:
10892 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10893 				break;
10894 			case TG3_EEPROM_SB_REVISION_6:
10895 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10896 				break;
10897 			default:
10898 				return -EIO;
10899 			}
10900 		} else
10901 			return 0;
10902 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10903 		size = NVRAM_SELFBOOT_HW_SIZE;
10904 	else
10905 		return -EIO;
10906 
10907 	buf = kmalloc(size, GFP_KERNEL);
10908 	if (buf == NULL)
10909 		return -ENOMEM;
10910 
10911 	err = -EIO;
10912 	for (i = 0, j = 0; i < size; i += 4, j++) {
10913 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
10914 		if (err)
10915 			break;
10916 	}
10917 	if (i < size)
10918 		goto out;
10919 
10920 	/* Selfboot format */
10921 	magic = be32_to_cpu(buf[0]);
10922 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10923 	    TG3_EEPROM_MAGIC_FW) {
10924 		u8 *buf8 = (u8 *) buf, csum8 = 0;
10925 
10926 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10927 		    TG3_EEPROM_SB_REVISION_2) {
10928 			/* For rev 2, the csum doesn't include the 4-byte MBA word. */
10929 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10930 				csum8 += buf8[i];
10931 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10932 				csum8 += buf8[i];
10933 		} else {
10934 			for (i = 0; i < size; i++)
10935 				csum8 += buf8[i];
10936 		}
10937 
10938 		if (csum8 == 0) {
10939 			err = 0;
10940 			goto out;
10941 		}
10942 
10943 		err = -EIO;
10944 		goto out;
10945 	}
10946 
10947 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10948 	    TG3_EEPROM_MAGIC_HW) {
10949 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10950 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10951 		u8 *buf8 = (u8 *) buf;
10952 
10953 		/* Separate the parity bits and the data bytes.  */
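			/* Bytes 0 and 8 each hold seven parity bits and
			 * bytes 16-17 hold the remaining fourteen; the
			 * other 28 bytes are data, one parity bit each.
			 */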
10954 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10955 			if ((i == 0) || (i == 8)) {
10956 				int l;
10957 				u8 msk;
10958 
10959 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10960 					parity[k++] = buf8[i] & msk;
10961 				i++;
10962 			} else if (i == 16) {
10963 				int l;
10964 				u8 msk;
10965 
10966 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10967 					parity[k++] = buf8[i] & msk;
10968 				i++;
10969 
10970 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10971 					parity[k++] = buf8[i] & msk;
10972 				i++;
10973 			}
10974 			data[j++] = buf8[i];
10975 		}
10976 
10977 		err = -EIO;
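		/* Each data byte plus its parity bit must show odd
		 * parity overall: fail when the byte's population count
		 * is odd with the parity bit set, or even with it clear.
		 */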
10978 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10979 			u8 hw8 = hweight8(data[i]);
10980 
10981 			if ((hw8 & 0x1) && parity[i])
10982 				goto out;
10983 			else if (!(hw8 & 0x1) && !parity[i])
10984 				goto out;
10985 		}
10986 		err = 0;
10987 		goto out;
10988 	}
10989 
10990 	err = -EIO;
10991 
10992 	/* Bootstrap checksum at offset 0x10 */
10993 	csum = calc_crc((unsigned char *) buf, 0x10);
10994 	if (csum != le32_to_cpu(buf[0x10/4]))
10995 		goto out;
10996 
10997 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10998 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10999 	if (csum != le32_to_cpu(buf[0xfc/4]))
11000 		goto out;
11001 
11002 	kfree(buf);
11003 
11004 	buf = tg3_vpd_readblock(tp, &len);
11005 	if (!buf)
11006 		return -ENOMEM;
11007 
11008 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11009 	if (i > 0) {
11010 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11011 		if (j < 0)
11012 			goto out;
11013 
11014 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11015 			goto out;
11016 
11017 		i += PCI_VPD_LRDT_TAG_SIZE;
11018 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11019 					      PCI_VPD_RO_KEYWORD_CHKSUM);
11020 		if (j > 0) {
11021 			u8 csum8 = 0;
11022 
11023 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
11024 
11025 			for (i = 0; i <= j; i++)
11026 				csum8 += ((u8 *)buf)[i];
11027 
11028 			if (csum8)
11029 				goto out;
11030 		}
11031 	}
11032 
11033 	err = 0;
11034 
11035 out:
11036 	kfree(buf);
11037 	return err;
11038 }
11039 
11040 #define TG3_SERDES_TIMEOUT_SEC	2
11041 #define TG3_COPPER_TIMEOUT_SEC	6
11042 
11043 static int tg3_test_link(struct tg3 *tp)
11044 {
11045 	int i, max;
11046 
11047 	if (!netif_running(tp->dev))
11048 		return -ENODEV;
11049 
11050 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11051 		max = TG3_SERDES_TIMEOUT_SEC;
11052 	else
11053 		max = TG3_COPPER_TIMEOUT_SEC;
11054 
11055 	for (i = 0; i < max; i++) {
11056 		if (netif_carrier_ok(tp->dev))
11057 			return 0;
11058 
11059 		if (msleep_interruptible(1000))
11060 			break;
11061 	}
11062 
11063 	return -EIO;
11064 }
11065 
11066 /* Only test the commonly used registers */
11067 static int tg3_test_registers(struct tg3 *tp)
11068 {
11069 	int i, is_5705, is_5750;
11070 	u32 offset, read_mask, write_mask, val, save_val, read_val;
11071 	static struct {
11072 		u16 offset;
11073 		u16 flags;
11074 #define TG3_FL_5705	0x1
11075 #define TG3_FL_NOT_5705	0x2
11076 #define TG3_FL_NOT_5788	0x4
11077 #define TG3_FL_NOT_5750	0x8
11078 		u32 read_mask;
11079 		u32 write_mask;
11080 	} reg_tbl[] = {
11081 		/* MAC Control Registers */
11082 		{ MAC_MODE, TG3_FL_NOT_5705,
11083 			0x00000000, 0x00ef6f8c },
11084 		{ MAC_MODE, TG3_FL_5705,
11085 			0x00000000, 0x01ef6b8c },
11086 		{ MAC_STATUS, TG3_FL_NOT_5705,
11087 			0x03800107, 0x00000000 },
11088 		{ MAC_STATUS, TG3_FL_5705,
11089 			0x03800100, 0x00000000 },
11090 		{ MAC_ADDR_0_HIGH, 0x0000,
11091 			0x00000000, 0x0000ffff },
11092 		{ MAC_ADDR_0_LOW, 0x0000,
11093 			0x00000000, 0xffffffff },
11094 		{ MAC_RX_MTU_SIZE, 0x0000,
11095 			0x00000000, 0x0000ffff },
11096 		{ MAC_TX_MODE, 0x0000,
11097 			0x00000000, 0x00000070 },
11098 		{ MAC_TX_LENGTHS, 0x0000,
11099 			0x00000000, 0x00003fff },
11100 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
11101 			0x00000000, 0x000007fc },
11102 		{ MAC_RX_MODE, TG3_FL_5705,
11103 			0x00000000, 0x000007dc },
11104 		{ MAC_HASH_REG_0, 0x0000,
11105 			0x00000000, 0xffffffff },
11106 		{ MAC_HASH_REG_1, 0x0000,
11107 			0x00000000, 0xffffffff },
11108 		{ MAC_HASH_REG_2, 0x0000,
11109 			0x00000000, 0xffffffff },
11110 		{ MAC_HASH_REG_3, 0x0000,
11111 			0x00000000, 0xffffffff },
11112 
11113 		/* Receive Data and Receive BD Initiator Control Registers. */
11114 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11115 			0x00000000, 0xffffffff },
11116 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11117 			0x00000000, 0xffffffff },
11118 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11119 			0x00000000, 0x00000003 },
11120 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11121 			0x00000000, 0xffffffff },
11122 		{ RCVDBDI_STD_BD+0, 0x0000,
11123 			0x00000000, 0xffffffff },
11124 		{ RCVDBDI_STD_BD+4, 0x0000,
11125 			0x00000000, 0xffffffff },
11126 		{ RCVDBDI_STD_BD+8, 0x0000,
11127 			0x00000000, 0xffff0002 },
11128 		{ RCVDBDI_STD_BD+0xc, 0x0000,
11129 			0x00000000, 0xffffffff },
11130 
11131 		/* Receive BD Initiator Control Registers. */
11132 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11133 			0x00000000, 0xffffffff },
11134 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
11135 			0x00000000, 0x000003ff },
11136 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11137 			0x00000000, 0xffffffff },
11138 
11139 		/* Host Coalescing Control Registers. */
11140 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
11141 			0x00000000, 0x00000004 },
11142 		{ HOSTCC_MODE, TG3_FL_5705,
11143 			0x00000000, 0x000000f6 },
11144 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11145 			0x00000000, 0xffffffff },
11146 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11147 			0x00000000, 0x000003ff },
11148 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11149 			0x00000000, 0xffffffff },
11150 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11151 			0x00000000, 0x000003ff },
11152 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11153 			0x00000000, 0xffffffff },
11154 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11155 			0x00000000, 0x000000ff },
11156 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11157 			0x00000000, 0xffffffff },
11158 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11159 			0x00000000, 0x000000ff },
11160 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11161 			0x00000000, 0xffffffff },
11162 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11163 			0x00000000, 0xffffffff },
11164 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11165 			0x00000000, 0xffffffff },
11166 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11167 			0x00000000, 0x000000ff },
11168 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11169 			0x00000000, 0xffffffff },
11170 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11171 			0x00000000, 0x000000ff },
11172 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11173 			0x00000000, 0xffffffff },
11174 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11175 			0x00000000, 0xffffffff },
11176 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11177 			0x00000000, 0xffffffff },
11178 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11179 			0x00000000, 0xffffffff },
11180 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11181 			0x00000000, 0xffffffff },
11182 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11183 			0xffffffff, 0x00000000 },
11184 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11185 			0xffffffff, 0x00000000 },
11186 
11187 		/* Buffer Manager Control Registers. */
11188 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11189 			0x00000000, 0x007fff80 },
11190 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11191 			0x00000000, 0x007fffff },
11192 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11193 			0x00000000, 0x0000003f },
11194 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11195 			0x00000000, 0x000001ff },
11196 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
11197 			0x00000000, 0x000001ff },
11198 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11199 			0xffffffff, 0x00000000 },
11200 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11201 			0xffffffff, 0x00000000 },
11202 
11203 		/* Mailbox Registers */
11204 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11205 			0x00000000, 0x000001ff },
11206 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11207 			0x00000000, 0x000001ff },
11208 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11209 			0x00000000, 0x000007ff },
11210 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11211 			0x00000000, 0x000001ff },
11212 
11213 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
11214 	};
11215 
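	/* For each applicable entry below: bits in read_mask must be
	 * read-only and retain their value, bits in write_mask must be
	 * writable, and bits outside both masks are left untested.
	 */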
11216 	is_5705 = is_5750 = 0;
11217 	if (tg3_flag(tp, 5705_PLUS)) {
11218 		is_5705 = 1;
11219 		if (tg3_flag(tp, 5750_PLUS))
11220 			is_5750 = 1;
11221 	}
11222 
11223 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11224 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11225 			continue;
11226 
11227 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11228 			continue;
11229 
11230 		if (tg3_flag(tp, IS_5788) &&
11231 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
11232 			continue;
11233 
11234 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11235 			continue;
11236 
11237 		offset = (u32) reg_tbl[i].offset;
11238 		read_mask = reg_tbl[i].read_mask;
11239 		write_mask = reg_tbl[i].write_mask;
11240 
11241 		/* Save the original register content */
11242 		save_val = tr32(offset);
11243 
11244 		/* Determine the read-only value. */
11245 		read_val = save_val & read_mask;
11246 
11247 		/* Write zero to the register, then make sure the read-only bits
11248 		 * are not changed and the read/write bits are all zeros.
11249 		 */
11250 		tw32(offset, 0);
11251 
11252 		val = tr32(offset);
11253 
11254 		/* Test the read-only and read/write bits. */
11255 		if (((val & read_mask) != read_val) || (val & write_mask))
11256 			goto out;
11257 
11258 		/* Write ones to all the bits defined by RdMask and WrMask, then
11259 		 * make sure the read-only bits are not changed and the
11260 		 * read/write bits are all ones.
11261 		 */
11262 		tw32(offset, read_mask | write_mask);
11263 
11264 		val = tr32(offset);
11265 
11266 		/* Test the read-only bits. */
11267 		if ((val & read_mask) != read_val)
11268 			goto out;
11269 
11270 		/* Test the read/write bits. */
11271 		if ((val & write_mask) != write_mask)
11272 			goto out;
11273 
11274 		tw32(offset, save_val);
11275 	}
11276 
11277 	return 0;
11278 
11279 out:
11280 	if (netif_msg_hw(tp))
11281 		netdev_err(tp->dev,
11282 			   "Register test failed at offset %x\n", offset);
11283 	tw32(offset, save_val);
11284 	return -EIO;
11285 }
11286 
11287 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11288 {
11289 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11290 	int i;
11291 	u32 j;
11292 
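	/* Walk the region one dword at a time, writing each pattern and
	 * reading it back to verify.
	 */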
11293 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11294 		for (j = 0; j < len; j += 4) {
11295 			u32 val;
11296 
11297 			tg3_write_mem(tp, offset + j, test_pattern[i]);
11298 			tg3_read_mem(tp, offset + j, &val);
11299 			if (val != test_pattern[i])
11300 				return -EIO;
11301 		}
11302 	}
11303 	return 0;
11304 }
11305 
11306 static int tg3_test_memory(struct tg3 *tp)
11307 {
11308 	static struct mem_entry {
11309 		u32 offset;
11310 		u32 len;
11311 	} mem_tbl_570x[] = {
11312 		{ 0x00000000, 0x00b50},
11313 		{ 0x00002000, 0x1c000},
11314 		{ 0xffffffff, 0x00000}
11315 	}, mem_tbl_5705[] = {
11316 		{ 0x00000100, 0x0000c},
11317 		{ 0x00000200, 0x00008},
11318 		{ 0x00004000, 0x00800},
11319 		{ 0x00006000, 0x01000},
11320 		{ 0x00008000, 0x02000},
11321 		{ 0x00010000, 0x0e000},
11322 		{ 0xffffffff, 0x00000}
11323 	}, mem_tbl_5755[] = {
11324 		{ 0x00000200, 0x00008},
11325 		{ 0x00004000, 0x00800},
11326 		{ 0x00006000, 0x00800},
11327 		{ 0x00008000, 0x02000},
11328 		{ 0x00010000, 0x0c000},
11329 		{ 0xffffffff, 0x00000}
11330 	}, mem_tbl_5906[] = {
11331 		{ 0x00000200, 0x00008},
11332 		{ 0x00004000, 0x00400},
11333 		{ 0x00006000, 0x00400},
11334 		{ 0x00008000, 0x01000},
11335 		{ 0x00010000, 0x01000},
11336 		{ 0xffffffff, 0x00000}
11337 	}, mem_tbl_5717[] = {
11338 		{ 0x00000200, 0x00008},
11339 		{ 0x00010000, 0x0a000},
11340 		{ 0x00020000, 0x13c00},
11341 		{ 0xffffffff, 0x00000}
11342 	}, mem_tbl_57765[] = {
11343 		{ 0x00000200, 0x00008},
11344 		{ 0x00004000, 0x00800},
11345 		{ 0x00006000, 0x09800},
11346 		{ 0x00010000, 0x0a000},
11347 		{ 0xffffffff, 0x00000}
11348 	};
11349 	struct mem_entry *mem_tbl;
11350 	int err = 0;
11351 	int i;
11352 
11353 	if (tg3_flag(tp, 5717_PLUS))
11354 		mem_tbl = mem_tbl_5717;
11355 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11356 		mem_tbl = mem_tbl_57765;
11357 	else if (tg3_flag(tp, 5755_PLUS))
11358 		mem_tbl = mem_tbl_5755;
11359 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11360 		mem_tbl = mem_tbl_5906;
11361 	else if (tg3_flag(tp, 5705_PLUS))
11362 		mem_tbl = mem_tbl_5705;
11363 	else
11364 		mem_tbl = mem_tbl_570x;
11365 
11366 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11367 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11368 		if (err)
11369 			break;
11370 	}
11371 
11372 	return err;
11373 }
11374 
11375 #define TG3_TSO_MSS		500
11376 
11377 #define TG3_TSO_IP_HDR_LEN	20
11378 #define TG3_TSO_TCP_HDR_LEN	20
11379 #define TG3_TSO_TCP_OPT_LEN	12
11380 
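/* Canned frame contents (everything after the two MAC addresses) for
 * the TSO loopback test: ethertype 0x0800, a 20-byte IPv4 header
 * (10.0.0.1 -> 10.0.0.2, protocol TCP, tot_len patched at runtime),
 * and a 32-byte TCP header whose 12 option bytes are two NOPs plus a
 * timestamp option.
 */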
11381 static const u8 tg3_tso_header[] = {
11382 0x08, 0x00,
11383 0x45, 0x00, 0x00, 0x00,
11384 0x00, 0x00, 0x40, 0x00,
11385 0x40, 0x06, 0x00, 0x00,
11386 0x0a, 0x00, 0x00, 0x01,
11387 0x0a, 0x00, 0x00, 0x02,
11388 0x0d, 0x00, 0xe0, 0x00,
11389 0x00, 0x00, 0x01, 0x00,
11390 0x00, 0x00, 0x02, 0x00,
11391 0x80, 0x10, 0x10, 0x00,
11392 0x14, 0x09, 0x00, 0x00,
11393 0x01, 0x01, 0x08, 0x0a,
11394 0x11, 0x11, 0x11, 0x11,
11395 0x11, 0x11, 0x11, 0x11,
11396 };
11397 
11398 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11399 {
11400 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11401 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11402 	u32 budget;
11403 	struct sk_buff *skb, *rx_skb;
11404 	u8 *tx_data;
11405 	dma_addr_t map;
11406 	int num_pkts, tx_len, rx_len, i, err;
11407 	struct tg3_rx_buffer_desc *desc;
11408 	struct tg3_napi *tnapi, *rnapi;
11409 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11410 
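	/* Build one frame, post it on the TX ring, kick the producer
	 * mailbox, poll the status block until the frame has been sent
	 * and looped back, then verify the received payload
	 * byte-for-byte.
	 */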
11411 	tnapi = &tp->napi[0];
11412 	rnapi = &tp->napi[0];
11413 	if (tp->irq_cnt > 1) {
11414 		if (tg3_flag(tp, ENABLE_RSS))
11415 			rnapi = &tp->napi[1];
11416 		if (tg3_flag(tp, ENABLE_TSS))
11417 			tnapi = &tp->napi[1];
11418 	}
11419 	coal_now = tnapi->coal_now | rnapi->coal_now;
11420 
11421 	err = -EIO;
11422 
11423 	tx_len = pktsz;
11424 	skb = netdev_alloc_skb(tp->dev, tx_len);
11425 	if (!skb)
11426 		return -ENOMEM;
11427 
11428 	tx_data = skb_put(skb, tx_len);
11429 	memcpy(tx_data, tp->dev->dev_addr, 6);
11430 	memset(tx_data + 6, 0x0, 8);
11431 
11432 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11433 
11434 	if (tso_loopback) {
11435 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11436 
11437 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11438 			      TG3_TSO_TCP_OPT_LEN;
11439 
11440 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11441 		       sizeof(tg3_tso_header));
11442 		mss = TG3_TSO_MSS;
11443 
11444 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11445 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11446 
11447 		/* Set the total length field in the IP header */
11448 		iph->tot_len = htons((u16)(mss + hdr_len));
11449 
11450 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
11451 			      TXD_FLAG_CPU_POST_DMA);
11452 
11453 		if (tg3_flag(tp, HW_TSO_1) ||
11454 		    tg3_flag(tp, HW_TSO_2) ||
11455 		    tg3_flag(tp, HW_TSO_3)) {
11456 			struct tcphdr *th;
11457 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11458 			th = (struct tcphdr *)&tx_data[val];
11459 			th->check = 0;
11460 		} else
11461 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
11462 
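		/* Each hardware TSO generation encodes the header length
		 * differently: HW_TSO_3 splits it between the upper mss
		 * bits and the descriptor flags, HW_TSO_2 carries it in
		 * mss bits 9 and up, and older parts encode only the TCP
		 * option length.
		 */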
11463 		if (tg3_flag(tp, HW_TSO_3)) {
11464 			mss |= (hdr_len & 0xc) << 12;
11465 			if (hdr_len & 0x10)
11466 				base_flags |= 0x00000010;
11467 			base_flags |= (hdr_len & 0x3e0) << 5;
11468 		} else if (tg3_flag(tp, HW_TSO_2))
11469 			mss |= hdr_len << 9;
11470 		else if (tg3_flag(tp, HW_TSO_1) ||
11471 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11472 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11473 		} else {
11474 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11475 		}
11476 
11477 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11478 	} else {
11479 		num_pkts = 1;
11480 		data_off = ETH_HLEN;
11481 	}
11482 
11483 	for (i = data_off; i < tx_len; i++)
11484 		tx_data[i] = (u8) (i & 0xff);
11485 
11486 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11487 	if (pci_dma_mapping_error(tp->pdev, map)) {
11488 		dev_kfree_skb(skb);
11489 		return -EIO;
11490 	}
11491 
11492 	val = tnapi->tx_prod;
11493 	tnapi->tx_buffers[val].skb = skb;
11494 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11495 
11496 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11497 	       rnapi->coal_now);
11498 
11499 	udelay(10);
11500 
11501 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11502 
11503 	budget = tg3_tx_avail(tnapi);
11504 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11505 			    base_flags | TXD_FLAG_END, mss, 0)) {
11506 		tnapi->tx_buffers[val].skb = NULL;
11507 		dev_kfree_skb(skb);
11508 		return -EIO;
11509 	}
11510 
11511 	tnapi->tx_prod++;
11512 
11513 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11514 	tr32_mailbox(tnapi->prodmbox);
11515 
11516 	udelay(10);
11517 
11518 	/* Poll up to 350 usec to allow enough time on some 10/100 Mbps devices. */
11519 	for (i = 0; i < 35; i++) {
11520 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11521 		       coal_now);
11522 
11523 		udelay(10);
11524 
11525 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11526 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
11527 		if ((tx_idx == tnapi->tx_prod) &&
11528 		    (rx_idx == (rx_start_idx + num_pkts)))
11529 			break;
11530 	}
11531 
11532 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11533 	dev_kfree_skb(skb);
11534 
11535 	if (tx_idx != tnapi->tx_prod)
11536 		goto out;
11537 
11538 	if (rx_idx != rx_start_idx + num_pkts)
11539 		goto out;
11540 
11541 	val = data_off;
11542 	while (rx_idx != rx_start_idx) {
11543 		desc = &rnapi->rx_rcb[rx_start_idx++];
11544 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11545 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11546 
11547 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11548 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11549 			goto out;
11550 
11551 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11552 			 - ETH_FCS_LEN;
11553 
11554 		if (!tso_loopback) {
11555 			if (rx_len != tx_len)
11556 				goto out;
11557 
11558 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11559 				if (opaque_key != RXD_OPAQUE_RING_STD)
11560 					goto out;
11561 			} else {
11562 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11563 					goto out;
11564 			}
11565 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11566 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11567 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
11568 			goto out;
11569 		}
11570 
11571 		if (opaque_key == RXD_OPAQUE_RING_STD) {
11572 			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11573 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11574 					     mapping);
11575 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11576 			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11577 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11578 					     mapping);
11579 		} else
11580 			goto out;
11581 
11582 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11583 					    PCI_DMA_FROMDEVICE);
11584 
11585 		for (i = data_off; i < rx_len; i++, val++) {
11586 			if (*(rx_skb->data + i) != (u8) (val & 0xff))
11587 				goto out;
11588 		}
11589 	}
11590 
11591 	err = 0;
11592 
11593 	/* tg3_free_rings will unmap and free the rx_skb */
11594 out:
11595 	return err;
11596 }
11597 
11598 #define TG3_STD_LOOPBACK_FAILED		1
11599 #define TG3_JMB_LOOPBACK_FAILED		2
11600 #define TG3_TSO_LOOPBACK_FAILED		4
11601 #define TG3_LOOPBACK_FAILED \
11602 	(TG3_STD_LOOPBACK_FAILED | \
11603 	 TG3_JMB_LOOPBACK_FAILED | \
11604 	 TG3_TSO_LOOPBACK_FAILED)
11605 
11606 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11607 {
11608 	int err = -EIO;
11609 	u32 eee_cap;
11610 
11611 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11612 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11613 
11614 	if (!netif_running(tp->dev)) {
11615 		data[0] = TG3_LOOPBACK_FAILED;
11616 		data[1] = TG3_LOOPBACK_FAILED;
11617 		if (do_extlpbk)
11618 			data[2] = TG3_LOOPBACK_FAILED;
11619 		goto done;
11620 	}
11621 
11622 	err = tg3_reset_hw(tp, 1);
11623 	if (err) {
11624 		data[0] = TG3_LOOPBACK_FAILED;
11625 		data[1] = TG3_LOOPBACK_FAILED;
11626 		if (do_extlpbk)
11627 			data[2] = TG3_LOOPBACK_FAILED;
11628 		goto done;
11629 	}
11630 
11631 	if (tg3_flag(tp, ENABLE_RSS)) {
11632 		int i;
11633 
11634 		/* Reroute all rx packets to the 1st queue */
11635 		for (i = MAC_RSS_INDIR_TBL_0;
11636 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11637 			tw32(i, 0x0);
11638 	}
11639 
11640 	/* HW errata - MAC loopback fails in some cases on the 5780.
11641 	 * Normal traffic and PHY loopback are not affected by the
11642 	 * errata.  Also, the MAC loopback test is deprecated for
11643 	 * all newer ASIC revisions.
11644 	 */
11645 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11646 	    !tg3_flag(tp, CPMU_PRESENT)) {
11647 		tg3_mac_loopback(tp, true);
11648 
11649 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11650 			data[0] |= TG3_STD_LOOPBACK_FAILED;
11651 
11652 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11653 		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11654 			data[0] |= TG3_JMB_LOOPBACK_FAILED;
11655 
11656 		tg3_mac_loopback(tp, false);
11657 	}
11658 
11659 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11660 	    !tg3_flag(tp, USE_PHYLIB)) {
11661 		int i;
11662 
11663 		tg3_phy_lpbk_set(tp, 0, false);
11664 
11665 		/* Wait for link */
11666 		for (i = 0; i < 100; i++) {
11667 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11668 				break;
11669 			mdelay(1);
11670 		}
11671 
11672 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11673 			data[1] |= TG3_STD_LOOPBACK_FAILED;
11674 		if (tg3_flag(tp, TSO_CAPABLE) &&
11675 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11676 			data[1] |= TG3_TSO_LOOPBACK_FAILED;
11677 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11678 		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11679 			data[1] |= TG3_JMB_LOOPBACK_FAILED;
11680 
11681 		if (do_extlpbk) {
11682 			tg3_phy_lpbk_set(tp, 0, true);
11683 
11684 			/* All link indications report up, but the hardware
11685 			 * isn't really ready for about 20 msec.  Double it
11686 			 * to be sure.
11687 			 */
11688 			mdelay(40);
11689 
11690 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11691 				data[2] |= TG3_STD_LOOPBACK_FAILED;
11692 			if (tg3_flag(tp, TSO_CAPABLE) &&
11693 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11694 				data[2] |= TG3_TSO_LOOPBACK_FAILED;
11695 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11696 			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11697 				data[2] |= TG3_JMB_LOOPBACK_FAILED;
11698 		}
11699 
11700 		/* Re-enable gphy autopowerdown. */
11701 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11702 			tg3_phy_toggle_apd(tp, true);
11703 	}
11704 
11705 	err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11706 
11707 done:
11708 	tp->phy_flags |= eee_cap;
11709 
11710 	return err;
11711 }
11712 
11713 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11714 			  u64 *data)
11715 {
11716 	struct tg3 *tp = netdev_priv(dev);
11717 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11718 
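	/* Test results land in these slots, per the assignments below:
	 * 0 = NVRAM, 1 = link, 2 = registers, 3 = memory,
	 * 4-6 = MAC/PHY/external loopback, 7 = interrupt.
	 */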
11719 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11720 	    tg3_power_up(tp)) {
11721 		etest->flags |= ETH_TEST_FL_FAILED;
11722 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11723 		return;
11724 	}
11725 
11726 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11727 
11728 	if (tg3_test_nvram(tp) != 0) {
11729 		etest->flags |= ETH_TEST_FL_FAILED;
11730 		data[0] = 1;
11731 	}
11732 	if (!doextlpbk && tg3_test_link(tp)) {
11733 		etest->flags |= ETH_TEST_FL_FAILED;
11734 		data[1] = 1;
11735 	}
11736 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
11737 		int err, err2 = 0, irq_sync = 0;
11738 
11739 		if (netif_running(dev)) {
11740 			tg3_phy_stop(tp);
11741 			tg3_netif_stop(tp);
11742 			irq_sync = 1;
11743 		}
11744 
11745 		tg3_full_lock(tp, irq_sync);
11746 
11747 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11748 		err = tg3_nvram_lock(tp);
11749 		tg3_halt_cpu(tp, RX_CPU_BASE);
11750 		if (!tg3_flag(tp, 5705_PLUS))
11751 			tg3_halt_cpu(tp, TX_CPU_BASE);
11752 		if (!err)
11753 			tg3_nvram_unlock(tp);
11754 
11755 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11756 			tg3_phy_reset(tp);
11757 
11758 		if (tg3_test_registers(tp) != 0) {
11759 			etest->flags |= ETH_TEST_FL_FAILED;
11760 			data[2] = 1;
11761 		}
11762 
11763 		if (tg3_test_memory(tp) != 0) {
11764 			etest->flags |= ETH_TEST_FL_FAILED;
11765 			data[3] = 1;
11766 		}
11767 
11768 		if (doextlpbk)
11769 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11770 
11771 		if (tg3_test_loopback(tp, &data[4], doextlpbk))
11772 			etest->flags |= ETH_TEST_FL_FAILED;
11773 
11774 		tg3_full_unlock(tp);
11775 
11776 		if (tg3_test_interrupt(tp) != 0) {
11777 			etest->flags |= ETH_TEST_FL_FAILED;
11778 			data[7] = 1;
11779 		}
11780 
11781 		tg3_full_lock(tp, 0);
11782 
11783 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11784 		if (netif_running(dev)) {
11785 			tg3_flag_set(tp, INIT_COMPLETE);
11786 			err2 = tg3_restart_hw(tp, 1);
11787 			if (!err2)
11788 				tg3_netif_start(tp);
11789 		}
11790 
11791 		tg3_full_unlock(tp);
11792 
11793 		if (irq_sync && !err2)
11794 			tg3_phy_start(tp);
11795 	}
11796 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11797 		tg3_power_down(tp);
11798 
11799 }
11800 
11801 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11802 {
11803 	struct mii_ioctl_data *data = if_mii(ifr);
11804 	struct tg3 *tp = netdev_priv(dev);
11805 	int err;
11806 
11807 	if (tg3_flag(tp, USE_PHYLIB)) {
11808 		struct phy_device *phydev;
11809 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11810 			return -EAGAIN;
11811 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11812 		return phy_mii_ioctl(phydev, ifr, cmd);
11813 	}
11814 
11815 	switch (cmd) {
11816 	case SIOCGMIIPHY:
11817 		data->phy_id = tp->phy_addr;
11818 
11819 		/* fallthru */
11820 	case SIOCGMIIREG: {
11821 		u32 mii_regval;
11822 
11823 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11824 			break;			/* We have no PHY */
11825 
11826 		if (!netif_running(dev))
11827 			return -EAGAIN;
11828 
11829 		spin_lock_bh(&tp->lock);
11830 		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11831 		spin_unlock_bh(&tp->lock);
11832 
11833 		data->val_out = mii_regval;
11834 
11835 		return err;
11836 	}
11837 
11838 	case SIOCSMIIREG:
11839 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11840 			break;			/* We have no PHY */
11841 
11842 		if (!netif_running(dev))
11843 			return -EAGAIN;
11844 
11845 		spin_lock_bh(&tp->lock);
11846 		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11847 		spin_unlock_bh(&tp->lock);
11848 
11849 		return err;
11850 
11851 	default:
11852 		/* do nothing */
11853 		break;
11854 	}
11855 	return -EOPNOTSUPP;
11856 }
11857 
11858 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11859 {
11860 	struct tg3 *tp = netdev_priv(dev);
11861 
11862 	memcpy(ec, &tp->coal, sizeof(*ec));
11863 	return 0;
11864 }
11865 
11866 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11867 {
11868 	struct tg3 *tp = netdev_priv(dev);
11869 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11870 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11871 
11872 	if (!tg3_flag(tp, 5705_PLUS)) {
11873 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11874 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11875 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11876 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11877 	}
11878 
11879 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11880 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11881 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11882 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11883 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11884 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11885 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11886 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11887 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11888 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11889 		return -EINVAL;
11890 
11891 	/* No rx interrupts will be generated if both are zero */
11892 	if ((ec->rx_coalesce_usecs == 0) &&
11893 	    (ec->rx_max_coalesced_frames == 0))
11894 		return -EINVAL;
11895 
11896 	/* No tx interrupts will be generated if both are zero */
11897 	if ((ec->tx_coalesce_usecs == 0) &&
11898 	    (ec->tx_max_coalesced_frames == 0))
11899 		return -EINVAL;
11900 
11901 	/* Only copy relevant parameters, ignore all others. */
11902 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11903 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11904 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11905 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11906 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11907 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11908 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11909 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11910 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11911 
11912 	if (netif_running(dev)) {
11913 		tg3_full_lock(tp, 0);
11914 		__tg3_set_coalesce(tp, &tp->coal);
11915 		tg3_full_unlock(tp);
11916 	}
11917 	return 0;
11918 }
11919 
11920 static const struct ethtool_ops tg3_ethtool_ops = {
11921 	.get_settings		= tg3_get_settings,
11922 	.set_settings		= tg3_set_settings,
11923 	.get_drvinfo		= tg3_get_drvinfo,
11924 	.get_regs_len		= tg3_get_regs_len,
11925 	.get_regs		= tg3_get_regs,
11926 	.get_wol		= tg3_get_wol,
11927 	.set_wol		= tg3_set_wol,
11928 	.get_msglevel		= tg3_get_msglevel,
11929 	.set_msglevel		= tg3_set_msglevel,
11930 	.nway_reset		= tg3_nway_reset,
11931 	.get_link		= ethtool_op_get_link,
11932 	.get_eeprom_len		= tg3_get_eeprom_len,
11933 	.get_eeprom		= tg3_get_eeprom,
11934 	.set_eeprom		= tg3_set_eeprom,
11935 	.get_ringparam		= tg3_get_ringparam,
11936 	.set_ringparam		= tg3_set_ringparam,
11937 	.get_pauseparam		= tg3_get_pauseparam,
11938 	.set_pauseparam		= tg3_set_pauseparam,
11939 	.self_test		= tg3_self_test,
11940 	.get_strings		= tg3_get_strings,
11941 	.set_phys_id		= tg3_set_phys_id,
11942 	.get_ethtool_stats	= tg3_get_ethtool_stats,
11943 	.get_coalesce		= tg3_get_coalesce,
11944 	.set_coalesce		= tg3_set_coalesce,
11945 	.get_sset_count		= tg3_get_sset_count,
11946 };
11947 
11948 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11949 {
11950 	u32 cursize, val, magic;
11951 
11952 	tp->nvram_size = EEPROM_CHIP_SIZE;
11953 
11954 	if (tg3_nvram_read(tp, 0, &magic) != 0)
11955 		return;
11956 
11957 	if ((magic != TG3_EEPROM_MAGIC) &&
11958 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11959 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11960 		return;
11961 
11962 	/*
11963 	 * Size the chip by reading offsets at increasing powers of two.
11964 	 * When we encounter our validation signature, we know the addressing
11965 	 * has wrapped around, and thus have our chip size.
11966 	 */
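	/* If, say, a 128 KB part ignores address bits above bit 16, the
	 * read at 0x20000 aliases offset 0 and returns the magic again.
	 */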
11967 	cursize = 0x10;
11968 
11969 	while (cursize < tp->nvram_size) {
11970 		if (tg3_nvram_read(tp, cursize, &val) != 0)
11971 			return;
11972 
11973 		if (val == magic)
11974 			break;
11975 
11976 		cursize <<= 1;
11977 	}
11978 
11979 	tp->nvram_size = cursize;
11980 }
11981 
11982 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11983 {
11984 	u32 val;
11985 
11986 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11987 		return;
11988 
11989 	/* Selfboot format */
11990 	if (val != TG3_EEPROM_MAGIC) {
11991 		tg3_get_eeprom_size(tp);
11992 		return;
11993 	}
11994 
11995 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11996 		if (val != 0) {
11997 			/* This is confusing.  We want to operate on the
11998 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11999 			 * call will read from NVRAM and byteswap the data
12000 			 * according to the byteswapping settings for all
12001 			 * other register accesses.  This ensures the data we
12002 			 * want will always reside in the lower 16-bits.
12003 			 * However, the data in NVRAM is in LE format, which
12004 			 * means the data from the NVRAM read will always be
12005 			 * opposite the endianness of the CPU.  The 16-bit
12006 			 * byteswap then brings the data to CPU endianness.
12007 			 */
12008 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12009 			return;
12010 		}
12011 	}
12012 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12013 }
12014 
12015 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12016 {
12017 	u32 nvcfg1;
12018 
12019 	nvcfg1 = tr32(NVRAM_CFG1);
12020 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12021 		tg3_flag_set(tp, FLASH);
12022 	} else {
12023 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12024 		tw32(NVRAM_CFG1, nvcfg1);
12025 	}
12026 
12027 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12028 	    tg3_flag(tp, 5780_CLASS)) {
12029 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12030 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12031 			tp->nvram_jedecnum = JEDEC_ATMEL;
12032 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12033 			tg3_flag_set(tp, NVRAM_BUFFERED);
12034 			break;
12035 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12036 			tp->nvram_jedecnum = JEDEC_ATMEL;
12037 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12038 			break;
12039 		case FLASH_VENDOR_ATMEL_EEPROM:
12040 			tp->nvram_jedecnum = JEDEC_ATMEL;
12041 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12042 			tg3_flag_set(tp, NVRAM_BUFFERED);
12043 			break;
12044 		case FLASH_VENDOR_ST:
12045 			tp->nvram_jedecnum = JEDEC_ST;
12046 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12047 			tg3_flag_set(tp, NVRAM_BUFFERED);
12048 			break;
12049 		case FLASH_VENDOR_SAIFUN:
12050 			tp->nvram_jedecnum = JEDEC_SAIFUN;
12051 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12052 			break;
12053 		case FLASH_VENDOR_SST_SMALL:
12054 		case FLASH_VENDOR_SST_LARGE:
12055 			tp->nvram_jedecnum = JEDEC_SST;
12056 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12057 			break;
12058 		}
12059 	} else {
12060 		tp->nvram_jedecnum = JEDEC_ATMEL;
12061 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12062 		tg3_flag_set(tp, NVRAM_BUFFERED);
12063 	}
12064 }
12065 
12066 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12067 {
12068 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12069 	case FLASH_5752PAGE_SIZE_256:
12070 		tp->nvram_pagesize = 256;
12071 		break;
12072 	case FLASH_5752PAGE_SIZE_512:
12073 		tp->nvram_pagesize = 512;
12074 		break;
12075 	case FLASH_5752PAGE_SIZE_1K:
12076 		tp->nvram_pagesize = 1024;
12077 		break;
12078 	case FLASH_5752PAGE_SIZE_2K:
12079 		tp->nvram_pagesize = 2048;
12080 		break;
12081 	case FLASH_5752PAGE_SIZE_4K:
12082 		tp->nvram_pagesize = 4096;
12083 		break;
12084 	case FLASH_5752PAGE_SIZE_264:
12085 		tp->nvram_pagesize = 264;
12086 		break;
12087 	case FLASH_5752PAGE_SIZE_528:
12088 		tp->nvram_pagesize = 528;
12089 		break;
12090 	}
12091 }
12092 
12093 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12094 {
12095 	u32 nvcfg1;
12096 
12097 	nvcfg1 = tr32(NVRAM_CFG1);
12098 
12099 	/* NVRAM protection for TPM */
12100 	if (nvcfg1 & (1 << 27))
12101 		tg3_flag_set(tp, PROTECTED_NVRAM);
12102 
12103 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12104 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12105 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12106 		tp->nvram_jedecnum = JEDEC_ATMEL;
12107 		tg3_flag_set(tp, NVRAM_BUFFERED);
12108 		break;
12109 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12110 		tp->nvram_jedecnum = JEDEC_ATMEL;
12111 		tg3_flag_set(tp, NVRAM_BUFFERED);
12112 		tg3_flag_set(tp, FLASH);
12113 		break;
12114 	case FLASH_5752VENDOR_ST_M45PE10:
12115 	case FLASH_5752VENDOR_ST_M45PE20:
12116 	case FLASH_5752VENDOR_ST_M45PE40:
12117 		tp->nvram_jedecnum = JEDEC_ST;
12118 		tg3_flag_set(tp, NVRAM_BUFFERED);
12119 		tg3_flag_set(tp, FLASH);
12120 		break;
12121 	}
12122 
12123 	if (tg3_flag(tp, FLASH)) {
12124 		tg3_nvram_get_pagesize(tp, nvcfg1);
12125 	} else {
12126 		/* For EEPROMs, set the pagesize to the maximum EEPROM size */
12127 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12128 
12129 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12130 		tw32(NVRAM_CFG1, nvcfg1);
12131 	}
12132 }
12133 
12134 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12135 {
12136 	u32 nvcfg1, protect = 0;
12137 
12138 	nvcfg1 = tr32(NVRAM_CFG1);
12139 
12140 	/* NVRAM protection for TPM */
12141 	if (nvcfg1 & (1 << 27)) {
12142 		tg3_flag_set(tp, PROTECTED_NVRAM);
12143 		protect = 1;
12144 	}
12145 
12146 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12147 	switch (nvcfg1) {
12148 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12149 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12150 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12151 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
12152 		tp->nvram_jedecnum = JEDEC_ATMEL;
12153 		tg3_flag_set(tp, NVRAM_BUFFERED);
12154 		tg3_flag_set(tp, FLASH);
12155 		tp->nvram_pagesize = 264;
12156 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12157 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12158 			tp->nvram_size = (protect ? 0x3e200 :
12159 					  TG3_NVRAM_SIZE_512KB);
12160 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12161 			tp->nvram_size = (protect ? 0x1f200 :
12162 					  TG3_NVRAM_SIZE_256KB);
12163 		else
12164 			tp->nvram_size = (protect ? 0x1f200 :
12165 					  TG3_NVRAM_SIZE_128KB);
12166 		break;
12167 	case FLASH_5752VENDOR_ST_M45PE10:
12168 	case FLASH_5752VENDOR_ST_M45PE20:
12169 	case FLASH_5752VENDOR_ST_M45PE40:
12170 		tp->nvram_jedecnum = JEDEC_ST;
12171 		tg3_flag_set(tp, NVRAM_BUFFERED);
12172 		tg3_flag_set(tp, FLASH);
12173 		tp->nvram_pagesize = 256;
12174 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12175 			tp->nvram_size = (protect ?
12176 					  TG3_NVRAM_SIZE_64KB :
12177 					  TG3_NVRAM_SIZE_128KB);
12178 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12179 			tp->nvram_size = (protect ?
12180 					  TG3_NVRAM_SIZE_64KB :
12181 					  TG3_NVRAM_SIZE_256KB);
12182 		else
12183 			tp->nvram_size = (protect ?
12184 					  TG3_NVRAM_SIZE_128KB :
12185 					  TG3_NVRAM_SIZE_512KB);
12186 		break;
12187 	}
12188 }
12189 
12190 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12191 {
12192 	u32 nvcfg1;
12193 
12194 	nvcfg1 = tr32(NVRAM_CFG1);
12195 
12196 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12197 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12198 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12199 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12200 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12201 		tp->nvram_jedecnum = JEDEC_ATMEL;
12202 		tg3_flag_set(tp, NVRAM_BUFFERED);
12203 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12204 
12205 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12206 		tw32(NVRAM_CFG1, nvcfg1);
12207 		break;
12208 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12209 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
12210 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
12211 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
12212 		tp->nvram_jedecnum = JEDEC_ATMEL;
12213 		tg3_flag_set(tp, NVRAM_BUFFERED);
12214 		tg3_flag_set(tp, FLASH);
12215 		tp->nvram_pagesize = 264;
12216 		break;
12217 	case FLASH_5752VENDOR_ST_M45PE10:
12218 	case FLASH_5752VENDOR_ST_M45PE20:
12219 	case FLASH_5752VENDOR_ST_M45PE40:
12220 		tp->nvram_jedecnum = JEDEC_ST;
12221 		tg3_flag_set(tp, NVRAM_BUFFERED);
12222 		tg3_flag_set(tp, FLASH);
12223 		tp->nvram_pagesize = 256;
12224 		break;
12225 	}
12226 }
12227 
12228 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12229 {
12230 	u32 nvcfg1, protect = 0;
12231 
12232 	nvcfg1 = tr32(NVRAM_CFG1);
12233 
12234 	/* NVRAM protection for TPM */
12235 	if (nvcfg1 & (1 << 27)) {
12236 		tg3_flag_set(tp, PROTECTED_NVRAM);
12237 		protect = 1;
12238 	}
12239 
12240 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12241 	switch (nvcfg1) {
12242 	case FLASH_5761VENDOR_ATMEL_ADB021D:
12243 	case FLASH_5761VENDOR_ATMEL_ADB041D:
12244 	case FLASH_5761VENDOR_ATMEL_ADB081D:
12245 	case FLASH_5761VENDOR_ATMEL_ADB161D:
12246 	case FLASH_5761VENDOR_ATMEL_MDB021D:
12247 	case FLASH_5761VENDOR_ATMEL_MDB041D:
12248 	case FLASH_5761VENDOR_ATMEL_MDB081D:
12249 	case FLASH_5761VENDOR_ATMEL_MDB161D:
12250 		tp->nvram_jedecnum = JEDEC_ATMEL;
12251 		tg3_flag_set(tp, NVRAM_BUFFERED);
12252 		tg3_flag_set(tp, FLASH);
12253 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12254 		tp->nvram_pagesize = 256;
12255 		break;
12256 	case FLASH_5761VENDOR_ST_A_M45PE20:
12257 	case FLASH_5761VENDOR_ST_A_M45PE40:
12258 	case FLASH_5761VENDOR_ST_A_M45PE80:
12259 	case FLASH_5761VENDOR_ST_A_M45PE16:
12260 	case FLASH_5761VENDOR_ST_M_M45PE20:
12261 	case FLASH_5761VENDOR_ST_M_M45PE40:
12262 	case FLASH_5761VENDOR_ST_M_M45PE80:
12263 	case FLASH_5761VENDOR_ST_M_M45PE16:
12264 		tp->nvram_jedecnum = JEDEC_ST;
12265 		tg3_flag_set(tp, NVRAM_BUFFERED);
12266 		tg3_flag_set(tp, FLASH);
12267 		tp->nvram_pagesize = 256;
12268 		break;
12269 	}
12270 
12271 	if (protect) {
12272 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12273 	} else {
12274 		switch (nvcfg1) {
12275 		case FLASH_5761VENDOR_ATMEL_ADB161D:
12276 		case FLASH_5761VENDOR_ATMEL_MDB161D:
12277 		case FLASH_5761VENDOR_ST_A_M45PE16:
12278 		case FLASH_5761VENDOR_ST_M_M45PE16:
12279 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12280 			break;
12281 		case FLASH_5761VENDOR_ATMEL_ADB081D:
12282 		case FLASH_5761VENDOR_ATMEL_MDB081D:
12283 		case FLASH_5761VENDOR_ST_A_M45PE80:
12284 		case FLASH_5761VENDOR_ST_M_M45PE80:
12285 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12286 			break;
12287 		case FLASH_5761VENDOR_ATMEL_ADB041D:
12288 		case FLASH_5761VENDOR_ATMEL_MDB041D:
12289 		case FLASH_5761VENDOR_ST_A_M45PE40:
12290 		case FLASH_5761VENDOR_ST_M_M45PE40:
12291 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12292 			break;
12293 		case FLASH_5761VENDOR_ATMEL_ADB021D:
12294 		case FLASH_5761VENDOR_ATMEL_MDB021D:
12295 		case FLASH_5761VENDOR_ST_A_M45PE20:
12296 		case FLASH_5761VENDOR_ST_M_M45PE20:
12297 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12298 			break;
12299 		}
12300 	}
12301 }
12302 
12303 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12304 {
12305 	tp->nvram_jedecnum = JEDEC_ATMEL;
12306 	tg3_flag_set(tp, NVRAM_BUFFERED);
12307 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12308 }
12309 
12310 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12311 {
12312 	u32 nvcfg1;
12313 
12314 	nvcfg1 = tr32(NVRAM_CFG1);
12315 
12316 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12317 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12318 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12319 		tp->nvram_jedecnum = JEDEC_ATMEL;
12320 		tg3_flag_set(tp, NVRAM_BUFFERED);
12321 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12322 
12323 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12324 		tw32(NVRAM_CFG1, nvcfg1);
12325 		return;
12326 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12327 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12328 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12329 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12330 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12331 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12332 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12333 		tp->nvram_jedecnum = JEDEC_ATMEL;
12334 		tg3_flag_set(tp, NVRAM_BUFFERED);
12335 		tg3_flag_set(tp, FLASH);
12336 
12337 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12338 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12339 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12340 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12341 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12342 			break;
12343 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12344 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12345 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12346 			break;
12347 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12348 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12349 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12350 			break;
12351 		}
12352 		break;
12353 	case FLASH_5752VENDOR_ST_M45PE10:
12354 	case FLASH_5752VENDOR_ST_M45PE20:
12355 	case FLASH_5752VENDOR_ST_M45PE40:
12356 		tp->nvram_jedecnum = JEDEC_ST;
12357 		tg3_flag_set(tp, NVRAM_BUFFERED);
12358 		tg3_flag_set(tp, FLASH);
12359 
12360 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12361 		case FLASH_5752VENDOR_ST_M45PE10:
12362 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12363 			break;
12364 		case FLASH_5752VENDOR_ST_M45PE20:
12365 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12366 			break;
12367 		case FLASH_5752VENDOR_ST_M45PE40:
12368 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12369 			break;
12370 		}
12371 		break;
12372 	default:
12373 		tg3_flag_set(tp, NO_NVRAM);
12374 		return;
12375 	}
12376 
12377 	tg3_nvram_get_pagesize(tp, nvcfg1);
12378 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12379 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12380 }
12381 
12382 
12383 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12384 {
12385 	u32 nvcfg1;
12386 
12387 	nvcfg1 = tr32(NVRAM_CFG1);
12388 
12389 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12390 	case FLASH_5717VENDOR_ATMEL_EEPROM:
12391 	case FLASH_5717VENDOR_MICRO_EEPROM:
12392 		tp->nvram_jedecnum = JEDEC_ATMEL;
12393 		tg3_flag_set(tp, NVRAM_BUFFERED);
12394 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12395 
12396 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12397 		tw32(NVRAM_CFG1, nvcfg1);
12398 		return;
12399 	case FLASH_5717VENDOR_ATMEL_MDB011D:
12400 	case FLASH_5717VENDOR_ATMEL_ADB011B:
12401 	case FLASH_5717VENDOR_ATMEL_ADB011D:
12402 	case FLASH_5717VENDOR_ATMEL_MDB021D:
12403 	case FLASH_5717VENDOR_ATMEL_ADB021B:
12404 	case FLASH_5717VENDOR_ATMEL_ADB021D:
12405 	case FLASH_5717VENDOR_ATMEL_45USPT:
12406 		tp->nvram_jedecnum = JEDEC_ATMEL;
12407 		tg3_flag_set(tp, NVRAM_BUFFERED);
12408 		tg3_flag_set(tp, FLASH);
12409 
12410 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12411 		case FLASH_5717VENDOR_ATMEL_MDB021D:
12412 			/* Detect size with tg3_nvram_get_size() */
12413 			/* Detect size with tg3_get_nvram_size() */
12414 		case FLASH_5717VENDOR_ATMEL_ADB021B:
12415 		case FLASH_5717VENDOR_ATMEL_ADB021D:
12416 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12417 			break;
12418 		default:
12419 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12420 			break;
12421 		}
12422 		break;
12423 	case FLASH_5717VENDOR_ST_M_M25PE10:
12424 	case FLASH_5717VENDOR_ST_A_M25PE10:
12425 	case FLASH_5717VENDOR_ST_M_M45PE10:
12426 	case FLASH_5717VENDOR_ST_A_M45PE10:
12427 	case FLASH_5717VENDOR_ST_M_M25PE20:
12428 	case FLASH_5717VENDOR_ST_A_M25PE20:
12429 	case FLASH_5717VENDOR_ST_M_M45PE20:
12430 	case FLASH_5717VENDOR_ST_A_M45PE20:
12431 	case FLASH_5717VENDOR_ST_25USPT:
12432 	case FLASH_5717VENDOR_ST_45USPT:
12433 		tp->nvram_jedecnum = JEDEC_ST;
12434 		tg3_flag_set(tp, NVRAM_BUFFERED);
12435 		tg3_flag_set(tp, FLASH);
12436 
12437 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12438 		case FLASH_5717VENDOR_ST_M_M25PE20:
12439 		case FLASH_5717VENDOR_ST_M_M45PE20:
12440 			/* Detect size with tg3_nvram_get_size() */
12441 			/* Detect size with tg3_get_nvram_size() */
12442 		case FLASH_5717VENDOR_ST_A_M25PE20:
12443 		case FLASH_5717VENDOR_ST_A_M45PE20:
12444 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12445 			break;
12446 		default:
12447 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12448 			break;
12449 		}
12450 		break;
12451 	default:
12452 		tg3_flag_set(tp, NO_NVRAM);
12453 		return;
12454 	}
12455 
12456 	tg3_nvram_get_pagesize(tp, nvcfg1);
12457 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12458 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12459 }
12460 
12461 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12462 {
12463 	u32 nvcfg1, nvmpinstrp;
12464 
12465 	nvcfg1 = tr32(NVRAM_CFG1);
12466 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12467 
12468 	switch (nvmpinstrp) {
12469 	case FLASH_5720_EEPROM_HD:
12470 	case FLASH_5720_EEPROM_LD:
12471 		tp->nvram_jedecnum = JEDEC_ATMEL;
12472 		tg3_flag_set(tp, NVRAM_BUFFERED);
12473 
12474 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12475 		tw32(NVRAM_CFG1, nvcfg1);
12476 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12477 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12478 		else
12479 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12480 		return;
12481 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
12482 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
12483 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
12484 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
12485 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
12486 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
12487 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
12488 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
12489 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
12490 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
12491 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
12492 	case FLASH_5720VENDOR_ATMEL_45USPT:
12493 		tp->nvram_jedecnum = JEDEC_ATMEL;
12494 		tg3_flag_set(tp, NVRAM_BUFFERED);
12495 		tg3_flag_set(tp, FLASH);
12496 
12497 		switch (nvmpinstrp) {
12498 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
12499 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
12500 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
12501 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12502 			break;
12503 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
12504 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
12505 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
12506 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12507 			break;
12508 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
12509 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
12510 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12511 			break;
12512 		default:
12513 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12514 			break;
12515 		}
12516 		break;
12517 	case FLASH_5720VENDOR_M_ST_M25PE10:
12518 	case FLASH_5720VENDOR_M_ST_M45PE10:
12519 	case FLASH_5720VENDOR_A_ST_M25PE10:
12520 	case FLASH_5720VENDOR_A_ST_M45PE10:
12521 	case FLASH_5720VENDOR_M_ST_M25PE20:
12522 	case FLASH_5720VENDOR_M_ST_M45PE20:
12523 	case FLASH_5720VENDOR_A_ST_M25PE20:
12524 	case FLASH_5720VENDOR_A_ST_M45PE20:
12525 	case FLASH_5720VENDOR_M_ST_M25PE40:
12526 	case FLASH_5720VENDOR_M_ST_M45PE40:
12527 	case FLASH_5720VENDOR_A_ST_M25PE40:
12528 	case FLASH_5720VENDOR_A_ST_M45PE40:
12529 	case FLASH_5720VENDOR_M_ST_M25PE80:
12530 	case FLASH_5720VENDOR_M_ST_M45PE80:
12531 	case FLASH_5720VENDOR_A_ST_M25PE80:
12532 	case FLASH_5720VENDOR_A_ST_M45PE80:
12533 	case FLASH_5720VENDOR_ST_25USPT:
12534 	case FLASH_5720VENDOR_ST_45USPT:
12535 		tp->nvram_jedecnum = JEDEC_ST;
12536 		tg3_flag_set(tp, NVRAM_BUFFERED);
12537 		tg3_flag_set(tp, FLASH);
12538 
12539 		switch (nvmpinstrp) {
12540 		case FLASH_5720VENDOR_M_ST_M25PE20:
12541 		case FLASH_5720VENDOR_M_ST_M45PE20:
12542 		case FLASH_5720VENDOR_A_ST_M25PE20:
12543 		case FLASH_5720VENDOR_A_ST_M45PE20:
12544 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12545 			break;
12546 		case FLASH_5720VENDOR_M_ST_M25PE40:
12547 		case FLASH_5720VENDOR_M_ST_M45PE40:
12548 		case FLASH_5720VENDOR_A_ST_M25PE40:
12549 		case FLASH_5720VENDOR_A_ST_M45PE40:
12550 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12551 			break;
12552 		case FLASH_5720VENDOR_M_ST_M25PE80:
12553 		case FLASH_5720VENDOR_M_ST_M45PE80:
12554 		case FLASH_5720VENDOR_A_ST_M25PE80:
12555 		case FLASH_5720VENDOR_A_ST_M45PE80:
12556 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12557 			break;
12558 		default:
12559 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12560 			break;
12561 		}
12562 		break;
12563 	default:
12564 		tg3_flag_set(tp, NO_NVRAM);
12565 		return;
12566 	}
12567 
12568 	tg3_nvram_get_pagesize(tp, nvcfg1);
12569 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12570 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12571 }
12572 
12573 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12574 static void __devinit tg3_nvram_init(struct tg3 *tp)
12575 {
12576 	tw32_f(GRC_EEPROM_ADDR,
12577 	     (EEPROM_ADDR_FSM_RESET |
12578 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
12579 	       EEPROM_ADDR_CLKPERD_SHIFT)));
12580 
12581 	msleep(1);
12582 
12583 	/* Enable seeprom accesses. */
12584 	tw32_f(GRC_LOCAL_CTRL,
12585 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12586 	udelay(100);
12587 
12588 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12589 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12590 		tg3_flag_set(tp, NVRAM);
12591 
12592 		if (tg3_nvram_lock(tp)) {
12593 			netdev_warn(tp->dev,
12594 				    "Cannot get nvram lock, %s failed\n",
12595 				    __func__);
12596 			return;
12597 		}
12598 		tg3_enable_nvram_access(tp);
12599 
12600 		tp->nvram_size = 0;
12601 
12602 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12603 			tg3_get_5752_nvram_info(tp);
12604 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12605 			tg3_get_5755_nvram_info(tp);
12606 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12607 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12608 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12609 			tg3_get_5787_nvram_info(tp);
12610 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12611 			tg3_get_5761_nvram_info(tp);
12612 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12613 			tg3_get_5906_nvram_info(tp);
12614 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12615 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12616 			tg3_get_57780_nvram_info(tp);
12617 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12618 			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12619 			tg3_get_5717_nvram_info(tp);
12620 		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12621 			tg3_get_5720_nvram_info(tp);
12622 		else
12623 			tg3_get_nvram_info(tp);
12624 
12625 		if (tp->nvram_size == 0)
12626 			tg3_get_nvram_size(tp);
12627 
12628 		tg3_disable_nvram_access(tp);
12629 		tg3_nvram_unlock(tp);
12630 
12631 	} else {
12632 		tg3_flag_clear(tp, NVRAM);
12633 		tg3_flag_clear(tp, NVRAM_BUFFERED);
12634 
12635 		tg3_get_eeprom_size(tp);
12636 	}
12637 }
12638 
12639 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12640 				    u32 offset, u32 len, u8 *buf)
12641 {
12642 	int i, j, rc = 0;
12643 	u32 val;
12644 
12645 	for (i = 0; i < len; i += 4) {
12646 		u32 addr;
12647 		__be32 data;
12648 
12649 		addr = offset + i;
12650 
12651 		memcpy(&data, buf + i, 4);
12652 
12653 		/*
12654 		 * The SEEPROM interface expects the data to always be in the
12655 		 * opposite of the native endian format.  We accomplish this by
12656 		 * reversing all the operations that would have been performed
12657 		 * on the data from a call to tg3_nvram_read_be32().
12658 		 */
12659 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12660 
12661 		val = tr32(GRC_EEPROM_ADDR);
12662 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12663 
12664 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12665 			EEPROM_ADDR_READ);
12666 		tw32(GRC_EEPROM_ADDR, val |
12667 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
12668 			(addr & EEPROM_ADDR_ADDR_MASK) |
12669 			EEPROM_ADDR_START |
12670 			EEPROM_ADDR_WRITE);
12671 
12672 		for (j = 0; j < 1000; j++) {
12673 			val = tr32(GRC_EEPROM_ADDR);
12674 
12675 			if (val & EEPROM_ADDR_COMPLETE)
12676 				break;
12677 			msleep(1);
12678 		}
12679 		if (!(val & EEPROM_ADDR_COMPLETE)) {
12680 			rc = -EBUSY;
12681 			break;
12682 		}
12683 	}
12684 
12685 	return rc;
12686 }
12687 
12688 /* offset and length are dword aligned */
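/* Unbuffered flash parts must be written a full page at a time: read
 * the page containing 'offset' into a bounce buffer, merge in the
 * caller's data, erase the page, then rewrite it dword by dword.
 */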
12689 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12690 		u8 *buf)
12691 {
12692 	int ret = 0;
12693 	u32 pagesize = tp->nvram_pagesize;
12694 	u32 pagemask = pagesize - 1;
12695 	u32 nvram_cmd;
12696 	u8 *tmp;
12697 
12698 	tmp = kmalloc(pagesize, GFP_KERNEL);
12699 	if (tmp == NULL)
12700 		return -ENOMEM;
12701 
12702 	while (len) {
12703 		int j;
12704 		u32 phy_addr, page_off, size;
12705 
12706 		phy_addr = offset & ~pagemask;
12707 
12708 		for (j = 0; j < pagesize; j += 4) {
12709 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
12710 						  (__be32 *) (tmp + j));
12711 			if (ret)
12712 				break;
12713 		}
12714 		if (ret)
12715 			break;
12716 
12717 		page_off = offset & pagemask;
12718 		size = pagesize;
12719 		if (len < size)
12720 			size = len;
12721 
12722 		len -= size;
12723 
12724 		memcpy(tmp + page_off, buf, size);
12725 
12726 		offset = offset + (pagesize - page_off);
12727 
12728 		tg3_enable_nvram_access(tp);
12729 
12730 		/*
12731 		 * Before we can erase the flash page, we need
12732 		 * to issue a special "write enable" command.
12733 		 */
12734 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12735 
12736 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12737 			break;
12738 
12739 		/* Erase the target page */
12740 		tw32(NVRAM_ADDR, phy_addr);
12741 
12742 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12743 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12744 
12745 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12746 			break;
12747 
12748 		/* Issue another write enable to start the write. */
12749 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12750 
12751 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12752 			break;
12753 
12754 		for (j = 0; j < pagesize; j += 4) {
12755 			__be32 data;
12756 
12757 			data = *((__be32 *) (tmp + j));
12758 
12759 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
12760 
12761 			tw32(NVRAM_ADDR, phy_addr + j);
12762 
12763 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12764 				NVRAM_CMD_WR;
12765 
12766 			if (j == 0)
12767 				nvram_cmd |= NVRAM_CMD_FIRST;
12768 			else if (j == (pagesize - 4))
12769 				nvram_cmd |= NVRAM_CMD_LAST;
12770 
12771 			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12772 				break;
12773 		}
12774 		if (ret)
12775 			break;
12776 	}
12777 
12778 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12779 	tg3_nvram_exec_cmd(tp, nvram_cmd);
12780 
12781 	kfree(tmp);
12782 
12783 	return ret;
12784 }
12785 
12786 /* offset and length are dword aligned */
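/* Buffered parts accept individual dword writes; NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST mark the page boundaries (and the ends of the
 * transfer as a whole) for the controller.
 */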
12787 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12788 		u8 *buf)
12789 {
12790 	int i, ret = 0;
12791 
12792 	for (i = 0; i < len; i += 4, offset += 4) {
12793 		u32 page_off, phy_addr, nvram_cmd;
12794 		__be32 data;
12795 
12796 		memcpy(&data, buf + i, 4);
12797 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
12798 
12799 		page_off = offset % tp->nvram_pagesize;
12800 
12801 		phy_addr = tg3_nvram_phys_addr(tp, offset);
12802 
12803 		tw32(NVRAM_ADDR, phy_addr);
12804 
12805 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12806 
12807 		if (page_off == 0 || i == 0)
12808 			nvram_cmd |= NVRAM_CMD_FIRST;
12809 		if (page_off == (tp->nvram_pagesize - 4))
12810 			nvram_cmd |= NVRAM_CMD_LAST;
12811 
12812 		if (i == (len - 4))
12813 			nvram_cmd |= NVRAM_CMD_LAST;
12814 
12815 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12816 		    !tg3_flag(tp, 5755_PLUS) &&
12817 		    (tp->nvram_jedecnum == JEDEC_ST) &&
12818 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
12819 
12820 			ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_WREN |
12821 						 NVRAM_CMD_GO | NVRAM_CMD_DONE);
12822 			if (ret)
12823 				break;
12825 		}
12826 		if (!tg3_flag(tp, FLASH)) {
12827 			/* We always do complete word writes to eeprom. */
12828 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12829 		}
12830 
12831 		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12832 			break;
12833 	}
12834 	return ret;
12835 }
12836 
12837 /* offset and length are dword aligned */
12838 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12839 {
12840 	int ret;
12841 
12842 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
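		/* EEPROM_WRITE_PROT boards use GPIO1 as an eeprom
		 * write-protect line; drive it low while writing and
		 * restore tp->grc_local_ctrl when done (see below).
		 */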
12843 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12844 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
12845 		udelay(40);
12846 	}
12847 
12848 	if (!tg3_flag(tp, NVRAM)) {
12849 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12850 	} else {
12851 		u32 grc_mode;
12852 
12853 		ret = tg3_nvram_lock(tp);
12854 		if (ret)
12855 			return ret;
12856 
12857 		tg3_enable_nvram_access(tp);
12858 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12859 			tw32(NVRAM_WRITE1, 0x406);
12860 
12861 		grc_mode = tr32(GRC_MODE);
12862 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12863 
12864 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12865 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
12866 				buf);
12867 		} else {
12868 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12869 				buf);
12870 		}
12871 
12872 		grc_mode = tr32(GRC_MODE);
12873 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12874 
12875 		tg3_disable_nvram_access(tp);
12876 		tg3_nvram_unlock(tp);
12877 	}
12878 
12879 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12880 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12881 		udelay(40);
12882 	}
12883 
12884 	return ret;
12885 }
12886 
12887 struct subsys_tbl_ent {
12888 	u16 subsys_vendor, subsys_devid;
12889 	u32 phy_id;
12890 };
12891 
12892 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12893 	/* Broadcom boards. */
12894 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12895 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12896 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12897 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12898 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12899 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12900 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12901 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12902 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12903 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12904 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12905 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12906 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12907 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12908 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12909 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12910 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12911 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12912 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12913 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12914 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
12915 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12916 
12917 	/* 3com boards. */
12918 	{ TG3PCI_SUBVENDOR_ID_3COM,
12919 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12920 	{ TG3PCI_SUBVENDOR_ID_3COM,
12921 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12922 	{ TG3PCI_SUBVENDOR_ID_3COM,
12923 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12924 	{ TG3PCI_SUBVENDOR_ID_3COM,
12925 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12926 	{ TG3PCI_SUBVENDOR_ID_3COM,
12927 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12928 
12929 	/* DELL boards. */
12930 	{ TG3PCI_SUBVENDOR_ID_DELL,
12931 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12932 	{ TG3PCI_SUBVENDOR_ID_DELL,
12933 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12934 	{ TG3PCI_SUBVENDOR_ID_DELL,
12935 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12936 	{ TG3PCI_SUBVENDOR_ID_DELL,
12937 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12938 
12939 	/* Compaq boards. */
12940 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12941 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12942 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12943 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12944 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12945 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12946 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12947 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12948 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
12949 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12950 
12951 	/* IBM boards. */
12952 	{ TG3PCI_SUBVENDOR_ID_IBM,
12953 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12954 };
12955 
12956 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12957 {
12958 	int i;
12959 
12960 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12961 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
12962 		     tp->pdev->subsystem_vendor) &&
12963 		    (subsys_id_to_phy_id[i].subsys_devid ==
12964 		     tp->pdev->subsystem_device))
12965 			return &subsys_id_to_phy_id[i];
12966 	}
12967 	return NULL;
12968 }
12969 
12970 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12971 {
12972 	u32 val;
12973 
12974 	tp->phy_id = TG3_PHY_ID_INVALID;
12975 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12976 
12977 	/* Assume an onboard device and WOL capable by default.  */
12978 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
12979 	tg3_flag_set(tp, WOL_CAP);
12980 
12981 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12982 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12983 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12984 			tg3_flag_set(tp, IS_NIC);
12985 		}
12986 		val = tr32(VCPU_CFGSHDW);
12987 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
12988 			tg3_flag_set(tp, ASPM_WORKAROUND);
12989 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12990 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12991 			tg3_flag_set(tp, WOL_ENABLE);
12992 			device_set_wakeup_enable(&tp->pdev->dev, true);
12993 		}
12994 		goto done;
12995 	}
12996 
12997 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12998 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12999 		u32 nic_cfg, led_cfg;
13000 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13001 		int eeprom_phy_serdes = 0;
13002 
13003 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13004 		tp->nic_sram_data_cfg = nic_cfg;
13005 
13006 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13007 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
13008 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13009 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13010 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13011 		    (ver > 0) && (ver < 0x100))
13012 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13013 
13014 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13015 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13016 
13017 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13018 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13019 			eeprom_phy_serdes = 1;
13020 
13021 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13022 		if (nic_phy_id != 0) {
13023 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13024 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13025 
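			/* Pack the two NVRAM PHY ID words into the
			 * driver's internal PHY ID layout, matching the
			 * packing of the MII ID registers in
			 * tg3_phy_probe(), so that both forms compare
			 * against the TG3_PHY_ID_* constants.
			 */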
13026 			eeprom_phy_id  = (id1 >> 16) << 10;
13027 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
13028 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13029 		} else
13030 			eeprom_phy_id = 0;
13031 
13032 		tp->phy_id = eeprom_phy_id;
13033 		if (eeprom_phy_serdes) {
13034 			if (!tg3_flag(tp, 5705_PLUS))
13035 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13036 			else
13037 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13038 		}
13039 
13040 		if (tg3_flag(tp, 5750_PLUS))
13041 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13042 				    SHASTA_EXT_LED_MODE_MASK);
13043 		else
13044 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13045 
13046 		switch (led_cfg) {
13047 		default:
13048 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13049 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13050 			break;
13051 
13052 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13053 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13054 			break;
13055 
13056 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13057 			tp->led_ctrl = LED_CTRL_MODE_MAC;
13058 
13059 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
13060 			 * read on some older 5700/5701 bootcode.
13061 			 */
13062 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13063 			    ASIC_REV_5700 ||
13064 			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
13065 			    ASIC_REV_5701)
13066 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13067 
13068 			break;
13069 
13070 		case SHASTA_EXT_LED_SHARED:
13071 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
13072 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13073 			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13074 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13075 						 LED_CTRL_MODE_PHY_2);
13076 			break;
13077 
13078 		case SHASTA_EXT_LED_MAC:
13079 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13080 			break;
13081 
13082 		case SHASTA_EXT_LED_COMBO:
13083 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
13084 			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13085 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13086 						 LED_CTRL_MODE_PHY_2);
13087 			break;
13088 
13089 		}
13090 
13091 		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13092 		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13093 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13094 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13095 
13096 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13097 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13098 
13099 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13100 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
13101 			if ((tp->pdev->subsystem_vendor ==
13102 			     PCI_VENDOR_ID_ARIMA) &&
13103 			    (tp->pdev->subsystem_device == 0x205a ||
13104 			     tp->pdev->subsystem_device == 0x2063))
13105 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13106 		} else {
13107 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13108 			tg3_flag_set(tp, IS_NIC);
13109 		}
13110 
13111 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13112 			tg3_flag_set(tp, ENABLE_ASF);
13113 			if (tg3_flag(tp, 5750_PLUS))
13114 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13115 		}
13116 
13117 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13118 		    tg3_flag(tp, 5750_PLUS))
13119 			tg3_flag_set(tp, ENABLE_APE);
13120 
13121 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13122 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13123 			tg3_flag_clear(tp, WOL_CAP);
13124 
13125 		if (tg3_flag(tp, WOL_CAP) &&
13126 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13127 			tg3_flag_set(tp, WOL_ENABLE);
13128 			device_set_wakeup_enable(&tp->pdev->dev, true);
13129 		}
13130 
13131 		if (cfg2 & (1 << 17))
13132 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13133 
13134 		/* SerDes signal pre-emphasis in register 0x590 is set by
13135 		 * the bootcode if bit 18 is set. */
13136 		if (cfg2 & (1 << 18))
13137 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13138 
13139 		if ((tg3_flag(tp, 57765_PLUS) ||
13140 		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13141 		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13142 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13143 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13144 
13145 		if (tg3_flag(tp, PCI_EXPRESS) &&
13146 		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13147 		    !tg3_flag(tp, 57765_PLUS)) {
13148 			u32 cfg3;
13149 
13150 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13151 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13152 				tg3_flag_set(tp, ASPM_WORKAROUND);
13153 		}
13154 
13155 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13156 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13157 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13158 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13159 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13160 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13161 	}
13162 done:
13163 	if (tg3_flag(tp, WOL_CAP))
13164 		device_set_wakeup_enable(&tp->pdev->dev,
13165 					 tg3_flag(tp, WOL_ENABLE));
13166 	else
13167 		device_set_wakeup_capable(&tp->pdev->dev, false);
13168 }
13169 
13170 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13171 {
13172 	int i;
13173 	u32 val;
13174 
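	/* Pulse the START bit: issue the command with START set, then
	 * rewrite it with START clear before polling for completion.
	 */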
13175 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13176 	tw32(OTP_CTRL, cmd);
13177 
13178 	/* Wait for up to 1 ms for command to execute. */
13179 	for (i = 0; i < 100; i++) {
13180 		val = tr32(OTP_STATUS);
13181 		if (val & OTP_STATUS_CMD_DONE)
13182 			break;
13183 		udelay(10);
13184 	}
13185 
13186 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13187 }
13188 
13189 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13190  * configuration is a 32-bit value that straddles the alignment boundary.
13191  * We do two 32-bit reads and then shift and merge the results.
13192  */
13193 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13194 {
13195 	u32 bhalf_otp, thalf_otp;
13196 
13197 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13198 
13199 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13200 		return 0;
13201 
13202 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13203 
13204 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13205 		return 0;
13206 
13207 	thalf_otp = tr32(OTP_READ_DATA);
13208 
13209 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13210 
13211 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13212 		return 0;
13213 
13214 	bhalf_otp = tr32(OTP_READ_DATA);
13215 
13216 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13217 }
13218 
13219 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13220 {
13221 	u32 adv = ADVERTISED_Autoneg |
13222 		  ADVERTISED_Pause;
13223 
13224 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13225 		adv |= ADVERTISED_1000baseT_Half |
13226 		       ADVERTISED_1000baseT_Full;
13227 
13228 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13229 		adv |= ADVERTISED_100baseT_Half |
13230 		       ADVERTISED_100baseT_Full |
13231 		       ADVERTISED_10baseT_Half |
13232 		       ADVERTISED_10baseT_Full |
13233 		       ADVERTISED_TP;
13234 	else
13235 		adv |= ADVERTISED_FIBRE;
13236 
13237 	tp->link_config.advertising = adv;
13238 	tp->link_config.speed = SPEED_INVALID;
13239 	tp->link_config.duplex = DUPLEX_INVALID;
13240 	tp->link_config.autoneg = AUTONEG_ENABLE;
13241 	tp->link_config.active_speed = SPEED_INVALID;
13242 	tp->link_config.active_duplex = DUPLEX_INVALID;
13243 	tp->link_config.orig_speed = SPEED_INVALID;
13244 	tp->link_config.orig_duplex = DUPLEX_INVALID;
13245 	tp->link_config.orig_autoneg = AUTONEG_INVALID;
13246 }
13247 
13248 static int __devinit tg3_phy_probe(struct tg3 *tp)
13249 {
13250 	u32 hw_phy_id_1, hw_phy_id_2;
13251 	u32 hw_phy_id, hw_phy_id_masked;
13252 	int err;
13253 
13254 	/* flow control autonegotiation is default behavior */
13255 	tg3_flag_set(tp, PAUSE_AUTONEG);
13256 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13257 
13258 	if (tg3_flag(tp, USE_PHYLIB))
13259 		return tg3_phy_init(tp);
13260 
13261 	/* Reading the PHY ID register can conflict with ASF
13262 	 * firmware access to the PHY hardware.
13263 	 */
13264 	err = 0;
13265 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13266 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13267 	} else {
13268 		/* Now read the physical PHY_ID from the chip and verify
13269 		 * that it is sane.  If it doesn't look good, we fall back
13270 		 * to the PHY ID found in the eeprom area and, failing
13271 		 * that, the hard-coded subsystem device table.
13272 		 */
13273 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13274 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13275 
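		/* Same internal PHY ID packing as the NVRAM path in
		 * tg3_get_eeprom_hw_cfg().
		 */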
13276 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13277 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13278 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13279 
13280 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13281 	}
13282 
13283 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13284 		tp->phy_id = hw_phy_id;
13285 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13286 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13287 		else
13288 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13289 	} else {
13290 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
13291 			/* Do nothing, phy ID already set up in
13292 			 * tg3_get_eeprom_hw_cfg().
13293 			 */
13294 		} else {
13295 			struct subsys_tbl_ent *p;
13296 
13297 			/* No eeprom signature?  Try the hardcoded
13298 			 * subsys device table.
13299 			 */
13300 			p = tg3_lookup_by_subsys(tp);
13301 			if (!p)
13302 				return -ENODEV;
13303 
13304 			tp->phy_id = p->phy_id;
13305 			if (!tp->phy_id ||
13306 			    tp->phy_id == TG3_PHY_ID_BCM8002)
13307 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13308 		}
13309 	}
13310 
13311 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13312 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13313 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13314 	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13315 	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13316 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13317 	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13318 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13319 
13320 	tg3_phy_init_link_config(tp);
13321 
13322 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13323 	    !tg3_flag(tp, ENABLE_APE) &&
13324 	    !tg3_flag(tp, ENABLE_ASF)) {
13325 		u32 bmsr, mask;
13326 
13327 		tg3_readphy(tp, MII_BMSR, &bmsr);
13328 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13329 		    (bmsr & BMSR_LSTATUS))
13330 			goto skip_phy_reset;
13331 
13332 		err = tg3_phy_reset(tp);
13333 		if (err)
13334 			return err;
13335 
13336 		tg3_phy_set_wirespeed(tp);
13337 
13338 		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13339 			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13340 			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13341 		if (!tg3_copper_is_advertising_all(tp, mask)) {
13342 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13343 					    tp->link_config.flowctrl);
13344 
13345 			tg3_writephy(tp, MII_BMCR,
13346 				     BMCR_ANENABLE | BMCR_ANRESTART);
13347 		}
13348 	}
13349 
13350 skip_phy_reset:
13351 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13352 		err = tg3_init_5401phy_dsp(tp);
13353 		if (err)
13354 			return err;
13355 
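		/* The DSP setup is deliberately run twice; the reason is
		 * undocumented, presumably a single pass does not always
		 * take on the 5401.
		 */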
13356 		err = tg3_init_5401phy_dsp(tp);
13357 	}
13358 
13359 	return err;
13360 }
13361 
13362 static void __devinit tg3_read_vpd(struct tg3 *tp)
13363 {
13364 	u8 *vpd_data;
13365 	unsigned int block_end, rosize, len;
13366 	u32 vpdlen;
13367 	int j, i = 0;
13368 
13369 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13370 	if (!vpd_data)
13371 		goto out_no_vpd;
13372 
13373 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13374 	if (i < 0)
13375 		goto out_not_found;
13376 
13377 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13378 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13379 	i += PCI_VPD_LRDT_TAG_SIZE;
13380 
13381 	if (block_end > vpdlen)
13382 		goto out_not_found;
13383 
13384 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13385 				      PCI_VPD_RO_KEYWORD_MFR_ID);
13386 	if (j > 0) {
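		/* A manufacturer ID of "1028" (Dell's PCI vendor ID in
		 * ASCII) marks boards known to store the bootcode
		 * version under the V0 vendor keyword.
		 */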
13387 		len = pci_vpd_info_field_size(&vpd_data[j]);
13388 
13389 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13390 		if (j + len > block_end || len != 4 ||
13391 		    memcmp(&vpd_data[j], "1028", 4))
13392 			goto partno;
13393 
13394 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13395 					      PCI_VPD_RO_KEYWORD_VENDOR0);
13396 		if (j < 0)
13397 			goto partno;
13398 
13399 		len = pci_vpd_info_field_size(&vpd_data[j]);
13400 
13401 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
13402 		if (j + len > block_end)
13403 			goto partno;
13404 
13405 		memcpy(tp->fw_ver, &vpd_data[j], len);
13406 		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13407 	}
13408 
13409 partno:
13410 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13411 				      PCI_VPD_RO_KEYWORD_PARTNO);
13412 	if (i < 0)
13413 		goto out_not_found;
13414 
13415 	len = pci_vpd_info_field_size(&vpd_data[i]);
13416 
13417 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
13418 	if (len > TG3_BPN_SIZE ||
13419 	    (len + i) > vpdlen)
13420 		goto out_not_found;
13421 
13422 	memcpy(tp->board_part_number, &vpd_data[i], len);
13423 
13424 out_not_found:
13425 	kfree(vpd_data);
13426 	if (tp->board_part_number[0])
13427 		return;
13428 
13429 out_no_vpd:
13430 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13431 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13432 			strcpy(tp->board_part_number, "BCM5717");
13433 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13434 			strcpy(tp->board_part_number, "BCM5718");
13435 		else
13436 			goto nomatch;
13437 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13438 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13439 			strcpy(tp->board_part_number, "BCM57780");
13440 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13441 			strcpy(tp->board_part_number, "BCM57760");
13442 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13443 			strcpy(tp->board_part_number, "BCM57790");
13444 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13445 			strcpy(tp->board_part_number, "BCM57788");
13446 		else
13447 			goto nomatch;
13448 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13449 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13450 			strcpy(tp->board_part_number, "BCM57761");
13451 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13452 			strcpy(tp->board_part_number, "BCM57765");
13453 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13454 			strcpy(tp->board_part_number, "BCM57781");
13455 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13456 			strcpy(tp->board_part_number, "BCM57785");
13457 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13458 			strcpy(tp->board_part_number, "BCM57791");
13459 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13460 			strcpy(tp->board_part_number, "BCM57795");
13461 		else
13462 			goto nomatch;
13463 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13464 		strcpy(tp->board_part_number, "BCM95906");
13465 	} else {
13466 nomatch:
13467 		strcpy(tp->board_part_number, "none");
13468 	}
13469 }
13470 
13471 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13472 {
13473 	u32 val;
13474 
13475 	if (tg3_nvram_read(tp, offset, &val) ||
13476 	    (val & 0xfc000000) != 0x0c000000 ||
13477 	    tg3_nvram_read(tp, offset + 4, &val) ||
13478 	    val != 0)
13479 		return 0;
13480 
13481 	return 1;
13482 }
13483 
13484 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13485 {
13486 	u32 val, offset, start, ver_offset;
13487 	int i, dst_off;
13488 	bool newver = false;
13489 
13490 	if (tg3_nvram_read(tp, 0xc, &offset) ||
13491 	    tg3_nvram_read(tp, 0x4, &start))
13492 		return;
13493 
13494 	offset = tg3_nvram_logical_addr(tp, offset);
13495 
13496 	if (tg3_nvram_read(tp, offset, &val))
13497 		return;
13498 
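	/* A first word of 0x0c000000 followed by a zero word marks the
	 * newer bootcode image format, whose version string offset is
	 * stored at offset + 8 (the same signature that
	 * tg3_fw_img_is_valid() checks).
	 */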
13499 	if ((val & 0xfc000000) == 0x0c000000) {
13500 		if (tg3_nvram_read(tp, offset + 4, &val))
13501 			return;
13502 
13503 		if (val == 0)
13504 			newver = true;
13505 	}
13506 
13507 	dst_off = strlen(tp->fw_ver);
13508 
13509 	if (newver) {
13510 		if (TG3_VER_SIZE - dst_off < 16 ||
13511 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
13512 			return;
13513 
13514 		offset = offset + ver_offset - start;
13515 		for (i = 0; i < 16; i += 4) {
13516 			__be32 v;
13517 			if (tg3_nvram_read_be32(tp, offset + i, &v))
13518 				return;
13519 
13520 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13521 		}
13522 	} else {
13523 		u32 major, minor;
13524 
13525 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13526 			return;
13527 
13528 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13529 			TG3_NVM_BCVER_MAJSFT;
13530 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13531 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13532 			 "v%d.%02d", major, minor);
13533 	}
13534 }
13535 
13536 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13537 {
13538 	u32 val, major, minor;
13539 
13540 	/* Use native endian representation */
13541 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13542 		return;
13543 
13544 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13545 		TG3_NVM_HWSB_CFG1_MAJSFT;
13546 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13547 		TG3_NVM_HWSB_CFG1_MINSFT;
13548 
13549 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13550 }
13551 
13552 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13553 {
13554 	u32 offset, major, minor, build;
13555 
13556 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13557 
13558 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13559 		return;
13560 
13561 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13562 	case TG3_EEPROM_SB_REVISION_0:
13563 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13564 		break;
13565 	case TG3_EEPROM_SB_REVISION_2:
13566 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13567 		break;
13568 	case TG3_EEPROM_SB_REVISION_3:
13569 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13570 		break;
13571 	case TG3_EEPROM_SB_REVISION_4:
13572 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13573 		break;
13574 	case TG3_EEPROM_SB_REVISION_5:
13575 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13576 		break;
13577 	case TG3_EEPROM_SB_REVISION_6:
13578 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13579 		break;
13580 	default:
13581 		return;
13582 	}
13583 
13584 	if (tg3_nvram_read(tp, offset, &val))
13585 		return;
13586 
13587 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13588 		TG3_EEPROM_SB_EDH_BLD_SHFT;
13589 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13590 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
13591 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13592 
13593 	if (minor > 99 || build > 26)
13594 		return;
13595 
13596 	offset = strlen(tp->fw_ver);
13597 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13598 		 " v%d.%02d", major, minor);
13599 
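	/* Builds 1..26 are encoded as a trailing 'a'..'z' suffix;
	 * larger build numbers were rejected above.
	 */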
13600 	if (build > 0) {
13601 		offset = strlen(tp->fw_ver);
13602 		if (offset < TG3_VER_SIZE - 1)
13603 			tp->fw_ver[offset] = 'a' + build - 1;
13604 	}
13605 }
13606 
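/* Locate the ASF/management firmware image through the NVRAM
 * directory and append its version string to tp->fw_ver.
 */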
13607 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13608 {
13609 	u32 val, offset, start;
13610 	int i, vlen;
13611 
13612 	for (offset = TG3_NVM_DIR_START;
13613 	     offset < TG3_NVM_DIR_END;
13614 	     offset += TG3_NVM_DIRENT_SIZE) {
13615 		if (tg3_nvram_read(tp, offset, &val))
13616 			return;
13617 
13618 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13619 			break;
13620 	}
13621 
13622 	if (offset == TG3_NVM_DIR_END)
13623 		return;
13624 
13625 	if (!tg3_flag(tp, 5705_PLUS))
13626 		start = 0x08000000;
13627 	else if (tg3_nvram_read(tp, offset - 4, &start))
13628 		return;
13629 
13630 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
13631 	    !tg3_fw_img_is_valid(tp, offset) ||
13632 	    tg3_nvram_read(tp, offset + 8, &val))
13633 		return;
13634 
13635 	offset += val - start;
13636 
13637 	vlen = strlen(tp->fw_ver);
13638 
13639 	tp->fw_ver[vlen++] = ',';
13640 	tp->fw_ver[vlen++] = ' ';
13641 
13642 	for (i = 0; i < 4; i++) {
13643 		__be32 v;
13644 		if (tg3_nvram_read_be32(tp, offset, &v))
13645 			return;
13646 
13647 		offset += sizeof(v);
13648 
13649 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
13650 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13651 			break;
13652 		}
13653 
13654 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13655 		vlen += sizeof(v);
13656 	}
13657 }
13658 
13659 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13660 {
13661 	int vlen;
13662 	u32 apedata;
13663 	char *fwtype;
13664 
13665 	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13666 		return;
13667 
13668 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13669 	if (apedata != APE_SEG_SIG_MAGIC)
13670 		return;
13671 
13672 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13673 	if (!(apedata & APE_FW_STATUS_READY))
13674 		return;
13675 
13676 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13677 
13678 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13679 		tg3_flag_set(tp, APE_HAS_NCSI);
13680 		fwtype = "NCSI";
13681 	} else {
13682 		fwtype = "DASH";
13683 	}
13684 
13685 	vlen = strlen(tp->fw_ver);
13686 
13687 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13688 		 fwtype,
13689 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13690 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13691 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13692 		 (apedata & APE_FW_VERSION_BLDMSK));
13693 }
13694 
13695 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13696 {
13697 	u32 val;
13698 	bool vpd_vers = false;
13699 
13700 	if (tp->fw_ver[0] != 0)
13701 		vpd_vers = true;
13702 
13703 	if (tg3_flag(tp, NO_NVRAM)) {
13704 		strcat(tp->fw_ver, "sb");
13705 		return;
13706 	}
13707 
13708 	if (tg3_nvram_read(tp, 0, &val))
13709 		return;
13710 
13711 	if (val == TG3_EEPROM_MAGIC)
13712 		tg3_read_bc_ver(tp);
13713 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13714 		tg3_read_sb_ver(tp, val);
13715 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13716 		tg3_read_hwsb_ver(tp);
13717 	else
13718 		return;
13719 
13720 	if (vpd_vers)
13721 		goto done;
13722 
13723 	if (tg3_flag(tp, ENABLE_APE)) {
13724 		if (tg3_flag(tp, ENABLE_ASF))
13725 			tg3_read_dash_ver(tp);
13726 	} else if (tg3_flag(tp, ENABLE_ASF)) {
13727 		tg3_read_mgmtfw_ver(tp);
13728 	}
13729 
13730 done:
13731 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13732 }
13733 
13734 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13735 
13736 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13737 {
13738 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
13739 		return TG3_RX_RET_MAX_SIZE_5717;
13740 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13741 		return TG3_RX_RET_MAX_SIZE_5700;
13742 	else
13743 		return TG3_RX_RET_MAX_SIZE_5705;
13744 }
13745 
13746 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13747 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13748 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13749 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13750 	{ },
13751 };
13752 
13753 static int __devinit tg3_get_invariants(struct tg3 *tp)
13754 {
13755 	u32 misc_ctrl_reg;
13756 	u32 pci_state_reg, grc_misc_cfg;
13757 	u32 val;
13758 	u16 pci_cmd;
13759 	int err;
13760 
13761 	/* Force memory write invalidate off.  If we leave it on,
13762 	 * then on 5700_BX chips we have to enable a workaround.
13763 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13764 	 * to match the cacheline size.  The Broadcom driver has this
13765 	 * workaround but turns MWI off all the time and thus never uses
13766 	 * it.  This seems to suggest that the workaround is insufficient.
13767 	 */
13768 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13769 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13770 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13771 
13772 	/* Important! -- Make sure register accesses are byteswapped
13773 	 * correctly.  Also, for those chips that require it, make
13774 	 * sure that indirect register accesses are enabled before
13775 	 * the first operation.
13776 	 */
13777 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13778 			      &misc_ctrl_reg);
13779 	tp->misc_host_ctrl |= (misc_ctrl_reg &
13780 			       MISC_HOST_CTRL_CHIPREV);
13781 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13782 			       tp->misc_host_ctrl);
13783 
13784 	tp->pci_chip_rev_id = (misc_ctrl_reg >>
13785 			       MISC_HOST_CTRL_CHIPREV_SHIFT);
13786 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
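		/* Newer chips report a placeholder ASIC rev in
		 * MISC_HOST_CTRL; the real chip revision must be read
		 * from a product ID register whose address depends on
		 * the device generation.
		 */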
13787 		u32 prod_id_asic_rev;
13788 
13789 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13790 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13791 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13792 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13793 			pci_read_config_dword(tp->pdev,
13794 					      TG3PCI_GEN2_PRODID_ASICREV,
13795 					      &prod_id_asic_rev);
13796 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13797 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13798 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13799 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13800 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13801 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13802 			pci_read_config_dword(tp->pdev,
13803 					      TG3PCI_GEN15_PRODID_ASICREV,
13804 					      &prod_id_asic_rev);
13805 		else
13806 			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13807 					      &prod_id_asic_rev);
13808 
13809 		tp->pci_chip_rev_id = prod_id_asic_rev;
13810 	}
13811 
13812 	/* Wrong chip ID in 5752 A0. This code can be removed later
13813 	 * as A0 is not in production.
13814 	 */
13815 	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13816 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13817 
13818 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13819 	 * we need to disable memory and use config. cycles
13820 	 * only to access all registers. The 5702/03 chips
13821 	 * can mistakenly decode the special cycles from the
13822 	 * ICH chipsets as memory write cycles, causing corruption
13823 	 * of register and memory space. Only certain ICH bridges
13824 	 * will drive special cycles with non-zero data during the
13825 	 * address phase which can fall within the 5703's address
13826 	 * range. This is not an ICH bug as the PCI spec allows
13827 	 * non-zero address during special cycles. However, only
13828 	 * these ICH bridges are known to drive non-zero addresses
13829 	 * during special cycles.
13830 	 *
13831 	 * Since special cycles do not cross PCI bridges, we only
13832 	 * enable this workaround if the 5703 is on the secondary
13833 	 * bus of these ICH bridges.
13834 	 */
13835 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13836 	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13837 		static struct tg3_dev_id {
13838 			u32	vendor;
13839 			u32	device;
13840 			u32	rev;
13841 		} ich_chipsets[] = {
13842 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13843 			  PCI_ANY_ID },
13844 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13845 			  PCI_ANY_ID },
13846 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13847 			  0xa },
13848 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13849 			  PCI_ANY_ID },
13850 			{ },
13851 		};
13852 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
13853 		struct pci_dev *bridge = NULL;
13854 
13855 		while (pci_id->vendor != 0) {
13856 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
13857 						bridge);
13858 			if (!bridge) {
13859 				pci_id++;
13860 				continue;
13861 			}
13862 			if (pci_id->rev != PCI_ANY_ID) {
13863 				if (bridge->revision > pci_id->rev)
13864 					continue;
13865 			}
13866 			if (bridge->subordinate &&
13867 			    (bridge->subordinate->number ==
13868 			     tp->pdev->bus->number)) {
13869 				tg3_flag_set(tp, ICH_WORKAROUND);
13870 				pci_dev_put(bridge);
13871 				break;
13872 			}
13873 		}
13874 	}
13875 
13876 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13877 		static struct tg3_dev_id {
13878 			u32	vendor;
13879 			u32	device;
13880 		} bridge_chipsets[] = {
13881 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13882 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13883 			{ },
13884 		};
13885 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13886 		struct pci_dev *bridge = NULL;
13887 
13888 		while (pci_id->vendor != 0) {
13889 			bridge = pci_get_device(pci_id->vendor,
13890 						pci_id->device,
13891 						bridge);
13892 			if (!bridge) {
13893 				pci_id++;
13894 				continue;
13895 			}
13896 			if (bridge->subordinate &&
13897 			    (bridge->subordinate->number <=
13898 			     tp->pdev->bus->number) &&
13899 			    (bridge->subordinate->subordinate >=
13900 			     tp->pdev->bus->number)) {
13901 				tg3_flag_set(tp, 5701_DMA_BUG);
13902 				pci_dev_put(bridge);
13903 				break;
13904 			}
13905 		}
13906 	}
13907 
13908 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
13909 	 * DMA addresses > 40-bit.  This bridge may have additional
13910 	 * 57xx devices behind it, in some 4-port NIC designs for example.
13911 	 * Any tg3 device found behind the bridge will also need the 40-bit
13912 	 * DMA workaround.
13913 	 */
13914 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13915 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13916 		tg3_flag_set(tp, 5780_CLASS);
13917 		tg3_flag_set(tp, 40BIT_DMA_BUG);
13918 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13919 	} else {
13920 		struct pci_dev *bridge = NULL;
13921 
13922 		do {
13923 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13924 						PCI_DEVICE_ID_SERVERWORKS_EPB,
13925 						bridge);
13926 			if (bridge && bridge->subordinate &&
13927 			    (bridge->subordinate->number <=
13928 			     tp->pdev->bus->number) &&
13929 			    (bridge->subordinate->subordinate >=
13930 			     tp->pdev->bus->number)) {
13931 				tg3_flag_set(tp, 40BIT_DMA_BUG);
13932 				pci_dev_put(bridge);
13933 				break;
13934 			}
13935 		} while (bridge);
13936 	}
13937 
13938 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13939 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13940 		tp->pdev_peer = tg3_find_peer(tp);
13941 
13942 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13943 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13944 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13945 		tg3_flag_set(tp, 5717_PLUS);
13946 
13947 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13948 	    tg3_flag(tp, 5717_PLUS))
13949 		tg3_flag_set(tp, 57765_PLUS);
13950 
13951 	/* Intentionally exclude ASIC_REV_5906 */
13952 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13953 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13954 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13955 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13956 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13957 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13958 	    tg3_flag(tp, 57765_PLUS))
13959 		tg3_flag_set(tp, 5755_PLUS);
13960 
13961 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13962 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13963 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13964 	    tg3_flag(tp, 5755_PLUS) ||
13965 	    tg3_flag(tp, 5780_CLASS))
13966 		tg3_flag_set(tp, 5750_PLUS);
13967 
13968 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13969 	    tg3_flag(tp, 5750_PLUS))
13970 		tg3_flag_set(tp, 5705_PLUS);
13971 
13972 	/* Determine TSO capabilities */
13973 	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13974 		; /* Do nothing. HW bug. */
13975 	else if (tg3_flag(tp, 57765_PLUS))
13976 		tg3_flag_set(tp, HW_TSO_3);
13977 	else if (tg3_flag(tp, 5755_PLUS) ||
13978 		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13979 		tg3_flag_set(tp, HW_TSO_2);
13980 	else if (tg3_flag(tp, 5750_PLUS)) {
13981 		tg3_flag_set(tp, HW_TSO_1);
13982 		tg3_flag_set(tp, TSO_BUG);
13983 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13984 		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13985 			tg3_flag_clear(tp, TSO_BUG);
13986 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13987 		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13988 		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13989 		tg3_flag_set(tp, TSO_BUG);
13990 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13991 			tp->fw_needed = FIRMWARE_TG3TSO5;
13992 		else
13993 			tp->fw_needed = FIRMWARE_TG3TSO;
13994 	}
13995 
13996 	/* Selectively allow TSO based on operating conditions */
13997 	if (tg3_flag(tp, HW_TSO_1) ||
13998 	    tg3_flag(tp, HW_TSO_2) ||
13999 	    tg3_flag(tp, HW_TSO_3) ||
14000 	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
14001 		tg3_flag_set(tp, TSO_CAPABLE);
14002 	else {
14003 		tg3_flag_clear(tp, TSO_CAPABLE);
14004 		tg3_flag_clear(tp, TSO_BUG);
14005 		tp->fw_needed = NULL;
14006 	}
14007 
14008 	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14009 		tp->fw_needed = FIRMWARE_TG3;
14010 
14011 	tp->irq_max = 1;
14012 
14013 	if (tg3_flag(tp, 5750_PLUS)) {
14014 		tg3_flag_set(tp, SUPPORT_MSI);
14015 		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14016 		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14017 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14018 		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14019 		     tp->pdev_peer == tp->pdev))
14020 			tg3_flag_clear(tp, SUPPORT_MSI);
14021 
14022 		if (tg3_flag(tp, 5755_PLUS) ||
14023 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14024 			tg3_flag_set(tp, 1SHOT_MSI);
14025 		}
14026 
14027 		if (tg3_flag(tp, 57765_PLUS)) {
14028 			tg3_flag_set(tp, SUPPORT_MSIX);
14029 			tp->irq_max = TG3_IRQ_MAX_VECS;
14030 		}
14031 	}
14032 
14033 	if (tg3_flag(tp, 5755_PLUS))
14034 		tg3_flag_set(tp, SHORT_DMA_BUG);
14035 
14036 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14037 		tg3_flag_set(tp, 4K_FIFO_LIMIT);
14038 
14039 	if (tg3_flag(tp, 5717_PLUS))
14040 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
14041 
14042 	if (tg3_flag(tp, 57765_PLUS) &&
14043 	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14044 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14045 
14046 	if (!tg3_flag(tp, 5705_PLUS) ||
14047 	    tg3_flag(tp, 5780_CLASS) ||
14048 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
14049 		tg3_flag_set(tp, JUMBO_CAPABLE);
14050 
14051 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14052 			      &pci_state_reg);
14053 
14054 	if (pci_is_pcie(tp->pdev)) {
14055 		u16 lnkctl;
14056 
14057 		tg3_flag_set(tp, PCI_EXPRESS);
14058 
14059 		tp->pcie_readrq = 4096;
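		/* The 5719 and 5720 are limited to a 2048-byte maximum
		 * read request size (assumed hardware DMA restriction;
		 * not documented here).
		 */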
14060 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14061 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14062 			tp->pcie_readrq = 2048;
14063 
14064 		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14065 
14066 		pci_read_config_word(tp->pdev,
14067 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14068 				     &lnkctl);
14069 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14070 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14071 			    ASIC_REV_5906) {
14072 				tg3_flag_clear(tp, HW_TSO_2);
14073 				tg3_flag_clear(tp, TSO_CAPABLE);
14074 			}
14075 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14076 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14077 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14078 			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14079 				tg3_flag_set(tp, CLKREQ_BUG);
14080 		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14081 			tg3_flag_set(tp, L1PLLPD_EN);
14082 		}
14083 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14084 		/* BCM5785 devices are effectively PCIe devices, and should
14085 		 * follow PCIe codepaths, but do not have a PCIe capabilities
14086 		 * section.
14087 		 */
14088 		tg3_flag_set(tp, PCI_EXPRESS);
14089 	} else if (!tg3_flag(tp, 5705_PLUS) ||
14090 		   tg3_flag(tp, 5780_CLASS)) {
14091 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14092 		if (!tp->pcix_cap) {
14093 			dev_err(&tp->pdev->dev,
14094 				"Cannot find PCI-X capability, aborting\n");
14095 			return -EIO;
14096 		}
14097 
14098 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14099 			tg3_flag_set(tp, PCIX_MODE);
14100 	}
14101 
14102 	/* If we have an AMD 762 or VIA K8T800 chipset, write
14103 	 * reordering to the mailbox registers done by the host
14104 	 * controller can cause major troubles.  We read back from
14105 	 * every mailbox register write to force the writes to be
14106 	 * posted to the chip in order.
14107 	 */
14108 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
14109 	    !tg3_flag(tp, PCI_EXPRESS))
14110 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
14111 
14112 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14113 			     &tp->pci_cacheline_sz);
14114 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14115 			     &tp->pci_lat_timer);
14116 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14117 	    tp->pci_lat_timer < 64) {
14118 		tp->pci_lat_timer = 64;
14119 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14120 				      tp->pci_lat_timer);
14121 	}
14122 
14123 	/* Important! -- It is critical that the PCI-X hw workaround
14124 	 * situation is decided before the first MMIO register access.
14125 	 */
14126 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14127 		/* 5700 BX chips need to have their TX producer index
14128 		 * mailboxes written twice to workaround a bug.
14129 		 */
14130 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
14131 
14132 		/* If we are in PCI-X mode, enable register write workaround.
14133 		 *
14134 		 * The workaround is to use indirect register accesses
14135 		 * for all chip writes not to mailbox registers.
14136 		 */
14137 		if (tg3_flag(tp, PCIX_MODE)) {
14138 			u32 pm_reg;
14139 
14140 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14141 
14142 			/* The chip can have its power management PCI config
14143 			 * space registers clobbered due to this bug.
14144 			 * So explicitly force the chip into D0 here.
14145 			 */
14146 			pci_read_config_dword(tp->pdev,
14147 					      tp->pm_cap + PCI_PM_CTRL,
14148 					      &pm_reg);
14149 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14150 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14151 			pci_write_config_dword(tp->pdev,
14152 					       tp->pm_cap + PCI_PM_CTRL,
14153 					       pm_reg);
14154 
14155 			/* Also, force SERR#/PERR# in PCI command. */
14156 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14157 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14158 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14159 		}
14160 	}
14161 
14162 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14163 		tg3_flag_set(tp, PCI_HIGH_SPEED);
14164 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14165 		tg3_flag_set(tp, PCI_32BIT);
14166 
14167 	/* Chip-specific fixup from Broadcom driver */
14168 	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14169 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14170 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14171 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14172 	}
14173 
14174 	/* Default fast path register access methods */
14175 	tp->read32 = tg3_read32;
14176 	tp->write32 = tg3_write32;
14177 	tp->read32_mbox = tg3_read32;
14178 	tp->write32_mbox = tg3_write32;
14179 	tp->write32_tx_mbox = tg3_write32;
14180 	tp->write32_rx_mbox = tg3_write32;
14181 
14182 	/* Various workaround register access methods */
14183 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14184 		tp->write32 = tg3_write_indirect_reg32;
14185 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14186 		 (tg3_flag(tp, PCI_EXPRESS) &&
14187 		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14188 		/*
14189 		 * Back to back register writes can cause problems on these
14190 		 * chips, the workaround is to read back all reg writes
14191 		 * except those to mailbox regs.
14192 		 *
14193 		 * See tg3_write_indirect_reg32().
14194 		 */
14195 		tp->write32 = tg3_write_flush_reg32;
14196 	}
14197 
14198 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14199 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
14200 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
14201 			tp->write32_rx_mbox = tg3_write_flush_reg32;
14202 	}
14203 
14204 	if (tg3_flag(tp, ICH_WORKAROUND)) {
14205 		tp->read32 = tg3_read_indirect_reg32;
14206 		tp->write32 = tg3_write_indirect_reg32;
14207 		tp->read32_mbox = tg3_read_indirect_mbox;
14208 		tp->write32_mbox = tg3_write_indirect_mbox;
14209 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
14210 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
14211 
14212 		iounmap(tp->regs);
14213 		tp->regs = NULL;
14214 
14215 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14216 		pci_cmd &= ~PCI_COMMAND_MEMORY;
14217 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14218 	}
14219 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14220 		tp->read32_mbox = tg3_read32_mbox_5906;
14221 		tp->write32_mbox = tg3_write32_mbox_5906;
14222 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
14223 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
14224 	}
14225 
14226 	if (tp->write32 == tg3_write_indirect_reg32 ||
14227 	    (tg3_flag(tp, PCIX_MODE) &&
14228 	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14229 	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14230 		tg3_flag_set(tp, SRAM_USE_CONFIG);
14231 
14232 	/* The memory arbiter has to be enabled in order for SRAM accesses
14233 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
14234 	 * sure it is enabled, but other entities such as system netboot
14235 	 * code might disable it.
14236 	 */
14237 	val = tr32(MEMARB_MODE);
14238 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14239 
14240 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14241 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14242 	    tg3_flag(tp, 5780_CLASS)) {
14243 		if (tg3_flag(tp, PCIX_MODE)) {
14244 			pci_read_config_dword(tp->pdev,
14245 					      tp->pcix_cap + PCI_X_STATUS,
14246 					      &val);
14247 			tp->pci_fn = val & 0x7;
14248 		}
14249 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
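		/* 5717-class parts report the active PCI function in the
		 * CPMU status word in SRAM rather than through devfn.
		 */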
14250 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14251 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14252 		    NIC_SRAM_CPMUSTAT_SIG) {
14253 			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14254 			tp->pci_fn = tp->pci_fn ? 1 : 0;
14255 		}
14256 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14257 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14258 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14259 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14260 		    NIC_SRAM_CPMUSTAT_SIG) {
14261 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14262 				     TG3_CPMU_STATUS_FSHFT_5719;
14263 		}
14264 	}
14265 
14266 	/* Get eeprom hw config before calling tg3_set_power_state().
14267 	 * In particular, the TG3_FLAG_IS_NIC flag must be
14268 	 * determined before calling tg3_set_power_state() so that
14269 	 * we know whether or not to switch out of Vaux power.
14270 	 * When the flag is set, it means that GPIO1 is used for eeprom
14271 	 * write protect and also implies that it is a LOM where GPIOs
14272 	 * are not used to switch power.
14273 	 */
14274 	tg3_get_eeprom_hw_cfg(tp);
14275 
14276 	if (tg3_flag(tp, ENABLE_APE)) {
14277 		/* Allow reads and writes to the
14278 		 * APE register and memory space.
14279 		 */
14280 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14281 				 PCISTATE_ALLOW_APE_SHMEM_WR |
14282 				 PCISTATE_ALLOW_APE_PSPACE_WR;
14283 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14284 				       pci_state_reg);
14285 
14286 		tg3_ape_lock_init(tp);
14287 	}
14288 
14289 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14290 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14291 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14292 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14293 	    tg3_flag(tp, 57765_PLUS))
14294 		tg3_flag_set(tp, CPMU_PRESENT);
14295 
14296 	/* Set up tp->grc_local_ctrl before calling
14297 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14298 	 * will bring 5700's external PHY out of reset.
14299 	 * It is also used as eeprom write protect on LOMs.
14300 	 */
14301 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14302 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14303 	    tg3_flag(tp, EEPROM_WRITE_PROT))
14304 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14305 				       GRC_LCLCTRL_GPIO_OUTPUT1);
14306 	/* Unused GPIO3 must be driven as output on 5752 because there
14307 	 * are no pull-up resistors on unused GPIO pins.
14308 	 */
14309 	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14310 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14311 
14312 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14313 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14314 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14315 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14316 
14317 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14318 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14319 		/* Turn off the debug UART. */
14320 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14321 		if (tg3_flag(tp, IS_NIC))
14322 			/* Keep VMain power. */
14323 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14324 					      GRC_LCLCTRL_GPIO_OUTPUT0;
14325 	}
14326 
14327 	/* Switch out of Vaux if it is a NIC */
14328 	tg3_pwrsrc_switch_to_vmain(tp);
14329 
14330 	/* Derive initial jumbo mode from MTU assigned in
14331 	 * ether_setup() via the alloc_etherdev() call
14332 	 */
14333 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14334 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
14335 
14336 	/* Determine WakeOnLan speed to use. */
14337 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14338 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14339 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14340 	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14341 		tg3_flag_clear(tp, WOL_SPEED_100MB);
14342 	} else {
14343 		tg3_flag_set(tp, WOL_SPEED_100MB);
14344 	}
14345 
14346 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14347 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
14348 
14349 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
14350 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14351 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14352 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14353 	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14354 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14355 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14356 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14357 
14358 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14359 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14360 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14361 	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14362 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14363 
14364 	if (tg3_flag(tp, 5705_PLUS) &&
14365 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14366 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14367 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14368 	    !tg3_flag(tp, 57765_PLUS)) {
14369 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14370 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14371 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14372 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14373 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14374 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14375 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14376 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14377 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14378 		} else
14379 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14380 	}
14381 
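	/* Pull the PHY tuning values from on-chip OTP (one-time
	 * programmable) storage; a zero read presumably means it was
	 * never programmed, so fall back to the driver default.
	 */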
14382 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14383 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14384 		tp->phy_otp = tg3_read_otp_phycfg(tp);
14385 		if (tp->phy_otp == 0)
14386 			tp->phy_otp = TG3_OTP_DEFAULT;
14387 	}
14388 
14389 	if (tg3_flag(tp, CPMU_PRESENT))
14390 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14391 	else
14392 		tp->mi_mode = MAC_MI_MODE_BASE;
14393 
14394 	tp->coalesce_mode = 0;
14395 	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14396 	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14397 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14398 
14399 	/* Set these bits to enable the statistics workaround. */
14400 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14401 	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14402 	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14403 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14404 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14405 	}
14406 
14407 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14408 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14409 		tg3_flag_set(tp, USE_PHYLIB);
14410 
14411 	err = tg3_mdio_init(tp);
14412 	if (err)
14413 		return err;
14414 
14415 	/* Initialize data/descriptor byte/word swapping. */
14416 	val = tr32(GRC_MODE);
14417 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14418 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14419 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
14420 			GRC_MODE_B2HRX_ENABLE |
14421 			GRC_MODE_HTX2B_ENABLE |
14422 			GRC_MODE_HOST_STACKUP);
14423 	else
14424 		val &= GRC_MODE_HOST_STACKUP;
14425 
14426 	tw32(GRC_MODE, val | tp->grc_mode);
14427 
14428 	tg3_switch_clocks(tp);
14429 
14430 	/* Clear this out for sanity. */
14431 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14432 
14433 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14434 			      &pci_state_reg);
14435 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14436 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14437 		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14438 
14439 		if (chiprevid == CHIPREV_ID_5701_A0 ||
14440 		    chiprevid == CHIPREV_ID_5701_B0 ||
14441 		    chiprevid == CHIPREV_ID_5701_B2 ||
14442 		    chiprevid == CHIPREV_ID_5701_B5) {
14443 			void __iomem *sram_base;
14444 
14445 			/* Write some dummy words into the SRAM status block
14446 			 * area and see if they read back correctly.  If the
14447 			 * readback value is bad, force-enable the PCI-X workaround.
14448 			 */
14449 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14450 
14451 			writel(0x00000000, sram_base);
14452 			writel(0x00000000, sram_base + 4);
14453 			writel(0xffffffff, sram_base + 4);
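			/* If the back-to-back writes to the adjacent word
			 * corrupted offset 0, this chip revision has the
			 * PCI-X target bug and needs the workaround.
			 */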
14454 			if (readl(sram_base) != 0x00000000)
14455 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14456 		}
14457 	}
14458 
14459 	udelay(50);
14460 	tg3_nvram_init(tp);
14461 
14462 	grc_misc_cfg = tr32(GRC_MISC_CFG);
14463 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14464 
14465 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14466 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14467 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14468 		tg3_flag_set(tp, IS_5788);
14469 
14470 	if (!tg3_flag(tp, IS_5788) &&
14471 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14472 		tg3_flag_set(tp, TAGGED_STATUS);
14473 	if (tg3_flag(tp, TAGGED_STATUS)) {
14474 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14475 				      HOSTCC_MODE_CLRTICK_TXBD);
14476 
14477 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14478 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14479 				       tp->misc_host_ctrl);
14480 	}
14481 
14482 	/* Preserve the APE MAC_MODE bits */
14483 	if (tg3_flag(tp, ENABLE_APE))
14484 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14485 	else
14486 		tp->mac_mode = 0;
14487 
14488 	/* these are limited to 10/100 only */
14489 	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14490 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14491 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14492 	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14493 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14494 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14495 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14496 	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14497 	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14498 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14499 	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14500 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14501 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14502 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14503 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
14504 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14505 
14506 	err = tg3_phy_probe(tp);
14507 	if (err) {
14508 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14509 		/* ... but do not return immediately ... */
14510 		tg3_mdio_fini(tp);
14511 	}
14512 
14513 	tg3_read_vpd(tp);
14514 	tg3_read_fw_ver(tp);
14515 
14516 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14517 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14518 	} else {
14519 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14520 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14521 		else
14522 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14523 	}
14524 
14525 	/* 5700 {AX,BX} chips have a broken status block link
14526 	 * change bit implementation, so we must use the
14527 	 * status register in those cases.
14528 	 */
14529 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14530 		tg3_flag_set(tp, USE_LINKCHG_REG);
14531 	else
14532 		tg3_flag_clear(tp, USE_LINKCHG_REG);
14533 
14534 	/* The led_ctrl is set during tg3_phy_probe; here we might
14535 	 * have to force the link status polling mechanism based
14536 	 * upon subsystem IDs.
14537 	 */
14538 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14539 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14540 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14541 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14542 		tg3_flag_set(tp, USE_LINKCHG_REG);
14543 	}
14544 
14545 	/* For all SERDES we poll the MAC status register. */
14546 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14547 		tg3_flag_set(tp, POLL_SERDES);
14548 	else
14549 		tg3_flag_clear(tp, POLL_SERDES);
14550 
14551 	tp->rx_offset = NET_IP_ALIGN;
14552 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14553 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14554 	    tg3_flag(tp, PCIX_MODE)) {
14555 		tp->rx_offset = 0;
14556 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
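		/* With rx_offset forced to 0, the IP header lands on an
		 * unaligned boundary; a maximal u16 threshold makes every
		 * frame take the rx copy path, which should realign it on
		 * architectures lacking efficient unaligned access.
		 */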
14557 		tp->rx_copy_thresh = ~(u16)0;
14558 #endif
14559 	}
14560 
14561 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14562 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14563 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14564 
14565 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14566 
14567 	/* Increment the rx prod index on the rx std ring by at most
14568 	 * 8 for these chips to work around hw errata.
14569 	 */
14570 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14571 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14572 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14573 		tp->rx_std_max_post = 8;
14574 
14575 	if (tg3_flag(tp, ASPM_WORKAROUND))
14576 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14577 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
14578 
14579 	return err;
14580 }
14581 
14582 #ifdef CONFIG_SPARC
14583 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14584 {
14585 	struct net_device *dev = tp->dev;
14586 	struct pci_dev *pdev = tp->pdev;
14587 	struct device_node *dp = pci_device_to_OF_node(pdev);
14588 	const unsigned char *addr;
14589 	int len;
14590 
14591 	addr = of_get_property(dp, "local-mac-address", &len);
14592 	if (addr && len == 6) {
14593 		memcpy(dev->dev_addr, addr, 6);
14594 		memcpy(dev->perm_addr, dev->dev_addr, 6);
14595 		return 0;
14596 	}
14597 	return -ENODEV;
14598 }
14599 
14600 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14601 {
14602 	struct net_device *dev = tp->dev;
14603 
14604 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14605 	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14606 	return 0;
14607 }
14608 #endif
14609 
14610 static int __devinit tg3_get_device_address(struct tg3 *tp)
14611 {
14612 	struct net_device *dev = tp->dev;
14613 	u32 hi, lo, mac_offset;
14614 	int addr_ok = 0;
14615 
14616 #ifdef CONFIG_SPARC
14617 	if (!tg3_get_macaddr_sparc(tp))
14618 		return 0;
14619 #endif
14620 
14621 	mac_offset = 0x7c;
14622 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14623 	    tg3_flag(tp, 5780_CLASS)) {
14624 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14625 			mac_offset = 0xcc;
14626 		if (tg3_nvram_lock(tp))
14627 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14628 		else
14629 			tg3_nvram_unlock(tp);
14630 	} else if (tg3_flag(tp, 5717_PLUS)) {
14631 		if (tp->pci_fn & 1)
14632 			mac_offset = 0xcc;
14633 		if (tp->pci_fn > 1)
14634 			mac_offset += 0x18c;
14635 	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14636 		mac_offset = 0x10;
14637 
14638 	/* First try to get it from MAC address mailbox. */
14639 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
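	/* 0x484b is ASCII "HK", the signature the bootcode leaves in the
	 * upper half of the mailbox when it has stored a valid MAC address.
	 */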
14640 	if ((hi >> 16) == 0x484b) {
14641 		dev->dev_addr[0] = (hi >>  8) & 0xff;
14642 		dev->dev_addr[1] = (hi >>  0) & 0xff;
14643 
14644 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14645 		dev->dev_addr[2] = (lo >> 24) & 0xff;
14646 		dev->dev_addr[3] = (lo >> 16) & 0xff;
14647 		dev->dev_addr[4] = (lo >>  8) & 0xff;
14648 		dev->dev_addr[5] = (lo >>  0) & 0xff;
14649 
14650 		/* Some old bootcode may report a 0 MAC address in SRAM */
14651 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14652 	}
14653 	if (!addr_ok) {
14654 		/* Next, try NVRAM. */
14655 		if (!tg3_flag(tp, NO_NVRAM) &&
14656 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14657 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14658 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14659 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14660 		}
14661 		/* Finally just fetch it out of the MAC control regs. */
14662 		else {
14663 			hi = tr32(MAC_ADDR_0_HIGH);
14664 			lo = tr32(MAC_ADDR_0_LOW);
14665 
14666 			dev->dev_addr[5] = lo & 0xff;
14667 			dev->dev_addr[4] = (lo >> 8) & 0xff;
14668 			dev->dev_addr[3] = (lo >> 16) & 0xff;
14669 			dev->dev_addr[2] = (lo >> 24) & 0xff;
14670 			dev->dev_addr[1] = hi & 0xff;
14671 			dev->dev_addr[0] = (hi >> 8) & 0xff;
14672 		}
14673 	}
14674 
14675 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14676 #ifdef CONFIG_SPARC
14677 		if (!tg3_get_default_macaddr_sparc(tp))
14678 			return 0;
14679 #endif
14680 		return -EINVAL;
14681 	}
14682 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14683 	return 0;
14684 }
14685 
14686 #define BOUNDARY_SINGLE_CACHELINE	1
14687 #define BOUNDARY_MULTI_CACHELINE	2
14688 
14689 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14690 {
14691 	int cacheline_size;
14692 	u8 byte;
14693 	int goal;
14694 
14695 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14696 	if (byte == 0)
14697 		cacheline_size = 1024;
14698 	else
14699 		cacheline_size = (int) byte * 4;
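	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the
	 * multiply by 4; a value of 0 means no size was advertised, so
	 * the code assumes a worst-case 1024 bytes.
	 */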
14700 
14701 	/* On 5703 and later non-PCI-Express chips, the boundary
14702 	 * bits have no effect.
14703 	 */
14704 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14705 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14706 	    !tg3_flag(tp, PCI_EXPRESS))
14707 		goto out;
14708 
14709 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14710 	goal = BOUNDARY_MULTI_CACHELINE;
14711 #else
14712 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14713 	goal = BOUNDARY_SINGLE_CACHELINE;
14714 #else
14715 	goal = 0;
14716 #endif
14717 #endif
14718 
14719 	if (tg3_flag(tp, 57765_PLUS)) {
14720 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14721 		goto out;
14722 	}
14723 
14724 	if (!goal)
14725 		goto out;
14726 
14727 	/* PCI controllers on most RISC systems tend to disconnect
14728 	 * when a device tries to burst across a cache-line boundary.
14729 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14730 	 *
14731 	 * Unfortunately, for PCI-E there are only limited
14732 	 * write-side controls for this, and thus for reads
14733 	 * we will still get the disconnects.  We'll also waste
14734 	 * these PCI cycles for both read and write for chips
14735 	 * other than 5700 and 5701 which do not implement the
14736 	 * boundary bits.
14737 	 */
14738 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14739 		switch (cacheline_size) {
14740 		case 16:
14741 		case 32:
14742 		case 64:
14743 		case 128:
14744 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14745 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14746 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14747 			} else {
14748 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14749 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14750 			}
14751 			break;
14752 
14753 		case 256:
14754 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14755 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14756 			break;
14757 
14758 		default:
14759 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14760 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14761 			break;
14762 		}
14763 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
14764 		switch (cacheline_size) {
14765 		case 16:
14766 		case 32:
14767 		case 64:
14768 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14769 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14770 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14771 				break;
14772 			}
14773 			/* fallthrough */
14774 		case 128:
14775 		default:
14776 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14777 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14778 			break;
14779 		}
14780 	} else {
14781 		switch (cacheline_size) {
14782 		case 16:
14783 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14784 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
14785 					DMA_RWCTRL_WRITE_BNDRY_16);
14786 				break;
14787 			}
14788 			/* fallthrough */
14789 		case 32:
14790 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14791 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
14792 					DMA_RWCTRL_WRITE_BNDRY_32);
14793 				break;
14794 			}
14795 			/* fallthrough */
14796 		case 64:
14797 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14798 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
14799 					DMA_RWCTRL_WRITE_BNDRY_64);
14800 				break;
14801 			}
14802 			/* fallthrough */
14803 		case 128:
14804 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
14805 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
14806 					DMA_RWCTRL_WRITE_BNDRY_128);
14807 				break;
14808 			}
14809 			/* fallthrough */
14810 		case 256:
14811 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
14812 				DMA_RWCTRL_WRITE_BNDRY_256);
14813 			break;
14814 		case 512:
14815 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
14816 				DMA_RWCTRL_WRITE_BNDRY_512);
14817 			break;
14818 		case 1024:
14819 		default:
14820 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14821 				DMA_RWCTRL_WRITE_BNDRY_1024);
14822 			break;
14823 		}
14824 	}
14825 
14826 out:
14827 	return val;
14828 }
14829 
14830 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14831 {
14832 	struct tg3_internal_buffer_desc test_desc;
14833 	u32 sram_dma_descs;
14834 	int i, ret;
14835 
14836 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14837 
14838 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14839 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14840 	tw32(RDMAC_STATUS, 0);
14841 	tw32(WDMAC_STATUS, 0);
14842 
14843 	tw32(BUFMGR_MODE, 0);
14844 	tw32(FTQ_RESET, 0);
14845 
14846 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
14847 	test_desc.addr_lo = buf_dma & 0xffffffff;
14848 	test_desc.nic_mbuf = 0x00002100;
14849 	test_desc.len = size;
14850 
14851 	/*
14852 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14853 	 * the *second* time the tg3 driver was getting loaded after an
14854 	 * initial scan.
14855 	 *
14856 	 * Broadcom tells me:
14857 	 *   ...the DMA engine is connected to the GRC block and a DMA
14858 	 *   reset may affect the GRC block in some unpredictable way...
14859 	 *   The behavior of resets to individual blocks has not been tested.
14860 	 *
14861 	 * Broadcom noted the GRC reset will also reset all sub-components.
14862 	 */
14863 	if (to_device) {
14864 		test_desc.cqid_sqid = (13 << 8) | 2;
14865 
14866 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14867 		udelay(40);
14868 	} else {
14869 		test_desc.cqid_sqid = (16 << 8) | 7;
14870 
14871 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14872 		udelay(40);
14873 	}
14874 	test_desc.flags = 0x00000005;
14875 
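	/* Copy the descriptor into chip SRAM one 32-bit word at a time
	 * through the PCI memory-window registers, then park the window
	 * back at offset 0.
	 */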
14876 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14877 		u32 val;
14878 
14879 		val = *(((u32 *)&test_desc) + i);
14880 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14881 				       sram_dma_descs + (i * sizeof(u32)));
14882 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14883 	}
14884 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14885 
14886 	if (to_device)
14887 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14888 	else
14889 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14890 
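	/* Poll the completion FIFO for up to 4ms (40 polls, 100us apart)
	 * for the descriptor pointer to come back.
	 */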
14891 	ret = -ENODEV;
14892 	for (i = 0; i < 40; i++) {
14893 		u32 val;
14894 
14895 		if (to_device)
14896 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14897 		else
14898 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14899 		if ((val & 0xffff) == sram_dma_descs) {
14900 			ret = 0;
14901 			break;
14902 		}
14903 
14904 		udelay(100);
14905 	}
14906 
14907 	return ret;
14908 }
14909 
14910 #define TEST_BUFFER_SIZE	0x2000
14911 
14912 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14913 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14914 	{ },
14915 };
14916 
14917 static int __devinit tg3_test_dma(struct tg3 *tp)
14918 {
14919 	dma_addr_t buf_dma;
14920 	u32 *buf, saved_dma_rwctrl;
14921 	int ret = 0;
14922 
14923 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14924 				 &buf_dma, GFP_KERNEL);
14925 	if (!buf) {
14926 		ret = -ENOMEM;
14927 		goto out_nofree;
14928 	}
14929 
14930 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14931 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14932 
14933 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14934 
14935 	if (tg3_flag(tp, 57765_PLUS))
14936 		goto out;
14937 
14938 	if (tg3_flag(tp, PCI_EXPRESS)) {
14939 		/* DMA read watermark not used on PCIE */
14940 		tp->dma_rwctrl |= 0x00180000;
14941 	} else if (!tg3_flag(tp, PCIX_MODE)) {
14942 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14943 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14944 			tp->dma_rwctrl |= 0x003f0000;
14945 		else
14946 			tp->dma_rwctrl |= 0x003f000f;
14947 	} else {
14948 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14949 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14950 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14951 			u32 read_water = 0x7;
14952 
14953 			/* If the 5704 is behind the EPB bridge, we can
14954 			 * do the less restrictive ONE_DMA workaround for
14955 			 * better performance.
14956 			 */
14957 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14958 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14959 				tp->dma_rwctrl |= 0x8000;
14960 			else if (ccval == 0x6 || ccval == 0x7)
14961 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14962 
14963 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14964 				read_water = 4;
14965 			/* Set bit 23 to enable PCIX hw bug fix */
14966 			tp->dma_rwctrl |=
14967 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14968 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14969 				(1 << 23);
14970 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14971 			/* 5780 always in PCIX mode */
14972 			tp->dma_rwctrl |= 0x00144000;
14973 		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14974 			/* 5714 always in PCIX mode */
14975 			tp->dma_rwctrl |= 0x00148000;
14976 		} else {
14977 			tp->dma_rwctrl |= 0x001b000f;
14978 		}
14979 	}
14980 
14981 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14982 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14983 		tp->dma_rwctrl &= 0xfffffff0;
14984 
14985 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14986 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14987 		/* Remove this if it causes problems for some boards. */
14988 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14989 
14990 		/* On 5700/5701 chips, we need to set this bit.
14991 		 * Otherwise the chip will issue cacheline transactions
14992 		 * to streamable DMA memory without all of the byte
14993 		 * enables turned on.  This is an error on several
14994 		 * RISC PCI controllers, in particular sparc64.
14995 		 *
14996 		 * On 5703/5704 chips, this bit has been reassigned
14997 		 * a different meaning.  In particular, it is used
14998 		 * on those chips to enable a PCI-X workaround.
14999 		 */
15000 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15001 	}
15002 
15003 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15004 
15005 #if 0
15006 	/* Unneeded, already done by tg3_get_invariants.  */
15007 	tg3_switch_clocks(tp);
15008 #endif
15009 
15010 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15011 	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15012 		goto out;
15013 
15014 	/* It is best to perform the DMA test with maximum write burst size
15015 	 * to expose the 5700/5701 write DMA bug.
15016 	 */
15017 	saved_dma_rwctrl = tp->dma_rwctrl;
15018 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15019 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15020 
15021 	while (1) {
15022 		u32 *p = buf, i;
15023 
15024 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15025 			p[i] = i;
15026 
15027 		/* Send the buffer to the chip. */
15028 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15029 		if (ret) {
15030 			dev_err(&tp->pdev->dev,
15031 				"%s: Buffer write failed. err = %d\n",
15032 				__func__, ret);
15033 			break;
15034 		}
15035 
15036 #if 0
15037 		/* validate data reached card RAM correctly. */
15038 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15039 			u32 val;
15040 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
15041 			if (le32_to_cpu(val) != p[i]) {
15042 				dev_err(&tp->pdev->dev,
15043 					"%s: Buffer corrupted on device! "
15044 					"(%d != %d)\n", __func__, val, i);
15045 				/* ret = -ENODEV here? */
15046 			}
15047 			p[i] = 0;
15048 		}
15049 #endif
15050 		/* Now read it back. */
15051 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15052 		if (ret) {
15053 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15054 				"err = %d\n", __func__, ret);
15055 			break;
15056 		}
15057 
15058 		/* Verify it. */
15059 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15060 			if (p[i] == i)
15061 				continue;
15062 
15063 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15064 			    DMA_RWCTRL_WRITE_BNDRY_16) {
15065 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15066 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15067 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15068 				break;
15069 			} else {
15070 				dev_err(&tp->pdev->dev,
15071 					"%s: Buffer corrupted on read back! "
15072 					"(%d != %d)\n", __func__, p[i], i);
15073 				ret = -ENODEV;
15074 				goto out;
15075 			}
15076 		}
15077 
15078 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15079 			/* Success. */
15080 			ret = 0;
15081 			break;
15082 		}
15083 	}
15084 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15085 	    DMA_RWCTRL_WRITE_BNDRY_16) {
15086 		/* DMA test passed without adjusting DMA boundary,
15087 		 * now look for chipsets that are known to expose the
15088 		 * DMA bug without failing the test.
15089 		 */
15090 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15091 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15092 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15093 		} else {
15094 			/* Safe to use the calculated DMA boundary. */
15095 			tp->dma_rwctrl = saved_dma_rwctrl;
15096 		}
15097 
15098 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15099 	}
15100 
15101 out:
15102 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15103 out_nofree:
15104 	return ret;
15105 }
15106 
15107 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15108 {
15109 	if (tg3_flag(tp, 57765_PLUS)) {
15110 		tp->bufmgr_config.mbuf_read_dma_low_water =
15111 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15112 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15113 			DEFAULT_MB_MACRX_LOW_WATER_57765;
15114 		tp->bufmgr_config.mbuf_high_water =
15115 			DEFAULT_MB_HIGH_WATER_57765;
15116 
15117 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15118 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15119 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15120 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15121 		tp->bufmgr_config.mbuf_high_water_jumbo =
15122 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15123 	} else if (tg3_flag(tp, 5705_PLUS)) {
15124 		tp->bufmgr_config.mbuf_read_dma_low_water =
15125 			DEFAULT_MB_RDMA_LOW_WATER_5705;
15126 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15127 			DEFAULT_MB_MACRX_LOW_WATER_5705;
15128 		tp->bufmgr_config.mbuf_high_water =
15129 			DEFAULT_MB_HIGH_WATER_5705;
15130 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15131 			tp->bufmgr_config.mbuf_mac_rx_low_water =
15132 				DEFAULT_MB_MACRX_LOW_WATER_5906;
15133 			tp->bufmgr_config.mbuf_high_water =
15134 				DEFAULT_MB_HIGH_WATER_5906;
15135 		}
15136 
15137 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15138 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15139 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15140 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15141 		tp->bufmgr_config.mbuf_high_water_jumbo =
15142 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15143 	} else {
15144 		tp->bufmgr_config.mbuf_read_dma_low_water =
15145 			DEFAULT_MB_RDMA_LOW_WATER;
15146 		tp->bufmgr_config.mbuf_mac_rx_low_water =
15147 			DEFAULT_MB_MACRX_LOW_WATER;
15148 		tp->bufmgr_config.mbuf_high_water =
15149 			DEFAULT_MB_HIGH_WATER;
15150 
15151 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15152 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15153 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15154 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15155 		tp->bufmgr_config.mbuf_high_water_jumbo =
15156 			DEFAULT_MB_HIGH_WATER_JUMBO;
15157 	}
15158 
15159 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15160 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15161 }
15162 
15163 static char * __devinit tg3_phy_string(struct tg3 *tp)
15164 {
15165 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
15166 	case TG3_PHY_ID_BCM5400:	return "5400";
15167 	case TG3_PHY_ID_BCM5401:	return "5401";
15168 	case TG3_PHY_ID_BCM5411:	return "5411";
15169 	case TG3_PHY_ID_BCM5701:	return "5701";
15170 	case TG3_PHY_ID_BCM5703:	return "5703";
15171 	case TG3_PHY_ID_BCM5704:	return "5704";
15172 	case TG3_PHY_ID_BCM5705:	return "5705";
15173 	case TG3_PHY_ID_BCM5750:	return "5750";
15174 	case TG3_PHY_ID_BCM5752:	return "5752";
15175 	case TG3_PHY_ID_BCM5714:	return "5714";
15176 	case TG3_PHY_ID_BCM5780:	return "5780";
15177 	case TG3_PHY_ID_BCM5755:	return "5755";
15178 	case TG3_PHY_ID_BCM5787:	return "5787";
15179 	case TG3_PHY_ID_BCM5784:	return "5784";
15180 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
15181 	case TG3_PHY_ID_BCM5906:	return "5906";
15182 	case TG3_PHY_ID_BCM5761:	return "5761";
15183 	case TG3_PHY_ID_BCM5718C:	return "5718C";
15184 	case TG3_PHY_ID_BCM5718S:	return "5718S";
15185 	case TG3_PHY_ID_BCM57765:	return "57765";
15186 	case TG3_PHY_ID_BCM5719C:	return "5719C";
15187 	case TG3_PHY_ID_BCM5720C:	return "5720C";
15188 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
15189 	case 0:			return "serdes";
15190 	default:		return "unknown";
15191 	}
15192 }
15193 
15194 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15195 {
15196 	if (tg3_flag(tp, PCI_EXPRESS)) {
15197 		strcpy(str, "PCI Express");
15198 		return str;
15199 	} else if (tg3_flag(tp, PCIX_MODE)) {
15200 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15201 
15202 		strcpy(str, "PCIX:");
15203 
15204 		if ((clock_ctrl == 7) ||
15205 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15206 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15207 			strcat(str, "133MHz");
15208 		else if (clock_ctrl == 0)
15209 			strcat(str, "33MHz");
15210 		else if (clock_ctrl == 2)
15211 			strcat(str, "50MHz");
15212 		else if (clock_ctrl == 4)
15213 			strcat(str, "66MHz");
15214 		else if (clock_ctrl == 6)
15215 			strcat(str, "100MHz");
15216 	} else {
15217 		strcpy(str, "PCI:");
15218 		if (tg3_flag(tp, PCI_HIGH_SPEED))
15219 			strcat(str, "66MHz");
15220 		else
15221 			strcat(str, "33MHz");
15222 	}
15223 	if (tg3_flag(tp, PCI_32BIT))
15224 		strcat(str, ":32-bit");
15225 	else
15226 		strcat(str, ":64-bit");
15227 	return str;
15228 }
15229 
15230 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15231 {
15232 	struct pci_dev *peer;
15233 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15234 
15235 	for (func = 0; func < 8; func++) {
15236 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15237 		if (peer && peer != tp->pdev)
15238 			break;
15239 		pci_dev_put(peer);
15240 	}
15241 	/* 5704 can be configured in single-port mode, set peer to
15242 	 * tp->pdev in that case.
15243 	 */
15244 	if (!peer) {
15245 		peer = tp->pdev;
15246 		return peer;
15247 	}
15248 
15249 	/*
15250 	 * We don't need to keep the refcount elevated; there's no way
15251 	 * to remove one half of this device without removing the other.
15252 	 */
15253 	pci_dev_put(peer);
15254 
15255 	return peer;
15256 }
15257 
15258 static void __devinit tg3_init_coal(struct tg3 *tp)
15259 {
15260 	struct ethtool_coalesce *ec = &tp->coal;
15261 
15262 	memset(ec, 0, sizeof(*ec));
15263 	ec->cmd = ETHTOOL_GCOALESCE;
15264 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15265 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15266 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15267 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15268 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15269 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15270 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15271 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15272 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15273 
15274 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15275 				 HOSTCC_MODE_CLRTICK_TXBD)) {
15276 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15277 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15278 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15279 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15280 	}
15281 
15282 	if (tg3_flag(tp, 5705_PLUS)) {
15283 		ec->rx_coalesce_usecs_irq = 0;
15284 		ec->tx_coalesce_usecs_irq = 0;
15285 		ec->stats_block_coalesce_usecs = 0;
15286 	}
15287 }
15288 
15289 static const struct net_device_ops tg3_netdev_ops = {
15290 	.ndo_open		= tg3_open,
15291 	.ndo_stop		= tg3_close,
15292 	.ndo_start_xmit		= tg3_start_xmit,
15293 	.ndo_get_stats64	= tg3_get_stats64,
15294 	.ndo_validate_addr	= eth_validate_addr,
15295 	.ndo_set_rx_mode	= tg3_set_rx_mode,
15296 	.ndo_set_mac_address	= tg3_set_mac_addr,
15297 	.ndo_do_ioctl		= tg3_ioctl,
15298 	.ndo_tx_timeout		= tg3_tx_timeout,
15299 	.ndo_change_mtu		= tg3_change_mtu,
15300 	.ndo_fix_features	= tg3_fix_features,
15301 	.ndo_set_features	= tg3_set_features,
15302 #ifdef CONFIG_NET_POLL_CONTROLLER
15303 	.ndo_poll_controller	= tg3_poll_controller,
15304 #endif
15305 };
15306 
15307 static int __devinit tg3_init_one(struct pci_dev *pdev,
15308 				  const struct pci_device_id *ent)
15309 {
15310 	struct net_device *dev;
15311 	struct tg3 *tp;
15312 	int i, err, pm_cap;
15313 	u32 sndmbx, rcvmbx, intmbx;
15314 	char str[40];
15315 	u64 dma_mask, persist_dma_mask;
15316 	u32 features = 0;
15317 
15318 	printk_once(KERN_INFO "%s\n", version);
15319 
15320 	err = pci_enable_device(pdev);
15321 	if (err) {
15322 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15323 		return err;
15324 	}
15325 
15326 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
15327 	if (err) {
15328 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15329 		goto err_out_disable_pdev;
15330 	}
15331 
15332 	pci_set_master(pdev);
15333 
15334 	/* Find power-management capability. */
15335 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15336 	if (pm_cap == 0) {
15337 		dev_err(&pdev->dev,
15338 			"Cannot find Power Management capability, aborting\n");
15339 		err = -EIO;
15340 		goto err_out_free_res;
15341 	}
15342 
15343 	err = pci_set_power_state(pdev, PCI_D0);
15344 	if (err) {
15345 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15346 		goto err_out_free_res;
15347 	}
15348 
15349 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15350 	if (!dev) {
15351 		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15352 		err = -ENOMEM;
15353 		goto err_out_power_down;
15354 	}
15355 
15356 	SET_NETDEV_DEV(dev, &pdev->dev);
15357 
15358 	tp = netdev_priv(dev);
15359 	tp->pdev = pdev;
15360 	tp->dev = dev;
15361 	tp->pm_cap = pm_cap;
15362 	tp->rx_mode = TG3_DEF_RX_MODE;
15363 	tp->tx_mode = TG3_DEF_TX_MODE;
15364 
15365 	if (tg3_debug > 0)
15366 		tp->msg_enable = tg3_debug;
15367 	else
15368 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
15369 
15370 	/* The word/byte swap controls here control register access byte
15371 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15372 	 * setting below.
15373 	 */
15374 	tp->misc_host_ctrl =
15375 		MISC_HOST_CTRL_MASK_PCI_INT |
15376 		MISC_HOST_CTRL_WORD_SWAP |
15377 		MISC_HOST_CTRL_INDIR_ACCESS |
15378 		MISC_HOST_CTRL_PCISTATE_RW;
15379 
15380 	/* The NONFRM (non-frame) byte/word swap controls take effect
15381 	 * on descriptor entries, anything which isn't packet data.
15382 	 *
15383 	 * The StrongARM chips on the board (one for tx, one for rx)
15384 	 * are running in big-endian mode.
15385 	 */
15386 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15387 			GRC_MODE_WSWAP_NONFRM_DATA);
15388 #ifdef __BIG_ENDIAN
15389 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15390 #endif
15391 	spin_lock_init(&tp->lock);
15392 	spin_lock_init(&tp->indirect_lock);
15393 	INIT_WORK(&tp->reset_task, tg3_reset_task);
15394 
15395 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
15396 	if (!tp->regs) {
15397 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15398 		err = -ENOMEM;
15399 		goto err_out_free_dev;
15400 	}
15401 
15402 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15403 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15404 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15405 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15406 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15407 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15408 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15409 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15410 		tg3_flag_set(tp, ENABLE_APE);
15411 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15412 		if (!tp->aperegs) {
15413 			dev_err(&pdev->dev,
15414 				"Cannot map APE registers, aborting\n");
15415 			err = -ENOMEM;
15416 			goto err_out_iounmap;
15417 		}
15418 	}
15419 
15420 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15421 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15422 
15423 	dev->ethtool_ops = &tg3_ethtool_ops;
15424 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
15425 	dev->netdev_ops = &tg3_netdev_ops;
15426 	dev->irq = pdev->irq;
15427 
15428 	err = tg3_get_invariants(tp);
15429 	if (err) {
15430 		dev_err(&pdev->dev,
15431 			"Problem fetching invariants of chip, aborting\n");
15432 		goto err_out_apeunmap;
15433 	}
15434 
15435 	/* The EPB bridge inside 5714, 5715, and 5780 and any
15436 	 * device behind the EPB cannot support DMA addresses > 40-bit.
15437 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15438 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15439 	 * do DMA address check in tg3_start_xmit().
15440 	 */
15441 	if (tg3_flag(tp, IS_5788))
15442 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15443 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15444 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15445 #ifdef CONFIG_HIGHMEM
15446 		dma_mask = DMA_BIT_MASK(64);
15447 #endif
15448 	} else
15449 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15450 
15451 	/* Configure DMA attributes. */
15452 	if (dma_mask > DMA_BIT_MASK(32)) {
15453 		err = pci_set_dma_mask(pdev, dma_mask);
15454 		if (!err) {
15455 			features |= NETIF_F_HIGHDMA;
15456 			err = pci_set_consistent_dma_mask(pdev,
15457 							  persist_dma_mask);
15458 			if (err < 0) {
15459 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
15460 					"DMA for consistent allocations\n");
15461 				goto err_out_apeunmap;
15462 			}
15463 		}
15464 	}
15465 	if (err || dma_mask == DMA_BIT_MASK(32)) {
15466 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15467 		if (err) {
15468 			dev_err(&pdev->dev,
15469 				"No usable DMA configuration, aborting\n");
15470 			goto err_out_apeunmap;
15471 		}
15472 	}
15473 
15474 	tg3_init_bufmgr_config(tp);
15475 
15476 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15477 
15478 	/* 5700 B0 chips do not support checksumming correctly due
15479 	 * to hardware bugs.
15480 	 */
15481 	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15482 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15483 
15484 		if (tg3_flag(tp, 5755_PLUS))
15485 			features |= NETIF_F_IPV6_CSUM;
15486 	}
15487 
15488 	/* TSO is on by default on chips that support hardware TSO.
15489 	 * Firmware TSO on older chips gives lower performance, so it
15490 	 * is off by default, but can be enabled using ethtool.
15491 	 */
15492 	if ((tg3_flag(tp, HW_TSO_1) ||
15493 	     tg3_flag(tp, HW_TSO_2) ||
15494 	     tg3_flag(tp, HW_TSO_3)) &&
15495 	    (features & NETIF_F_IP_CSUM))
15496 		features |= NETIF_F_TSO;
15497 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15498 		if (features & NETIF_F_IPV6_CSUM)
15499 			features |= NETIF_F_TSO6;
15500 		if (tg3_flag(tp, HW_TSO_3) ||
15501 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15502 		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15503 		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15504 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15505 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15506 			features |= NETIF_F_TSO_ECN;
15507 	}
15508 
15509 	dev->features |= features;
15510 	dev->vlan_features |= features;
15511 
15512 	/*
15513 	 * Add loopback capability only for a subset of devices that support
15514 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15515 	 * loopback for the remaining devices.
15516 	 */
15517 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15518 	    !tg3_flag(tp, CPMU_PRESENT))
15519 		/* Add the loopback capability */
15520 		features |= NETIF_F_LOOPBACK;
15521 
15522 	dev->hw_features |= features;
15523 
15524 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15525 	    !tg3_flag(tp, TSO_CAPABLE) &&
15526 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15527 		tg3_flag_set(tp, MAX_RXPEND_64);
15528 		tp->rx_pending = 63;
15529 	}
15530 
15531 	err = tg3_get_device_address(tp);
15532 	if (err) {
15533 		dev_err(&pdev->dev,
15534 			"Could not obtain valid ethernet address, aborting\n");
15535 		goto err_out_apeunmap;
15536 	}
15537 
15538 	/*
15539 	 * Reset the chip in case an UNDI or EFI driver did not shut it
15540 	 * down.  The DMA self test will enable WDMAC and we'll see
15541 	 * (spurious) pending DMA on the PCI bus at that point.
15542 	 */
15543 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15544 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15545 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15546 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15547 	}
15548 
15549 	err = tg3_test_dma(tp);
15550 	if (err) {
15551 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15552 		goto err_out_apeunmap;
15553 	}
15554 
15555 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15556 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15557 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15558 	for (i = 0; i < tp->irq_max; i++) {
15559 		struct tg3_napi *tnapi = &tp->napi[i];
15560 
15561 		tnapi->tp = tp;
15562 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15563 
15564 		tnapi->int_mbox = intmbx;
15565 		if (i <= 4)
15566 			intmbx += 0x8;
15567 		else
15568 			intmbx += 0x4;
15569 
15570 		tnapi->consmbox = rcvmbx;
15571 		tnapi->prodmbox = sndmbx;
15572 
15573 		if (i)
15574 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15575 		else
15576 			tnapi->coal_now = HOSTCC_MODE_NOW;
15577 
15578 		if (!tg3_flag(tp, SUPPORT_MSIX))
15579 			break;
15580 
15581 		/*
15582 		 * If we support MSIX, we'll be using RSS.  If we're using
15583 		 * RSS, the first vector only handles link interrupts and the
15584 		 * remaining vectors handle rx and tx interrupts.  Reuse the
15585 		 * mailbox values for the next iteration.  The values we set up
15586 		 * above are still useful for the single vectored mode.
15587 		 */
15588 		if (!i)
15589 			continue;
15590 
15591 		rcvmbx += 0x8;
15592 
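		/* The producer mailbox walks both 32-bit halves of each
		 * 64-bit register relative to MAILBOX_SNDHOST_PROD_IDX_0:
		 * +0x4, +0x0, then +0xc, +0x8, and so on.
		 */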
15593 		if (sndmbx & 0x4)
15594 			sndmbx -= 0x4;
15595 		else
15596 			sndmbx += 0xc;
15597 	}
15598 
15599 	tg3_init_coal(tp);
15600 
15601 	pci_set_drvdata(pdev, dev);
15602 
15603 	if (tg3_flag(tp, 5717_PLUS)) {
15604 		/* Resume a low-power mode */
15605 		tg3_frob_aux_power(tp, false);
15606 	}
15607 
15608 	err = register_netdev(dev);
15609 	if (err) {
15610 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15611 		goto err_out_apeunmap;
15612 	}
15613 
15614 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15615 		    tp->board_part_number,
15616 		    tp->pci_chip_rev_id,
15617 		    tg3_bus_string(tp, str),
15618 		    dev->dev_addr);
15619 
15620 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15621 		struct phy_device *phydev;
15622 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15623 		netdev_info(dev,
15624 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15625 			    phydev->drv->name, dev_name(&phydev->dev));
15626 	} else {
15627 		char *ethtype;
15628 
15629 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15630 			ethtype = "10/100Base-TX";
15631 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15632 			ethtype = "1000Base-SX";
15633 		else
15634 			ethtype = "10/100/1000Base-T";
15635 
15636 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15637 			    "(WireSpeed[%d], EEE[%d])\n",
15638 			    tg3_phy_string(tp), ethtype,
15639 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15640 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15641 	}
15642 
15643 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15644 		    (dev->features & NETIF_F_RXCSUM) != 0,
15645 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
15646 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15647 		    tg3_flag(tp, ENABLE_ASF) != 0,
15648 		    tg3_flag(tp, TSO_CAPABLE) != 0);
15649 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15650 		    tp->dma_rwctrl,
15651 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15652 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15653 
15654 	pci_save_state(pdev);
15655 
15656 	return 0;
15657 
15658 err_out_apeunmap:
15659 	if (tp->aperegs) {
15660 		iounmap(tp->aperegs);
15661 		tp->aperegs = NULL;
15662 	}
15663 
15664 err_out_iounmap:
15665 	if (tp->regs) {
15666 		iounmap(tp->regs);
15667 		tp->regs = NULL;
15668 	}
15669 
15670 err_out_free_dev:
15671 	free_netdev(dev);
15672 
15673 err_out_power_down:
15674 	pci_set_power_state(pdev, PCI_D3hot);
15675 
15676 err_out_free_res:
15677 	pci_release_regions(pdev);
15678 
15679 err_out_disable_pdev:
15680 	pci_disable_device(pdev);
15681 	pci_set_drvdata(pdev, NULL);
15682 	return err;
15683 }
15684 
15685 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15686 {
15687 	struct net_device *dev = pci_get_drvdata(pdev);
15688 
15689 	if (dev) {
15690 		struct tg3 *tp = netdev_priv(dev);
15691 
15692 		if (tp->fw)
15693 			release_firmware(tp->fw);
15694 
15695 		tg3_reset_task_cancel(tp);
15696 
15697 		if (tg3_flag(tp, USE_PHYLIB)) {
15698 			tg3_phy_fini(tp);
15699 			tg3_mdio_fini(tp);
15700 		}
15701 
15702 		unregister_netdev(dev);
15703 		if (tp->aperegs) {
15704 			iounmap(tp->aperegs);
15705 			tp->aperegs = NULL;
15706 		}
15707 		if (tp->regs) {
15708 			iounmap(tp->regs);
15709 			tp->regs = NULL;
15710 		}
15711 		free_netdev(dev);
15712 		pci_release_regions(pdev);
15713 		pci_disable_device(pdev);
15714 		pci_set_drvdata(pdev, NULL);
15715 	}
15716 }
15717 
15718 #ifdef CONFIG_PM_SLEEP
15719 static int tg3_suspend(struct device *device)
15720 {
15721 	struct pci_dev *pdev = to_pci_dev(device);
15722 	struct net_device *dev = pci_get_drvdata(pdev);
15723 	struct tg3 *tp = netdev_priv(dev);
15724 	int err;
15725 
15726 	if (!netif_running(dev))
15727 		return 0;
15728 
15729 	tg3_reset_task_cancel(tp);
15730 	tg3_phy_stop(tp);
15731 	tg3_netif_stop(tp);
15732 
15733 	del_timer_sync(&tp->timer);
15734 
15735 	tg3_full_lock(tp, 1);
15736 	tg3_disable_ints(tp);
15737 	tg3_full_unlock(tp);
15738 
15739 	netif_device_detach(dev);
15740 
15741 	tg3_full_lock(tp, 0);
15742 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15743 	tg3_flag_clear(tp, INIT_COMPLETE);
15744 	tg3_full_unlock(tp);
15745 
15746 	err = tg3_power_down_prepare(tp);
15747 	if (err) {
15748 		int err2;
15749 
15750 		tg3_full_lock(tp, 0);
15751 
15752 		tg3_flag_set(tp, INIT_COMPLETE);
15753 		err2 = tg3_restart_hw(tp, 1);
15754 		if (err2)
15755 			goto out;
15756 
15757 		tp->timer.expires = jiffies + tp->timer_offset;
15758 		add_timer(&tp->timer);
15759 
15760 		netif_device_attach(dev);
15761 		tg3_netif_start(tp);
15762 
15763 out:
15764 		tg3_full_unlock(tp);
15765 
15766 		if (!err2)
15767 			tg3_phy_start(tp);
15768 	}
15769 
15770 	return err;
15771 }
15772 
15773 static int tg3_resume(struct device *device)
15774 {
15775 	struct pci_dev *pdev = to_pci_dev(device);
15776 	struct net_device *dev = pci_get_drvdata(pdev);
15777 	struct tg3 *tp = netdev_priv(dev);
15778 	int err;
15779 
15780 	if (!netif_running(dev))
15781 		return 0;
15782 
15783 	netif_device_attach(dev);
15784 
15785 	tg3_full_lock(tp, 0);
15786 
15787 	tg3_flag_set(tp, INIT_COMPLETE);
15788 	err = tg3_restart_hw(tp, 1);
15789 	if (err)
15790 		goto out;
15791 
15792 	tp->timer.expires = jiffies + tp->timer_offset;
15793 	add_timer(&tp->timer);
15794 
15795 	tg3_netif_start(tp);
15796 
15797 out:
15798 	tg3_full_unlock(tp);
15799 
15800 	if (!err)
15801 		tg3_phy_start(tp);
15802 
15803 	return err;
15804 }
15805 
15806 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15807 #define TG3_PM_OPS (&tg3_pm_ops)
15808 
15809 #else
15810 
15811 #define TG3_PM_OPS NULL
15812 
15813 #endif /* CONFIG_PM_SLEEP */
15814 
15815 /**
15816  * tg3_io_error_detected - called when PCI error is detected
15817  * @pdev: Pointer to PCI device
15818  * @state: The current pci connection state
15819  *
15820  * This function is called after a PCI bus error affecting
15821  * this device has been detected.
15822  */
15823 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15824 					      pci_channel_state_t state)
15825 {
15826 	struct net_device *netdev = pci_get_drvdata(pdev);
15827 	struct tg3 *tp = netdev_priv(netdev);
15828 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15829 
15830 	netdev_info(netdev, "PCI I/O error detected\n");
15831 
15832 	rtnl_lock();
15833 
15834 	if (!netif_running(netdev))
15835 		goto done;
15836 
15837 	tg3_phy_stop(tp);
15838 
15839 	tg3_netif_stop(tp);
15840 
15841 	del_timer_sync(&tp->timer);
15842 
15843 	/* Want to make sure that the reset task doesn't run */
15844 	tg3_reset_task_cancel(tp);
15845 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15846 
15847 	netif_device_detach(netdev);
15848 
15849 	/* Clean up software state, even if MMIO is blocked */
15850 	tg3_full_lock(tp, 0);
15851 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15852 	tg3_full_unlock(tp);
15853 
15854 done:
15855 	if (state == pci_channel_io_perm_failure)
15856 		err = PCI_ERS_RESULT_DISCONNECT;
15857 	else
15858 		pci_disable_device(pdev);
15859 
15860 	rtnl_unlock();
15861 
15862 	return err;
15863 }
15864 
15865 /**
15866  * tg3_io_slot_reset - called after the pci bus has been reset.
15867  * @pdev: Pointer to PCI device
15868  *
15869  * Restart the card from scratch, as if from a cold-boot.
15870  * At this point, the card has experienced a hard reset,
15871  * followed by fixups by BIOS, and has its config space
15872  * set up identically to what it was at cold boot.
15873  */
15874 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15875 {
15876 	struct net_device *netdev = pci_get_drvdata(pdev);
15877 	struct tg3 *tp = netdev_priv(netdev);
15878 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15879 	int err;
15880 
15881 	rtnl_lock();
15882 
15883 	if (pci_enable_device(pdev)) {
15884 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15885 		goto done;
15886 	}
15887 
15888 	pci_set_master(pdev);
15889 	pci_restore_state(pdev);
15890 	pci_save_state(pdev);
15891 
15892 	if (!netif_running(netdev)) {
15893 		rc = PCI_ERS_RESULT_RECOVERED;
15894 		goto done;
15895 	}
15896 
15897 	err = tg3_power_up(tp);
15898 	if (err)
15899 		goto done;
15900 
15901 	rc = PCI_ERS_RESULT_RECOVERED;
15902 
15903 done:
15904 	rtnl_unlock();
15905 
15906 	return rc;
15907 }
15908 
15909 /**
15910  * tg3_io_resume - called when traffic can start flowing again.
15911  * @pdev: Pointer to PCI device
15912  *
15913  * This callback is called when the error recovery driver tells
15914  * us that it's OK to resume normal operation.
15915  */
15916 static void tg3_io_resume(struct pci_dev *pdev)
15917 {
15918 	struct net_device *netdev = pci_get_drvdata(pdev);
15919 	struct tg3 *tp = netdev_priv(netdev);
15920 	int err;
15921 
15922 	rtnl_lock();
15923 
15924 	if (!netif_running(netdev))
15925 		goto done;
15926 
15927 	tg3_full_lock(tp, 0);
15928 	tg3_flag_set(tp, INIT_COMPLETE);
15929 	err = tg3_restart_hw(tp, 1);
15930 	tg3_full_unlock(tp);
15931 	if (err) {
15932 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
15933 		goto done;
15934 	}
15935 
15936 	netif_device_attach(netdev);
15937 
15938 	tp->timer.expires = jiffies + tp->timer_offset;
15939 	add_timer(&tp->timer);
15940 
15941 	tg3_netif_start(tp);
15942 
15943 	tg3_phy_start(tp);
15944 
15945 done:
15946 	rtnl_unlock();
15947 }
15948 
15949 static struct pci_error_handlers tg3_err_handler = {
15950 	.error_detected	= tg3_io_error_detected,
15951 	.slot_reset	= tg3_io_slot_reset,
15952 	.resume		= tg3_io_resume
15953 };
15954 
15955 static struct pci_driver tg3_driver = {
15956 	.name		= DRV_MODULE_NAME,
15957 	.id_table	= tg3_pci_tbl,
15958 	.probe		= tg3_init_one,
15959 	.remove		= __devexit_p(tg3_remove_one),
15960 	.err_handler	= &tg3_err_handler,
15961 	.driver.pm	= TG3_PM_OPS,
15962 };
15963 
15964 static int __init tg3_init(void)
15965 {
15966 	return pci_register_driver(&tg3_driver);
15967 }
15968 
15969 static void __exit tg3_cleanup(void)
15970 {
15971 	pci_unregister_driver(&tg3_driver);
15972 }
15973 
15974 module_init(tg3_init);
15975 module_exit(tg3_cleanup);
15976