xref: /linux/drivers/net/ethernet/broadcom/tg3.c (revision ff5599816711d2e67da2d7561fd36ac48debd433)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2013 Broadcom Corporation.
8  *
9  * Firmware is:
10  *	Derived from proprietary unpublished source code,
11  *	Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *	Permission is hereby granted for the distribution of this firmware
14  *	data in hexadecimal or equivalent format, provided this copyright
15  *	notice is accompanying it.
16  */
17 
18 
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
50 
51 #include <net/checksum.h>
52 #include <net/ip.h>
53 
54 #include <linux/io.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
57 
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
60 
61 #ifdef CONFIG_SPARC
62 #include <asm/idprom.h>
63 #include <asm/prom.h>
64 #endif
65 
66 #define BAR_0	0
67 #define BAR_2	2
68 
69 #include "tg3.h"
70 
71 /* Functions & macros to verify TG3_FLAGS types */
72 
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75 	return test_bit(flag, bits);
76 }
77 
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	set_bit(flag, bits);
81 }
82 
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	clear_bit(flag, bits);
86 }
87 
88 #define tg3_flag(tp, flag)				\
89 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag)				\
91 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag)			\
93 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
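/* Illustrative usage sketch (not taken from the driver): the wrappers
 * above map a symbolic flag name onto the generic bitops, so callers
 * test and update feature bits by name, e.g.:
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		... APE-specific setup ...
 *	tg3_flag_set(tp, MDIOBUS_INITED);
 */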
94 
95 #define DRV_MODULE_NAME		"tg3"
96 #define TG3_MAJ_NUM			3
97 #define TG3_MIN_NUM			132
98 #define DRV_MODULE_VERSION	\
99 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE	"May 21, 2013"
101 
102 #define RESET_KIND_SHUTDOWN	0
103 #define RESET_KIND_INIT		1
104 #define RESET_KIND_SUSPEND	2
105 
106 #define TG3_DEF_RX_MODE		0
107 #define TG3_DEF_TX_MODE		0
108 #define TG3_DEF_MSG_ENABLE	  \
109 	(NETIF_MSG_DRV		| \
110 	 NETIF_MSG_PROBE	| \
111 	 NETIF_MSG_LINK		| \
112 	 NETIF_MSG_TIMER	| \
113 	 NETIF_MSG_IFDOWN	| \
114 	 NETIF_MSG_IFUP		| \
115 	 NETIF_MSG_RX_ERR	| \
116 	 NETIF_MSG_TX_ERR)
117 
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
119 
120 /* Length of time before we decide the hardware is borked,
121  * and dev->tx_timeout() should be called to fix the problem.
122  */
123 
124 #define TG3_TX_TIMEOUT			(5 * HZ)
125 
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU			60
128 #define TG3_MAX_MTU(tp)	\
129 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130 
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132  * You can't change the ring sizes, but you can change where you place
133  * them in the NIC onboard memory.
134  */
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING		200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
143 
144 /* Do not place this n-ring-entries value into the tp struct itself;
145  * we really want to expose these constants to GCC so that modulo et
146  * al. operations are done with shifts and masks instead of with
147  * hw multiply/modulo instructions.  Another solution would be to
148  * replace things like '% foo' with '& (foo - 1)'.
149  */
150 
151 #define TG3_TX_RING_SIZE		512
152 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
153 
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
161 				 TG3_TX_RING_SIZE)
162 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
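/* For example, since TG3_TX_RING_SIZE is a power of two (512), the
 * mask form used by NEXT_TX() matches the modulo form exactly:
 *
 *	(511 + 1) & (512 - 1) == 0   ==  (511 + 1) % 512
 *	(200 + 1) & (512 - 1) == 201 ==  (200 + 1) % 512
 */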
163 
164 #define TG3_DMA_BYTE_ENAB		64
165 
166 #define TG3_RX_STD_DMA_SZ		1536
167 #define TG3_RX_JMB_DMA_SZ		9046
168 
169 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
170 
171 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173 
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176 
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179 
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181  * that are at least dword aligned when used in PCIX mode.  The driver
182  * works around this bug by double copying the packet.  This workaround
183  * is built into the normal double copy length check for efficiency.
184  *
185  * However, the double copy is only necessary on those architectures
186  * where unaligned memory accesses are inefficient.  For those architectures
187  * where unaligned memory accesses incur little penalty, we can reintegrate
188  * the 5701 into the normal rx path.  Doing so saves a device structure
189  * dereference by hardcoding the double copy threshold in place.
190  */
191 #define TG3_RX_COPY_THRESHOLD		256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
194 #else
195 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
196 #endif
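/* Sketch of the intended use on the rx completion path (illustrative
 * pseudocode, not the driver's actual code):
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp)) {
 *		copy the frame into a small freshly allocated skb
 *		and recycle the DMA buffer in place;
 *	} else {
 *		unmap the buffer and pass it up the stack uncopied;
 *	}
 */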
197 
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
200 #else
201 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
202 #endif
203 
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
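/* E.g. with the default of TG3_DEF_TX_RING_PENDING (511) descriptors
 * pending, the queue is woken once 511 / 4 = 127 descriptors are free.
 */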
206 #define TG3_TX_BD_DMA_MAX_2K		2048
207 #define TG3_TX_BD_DMA_MAX_4K		4096
208 
209 #define TG3_RAW_IP_ALIGN 2
210 
211 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
212 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 
214 #define FIRMWARE_TG3		"tigon/tg3.bin"
215 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
218 
219 static char version[] =
220 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
221 
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
229 
230 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
233 
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
236 
237 static const struct pci_device_id tg3_pci_tbl[] = {
238 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 			TG3_DRV_DATA_FLAG_5705_10_100},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 			TG3_DRV_DATA_FLAG_5705_10_100},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 			PCI_VENDOR_ID_LENOVO,
288 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
348 	{}
349 };
350 
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
352 
353 static const struct {
354 	const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
356 	{ "rx_octets" },
357 	{ "rx_fragments" },
358 	{ "rx_ucast_packets" },
359 	{ "rx_mcast_packets" },
360 	{ "rx_bcast_packets" },
361 	{ "rx_fcs_errors" },
362 	{ "rx_align_errors" },
363 	{ "rx_xon_pause_rcvd" },
364 	{ "rx_xoff_pause_rcvd" },
365 	{ "rx_mac_ctrl_rcvd" },
366 	{ "rx_xoff_entered" },
367 	{ "rx_frame_too_long_errors" },
368 	{ "rx_jabbers" },
369 	{ "rx_undersize_packets" },
370 	{ "rx_in_length_errors" },
371 	{ "rx_out_length_errors" },
372 	{ "rx_64_or_less_octet_packets" },
373 	{ "rx_65_to_127_octet_packets" },
374 	{ "rx_128_to_255_octet_packets" },
375 	{ "rx_256_to_511_octet_packets" },
376 	{ "rx_512_to_1023_octet_packets" },
377 	{ "rx_1024_to_1522_octet_packets" },
378 	{ "rx_1523_to_2047_octet_packets" },
379 	{ "rx_2048_to_4095_octet_packets" },
380 	{ "rx_4096_to_8191_octet_packets" },
381 	{ "rx_8192_to_9022_octet_packets" },
382 
383 	{ "tx_octets" },
384 	{ "tx_collisions" },
385 
386 	{ "tx_xon_sent" },
387 	{ "tx_xoff_sent" },
388 	{ "tx_flow_control" },
389 	{ "tx_mac_errors" },
390 	{ "tx_single_collisions" },
391 	{ "tx_mult_collisions" },
392 	{ "tx_deferred" },
393 	{ "tx_excessive_collisions" },
394 	{ "tx_late_collisions" },
395 	{ "tx_collide_2times" },
396 	{ "tx_collide_3times" },
397 	{ "tx_collide_4times" },
398 	{ "tx_collide_5times" },
399 	{ "tx_collide_6times" },
400 	{ "tx_collide_7times" },
401 	{ "tx_collide_8times" },
402 	{ "tx_collide_9times" },
403 	{ "tx_collide_10times" },
404 	{ "tx_collide_11times" },
405 	{ "tx_collide_12times" },
406 	{ "tx_collide_13times" },
407 	{ "tx_collide_14times" },
408 	{ "tx_collide_15times" },
409 	{ "tx_ucast_packets" },
410 	{ "tx_mcast_packets" },
411 	{ "tx_bcast_packets" },
412 	{ "tx_carrier_sense_errors" },
413 	{ "tx_discards" },
414 	{ "tx_errors" },
415 
416 	{ "dma_writeq_full" },
417 	{ "dma_write_prioq_full" },
418 	{ "rxbds_empty" },
419 	{ "rx_discards" },
420 	{ "rx_errors" },
421 	{ "rx_threshold_hit" },
422 
423 	{ "dma_readq_full" },
424 	{ "dma_read_prioq_full" },
425 	{ "tx_comp_queue_full" },
426 
427 	{ "ring_set_send_prod_index" },
428 	{ "ring_status_update" },
429 	{ "nic_irqs" },
430 	{ "nic_avoided_irqs" },
431 	{ "nic_tx_threshold_hit" },
432 
433 	{ "mbuf_lwm_thresh_hit" },
434 };
435 
436 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST		0
438 #define TG3_LINK_TEST		1
439 #define TG3_REGISTER_TEST	2
440 #define TG3_MEMORY_TEST		3
441 #define TG3_MAC_LOOPB_TEST	4
442 #define TG3_PHY_LOOPB_TEST	5
443 #define TG3_EXT_LOOPB_TEST	6
444 #define TG3_INTERRUPT_TEST	7
445 
446 
447 static const struct {
448 	const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
451 	[TG3_LINK_TEST]		= { "link test         (online) " },
452 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
453 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
454 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
455 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
456 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
457 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
458 };
459 
460 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
461 
462 
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
464 {
465 	writel(val, tp->regs + off);
466 }
467 
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
469 {
470 	return readl(tp->regs + off);
471 }
472 
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
474 {
475 	writel(val, tp->aperegs + off);
476 }
477 
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
479 {
480 	return readl(tp->aperegs + off);
481 }
482 
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
484 {
485 	unsigned long flags;
486 
487 	spin_lock_irqsave(&tp->indirect_lock, flags);
488 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
491 }
492 
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
494 {
495 	writel(val, tp->regs + off);
496 	readl(tp->regs + off);
497 }
498 
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
500 {
501 	unsigned long flags;
502 	u32 val;
503 
504 	spin_lock_irqsave(&tp->indirect_lock, flags);
505 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
508 	return val;
509 }
510 
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
512 {
513 	unsigned long flags;
514 
515 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 				       TG3_64BIT_REG_LOW, val);
518 		return;
519 	}
520 	if (off == TG3_RX_STD_PROD_IDX_REG) {
521 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 				       TG3_64BIT_REG_LOW, val);
523 		return;
524 	}
525 
526 	spin_lock_irqsave(&tp->indirect_lock, flags);
527 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
530 
531 	/* In indirect mode when disabling interrupts, we also need
532 	 * to clear the interrupt bit in the GRC local ctrl register.
533 	 */
534 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
535 	    (val == 0x1)) {
536 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
538 	}
539 }
540 
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
542 {
543 	unsigned long flags;
544 	u32 val;
545 
546 	spin_lock_irqsave(&tp->indirect_lock, flags);
547 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
550 	return val;
551 }
552 
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554  * where it is unsafe to read back the register without some delay.
555  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
556  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
557  */
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
559 {
560 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 		/* Non-posted methods */
562 		tp->write32(tp, off, val);
563 	else {
564 		/* Posted method */
565 		tg3_write32(tp, off, val);
566 		if (usec_wait)
567 			udelay(usec_wait);
568 		tp->read32(tp, off);
569 	}
570 	/* Wait again after the read for the posted method to guarantee that
571 	 * the wait time is met.
572 	 */
573 	if (usec_wait)
574 		udelay(usec_wait);
575 }
576 
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
578 {
579 	tp->write32_mbox(tp, off, val);
580 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 	     !tg3_flag(tp, ICH_WORKAROUND)))
583 		tp->read32_mbox(tp, off);
584 }
585 
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
587 {
588 	void __iomem *mbox = tp->regs + off;
589 	writel(val, mbox);
590 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
591 		writel(val, mbox);
592 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
594 		readl(mbox);
595 }
596 
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
598 {
599 	return readl(tp->regs + off + GRCMBOX_BASE);
600 }
601 
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
603 {
604 	writel(val, tp->regs + off + GRCMBOX_BASE);
605 }
606 
607 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
612 
613 #define tw32(reg, val)			tp->write32(tp, reg, val)
614 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg)			tp->read32(tp, reg)
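/* How the accessors above are used elsewhere in this file: a plain
 * posted write, a flushed write, and a write followed by a settling
 * delay, respectively:
 *
 *	tw32(TG3PCI_MISC_HOST_CTRL, val);
 *	tw32_f(MAC_MI_COM, frame_val);
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 */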
617 
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
619 {
620 	unsigned long flags;
621 
622 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
624 		return;
625 
626 	spin_lock_irqsave(&tp->indirect_lock, flags);
627 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
630 
631 		/* Always leave this as zero. */
632 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
633 	} else {
634 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
636 
637 		/* Always leave this as zero. */
638 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
639 	}
640 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
641 }
642 
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
644 {
645 	unsigned long flags;
646 
647 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
649 		*val = 0;
650 		return;
651 	}
652 
653 	spin_lock_irqsave(&tp->indirect_lock, flags);
654 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
657 
658 		/* Always leave this as zero. */
659 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
660 	} else {
661 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 		*val = tr32(TG3PCI_MEM_WIN_DATA);
663 
664 		/* Always leave this as zero. */
665 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
666 	}
667 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
668 }
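/* Usage sketch: this window pair is how the driver exchanges mailbox
 * data with the on-chip firmware, e.g. (from the link-report and
 * firmware-poll paths further down):
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 */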
669 
670 static void tg3_ape_lock_init(struct tg3 *tp)
671 {
672 	int i;
673 	u32 regbase, bit;
674 
675 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 		regbase = TG3_APE_LOCK_GRANT;
677 	else
678 		regbase = TG3_APE_PER_LOCK_GRANT;
679 
680 	/* Make sure the driver doesn't hold any stale locks. */
681 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
682 		switch (i) {
683 		case TG3_APE_LOCK_PHY0:
684 		case TG3_APE_LOCK_PHY1:
685 		case TG3_APE_LOCK_PHY2:
686 		case TG3_APE_LOCK_PHY3:
687 			bit = APE_LOCK_GRANT_DRIVER;
688 			break;
689 		default:
690 			if (!tp->pci_fn)
691 				bit = APE_LOCK_GRANT_DRIVER;
692 			else
693 				bit = 1 << tp->pci_fn;
694 		}
695 		tg3_ape_write32(tp, regbase + 4 * i, bit);
696 	}
697 
698 }
699 
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
701 {
702 	int i, off;
703 	int ret = 0;
704 	u32 status, req, gnt, bit;
705 
706 	if (!tg3_flag(tp, ENABLE_APE))
707 		return 0;
708 
709 	switch (locknum) {
710 	case TG3_APE_LOCK_GPIO:
711 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
712 			return 0;
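		/* Fall through for non-5761 chips. */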
713 	case TG3_APE_LOCK_GRC:
714 	case TG3_APE_LOCK_MEM:
715 		if (!tp->pci_fn)
716 			bit = APE_LOCK_REQ_DRIVER;
717 		else
718 			bit = 1 << tp->pci_fn;
719 		break;
720 	case TG3_APE_LOCK_PHY0:
721 	case TG3_APE_LOCK_PHY1:
722 	case TG3_APE_LOCK_PHY2:
723 	case TG3_APE_LOCK_PHY3:
724 		bit = APE_LOCK_REQ_DRIVER;
725 		break;
726 	default:
727 		return -EINVAL;
728 	}
729 
730 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 		req = TG3_APE_LOCK_REQ;
732 		gnt = TG3_APE_LOCK_GRANT;
733 	} else {
734 		req = TG3_APE_PER_LOCK_REQ;
735 		gnt = TG3_APE_PER_LOCK_GRANT;
736 	}
737 
738 	off = 4 * locknum;
739 
740 	tg3_ape_write32(tp, req + off, bit);
741 
742 	/* Wait for up to 1 millisecond to acquire lock. */
743 	for (i = 0; i < 100; i++) {
744 		status = tg3_ape_read32(tp, gnt + off);
745 		if (status == bit)
746 			break;
747 		if (pci_channel_offline(tp->pdev))
748 			break;
749 
750 		udelay(10);
751 	}
752 
753 	if (status != bit) {
754 		/* Revoke the lock request. */
755 		tg3_ape_write32(tp, gnt + off, bit);
756 		ret = -EBUSY;
757 	}
758 
759 	return ret;
760 }
761 
762 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 {
764 	u32 gnt, bit;
765 
766 	if (!tg3_flag(tp, ENABLE_APE))
767 		return;
768 
769 	switch (locknum) {
770 	case TG3_APE_LOCK_GPIO:
771 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
772 			return;
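		/* Fall through for non-5761 chips. */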
773 	case TG3_APE_LOCK_GRC:
774 	case TG3_APE_LOCK_MEM:
775 		if (!tp->pci_fn)
776 			bit = APE_LOCK_GRANT_DRIVER;
777 		else
778 			bit = 1 << tp->pci_fn;
779 		break;
780 	case TG3_APE_LOCK_PHY0:
781 	case TG3_APE_LOCK_PHY1:
782 	case TG3_APE_LOCK_PHY2:
783 	case TG3_APE_LOCK_PHY3:
784 		bit = APE_LOCK_GRANT_DRIVER;
785 		break;
786 	default:
787 		return;
788 	}
789 
790 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
791 		gnt = TG3_APE_LOCK_GRANT;
792 	else
793 		gnt = TG3_APE_PER_LOCK_GRANT;
794 
795 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
796 }
797 
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
799 {
800 	u32 apedata;
801 
802 	while (timeout_us) {
803 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
804 			return -EBUSY;
805 
806 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
808 			break;
809 
810 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
811 
812 		udelay(10);
813 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
814 	}
815 
816 	return timeout_us ? 0 : -EBUSY;
817 }
818 
819 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 {
821 	u32 i, apedata;
822 
823 	for (i = 0; i < timeout_us / 10; i++) {
824 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
825 
826 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
827 			break;
828 
829 		udelay(10);
830 	}
831 
832 	return i == timeout_us / 10;
833 }
834 
835 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 				   u32 len)
837 {
838 	int err;
839 	u32 i, bufoff, msgoff, maxlen, apedata;
840 
841 	if (!tg3_flag(tp, APE_HAS_NCSI))
842 		return 0;
843 
844 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
845 	if (apedata != APE_SEG_SIG_MAGIC)
846 		return -ENODEV;
847 
848 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
849 	if (!(apedata & APE_FW_STATUS_READY))
850 		return -EAGAIN;
851 
852 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
853 		 TG3_APE_SHMEM_BASE;
854 	msgoff = bufoff + 2 * sizeof(u32);
855 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
856 
857 	while (len) {
858 		u32 length;
859 
860 		/* Cap xfer sizes to scratchpad limits. */
861 		length = (len > maxlen) ? maxlen : len;
862 		len -= length;
863 
864 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 		if (!(apedata & APE_FW_STATUS_READY))
866 			return -EAGAIN;
867 
868 		/* Wait for up to 1 msec for APE to service previous event. */
869 		err = tg3_ape_event_lock(tp, 1000);
870 		if (err)
871 			return err;
872 
873 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
874 			  APE_EVENT_STATUS_SCRTCHPD_READ |
875 			  APE_EVENT_STATUS_EVENT_PENDING;
876 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
877 
878 		tg3_ape_write32(tp, bufoff, base_off);
879 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
880 
881 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
882 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 
884 		base_off += length;
885 
886 		if (tg3_ape_wait_for_event(tp, 30000))
887 			return -EAGAIN;
888 
889 		for (i = 0; length; i += 4, length -= 4) {
890 			u32 val = tg3_ape_read32(tp, msgoff + i);
891 			memcpy(data, &val, sizeof(u32));
892 			data++;
893 		}
894 	}
895 
896 	return 0;
897 }
898 
899 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
900 {
901 	int err;
902 	u32 apedata;
903 
904 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
905 	if (apedata != APE_SEG_SIG_MAGIC)
906 		return -EAGAIN;
907 
908 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
909 	if (!(apedata & APE_FW_STATUS_READY))
910 		return -EAGAIN;
911 
912 	/* Wait for up to 1 millisecond for APE to service previous event. */
913 	err = tg3_ape_event_lock(tp, 1000);
914 	if (err)
915 		return err;
916 
917 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
918 			event | APE_EVENT_STATUS_EVENT_PENDING);
919 
920 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
921 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
922 
923 	return 0;
924 }
925 
926 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
927 {
928 	u32 event;
929 	u32 apedata;
930 
931 	if (!tg3_flag(tp, ENABLE_APE))
932 		return;
933 
934 	switch (kind) {
935 	case RESET_KIND_INIT:
936 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
937 				APE_HOST_SEG_SIG_MAGIC);
938 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
939 				APE_HOST_SEG_LEN_MAGIC);
940 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
941 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
942 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
943 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
944 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
945 				APE_HOST_BEHAV_NO_PHYLOCK);
946 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
947 				    TG3_APE_HOST_DRVR_STATE_START);
948 
949 		event = APE_EVENT_STATUS_STATE_START;
950 		break;
951 	case RESET_KIND_SHUTDOWN:
952 		/* With the interface we are currently using,
953 		 * APE does not track driver state.  Wiping
954 		 * out the HOST SEGMENT SIGNATURE forces
955 		 * the APE to assume OS absent status.
956 		 */
957 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
958 
959 		if (device_may_wakeup(&tp->pdev->dev) &&
960 		    tg3_flag(tp, WOL_ENABLE)) {
961 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
962 					    TG3_APE_HOST_WOL_SPEED_AUTO);
963 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
964 		} else
965 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
966 
967 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
968 
969 		event = APE_EVENT_STATUS_STATE_UNLOAD;
970 		break;
971 	default:
972 		return;
973 	}
974 
975 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
976 
977 	tg3_ape_send_event(tp, event);
978 }
979 
980 static void tg3_disable_ints(struct tg3 *tp)
981 {
982 	int i;
983 
984 	tw32(TG3PCI_MISC_HOST_CTRL,
985 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
986 	for (i = 0; i < tp->irq_max; i++)
987 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
988 }
989 
990 static void tg3_enable_ints(struct tg3 *tp)
991 {
992 	int i;
993 
994 	tp->irq_sync = 0;
995 	wmb();
996 
997 	tw32(TG3PCI_MISC_HOST_CTRL,
998 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
999 
1000 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1001 	for (i = 0; i < tp->irq_cnt; i++) {
1002 		struct tg3_napi *tnapi = &tp->napi[i];
1003 
1004 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 		if (tg3_flag(tp, 1SHOT_MSI))
1006 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1007 
1008 		tp->coal_now |= tnapi->coal_now;
1009 	}
1010 
1011 	/* Force an initial interrupt */
1012 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1013 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1014 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1015 	else
1016 		tw32(HOSTCC_MODE, tp->coal_now);
1017 
1018 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1019 }
1020 
1021 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1022 {
1023 	struct tg3 *tp = tnapi->tp;
1024 	struct tg3_hw_status *sblk = tnapi->hw_status;
1025 	unsigned int work_exists = 0;
1026 
1027 	/* check for phy events */
1028 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1029 		if (sblk->status & SD_STATUS_LINK_CHG)
1030 			work_exists = 1;
1031 	}
1032 
1033 	/* check for TX work to do */
1034 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1035 		work_exists = 1;
1036 
1037 	/* check for RX work to do */
1038 	if (tnapi->rx_rcb_prod_idx &&
1039 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1040 		work_exists = 1;
1041 
1042 	return work_exists;
1043 }
1044 
1045 /* tg3_int_reenable
1046  *  Similar to tg3_enable_ints, but it accurately determines whether
1047  *  there is new work pending and can return without flushing the PIO
1048  *  write which re-enables interrupts.
1049  */
1050 static void tg3_int_reenable(struct tg3_napi *tnapi)
1051 {
1052 	struct tg3 *tp = tnapi->tp;
1053 
1054 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1055 	mmiowb();
1056 
1057 	/* When doing tagged status, this work check is unnecessary.
1058 	 * The last_tag we write above tells the chip which piece of
1059 	 * work we've completed.
1060 	 */
1061 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1062 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1063 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1064 }
1065 
1066 static void tg3_switch_clocks(struct tg3 *tp)
1067 {
1068 	u32 clock_ctrl;
1069 	u32 orig_clock_ctrl;
1070 
1071 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1072 		return;
1073 
1074 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1075 
1076 	orig_clock_ctrl = clock_ctrl;
1077 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1078 		       CLOCK_CTRL_CLKRUN_OENABLE |
1079 		       0x1f);
1080 	tp->pci_clock_ctrl = clock_ctrl;
1081 
1082 	if (tg3_flag(tp, 5705_PLUS)) {
1083 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1084 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1085 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1086 		}
1087 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1088 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1089 			    clock_ctrl |
1090 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1091 			    40);
1092 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1094 			    40);
1095 	}
1096 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1097 }
1098 
1099 #define PHY_BUSY_LOOPS	5000
1100 
1101 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1102 			 u32 *val)
1103 {
1104 	u32 frame_val;
1105 	unsigned int loops;
1106 	int ret;
1107 
1108 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1109 		tw32_f(MAC_MI_MODE,
1110 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 		udelay(80);
1112 	}
1113 
1114 	tg3_ape_lock(tp, tp->phy_ape_lock);
1115 
1116 	*val = 0x0;
1117 
1118 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1119 		      MI_COM_PHY_ADDR_MASK);
1120 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1121 		      MI_COM_REG_ADDR_MASK);
1122 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1123 
1124 	tw32_f(MAC_MI_COM, frame_val);
1125 
1126 	loops = PHY_BUSY_LOOPS;
1127 	while (loops != 0) {
1128 		udelay(10);
1129 		frame_val = tr32(MAC_MI_COM);
1130 
1131 		if ((frame_val & MI_COM_BUSY) == 0) {
1132 			udelay(5);
1133 			frame_val = tr32(MAC_MI_COM);
1134 			break;
1135 		}
1136 		loops -= 1;
1137 	}
1138 
1139 	ret = -EBUSY;
1140 	if (loops != 0) {
1141 		*val = frame_val & MI_COM_DATA_MASK;
1142 		ret = 0;
1143 	}
1144 
1145 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1146 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 		udelay(80);
1148 	}
1149 
1150 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1151 
1152 	return ret;
1153 }
1154 
1155 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1156 {
1157 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1158 }
1159 
1160 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1161 			  u32 val)
1162 {
1163 	u32 frame_val;
1164 	unsigned int loops;
1165 	int ret;
1166 
1167 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1168 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1169 		return 0;
1170 
1171 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1172 		tw32_f(MAC_MI_MODE,
1173 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 		udelay(80);
1175 	}
1176 
1177 	tg3_ape_lock(tp, tp->phy_ape_lock);
1178 
1179 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1180 		      MI_COM_PHY_ADDR_MASK);
1181 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1182 		      MI_COM_REG_ADDR_MASK);
1183 	frame_val |= (val & MI_COM_DATA_MASK);
1184 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1185 
1186 	tw32_f(MAC_MI_COM, frame_val);
1187 
1188 	loops = PHY_BUSY_LOOPS;
1189 	while (loops != 0) {
1190 		udelay(10);
1191 		frame_val = tr32(MAC_MI_COM);
1192 		if ((frame_val & MI_COM_BUSY) == 0) {
1193 			udelay(5);
1194 			frame_val = tr32(MAC_MI_COM);
1195 			break;
1196 		}
1197 		loops -= 1;
1198 	}
1199 
1200 	ret = -EBUSY;
1201 	if (loops != 0)
1202 		ret = 0;
1203 
1204 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1205 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 		udelay(80);
1207 	}
1208 
1209 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1210 
1211 	return ret;
1212 }
1213 
1214 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1215 {
1216 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1217 }
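/* Illustrative usage of the MII accessors above (not taken verbatim
 * from the driver):
 *
 *	u32 bmsr;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS))
 *		... link is up ...
 *	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
 */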
1218 
1219 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 {
1221 	int err;
1222 
1223 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 	if (err)
1225 		goto done;
1226 
1227 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 	if (err)
1229 		goto done;
1230 
1231 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1232 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 	if (err)
1234 		goto done;
1235 
1236 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1237 
1238 done:
1239 	return err;
1240 }
1241 
1242 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 {
1244 	int err;
1245 
1246 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 	if (err)
1248 		goto done;
1249 
1250 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 	if (err)
1252 		goto done;
1253 
1254 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 	if (err)
1257 		goto done;
1258 
1259 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1260 
1261 done:
1262 	return err;
1263 }
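/* The two helpers above implement the standard clause-22 indirect
 * (MMD) access sequence: select the MMD device in MII_TG3_MMD_CTRL,
 * latch the register address through MII_TG3_MMD_ADDRESS, switch
 * MII_TG3_MMD_CTRL to no-post-increment data mode, then move the
 * data through MII_TG3_MMD_ADDRESS.
 */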
1264 
1265 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 {
1267 	int err;
1268 
1269 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1270 	if (!err)
1271 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1272 
1273 	return err;
1274 }
1275 
1276 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 {
1278 	int err;
1279 
1280 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1281 	if (!err)
1282 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1283 
1284 	return err;
1285 }
1286 
1287 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 {
1289 	int err;
1290 
1291 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1292 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1293 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1294 	if (!err)
1295 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1296 
1297 	return err;
1298 }
1299 
1300 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1301 {
1302 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1303 		set |= MII_TG3_AUXCTL_MISC_WREN;
1304 
1305 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1306 }
1307 
1308 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1309 {
1310 	u32 val;
1311 	int err;
1312 
1313 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1314 
1315 	if (err)
1316 		return err;
1317 
1318 	if (enable)
1319 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 	else
1321 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1322 
1323 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1324 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1325 
1326 	return err;
1327 }
1328 
1329 static int tg3_bmcr_reset(struct tg3 *tp)
1330 {
1331 	u32 phy_control;
1332 	int limit, err;
1333 
1334 	/* OK, reset it, and poll the BMCR_RESET bit until it
1335 	 * clears or we time out.
1336 	 */
1337 	phy_control = BMCR_RESET;
1338 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1339 	if (err != 0)
1340 		return -EBUSY;
1341 
1342 	limit = 5000;
1343 	while (limit--) {
1344 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 		if (err != 0)
1346 			return -EBUSY;
1347 
1348 		if ((phy_control & BMCR_RESET) == 0) {
1349 			udelay(40);
1350 			break;
1351 		}
1352 		udelay(10);
1353 	}
1354 	if (limit < 0)
1355 		return -EBUSY;
1356 
1357 	return 0;
1358 }
1359 
1360 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1361 {
1362 	struct tg3 *tp = bp->priv;
1363 	u32 val;
1364 
1365 	spin_lock_bh(&tp->lock);
1366 
1367 	if (tg3_readphy(tp, reg, &val))
1368 		val = -EIO;
1369 
1370 	spin_unlock_bh(&tp->lock);
1371 
1372 	return val;
1373 }
1374 
1375 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1376 {
1377 	struct tg3 *tp = bp->priv;
1378 	u32 ret = 0;
1379 
1380 	spin_lock_bh(&tp->lock);
1381 
1382 	if (tg3_writephy(tp, reg, val))
1383 		ret = -EIO;
1384 
1385 	spin_unlock_bh(&tp->lock);
1386 
1387 	return ret;
1388 }
1389 
1390 static int tg3_mdio_reset(struct mii_bus *bp)
1391 {
1392 	return 0;
1393 }
1394 
1395 static void tg3_mdio_config_5785(struct tg3 *tp)
1396 {
1397 	u32 val;
1398 	struct phy_device *phydev;
1399 
1400 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1401 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1402 	case PHY_ID_BCM50610:
1403 	case PHY_ID_BCM50610M:
1404 		val = MAC_PHYCFG2_50610_LED_MODES;
1405 		break;
1406 	case PHY_ID_BCMAC131:
1407 		val = MAC_PHYCFG2_AC131_LED_MODES;
1408 		break;
1409 	case PHY_ID_RTL8211C:
1410 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1411 		break;
1412 	case PHY_ID_RTL8201E:
1413 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1414 		break;
1415 	default:
1416 		return;
1417 	}
1418 
1419 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1420 		tw32(MAC_PHYCFG2, val);
1421 
1422 		val = tr32(MAC_PHYCFG1);
1423 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1424 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1425 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1426 		tw32(MAC_PHYCFG1, val);
1427 
1428 		return;
1429 	}
1430 
1431 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1432 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1433 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1434 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1435 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1436 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1437 		       MAC_PHYCFG2_INBAND_ENABLE;
1438 
1439 	tw32(MAC_PHYCFG2, val);
1440 
1441 	val = tr32(MAC_PHYCFG1);
1442 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1443 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1444 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1445 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1446 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1447 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1448 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1449 	}
1450 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1451 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1452 	tw32(MAC_PHYCFG1, val);
1453 
1454 	val = tr32(MAC_EXT_RGMII_MODE);
1455 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1456 		 MAC_RGMII_MODE_RX_QUALITY |
1457 		 MAC_RGMII_MODE_RX_ACTIVITY |
1458 		 MAC_RGMII_MODE_RX_ENG_DET |
1459 		 MAC_RGMII_MODE_TX_ENABLE |
1460 		 MAC_RGMII_MODE_TX_LOWPWR |
1461 		 MAC_RGMII_MODE_TX_RESET);
1462 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 			val |= MAC_RGMII_MODE_RX_INT_B |
1465 			       MAC_RGMII_MODE_RX_QUALITY |
1466 			       MAC_RGMII_MODE_RX_ACTIVITY |
1467 			       MAC_RGMII_MODE_RX_ENG_DET;
1468 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1469 			val |= MAC_RGMII_MODE_TX_ENABLE |
1470 			       MAC_RGMII_MODE_TX_LOWPWR |
1471 			       MAC_RGMII_MODE_TX_RESET;
1472 	}
1473 	tw32(MAC_EXT_RGMII_MODE, val);
1474 }
1475 
1476 static void tg3_mdio_start(struct tg3 *tp)
1477 {
1478 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1479 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1480 	udelay(80);
1481 
1482 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1483 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1484 		tg3_mdio_config_5785(tp);
1485 }
1486 
1487 static int tg3_mdio_init(struct tg3 *tp)
1488 {
1489 	int i;
1490 	u32 reg;
1491 	struct phy_device *phydev;
1492 
1493 	if (tg3_flag(tp, 5717_PLUS)) {
1494 		u32 is_serdes;
1495 
1496 		tp->phy_addr = tp->pci_fn + 1;
1497 
1498 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1499 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1500 		else
1501 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1502 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 		if (is_serdes)
1504 			tp->phy_addr += 7;
1505 	} else
1506 		tp->phy_addr = TG3_PHY_MII_ADDR;
1507 
1508 	tg3_mdio_start(tp);
1509 
1510 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1511 		return 0;
1512 
1513 	tp->mdio_bus = mdiobus_alloc();
1514 	if (tp->mdio_bus == NULL)
1515 		return -ENOMEM;
1516 
1517 	tp->mdio_bus->name     = "tg3 mdio bus";
1518 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1519 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1520 	tp->mdio_bus->priv     = tp;
1521 	tp->mdio_bus->parent   = &tp->pdev->dev;
1522 	tp->mdio_bus->read     = &tg3_mdio_read;
1523 	tp->mdio_bus->write    = &tg3_mdio_write;
1524 	tp->mdio_bus->reset    = &tg3_mdio_reset;
1525 	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1526 	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1527 
1528 	for (i = 0; i < PHY_MAX_ADDR; i++)
1529 		tp->mdio_bus->irq[i] = PHY_POLL;
1530 
1531 	/* The bus registration will look for all the PHYs on the mdio bus.
1532 	 * Unfortunately, it does not ensure the PHY is powered up before
1533 	 * accessing the PHY ID registers.  A chip reset is the
1534 	 * quickest way to bring the device back to an operational state..
1535 	 */
1536 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1537 		tg3_bmcr_reset(tp);
1538 
1539 	i = mdiobus_register(tp->mdio_bus);
1540 	if (i) {
1541 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1542 		mdiobus_free(tp->mdio_bus);
1543 		return i;
1544 	}
1545 
1546 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1547 
1548 	if (!phydev || !phydev->drv) {
1549 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1550 		mdiobus_unregister(tp->mdio_bus);
1551 		mdiobus_free(tp->mdio_bus);
1552 		return -ENODEV;
1553 	}
1554 
1555 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1556 	case PHY_ID_BCM57780:
1557 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1558 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1559 		break;
1560 	case PHY_ID_BCM50610:
1561 	case PHY_ID_BCM50610M:
1562 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1563 				     PHY_BRCM_RX_REFCLK_UNUSED |
1564 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1565 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1566 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1567 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1568 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1569 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1570 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1571 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1572 		/* fall through */
1573 	case PHY_ID_RTL8211C:
1574 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1575 		break;
1576 	case PHY_ID_RTL8201E:
1577 	case PHY_ID_BCMAC131:
1578 		phydev->interface = PHY_INTERFACE_MODE_MII;
1579 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 		break;
1582 	}
1583 
1584 	tg3_flag_set(tp, MDIOBUS_INITED);
1585 
1586 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1587 		tg3_mdio_config_5785(tp);
1588 
1589 	return 0;
1590 }
1591 
1592 static void tg3_mdio_fini(struct tg3 *tp)
1593 {
1594 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1595 		tg3_flag_clear(tp, MDIOBUS_INITED);
1596 		mdiobus_unregister(tp->mdio_bus);
1597 		mdiobus_free(tp->mdio_bus);
1598 	}
1599 }
1600 
1601 /* tp->lock is held. */
1602 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 {
1604 	u32 val;
1605 
1606 	val = tr32(GRC_RX_CPU_EVENT);
1607 	val |= GRC_RX_CPU_DRIVER_EVENT;
1608 	tw32_f(GRC_RX_CPU_EVENT, val);
1609 
1610 	tp->last_event_jiffies = jiffies;
1611 }
1612 
1613 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1614 
1615 /* tp->lock is held. */
1616 static void tg3_wait_for_event_ack(struct tg3 *tp)
1617 {
1618 	int i;
1619 	unsigned int delay_cnt;
1620 	long time_remain;
1621 
1622 	/* If enough time has passed, no wait is necessary. */
1623 	time_remain = (long)(tp->last_event_jiffies + 1 +
1624 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1625 		      (long)jiffies;
1626 	if (time_remain < 0)
1627 		return;
1628 
1629 	/* Check if we can shorten the wait time. */
1630 	delay_cnt = jiffies_to_usecs(time_remain);
1631 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1632 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1633 	delay_cnt = (delay_cnt >> 3) + 1;
1634 
1635 	for (i = 0; i < delay_cnt; i++) {
1636 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 			break;
1638 		if (pci_channel_offline(tp->pdev))
1639 			break;
1640 
1641 		udelay(8);
1642 	}
1643 }
1644 
1645 /* tp->lock is held. */
1646 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1647 {
1648 	u32 reg, val;
1649 
1650 	val = 0;
1651 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1652 		val = reg << 16;
1653 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1654 		val |= (reg & 0xffff);
1655 	*data++ = val;
1656 
1657 	val = 0;
1658 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1659 		val = reg << 16;
1660 	if (!tg3_readphy(tp, MII_LPA, &reg))
1661 		val |= (reg & 0xffff);
1662 	*data++ = val;
1663 
1664 	val = 0;
1665 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1666 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1667 			val = reg << 16;
1668 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1669 			val |= (reg & 0xffff);
1670 	}
1671 	*data++ = val;
1672 
1673 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1674 		val = reg << 16;
1675 	else
1676 		val = 0;
1677 	*data++ = val;
1678 }
1679 
1680 /* tp->lock is held. */
1681 static void tg3_ump_link_report(struct tg3 *tp)
1682 {
1683 	u32 data[4];
1684 
1685 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1686 		return;
1687 
1688 	tg3_phy_gather_ump_data(tp, data);
1689 
1690 	tg3_wait_for_event_ack(tp);
1691 
1692 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1693 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1694 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1695 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1696 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1697 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1698 
1699 	tg3_generate_fw_event(tp);
1700 }
1701 
1702 /* tp->lock is held. */
1703 static void tg3_stop_fw(struct tg3 *tp)
1704 {
1705 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1706 		/* Wait for RX cpu to ACK the previous event. */
1707 		tg3_wait_for_event_ack(tp);
1708 
1709 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1710 
1711 		tg3_generate_fw_event(tp);
1712 
1713 		/* Wait for RX cpu to ACK this event. */
1714 		tg3_wait_for_event_ack(tp);
1715 	}
1716 }
1717 
1718 /* tp->lock is held. */
1719 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1720 {
1721 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1722 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1723 
1724 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1725 		switch (kind) {
1726 		case RESET_KIND_INIT:
1727 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1728 				      DRV_STATE_START);
1729 			break;
1730 
1731 		case RESET_KIND_SHUTDOWN:
1732 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1733 				      DRV_STATE_UNLOAD);
1734 			break;
1735 
1736 		case RESET_KIND_SUSPEND:
1737 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1738 				      DRV_STATE_SUSPEND);
1739 			break;
1740 
1741 		default:
1742 			break;
1743 		}
1744 	}
1745 }
1746 
1747 /* tp->lock is held. */
1748 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1749 {
1750 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 		switch (kind) {
1752 		case RESET_KIND_INIT:
1753 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 				      DRV_STATE_START_DONE);
1755 			break;
1756 
1757 		case RESET_KIND_SHUTDOWN:
1758 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 				      DRV_STATE_UNLOAD_DONE);
1760 			break;
1761 
1762 		default:
1763 			break;
1764 		}
1765 	}
1766 }
1767 
1768 /* tp->lock is held. */
1769 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1770 {
1771 	if (tg3_flag(tp, ENABLE_ASF)) {
1772 		switch (kind) {
1773 		case RESET_KIND_INIT:
1774 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1775 				      DRV_STATE_START);
1776 			break;
1777 
1778 		case RESET_KIND_SHUTDOWN:
1779 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780 				      DRV_STATE_UNLOAD);
1781 			break;
1782 
1783 		case RESET_KIND_SUSPEND:
1784 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785 				      DRV_STATE_SUSPEND);
1786 			break;
1787 
1788 		default:
1789 			break;
1790 		}
1791 	}
1792 }
1793 
1794 static int tg3_poll_fw(struct tg3 *tp)
1795 {
1796 	int i;
1797 	u32 val;
1798 
1799 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1800 		return 0;
1801 
1802 	if (tg3_flag(tp, IS_SSB_CORE)) {
1803 		/* We don't use firmware. */
1804 		return 0;
1805 	}
1806 
1807 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1808 		/* Wait up to 20ms for init done. */
1809 		for (i = 0; i < 200; i++) {
1810 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 				return 0;
1812 			if (pci_channel_offline(tp->pdev))
1813 				return -ENODEV;
1814 
1815 			udelay(100);
1816 		}
1817 		return -ENODEV;
1818 	}
1819 
1820 	/* Wait for firmware initialization to complete. */
1821 	for (i = 0; i < 100000; i++) {
1822 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1823 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1824 			break;
1825 		if (pci_channel_offline(tp->pdev)) {
1826 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1827 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1828 				netdev_info(tp->dev, "No firmware running\n");
1829 			}
1830 
1831 			break;
1832 		}
1833 
1834 		udelay(10);
1835 	}
1836 
1837 	/* Chip might not be fitted with firmware.  Some Sun onboard
1838 	 * parts are configured like that.  So don't signal the timeout
1839 	 * of the above loop as an error, but do report the lack of
1840 	 * running firmware once.
1841 	 */
1842 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1843 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1844 
1845 		netdev_info(tp->dev, "No firmware running\n");
1846 	}
1847 
1848 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1849 		/* The 57765 A0 needs a little more
1850 		 * time to do some important work.
1851 		 */
1852 		mdelay(10);
1853 	}
1854 
1855 	return 0;
1856 }
1857 
1858 static void tg3_link_report(struct tg3 *tp)
1859 {
1860 	if (!netif_carrier_ok(tp->dev)) {
1861 		netif_info(tp, link, tp->dev, "Link is down\n");
1862 		tg3_ump_link_report(tp);
1863 	} else if (netif_msg_link(tp)) {
1864 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1865 			    (tp->link_config.active_speed == SPEED_1000 ?
1866 			     1000 :
1867 			     (tp->link_config.active_speed == SPEED_100 ?
1868 			      100 : 10)),
1869 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1870 			     "full" : "half"));
1871 
1872 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1873 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1874 			    "on" : "off",
1875 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1876 			    "on" : "off");
1877 
1878 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1879 			netdev_info(tp->dev, "EEE is %s\n",
1880 				    tp->setlpicnt ? "enabled" : "disabled");
1881 
1882 		tg3_ump_link_report(tp);
1883 	}
1884 
1885 	tp->link_up = netif_carrier_ok(tp->dev);
1886 }
1887 
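/* Translate a 1000BASE-T MII pause advertisement into FLOW_CTRL_*
 * bits: PAUSE_CAP alone means symmetric pause (RX and TX), PAUSE_CAP
 * plus PAUSE_ASYM means receive-only, and PAUSE_ASYM alone means
 * transmit-only.
 */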
1888 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1889 {
1890 	u32 flowctrl = 0;
1891 
1892 	if (adv & ADVERTISE_PAUSE_CAP) {
1893 		flowctrl |= FLOW_CTRL_RX;
1894 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1895 			flowctrl |= FLOW_CTRL_TX;
1896 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1897 		flowctrl |= FLOW_CTRL_TX;
1898 
1899 	return flowctrl;
1900 }
1901 
1902 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1903 {
1904 	u16 miireg;
1905 
1906 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1907 		miireg = ADVERTISE_1000XPAUSE;
1908 	else if (flow_ctrl & FLOW_CTRL_TX)
1909 		miireg = ADVERTISE_1000XPSE_ASYM;
1910 	else if (flow_ctrl & FLOW_CTRL_RX)
1911 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1912 	else
1913 		miireg = 0;
1914 
1915 	return miireg;
1916 }
1917 
1918 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1919 {
1920 	u32 flowctrl = 0;
1921 
1922 	if (adv & ADVERTISE_1000XPAUSE) {
1923 		flowctrl |= FLOW_CTRL_RX;
1924 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1925 			flowctrl |= FLOW_CTRL_TX;
1926 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1927 		flowctrl |= FLOW_CTRL_TX;
1928 
1929 	return flowctrl;
1930 }
1931 
1932 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1933 {
1934 	u8 cap = 0;
1935 
1936 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1937 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1938 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1939 		if (lcladv & ADVERTISE_1000XPAUSE)
1940 			cap = FLOW_CTRL_RX;
1941 		if (rmtadv & ADVERTISE_1000XPAUSE)
1942 			cap = FLOW_CTRL_TX;
1943 	}
1944 
1945 	return cap;
1946 }
1947 
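/* Resolve the flow-control configuration, either from the negotiated
 * pause advertisements (when pause autoneg is in effect) or from the
 * forced settings, and program the RX/TX MAC flow-control enables to
 * match.
 */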
1948 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1949 {
1950 	u8 autoneg;
1951 	u8 flowctrl = 0;
1952 	u32 old_rx_mode = tp->rx_mode;
1953 	u32 old_tx_mode = tp->tx_mode;
1954 
1955 	if (tg3_flag(tp, USE_PHYLIB))
1956 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1957 	else
1958 		autoneg = tp->link_config.autoneg;
1959 
1960 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1961 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1962 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1963 		else
1964 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1965 	} else
1966 		flowctrl = tp->link_config.flowctrl;
1967 
1968 	tp->link_config.active_flowctrl = flowctrl;
1969 
1970 	if (flowctrl & FLOW_CTRL_RX)
1971 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1972 	else
1973 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1974 
1975 	if (old_rx_mode != tp->rx_mode)
1976 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1977 
1978 	if (flowctrl & FLOW_CTRL_TX)
1979 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1980 	else
1981 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1982 
1983 	if (old_tx_mode != tp->tx_mode)
1984 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1985 }
1986 
1987 static void tg3_adjust_link(struct net_device *dev)
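/* phylib link-change callback, registered by tg3_phy_init().  It
 * reprograms the MAC port mode, duplex and flow control to match the
 * new PHY state and emits a link report when anything changed.
 */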
1988 {
1989 	u8 oldflowctrl, linkmesg = 0;
1990 	u32 mac_mode, lcl_adv, rmt_adv;
1991 	struct tg3 *tp = netdev_priv(dev);
1992 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1993 
1994 	spin_lock_bh(&tp->lock);
1995 
1996 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1997 				    MAC_MODE_HALF_DUPLEX);
1998 
1999 	oldflowctrl = tp->link_config.active_flowctrl;
2000 
2001 	if (phydev->link) {
2002 		lcl_adv = 0;
2003 		rmt_adv = 0;
2004 
2005 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2006 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2007 		else if (phydev->speed == SPEED_1000 ||
2008 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2009 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2010 		else
2011 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2012 
2013 		if (phydev->duplex == DUPLEX_HALF)
2014 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2015 		else {
2016 			lcl_adv = mii_advertise_flowctrl(
2017 				  tp->link_config.flowctrl);
2018 
2019 			if (phydev->pause)
2020 				rmt_adv = LPA_PAUSE_CAP;
2021 			if (phydev->asym_pause)
2022 				rmt_adv |= LPA_PAUSE_ASYM;
2023 		}
2024 
2025 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2026 	} else
2027 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 
2029 	if (mac_mode != tp->mac_mode) {
2030 		tp->mac_mode = mac_mode;
2031 		tw32_f(MAC_MODE, tp->mac_mode);
2032 		udelay(40);
2033 	}
2034 
2035 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2036 		if (phydev->speed == SPEED_10)
2037 			tw32(MAC_MI_STAT,
2038 			     MAC_MI_STAT_10MBPS_MODE |
2039 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2040 		else
2041 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2042 	}
2043 
2044 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2045 		tw32(MAC_TX_LENGTHS,
2046 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2047 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2048 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2049 	else
2050 		tw32(MAC_TX_LENGTHS,
2051 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2052 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2053 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2054 
2055 	if (phydev->link != tp->old_link ||
2056 	    phydev->speed != tp->link_config.active_speed ||
2057 	    phydev->duplex != tp->link_config.active_duplex ||
2058 	    oldflowctrl != tp->link_config.active_flowctrl)
2059 		linkmesg = 1;
2060 
2061 	tp->old_link = phydev->link;
2062 	tp->link_config.active_speed = phydev->speed;
2063 	tp->link_config.active_duplex = phydev->duplex;
2064 
2065 	spin_unlock_bh(&tp->lock);
2066 
2067 	if (linkmesg)
2068 		tg3_link_report(tp);
2069 }
2070 
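/* Connect the MAC to its PHY through phylib.  The PHY is first reset
 * to a known state, then attached with tg3_adjust_link() as the
 * link-change handler, and its supported feature mask is trimmed to
 * what the MAC can actually do.
 */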
2071 static int tg3_phy_init(struct tg3 *tp)
2072 {
2073 	struct phy_device *phydev;
2074 
2075 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2076 		return 0;
2077 
2078 	/* Bring the PHY back to a known state. */
2079 	tg3_bmcr_reset(tp);
2080 
2081 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2082 
2083 	/* Attach the MAC to the PHY. */
2084 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2085 			     tg3_adjust_link, phydev->interface);
2086 	if (IS_ERR(phydev)) {
2087 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2088 		return PTR_ERR(phydev);
2089 	}
2090 
2091 	/* Mask with MAC supported features. */
2092 	switch (phydev->interface) {
2093 	case PHY_INTERFACE_MODE_GMII:
2094 	case PHY_INTERFACE_MODE_RGMII:
2095 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2096 			phydev->supported &= (PHY_GBIT_FEATURES |
2097 					      SUPPORTED_Pause |
2098 					      SUPPORTED_Asym_Pause);
2099 			break;
2100 		}
2101 		/* fallthru */
2102 	case PHY_INTERFACE_MODE_MII:
2103 		phydev->supported &= (PHY_BASIC_FEATURES |
2104 				      SUPPORTED_Pause |
2105 				      SUPPORTED_Asym_Pause);
2106 		break;
2107 	default:
2108 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2109 		return -EINVAL;
2110 	}
2111 
2112 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2113 
2114 	phydev->advertising = phydev->supported;
2115 
2116 	return 0;
2117 }
2118 
2119 static void tg3_phy_start(struct tg3 *tp)
2120 {
2121 	struct phy_device *phydev;
2122 
2123 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2124 		return;
2125 
2126 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2127 
2128 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2129 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2130 		phydev->speed = tp->link_config.speed;
2131 		phydev->duplex = tp->link_config.duplex;
2132 		phydev->autoneg = tp->link_config.autoneg;
2133 		phydev->advertising = tp->link_config.advertising;
2134 	}
2135 
2136 	phy_start(phydev);
2137 
2138 	phy_start_aneg(phydev);
2139 }
2140 
2141 static void tg3_phy_stop(struct tg3 *tp)
2142 {
2143 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2144 		return;
2145 
2146 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2147 }
2148 
2149 static void tg3_phy_fini(struct tg3 *tp)
2150 {
2151 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2152 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2153 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2154 	}
2155 }
2156 
2157 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2158 {
2159 	int err;
2160 	u32 val;
2161 
2162 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2163 		return 0;
2164 
2165 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2166 		/* Cannot do read-modify-write on 5401 */
2167 		err = tg3_phy_auxctl_write(tp,
2168 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2169 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2170 					   0x4c20);
2171 		goto done;
2172 	}
2173 
2174 	err = tg3_phy_auxctl_read(tp,
2175 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2176 	if (err)
2177 		return err;
2178 
2179 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2180 	err = tg3_phy_auxctl_write(tp,
2181 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2182 
2183 done:
2184 	return err;
2185 }
2186 
2187 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2188 {
2189 	u32 phytest;
2190 
2191 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2192 		u32 phy;
2193 
2194 		tg3_writephy(tp, MII_TG3_FET_TEST,
2195 			     phytest | MII_TG3_FET_SHADOW_EN);
2196 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2197 			if (enable)
2198 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2199 			else
2200 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2201 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2202 		}
2203 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2204 	}
2205 }
2206 
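/* Enable or disable PHY auto power-down (APD) via the misc shadow
 * registers.  FET-style PHYs keep the equivalent control in a
 * different shadow block and are handled by the helper above.
 */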
2207 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2208 {
2209 	u32 reg;
2210 
2211 	if (!tg3_flag(tp, 5705_PLUS) ||
2212 	    (tg3_flag(tp, 5717_PLUS) &&
2213 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2214 		return;
2215 
2216 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2217 		tg3_phy_fet_toggle_apd(tp, enable);
2218 		return;
2219 	}
2220 
2221 	reg = MII_TG3_MISC_SHDW_WREN |
2222 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2223 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2224 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2225 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2226 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2227 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2228 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2229 
2230 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2231 
2233 	reg = MII_TG3_MISC_SHDW_WREN |
2234 	      MII_TG3_MISC_SHDW_APD_SEL |
2235 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2236 	if (enable)
2237 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2238 
2239 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2240 }
2241 
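/* Enable or disable automatic MDI/MDI-X crossover.  Serdes links
 * have no copper MDI pairs, so they are skipped entirely.
 */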
2242 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2243 {
2244 	u32 phy;
2245 
2246 	if (!tg3_flag(tp, 5705_PLUS) ||
2247 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2248 		return;
2249 
2250 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2251 		u32 ephy;
2252 
2253 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2254 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2255 
2256 			tg3_writephy(tp, MII_TG3_FET_TEST,
2257 				     ephy | MII_TG3_FET_SHADOW_EN);
2258 			if (!tg3_readphy(tp, reg, &phy)) {
2259 				if (enable)
2260 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2261 				else
2262 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2263 				tg3_writephy(tp, reg, phy);
2264 			}
2265 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2266 		}
2267 	} else {
2268 		int ret;
2269 
2270 		ret = tg3_phy_auxctl_read(tp,
2271 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2272 		if (!ret) {
2273 			if (enable)
2274 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2275 			else
2276 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2277 			tg3_phy_auxctl_write(tp,
2278 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2279 		}
2280 	}
2281 }
2282 
2283 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2284 {
2285 	int ret;
2286 	u32 val;
2287 
2288 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2289 		return;
2290 
2291 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2292 	if (!ret)
2293 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2294 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2295 }
2296 
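/* Unpack the OTP-derived calibration word saved in tp->phy_otp and
 * write each field to its corresponding PHY DSP tap.
 */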
2297 static void tg3_phy_apply_otp(struct tg3 *tp)
2298 {
2299 	u32 otp, phy;
2300 
2301 	if (!tp->phy_otp)
2302 		return;
2303 
2304 	otp = tp->phy_otp;
2305 
2306 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2307 		return;
2308 
2309 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2310 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2311 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2312 
2313 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2314 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2315 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2316 
2317 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2318 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2319 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2320 
2321 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2322 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2323 
2324 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2326 
2327 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2328 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2330 
2331 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2332 }
2333 
2334 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2335 {
2336 	u32 val;
2337 	struct ethtool_eee *dest = &tp->eee;
2338 
2339 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2340 		return;
2341 
2342 	if (eee)
2343 		dest = eee;
2344 
2345 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2346 		return;
2347 
2348 	/* Pull eee_active */
2349 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2350 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2351 		dest->eee_active = 1;
2352 	} else
2353 		dest->eee_active = 0;
2354 
2355 	/* Pull lp advertised settings */
2356 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2357 		return;
2358 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2359 
2360 	/* Pull advertised and eee_enabled settings */
2361 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2362 		return;
2363 	dest->eee_enabled = !!val;
2364 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365 
2366 	/* Pull tx_lpi_enabled */
2367 	val = tr32(TG3_CPMU_EEE_MODE);
2368 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2369 
2370 	/* Pull lpi timer value */
2371 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2372 }
2373 
2374 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2375 {
2376 	u32 val;
2377 
2378 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2379 		return;
2380 
2381 	tp->setlpicnt = 0;
2382 
2383 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2384 	    current_link_up &&
2385 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2386 	    (tp->link_config.active_speed == SPEED_100 ||
2387 	     tp->link_config.active_speed == SPEED_1000)) {
2388 		u32 eeectl;
2389 
2390 		if (tp->link_config.active_speed == SPEED_1000)
2391 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2392 		else
2393 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2394 
2395 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2396 
2397 		tg3_eee_pull_config(tp, NULL);
2398 		if (tp->eee.eee_active)
2399 			tp->setlpicnt = 2;
2400 	}
2401 
2402 	if (!tp->setlpicnt) {
2403 		if (current_link_up &&
2404 		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2406 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2407 		}
2408 
2409 		val = tr32(TG3_CPMU_EEE_MODE);
2410 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2411 	}
2412 }
2413 
2414 static void tg3_phy_eee_enable(struct tg3 *tp)
2415 {
2416 	u32 val;
2417 
2418 	if (tp->link_config.active_speed == SPEED_1000 &&
2419 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2420 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2421 	     tg3_flag(tp, 57765_CLASS)) &&
2422 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2423 		val = MII_TG3_DSP_TAP26_ALNOKO |
2424 		      MII_TG3_DSP_TAP26_RMRXSTO;
2425 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2426 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2427 	}
2428 
2429 	val = tr32(TG3_CPMU_EEE_MODE);
2430 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2431 }
2432 
2433 static int tg3_wait_macro_done(struct tg3 *tp)
2434 {
2435 	int limit = 100;
2436 
2437 	while (limit--) {
2438 		u32 tmp32;
2439 
2440 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2441 			if ((tmp32 & 0x1000) == 0)
2442 				break;
2443 		}
2444 	}
2445 	if (limit < 0)
2446 		return -EBUSY;
2447 
2448 	return 0;
2449 }
2450 
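/* Write a known test pattern into each of the four DSP channels,
 * then read it back and compare.  Any mismatch or macro timeout asks
 * the caller (via *resetp) to reset the PHY and try again.
 */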
2451 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2452 {
2453 	static const u32 test_pat[4][6] = {
2454 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2455 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2456 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2457 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2458 	};
2459 	int chan;
2460 
2461 	for (chan = 0; chan < 4; chan++) {
2462 		int i;
2463 
2464 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2465 			     (chan * 0x2000) | 0x0200);
2466 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2467 
2468 		for (i = 0; i < 6; i++)
2469 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2470 				     test_pat[chan][i]);
2471 
2472 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2473 		if (tg3_wait_macro_done(tp)) {
2474 			*resetp = 1;
2475 			return -EBUSY;
2476 		}
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 			     (chan * 0x2000) | 0x0200);
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2481 		if (tg3_wait_macro_done(tp)) {
2482 			*resetp = 1;
2483 			return -EBUSY;
2484 		}
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		for (i = 0; i < 6; i += 2) {
2493 			u32 low, high;
2494 
2495 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2496 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2497 			    tg3_wait_macro_done(tp)) {
2498 				*resetp = 1;
2499 				return -EBUSY;
2500 			}
2501 			low &= 0x7fff;
2502 			high &= 0x000f;
2503 			if (low != test_pat[chan][i] ||
2504 			    high != test_pat[chan][i+1]) {
2505 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2506 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2507 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2508 
2509 				return -EBUSY;
2510 			}
2511 		}
2512 	}
2513 
2514 	return 0;
2515 }
2516 
2517 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2518 {
2519 	int chan;
2520 
2521 	for (chan = 0; chan < 4; chan++) {
2522 		int i;
2523 
2524 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2525 			     (chan * 0x2000) | 0x0200);
2526 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2527 		for (i = 0; i < 6; i++)
2528 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2529 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2530 		if (tg3_wait_macro_done(tp))
2531 			return -EBUSY;
2532 	}
2533 
2534 	return 0;
2535 }
2536 
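/* Extended PHY reset sequence for the 5703/5704/5705: reset the PHY,
 * force 1000/full master mode, and run the DSP test-pattern check
 * above, retrying with a fresh reset until the channels verify.
 */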
2537 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2538 {
2539 	u32 reg32, phy9_orig;
2540 	int retries, do_phy_reset, err;
2541 
2542 	retries = 10;
2543 	do_phy_reset = 1;
2544 	do {
2545 		if (do_phy_reset) {
2546 			err = tg3_bmcr_reset(tp);
2547 			if (err)
2548 				return err;
2549 			do_phy_reset = 0;
2550 		}
2551 
2552 		/* Disable transmitter and interrupt.  */
2553 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2554 			continue;
2555 
2556 		reg32 |= 0x3000;
2557 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2558 
2559 		/* Set full-duplex, 1000 Mbps.  */
2560 		tg3_writephy(tp, MII_BMCR,
2561 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2562 
2563 		/* Set to master mode.  */
2564 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2565 			continue;
2566 
2567 		tg3_writephy(tp, MII_CTRL1000,
2568 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2569 
2570 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2571 		if (err)
2572 			return err;
2573 
2574 		/* Block the PHY control access.  */
2575 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2576 
2577 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2578 		if (!err)
2579 			break;
2580 	} while (--retries);
2581 
2582 	err = tg3_phy_reset_chanpat(tp);
2583 	if (err)
2584 		return err;
2585 
2586 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2587 
2588 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2589 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2590 
2591 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2592 
2593 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2594 
2595 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2596 		reg32 &= ~0x3000;
2597 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2598 	} else if (!err)
2599 		err = -EBUSY;
2600 
2601 	return err;
2602 }
2603 
2604 static void tg3_carrier_off(struct tg3 *tp)
2605 {
2606 	netif_carrier_off(tp->dev);
2607 	tp->link_up = false;
2608 }
2609 
2610 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2611 {
2612 	if (tg3_flag(tp, ENABLE_ASF))
2613 		netdev_warn(tp->dev,
2614 			    "Management side-band traffic will be interrupted during phy settings change\n");
2615 }
2616 
2617 /* Reset the tigon3 PHY unconditionally, then reapply the
2618  * chip-specific PHY workarounds.
2619  */
2620 static int tg3_phy_reset(struct tg3 *tp)
2621 {
2622 	u32 val, cpmuctrl;
2623 	int err;
2624 
2625 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2626 		val = tr32(GRC_MISC_CFG);
2627 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2628 		udelay(40);
2629 	}
2630 	err  = tg3_readphy(tp, MII_BMSR, &val);
2631 	err |= tg3_readphy(tp, MII_BMSR, &val);
2632 	if (err != 0)
2633 		return -EBUSY;
2634 
2635 	if (netif_running(tp->dev) && tp->link_up) {
2636 		netif_carrier_off(tp->dev);
2637 		tg3_link_report(tp);
2638 	}
2639 
2640 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2641 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2642 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2643 		err = tg3_phy_reset_5703_4_5(tp);
2644 		if (err)
2645 			return err;
2646 		goto out;
2647 	}
2648 
2649 	cpmuctrl = 0;
2650 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2651 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2652 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2653 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2654 			tw32(TG3_CPMU_CTRL,
2655 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2656 	}
2657 
2658 	err = tg3_bmcr_reset(tp);
2659 	if (err)
2660 		return err;
2661 
2662 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2663 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2664 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2665 
2666 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2667 	}
2668 
2669 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2670 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2671 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2672 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2673 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2674 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2675 			udelay(40);
2676 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2677 		}
2678 	}
2679 
2680 	if (tg3_flag(tp, 5717_PLUS) &&
2681 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2682 		return 0;
2683 
2684 	tg3_phy_apply_otp(tp);
2685 
2686 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2687 		tg3_phy_toggle_apd(tp, true);
2688 	else
2689 		tg3_phy_toggle_apd(tp, false);
2690 
2691 out:
2692 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2693 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2694 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2695 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2696 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2697 	}
2698 
2699 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2700 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2701 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2702 	}
2703 
2704 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2705 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2707 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2708 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2709 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2710 		}
2711 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2712 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2714 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2715 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2716 				tg3_writephy(tp, MII_TG3_TEST1,
2717 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2718 			} else
2719 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2720 
2721 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 		}
2723 	}
2724 
2725 	/* Set the extended packet length bit (bit 14) on all chips
2726 	 * that support jumbo frames. */
2727 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2728 		/* Cannot do read-modify-write on 5401 */
2729 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2730 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2731 		/* Set bit 14 with read-modify-write to preserve other bits */
2732 		err = tg3_phy_auxctl_read(tp,
2733 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2734 		if (!err)
2735 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2736 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2737 	}
2738 
2739 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2740 	 * jumbo frame transmission.
2741 	 */
2742 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2744 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2745 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2746 	}
2747 
2748 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2749 		/* adjust output voltage */
2750 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2751 	}
2752 
2753 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2754 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2755 
2756 	tg3_phy_toggle_automdix(tp, true);
2757 	tg3_phy_set_wirespeed(tp);
2758 	return 0;
2759 }
2760 
2761 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2762 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2763 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2764 					  TG3_GPIO_MSG_NEED_VAUX)
2765 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2766 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2767 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2768 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2769 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2770 
2771 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2772 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2773 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2774 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2775 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2776 
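/* Update this PCI function's 4-bit slot in the shared GPIO status
 * word and return the combined status of all functions.  The 5717
 * and 5719 keep the word in APE scratchpad space; other chips use
 * the CPMU driver-status register.
 */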
2777 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2778 {
2779 	u32 status, shift;
2780 
2781 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2782 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2783 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2784 	else
2785 		status = tr32(TG3_CPMU_DRV_STATUS);
2786 
2787 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2788 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2789 	status |= (newstat << shift);
2790 
2791 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2792 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2793 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2794 	else
2795 		tw32(TG3_CPMU_DRV_STATUS, status);
2796 
2797 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2798 }
2799 
2800 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2801 {
2802 	if (!tg3_flag(tp, IS_NIC))
2803 		return 0;
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2807 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2808 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2809 			return -EIO;
2810 
2811 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2812 
2813 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2814 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2815 
2816 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2817 	} else {
2818 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2819 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 	}
2821 
2822 	return 0;
2823 }
2824 
2825 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2826 {
2827 	u32 grc_local_ctrl;
2828 
2829 	if (!tg3_flag(tp, IS_NIC) ||
2830 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2831 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2832 		return;
2833 
2834 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2835 
2836 	tw32_wait_f(GRC_LOCAL_CTRL,
2837 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2838 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 
2840 	tw32_wait_f(GRC_LOCAL_CTRL,
2841 		    grc_local_ctrl,
2842 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 
2844 	tw32_wait_f(GRC_LOCAL_CTRL,
2845 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 }
2848 
2849 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2850 {
2851 	if (!tg3_flag(tp, IS_NIC))
2852 		return;
2853 
2854 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2855 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2856 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2857 			    (GRC_LCLCTRL_GPIO_OE0 |
2858 			     GRC_LCLCTRL_GPIO_OE1 |
2859 			     GRC_LCLCTRL_GPIO_OE2 |
2860 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2861 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2862 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2864 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2865 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2866 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2867 				     GRC_LCLCTRL_GPIO_OE1 |
2868 				     GRC_LCLCTRL_GPIO_OE2 |
2869 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2871 				     tp->grc_local_ctrl;
2872 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2873 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2874 
2875 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2876 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2877 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2878 
2879 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2880 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 	} else {
2883 		u32 no_gpio2;
2884 		u32 grc_local_ctrl = 0;
2885 
2886 		/* Workaround to prevent the device from drawing excess current. */
2887 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2888 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2889 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2890 				    grc_local_ctrl,
2891 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 		}
2893 
2894 		/* On 5753 and variants, GPIO2 cannot be used. */
2895 		no_gpio2 = tp->nic_sram_data_cfg &
2896 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2897 
2898 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2899 				  GRC_LCLCTRL_GPIO_OE1 |
2900 				  GRC_LCLCTRL_GPIO_OE2 |
2901 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2902 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2903 		if (no_gpio2) {
2904 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2905 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2906 		}
2907 		tw32_wait_f(GRC_LOCAL_CTRL,
2908 			    tp->grc_local_ctrl | grc_local_ctrl,
2909 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2910 
2911 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2912 
2913 		tw32_wait_f(GRC_LOCAL_CTRL,
2914 			    tp->grc_local_ctrl | grc_local_ctrl,
2915 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2916 
2917 		if (!no_gpio2) {
2918 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2919 			tw32_wait_f(GRC_LOCAL_CTRL,
2920 				    tp->grc_local_ctrl | grc_local_ctrl,
2921 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2922 		}
2923 	}
2924 }
2925 
2926 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2927 {
2928 	u32 msg = 0;
2929 
2930 	/* Serialize power state transitions */
2931 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2932 		return;
2933 
2934 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2935 		msg = TG3_GPIO_MSG_NEED_VAUX;
2936 
2937 	msg = tg3_set_function_status(tp, msg);
2938 
2939 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2940 		goto done;
2941 
2942 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2943 		tg3_pwrsrc_switch_to_vaux(tp);
2944 	else
2945 		tg3_pwrsrc_die_with_vmain(tp);
2946 
2947 done:
2948 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2949 }
2950 
2951 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2952 {
2953 	bool need_vaux = false;
2954 
2955 	/* The GPIOs do something completely different on 57765. */
2956 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2957 		return;
2958 
2959 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2960 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2961 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2962 		tg3_frob_aux_power_5717(tp, include_wol ?
2963 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2964 		return;
2965 	}
2966 
2967 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2968 		struct net_device *dev_peer;
2969 
2970 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2971 
2972 		/* remove_one() may have been run on the peer. */
2973 		if (dev_peer) {
2974 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2975 
2976 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2977 				return;
2978 
2979 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2980 			    tg3_flag(tp_peer, ENABLE_ASF))
2981 				need_vaux = true;
2982 		}
2983 	}
2984 
2985 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2986 	    tg3_flag(tp, ENABLE_ASF))
2987 		need_vaux = true;
2988 
2989 	if (need_vaux)
2990 		tg3_pwrsrc_switch_to_vaux(tp);
2991 	else
2992 		tg3_pwrsrc_die_with_vmain(tp);
2993 }
2994 
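/* Return nonzero when the link-polarity override should be applied
 * on 5700-class devices; the answer depends on the LED mode and, for
 * the BCM5411 PHY, on the link speed.
 */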
2995 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2996 {
2997 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2998 		return 1;
2999 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3000 		if (speed != SPEED_10)
3001 			return 1;
3002 	} else if (speed == SPEED_10)
3003 		return 1;
3004 
3005 	return 0;
3006 }
3007 
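/* Some chips must never have their PHY powered down because of
 * hardware bugs; return true for those combinations of ASIC
 * revision, PHY type and PCI function.
 */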
3008 static bool tg3_phy_power_bug(struct tg3 *tp)
3009 {
3010 	switch (tg3_asic_rev(tp)) {
3011 	case ASIC_REV_5700:
3012 	case ASIC_REV_5704:
3013 		return true;
3014 	case ASIC_REV_5780:
3015 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3016 			return true;
3017 		return false;
3018 	case ASIC_REV_5717:
3019 		if (!tp->pci_fn)
3020 			return true;
3021 		return false;
3022 	case ASIC_REV_5719:
3023 	case ASIC_REV_5720:
3024 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3025 		    !tp->pci_fn)
3026 			return true;
3027 		return false;
3028 	}
3029 
3030 	return false;
3031 }
3032 
3033 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3034 {
3035 	u32 val;
3036 
3037 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3038 		return;
3039 
3040 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3041 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3042 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3043 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3044 
3045 			sg_dig_ctrl |=
3046 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3047 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3048 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3049 		}
3050 		return;
3051 	}
3052 
3053 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3054 		tg3_bmcr_reset(tp);
3055 		val = tr32(GRC_MISC_CFG);
3056 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3057 		udelay(40);
3058 		return;
3059 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3060 		u32 phytest;
3061 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3062 			u32 phy;
3063 
3064 			tg3_writephy(tp, MII_ADVERTISE, 0);
3065 			tg3_writephy(tp, MII_BMCR,
3066 				     BMCR_ANENABLE | BMCR_ANRESTART);
3067 
3068 			tg3_writephy(tp, MII_TG3_FET_TEST,
3069 				     phytest | MII_TG3_FET_SHADOW_EN);
3070 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3071 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3072 				tg3_writephy(tp,
3073 					     MII_TG3_FET_SHDW_AUXMODE4,
3074 					     phy);
3075 			}
3076 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3077 		}
3078 		return;
3079 	} else if (do_low_power) {
3080 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
3081 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3082 
3083 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3084 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3085 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3086 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3087 	}
3088 
3089 	/* The PHY should not be powered down on some chips because
3090 	 * of bugs.
3091 	 */
3092 	if (tg3_phy_power_bug(tp))
3093 		return;
3094 
3095 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3096 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3097 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3098 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3099 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3100 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3101 	}
3102 
3103 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3104 }
3105 
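/* Acquire the hardware NVRAM arbitration semaphore (SWARB).  The
 * lock nests: a depth count ensures paired unlock calls release the
 * semaphore only when the count returns to zero.
 */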
3106 /* tp->lock is held. */
3107 static int tg3_nvram_lock(struct tg3 *tp)
3108 {
3109 	if (tg3_flag(tp, NVRAM)) {
3110 		int i;
3111 
3112 		if (tp->nvram_lock_cnt == 0) {
3113 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3114 			for (i = 0; i < 8000; i++) {
3115 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3116 					break;
3117 				udelay(20);
3118 			}
3119 			if (i == 8000) {
3120 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3121 				return -ENODEV;
3122 			}
3123 		}
3124 		tp->nvram_lock_cnt++;
3125 	}
3126 	return 0;
3127 }
3128 
3129 /* tp->lock is held. */
3130 static void tg3_nvram_unlock(struct tg3 *tp)
3131 {
3132 	if (tg3_flag(tp, NVRAM)) {
3133 		if (tp->nvram_lock_cnt > 0)
3134 			tp->nvram_lock_cnt--;
3135 		if (tp->nvram_lock_cnt == 0)
3136 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3137 	}
3138 }
3139 
3140 /* tp->lock is held. */
3141 static void tg3_enable_nvram_access(struct tg3 *tp)
3142 {
3143 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3144 		u32 nvaccess = tr32(NVRAM_ACCESS);
3145 
3146 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3147 	}
3148 }
3149 
3150 /* tp->lock is held. */
3151 static void tg3_disable_nvram_access(struct tg3 *tp)
3152 {
3153 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3154 		u32 nvaccess = tr32(NVRAM_ACCESS);
3155 
3156 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3157 	}
3158 }
3159 
3160 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3161 					u32 offset, u32 *val)
3162 {
3163 	u32 tmp;
3164 	int i;
3165 
3166 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3167 		return -EINVAL;
3168 
3169 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3170 					EEPROM_ADDR_DEVID_MASK |
3171 					EEPROM_ADDR_READ);
3172 	tw32(GRC_EEPROM_ADDR,
3173 	     tmp |
3174 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3175 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3176 	      EEPROM_ADDR_ADDR_MASK) |
3177 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3178 
3179 	for (i = 0; i < 1000; i++) {
3180 		tmp = tr32(GRC_EEPROM_ADDR);
3181 
3182 		if (tmp & EEPROM_ADDR_COMPLETE)
3183 			break;
3184 		msleep(1);
3185 	}
3186 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3187 		return -EBUSY;
3188 
3189 	tmp = tr32(GRC_EEPROM_DATA);
3190 
3191 	/*
3192 	 * The data will always be opposite the native endian
3193 	 * format.  Perform a blind byteswap to compensate.
3194 	 */
3195 	*val = swab32(tmp);
3196 
3197 	return 0;
3198 }
3199 
3200 #define NVRAM_CMD_TIMEOUT 10000
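/* tg3_nvram_exec_cmd() polls every 10 us, so this bounds a single
 * NVRAM command at roughly 10000 * 10 us = 100 ms before it is
 * failed with -EBUSY.
 */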
3201 
3202 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3203 {
3204 	int i;
3205 
3206 	tw32(NVRAM_CMD, nvram_cmd);
3207 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3208 		udelay(10);
3209 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3210 			udelay(10);
3211 			break;
3212 		}
3213 	}
3214 
3215 	if (i == NVRAM_CMD_TIMEOUT)
3216 		return -EBUSY;
3217 
3218 	return 0;
3219 }
3220 
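/* Convert a linear NVRAM offset into the page-based physical address
 * used by Atmel AT45DB0x1B-style flashes, whose page size is not a
 * power of two.  Illustrative example, assuming the 264-byte page
 * and 9-bit page position used by these parts: linear offset 0x212
 * (page 2, byte 2) maps to (2 << 9) + 2 = 0x402.
 */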
3221 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3222 {
3223 	if (tg3_flag(tp, NVRAM) &&
3224 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3225 	    tg3_flag(tp, FLASH) &&
3226 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3227 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3228 
3229 		addr = ((addr / tp->nvram_pagesize) <<
3230 			ATMEL_AT45DB0X1B_PAGE_POS) +
3231 		       (addr % tp->nvram_pagesize);
3232 
3233 	return addr;
3234 }
3235 
3236 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3237 {
3238 	if (tg3_flag(tp, NVRAM) &&
3239 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3240 	    tg3_flag(tp, FLASH) &&
3241 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3242 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3243 
3244 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3245 			tp->nvram_pagesize) +
3246 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3247 
3248 	return addr;
3249 }
3250 
3251 /* NOTE: Data read in from NVRAM is byteswapped according to
3252  * the byteswapping settings for all other register accesses.
3253  * tg3 devices are BE devices, so on a BE machine, the data
3254  * returned will be exactly as it is seen in NVRAM.  On a LE
3255  * machine, the 32-bit value will be byteswapped.
3256  */
3257 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3258 {
3259 	int ret;
3260 
3261 	if (!tg3_flag(tp, NVRAM))
3262 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3263 
3264 	offset = tg3_nvram_phys_addr(tp, offset);
3265 
3266 	if (offset > NVRAM_ADDR_MSK)
3267 		return -EINVAL;
3268 
3269 	ret = tg3_nvram_lock(tp);
3270 	if (ret)
3271 		return ret;
3272 
3273 	tg3_enable_nvram_access(tp);
3274 
3275 	tw32(NVRAM_ADDR, offset);
3276 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3277 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3278 
3279 	if (ret == 0)
3280 		*val = tr32(NVRAM_RDDATA);
3281 
3282 	tg3_disable_nvram_access(tp);
3283 
3284 	tg3_nvram_unlock(tp);
3285 
3286 	return ret;
3287 }
3288 
3289 /* Ensures NVRAM data is in bytestream format. */
3290 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3291 {
3292 	u32 v;
3293 	int res = tg3_nvram_read(tp, offset, &v);
3294 	if (!res)
3295 		*val = cpu_to_be32(v);
3296 	return res;
3297 }
3298 
3299 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3300 				    u32 offset, u32 len, u8 *buf)
3301 {
3302 	int i, j, rc = 0;
3303 	u32 val;
3304 
3305 	for (i = 0; i < len; i += 4) {
3306 		u32 addr;
3307 		__be32 data;
3308 
3309 		addr = offset + i;
3310 
3311 		memcpy(&data, buf + i, 4);
3312 
3313 		/*
3314 		 * The SEEPROM interface expects the data to always be opposite
3315 		 * the native endian format.  We accomplish this by reversing
3316 		 * all the operations that would have been performed on the
3317 		 * data from a call to tg3_nvram_read_be32().
3318 		 */
3319 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3320 
3321 		val = tr32(GRC_EEPROM_ADDR);
3322 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3323 
3324 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3325 			EEPROM_ADDR_READ);
3326 		tw32(GRC_EEPROM_ADDR, val |
3327 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3328 			(addr & EEPROM_ADDR_ADDR_MASK) |
3329 			EEPROM_ADDR_START |
3330 			EEPROM_ADDR_WRITE);
3331 
3332 		for (j = 0; j < 1000; j++) {
3333 			val = tr32(GRC_EEPROM_ADDR);
3334 
3335 			if (val & EEPROM_ADDR_COMPLETE)
3336 				break;
3337 			msleep(1);
3338 		}
3339 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3340 			rc = -EBUSY;
3341 			break;
3342 		}
3343 	}
3344 
3345 	return rc;
3346 }
3347 
3348 /* offset and length are dword aligned */
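/* Unbuffered flash parts must be programmed a full page at a time:
 * read the page into a bounce buffer, merge in the new data, erase
 * the page, then rewrite it word by word with FIRST/LAST framing.
 */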
3349 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3350 		u8 *buf)
3351 {
3352 	int ret = 0;
3353 	u32 pagesize = tp->nvram_pagesize;
3354 	u32 pagemask = pagesize - 1;
3355 	u32 nvram_cmd;
3356 	u8 *tmp;
3357 
3358 	tmp = kmalloc(pagesize, GFP_KERNEL);
3359 	if (tmp == NULL)
3360 		return -ENOMEM;
3361 
3362 	while (len) {
3363 		int j;
3364 		u32 phy_addr, page_off, size;
3365 
3366 		phy_addr = offset & ~pagemask;
3367 
3368 		for (j = 0; j < pagesize; j += 4) {
3369 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3370 						  (__be32 *) (tmp + j));
3371 			if (ret)
3372 				break;
3373 		}
3374 		if (ret)
3375 			break;
3376 
3377 		page_off = offset & pagemask;
3378 		size = pagesize;
3379 		if (len < size)
3380 			size = len;
3381 
3382 		len -= size;
3383 
3384 		memcpy(tmp + page_off, buf, size);
3385 
3386 		offset = offset + (pagesize - page_off);
3387 
3388 		tg3_enable_nvram_access(tp);
3389 
3390 		/*
3391 		 * Before we can erase the flash page, we need
3392 		 * to issue a special "write enable" command.
3393 		 */
3394 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3395 
3396 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3397 			break;
3398 
3399 		/* Erase the target page */
3400 		tw32(NVRAM_ADDR, phy_addr);
3401 
3402 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3403 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3404 
3405 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3406 			break;
3407 
3408 		/* Issue another write enable to start the write. */
3409 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3410 
3411 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3412 			break;
3413 
3414 		for (j = 0; j < pagesize; j += 4) {
3415 			__be32 data;
3416 
3417 			data = *((__be32 *) (tmp + j));
3418 
3419 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3420 
3421 			tw32(NVRAM_ADDR, phy_addr + j);
3422 
3423 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3424 				NVRAM_CMD_WR;
3425 
3426 			if (j == 0)
3427 				nvram_cmd |= NVRAM_CMD_FIRST;
3428 			else if (j == (pagesize - 4))
3429 				nvram_cmd |= NVRAM_CMD_LAST;
3430 
3431 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3432 			if (ret)
3433 				break;
3434 		}
3435 		if (ret)
3436 			break;
3437 	}
3438 
3439 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3440 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3441 
3442 	kfree(tmp);
3443 
3444 	return ret;
3445 }
3446 
3447 /* offset and length are dword aligned */
3448 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3449 		u8 *buf)
3450 {
3451 	int i, ret = 0;
3452 
3453 	for (i = 0; i < len; i += 4, offset += 4) {
3454 		u32 page_off, phy_addr, nvram_cmd;
3455 		__be32 data;
3456 
3457 		memcpy(&data, buf + i, 4);
3458 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3459 
3460 		page_off = offset % tp->nvram_pagesize;
3461 
3462 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3463 
3464 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3465 
3466 		if (page_off == 0 || i == 0)
3467 			nvram_cmd |= NVRAM_CMD_FIRST;
3468 		if (page_off == (tp->nvram_pagesize - 4))
3469 			nvram_cmd |= NVRAM_CMD_LAST;
3470 
3471 		if (i == (len - 4))
3472 			nvram_cmd |= NVRAM_CMD_LAST;
3473 
3474 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3475 		    !tg3_flag(tp, FLASH) ||
3476 		    !tg3_flag(tp, 57765_PLUS))
3477 			tw32(NVRAM_ADDR, phy_addr);
3478 
3479 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3480 		    !tg3_flag(tp, 5755_PLUS) &&
3481 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3482 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3483 			u32 cmd;
3484 
3485 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3486 			ret = tg3_nvram_exec_cmd(tp, cmd);
3487 			if (ret)
3488 				break;
3489 		}
3490 		if (!tg3_flag(tp, FLASH)) {
3491 			/* We always do complete word writes to eeprom. */
3492 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3493 		}
3494 
3495 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3496 		if (ret)
3497 			break;
3498 	}
3499 	return ret;
3500 }
3501 
3502 /* offset and length are dword aligned */
3503 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3504 {
3505 	int ret;
3506 
3507 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3508 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3509 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3510 		udelay(40);
3511 	}
3512 
3513 	if (!tg3_flag(tp, NVRAM)) {
3514 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3515 	} else {
3516 		u32 grc_mode;
3517 
3518 		ret = tg3_nvram_lock(tp);
3519 		if (ret)
3520 			return ret;
3521 
3522 		tg3_enable_nvram_access(tp);
3523 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3524 			tw32(NVRAM_WRITE1, 0x406);
3525 
3526 		grc_mode = tr32(GRC_MODE);
3527 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3528 
3529 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3530 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3531 				buf);
3532 		} else {
3533 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3534 				buf);
3535 		}
3536 
3537 		grc_mode = tr32(GRC_MODE);
3538 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3539 
3540 		tg3_disable_nvram_access(tp);
3541 		tg3_nvram_unlock(tp);
3542 	}
3543 
3544 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3545 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3546 		udelay(40);
3547 	}
3548 
3549 	return ret;
3550 }
3551 
3552 #define RX_CPU_SCRATCH_BASE	0x30000
3553 #define RX_CPU_SCRATCH_SIZE	0x04000
3554 #define TX_CPU_SCRATCH_BASE	0x34000
3555 #define TX_CPU_SCRATCH_SIZE	0x04000
3556 
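/* Halt one of the on-chip RX/TX CPUs by asserting CPU_MODE_HALT
 * until the mode register reads back as halted.
 */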
3557 /* tp->lock is held. */
3558 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3559 {
3560 	int i;
3561 	const int iters = 10000;
3562 
3563 	for (i = 0; i < iters; i++) {
3564 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3565 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3566 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3567 			break;
3568 		if (pci_channel_offline(tp->pdev))
3569 			return -EBUSY;
3570 	}
3571 
3572 	return (i == iters) ? -EBUSY : 0;
3573 }
3574 
3575 /* tp->lock is held. */
3576 static int tg3_rxcpu_pause(struct tg3 *tp)
3577 {
3578 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3579 
3580 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3581 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3582 	udelay(10);
3583 
3584 	return rc;
3585 }
3586 
3587 /* tp->lock is held. */
3588 static int tg3_txcpu_pause(struct tg3 *tp)
3589 {
3590 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3591 }
3592 
3593 /* tp->lock is held. */
3594 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3597 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3598 }
3599 
3600 /* tp->lock is held. */
3601 static void tg3_rxcpu_resume(struct tg3 *tp)
3602 {
3603 	tg3_resume_cpu(tp, RX_CPU_BASE);
3604 }
3605 
3606 /* tp->lock is held. */
3607 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3608 {
3609 	int rc;
3610 
3611 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3612 
3613 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3614 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3615 
3616 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3617 		return 0;
3618 	}
3619 	if (cpu_base == RX_CPU_BASE) {
3620 		rc = tg3_rxcpu_pause(tp);
3621 	} else {
3622 		/*
3623 		 * There is only an Rx CPU for the 5750 derivative in the
3624 		 * BCM4785.
3625 		 */
3626 		if (tg3_flag(tp, IS_SSB_CORE))
3627 			return 0;
3628 
3629 		rc = tg3_txcpu_pause(tp);
3630 	}
3631 
3632 	if (rc) {
3633 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3634 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3635 		return -ENODEV;
3636 	}
3637 
3638 	/* Clear firmware's nvram arbitration. */
3639 	if (tg3_flag(tp, NVRAM))
3640 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3641 	return 0;
3642 }
3643 
3644 static int tg3_fw_data_len(struct tg3 *tp,
3645 			   const struct tg3_firmware_hdr *fw_hdr)
3646 {
3647 	int fw_len;
3648 
3649 	/* Non-fragmented firmware has one firmware header followed by a
3650 	 * contiguous chunk of data to be written. The length field in that
3651 	 * header is not the length of the data to be written but the
3652 	 * complete length of the bss. The data length is determined from
3653 	 * tp->fw->size minus the header length.
3654 	 *
3655 	 * Fragmented firmware has a main header followed by multiple
3656 	 * fragments. Each fragment is identical to non-fragmented firmware:
3657 	 * a firmware header followed by a contiguous chunk of data. In
3658 	 * the main header, the length field is unused and set to 0xffffffff.
3659 	 * In each fragment header the length is the entire size of that
3660 	 * fragment, i.e. fragment data + header length. The data length is
3661 	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3662 	 */
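	/* Layout sketch (illustrative only):
	 *
	 *   non-fragmented: [ hdr | data ............................ ]
	 *   fragmented:     [ main hdr | hdr | data | hdr | data | ... ]
	 *                     len=0xffffffff   len = TG3_FW_HDR_LEN + data
	 */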
3663 	if (tp->fw_len == 0xffffffff)
3664 		fw_len = be32_to_cpu(fw_hdr->len);
3665 	else
3666 		fw_len = tp->fw->size;
3667 
3668 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3669 }
3670 
3671 /* tp->lock is held. */
3672 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3673 				 u32 cpu_scratch_base, int cpu_scratch_size,
3674 				 const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676 	int err, i;
3677 	void (*write_op)(struct tg3 *, u32, u32);
3678 	int total_len = tp->fw->size;
3679 
3680 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3681 		netdev_err(tp->dev,
3682 			   "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3683 			   __func__);
3684 		return -EINVAL;
3685 	}
3686 
3687 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3688 		write_op = tg3_write_mem;
3689 	else
3690 		write_op = tg3_write_indirect_reg32;
3691 
3692 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3693 		/* It is possible that bootcode is still loading at this point.
3694 		 * Get the nvram lock first before halting the cpu.
3695 		 */
3696 		int lock_err = tg3_nvram_lock(tp);
3697 		err = tg3_halt_cpu(tp, cpu_base);
3698 		if (!lock_err)
3699 			tg3_nvram_unlock(tp);
3700 		if (err)
3701 			goto out;
3702 
3703 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3704 			write_op(tp, cpu_scratch_base + i, 0);
3705 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3706 		tw32(cpu_base + CPU_MODE,
3707 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3708 	} else {
3709 		/* Subtract additional main header for fragmented firmware and
3710 		 * advance to the first fragment
3711 		 */
3712 		total_len -= TG3_FW_HDR_LEN;
3713 		fw_hdr++;
3714 	}
3715 
3716 	do {
3717 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3718 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3719 			write_op(tp, cpu_scratch_base +
3720 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3721 				     (i * sizeof(u32)),
3722 				 be32_to_cpu(fw_data[i]));
3723 
3724 		total_len -= be32_to_cpu(fw_hdr->len);
3725 
3726 		/* Advance to next fragment */
3727 		fw_hdr = (struct tg3_firmware_hdr *)
3728 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3729 	} while (total_len > 0);
3730 
3731 	err = 0;
3732 
3733 out:
3734 	return err;
3735 }
3736 
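/* Point a halted CPU's program counter at the firmware entry point,
 * re-halting and rewriting the PC a few times until it reads back
 * correctly.
 */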
3737 /* tp->lock is held. */
3738 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3739 {
3740 	int i;
3741 	const int iters = 5;
3742 
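	/* Write the desired PC and read it back; if the write did not
	 * take, clear the CPU state, force a halt, and try again.  At
	 * most five attempts are made, roughly 1 ms apart.
	 */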
3743 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3744 	tw32_f(cpu_base + CPU_PC, pc);
3745 
3746 	for (i = 0; i < iters; i++) {
3747 		if (tr32(cpu_base + CPU_PC) == pc)
3748 			break;
3749 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3750 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3751 		tw32_f(cpu_base + CPU_PC, pc);
3752 		udelay(1000);
3753 	}
3754 
3755 	return (i == iters) ? -EBUSY : 0;
3756 }
3757 
3758 /* tp->lock is held. */
3759 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3760 {
3761 	const struct tg3_firmware_hdr *fw_hdr;
3762 	int err;
3763 
3764 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3765 
3766 	/* The firmware blob starts with version numbers, followed by
3767 	 * start address and length. The length field holds the complete
3768 	 * length: end_address_of_bss - start_address_of_text. The
3769 	 * remainder is the blob to be loaded contiguously from the
3770 	 * start address. */
3771 
3772 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3773 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3774 				    fw_hdr);
3775 	if (err)
3776 		return err;
3777 
3778 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3779 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3780 				    fw_hdr);
3781 	if (err)
3782 		return err;
3783 
3784 	/* Now startup only the RX cpu. */
3785 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3786 				       be32_to_cpu(fw_hdr->base_addr));
3787 	if (err) {
3788 		netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3789 			   "should be %08x\n", __func__,
3790 			   tr32(RX_CPU_BASE + CPU_PC),
3791 			   be32_to_cpu(fw_hdr->base_addr));
3792 		return -ENODEV;
3793 	}
3794 
3795 	tg3_rxcpu_resume(tp);
3796 
3797 	return 0;
3798 }
3799 
3800 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3801 {
3802 	const int iters = 1000;
3803 	int i;
3804 	u32 val;
3805 
3806 	/* Wait for boot code to complete initialization and enter service
3807 	 * loop. It is then safe to download service patches.
3808 	 */
3809 	for (i = 0; i < iters; i++) {
3810 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3811 			break;
3812 
3813 		udelay(10);
3814 	}
3815 
3816 	if (i == iters) {
3817 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3818 		return -EBUSY;
3819 	}
3820 
3821 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3822 	if (val & 0xff) {
3823 		netdev_warn(tp->dev,
3824 			    "Other patches exist. Not downloading EEE patch\n");
3825 		return -EEXIST;
3826 	}
3827 
3828 	return 0;
3829 }
3830 
3831 /* tp->lock is held. */
3832 static void tg3_load_57766_firmware(struct tg3 *tp)
3833 {
3834 	struct tg3_firmware_hdr *fw_hdr;
3835 
3836 	if (!tg3_flag(tp, NO_NVRAM))
3837 		return;
3838 
3839 	if (tg3_validate_rxcpu_state(tp))
3840 		return;
3841 
3842 	if (!tp->fw)
3843 		return;
3844 
3845 	/* This firmware blob has a different format than older firmware
3846 	 * releases, as described below. The main difference is that the
3847 	 * data is fragmented and written to non-contiguous locations.
3848 	 *
3849 	 * It begins with a firmware header identical to other firmware,
3850 	 * consisting of version, base addr and length. The length field
3851 	 * here is unused and set to 0xffffffff.
3852 	 *
3853 	 * This is followed by a series of firmware fragments, each of
3854 	 * which is individually identical to previous firmware, i.e. a
3855 	 * firmware header followed by the data for that fragment. The
3856 	 * version field of the individual fragment header is unused.
3857 	 */
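
	/* Sketch of that layout (names as in the comment above; sizes
	 * vary per blob):
	 *
	 *   +--------------------------------------+
	 *   | main hdr:  version, base, ~0         |
	 *   +--------------------------------------+
	 *   | frag hdr:  (unused), base0, len0     |
	 *   | len0 - TG3_FW_HDR_LEN bytes of data  |
	 *   +--------------------------------------+
	 *   | frag hdr:  (unused), base1, len1     |
	 *   | len1 - TG3_FW_HDR_LEN bytes of data  |
	 *   +--------------------------------------+
	 *   | ...                                  |
	 */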
3858 
3859 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3860 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3861 		return;
3862 
3863 	if (tg3_rxcpu_pause(tp))
3864 		return;
3865 
3866 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3867 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3868 
3869 	tg3_rxcpu_resume(tp);
3870 }
3871 
3872 /* tp->lock is held. */
3873 static int tg3_load_tso_firmware(struct tg3 *tp)
3874 {
3875 	const struct tg3_firmware_hdr *fw_hdr;
3876 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3877 	int err;
3878 
3879 	if (!tg3_flag(tp, FW_TSO))
3880 		return 0;
3881 
3882 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 
3884 	/* The firmware blob starts with version numbers, followed by
3885 	 * start address and length. The length field holds the complete
3886 	 * length: end_address_of_bss - start_address_of_text. The
3887 	 * remainder is the blob to be loaded contiguously from the
3888 	 * start address. */
3889 
3890 	cpu_scratch_size = tp->fw_len;
3891 
3892 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3893 		cpu_base = RX_CPU_BASE;
3894 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3895 	} else {
3896 		cpu_base = TX_CPU_BASE;
3897 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3898 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3899 	}
3900 
3901 	err = tg3_load_firmware_cpu(tp, cpu_base,
3902 				    cpu_scratch_base, cpu_scratch_size,
3903 				    fw_hdr);
3904 	if (err)
3905 		return err;
3906 
3907 	/* Now startup the cpu. */
3908 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3909 				       be32_to_cpu(fw_hdr->base_addr));
3910 	if (err) {
3911 		netdev_err(tp->dev,
3912 			   "%s failed to set CPU PC, is %08x should be %08x\n",
3913 			   __func__, tr32(cpu_base + CPU_PC),
3914 			   be32_to_cpu(fw_hdr->base_addr));
3915 		return -ENODEV;
3916 	}
3917 
3918 	tg3_resume_cpu(tp, cpu_base);
3919 	return 0;
3920 }
3921 
3922 
3923 /* tp->lock is held. */
3924 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3925 {
3926 	u32 addr_high, addr_low;
3927 	int i;
3928 
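	/* The station address is split across two registers: a 16-bit
	 * high half holding the first two octets and a 32-bit low half
	 * holding the remaining four.  For an illustrative address of
	 * 00:10:18:aa:bb:cc this gives addr_high = 0x00000010 and
	 * addr_low = 0x18aabbcc.
	 */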
3929 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3930 		     tp->dev->dev_addr[1]);
3931 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3932 		    (tp->dev->dev_addr[3] << 16) |
3933 		    (tp->dev->dev_addr[4] <<  8) |
3934 		    (tp->dev->dev_addr[5] <<  0));
3935 	for (i = 0; i < 4; i++) {
3936 		if (i == 1 && skip_mac_1)
3937 			continue;
3938 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3939 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3940 	}
3941 
3942 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3943 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3944 		for (i = 0; i < 12; i++) {
3945 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3946 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3947 		}
3948 	}
3949 
3950 	addr_high = (tp->dev->dev_addr[0] +
3951 		     tp->dev->dev_addr[1] +
3952 		     tp->dev->dev_addr[2] +
3953 		     tp->dev->dev_addr[3] +
3954 		     tp->dev->dev_addr[4] +
3955 		     tp->dev->dev_addr[5]) &
3956 		TX_BACKOFF_SEED_MASK;
3957 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3958 }
3959 
3960 static void tg3_enable_register_access(struct tg3 *tp)
3961 {
3962 	/*
3963 	 * Make sure register accesses (indirect or otherwise) will function
3964 	 * correctly.
3965 	 */
3966 	pci_write_config_dword(tp->pdev,
3967 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3968 }
3969 
3970 static int tg3_power_up(struct tg3 *tp)
3971 {
3972 	int err;
3973 
3974 	tg3_enable_register_access(tp);
3975 
3976 	err = pci_set_power_state(tp->pdev, PCI_D0);
3977 	if (!err) {
3978 		/* Switch out of Vaux if it is a NIC */
3979 		tg3_pwrsrc_switch_to_vmain(tp);
3980 	} else {
3981 		netdev_err(tp->dev, "Transition to D0 failed\n");
3982 	}
3983 
3984 	return err;
3985 }
3986 
3987 static int tg3_setup_phy(struct tg3 *, bool);
3988 
3989 static int tg3_power_down_prepare(struct tg3 *tp)
3990 {
3991 	u32 misc_host_ctrl;
3992 	bool device_should_wake, do_low_power;
3993 
3994 	tg3_enable_register_access(tp);
3995 
3996 	/* Restore the CLKREQ setting. */
3997 	if (tg3_flag(tp, CLKREQ_BUG))
3998 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3999 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4000 
4001 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4002 	tw32(TG3PCI_MISC_HOST_CTRL,
4003 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4004 
4005 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4006 			     tg3_flag(tp, WOL_ENABLE);
4007 
4008 	if (tg3_flag(tp, USE_PHYLIB)) {
4009 		do_low_power = false;
4010 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4011 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4012 			struct phy_device *phydev;
4013 			u32 phyid, advertising;
4014 
4015 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4016 
4017 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4018 
4019 			tp->link_config.speed = phydev->speed;
4020 			tp->link_config.duplex = phydev->duplex;
4021 			tp->link_config.autoneg = phydev->autoneg;
4022 			tp->link_config.advertising = phydev->advertising;
4023 
4024 			advertising = ADVERTISED_TP |
4025 				      ADVERTISED_Pause |
4026 				      ADVERTISED_Autoneg |
4027 				      ADVERTISED_10baseT_Half;
4028 
4029 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4030 				if (tg3_flag(tp, WOL_SPEED_100MB))
4031 					advertising |=
4032 						ADVERTISED_100baseT_Half |
4033 						ADVERTISED_100baseT_Full |
4034 						ADVERTISED_10baseT_Full;
4035 				else
4036 					advertising |= ADVERTISED_10baseT_Full;
4037 			}
4038 
4039 			phydev->advertising = advertising;
4040 
4041 			phy_start_aneg(phydev);
4042 
4043 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4044 			if (phyid != PHY_ID_BCMAC131) {
4045 				phyid &= PHY_BCM_OUI_MASK;
4046 				if (phyid == PHY_BCM_OUI_1 ||
4047 				    phyid == PHY_BCM_OUI_2 ||
4048 				    phyid == PHY_BCM_OUI_3)
4049 					do_low_power = true;
4050 			}
4051 		}
4052 	} else {
4053 		do_low_power = true;
4054 
4055 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4056 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4057 
4058 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4059 			tg3_setup_phy(tp, false);
4060 	}
4061 
4062 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4063 		u32 val;
4064 
4065 		val = tr32(GRC_VCPU_EXT_CTRL);
4066 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4067 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4068 		int i;
4069 		u32 val;
4070 
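		/* Poll the firmware status mailbox, giving the bootcode
		 * roughly 200 ms to signal completion before the
		 * power-down proceeds.
		 */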
4071 		for (i = 0; i < 200; i++) {
4072 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4073 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4074 				break;
4075 			msleep(1);
4076 		}
4077 	}
4078 	if (tg3_flag(tp, WOL_CAP))
4079 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4080 						     WOL_DRV_STATE_SHUTDOWN |
4081 						     WOL_DRV_WOL |
4082 						     WOL_SET_MAGIC_PKT);
4083 
4084 	if (device_should_wake) {
4085 		u32 mac_mode;
4086 
4087 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4088 			if (do_low_power &&
4089 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4090 				tg3_phy_auxctl_write(tp,
4091 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4092 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4093 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4094 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4095 				udelay(40);
4096 			}
4097 
4098 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4099 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4100 			else if (tp->phy_flags &
4101 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4102 				if (tp->link_config.active_speed == SPEED_1000)
4103 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4104 				else
4105 					mac_mode = MAC_MODE_PORT_MODE_MII;
4106 			} else
4107 				mac_mode = MAC_MODE_PORT_MODE_MII;
4108 
4109 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4110 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4111 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4112 					     SPEED_100 : SPEED_10;
4113 				if (tg3_5700_link_polarity(tp, speed))
4114 					mac_mode |= MAC_MODE_LINK_POLARITY;
4115 				else
4116 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4117 			}
4118 		} else {
4119 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4120 		}
4121 
4122 		if (!tg3_flag(tp, 5750_PLUS))
4123 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4124 
4125 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4126 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4127 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4128 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4129 
4130 		if (tg3_flag(tp, ENABLE_APE))
4131 			mac_mode |= MAC_MODE_APE_TX_EN |
4132 				    MAC_MODE_APE_RX_EN |
4133 				    MAC_MODE_TDE_ENABLE;
4134 
4135 		tw32_f(MAC_MODE, mac_mode);
4136 		udelay(100);
4137 
4138 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4139 		udelay(10);
4140 	}
4141 
4142 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4143 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4144 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4145 		u32 base_val;
4146 
4147 		base_val = tp->pci_clock_ctrl;
4148 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4149 			     CLOCK_CTRL_TXCLK_DISABLE);
4150 
4151 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4152 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4153 	} else if (tg3_flag(tp, 5780_CLASS) ||
4154 		   tg3_flag(tp, CPMU_PRESENT) ||
4155 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4156 		/* do nothing */
4157 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4158 		u32 newbits1, newbits2;
4159 
4160 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4161 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4162 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4163 				    CLOCK_CTRL_TXCLK_DISABLE |
4164 				    CLOCK_CTRL_ALTCLK);
4165 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4166 		} else if (tg3_flag(tp, 5705_PLUS)) {
4167 			newbits1 = CLOCK_CTRL_625_CORE;
4168 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4169 		} else {
4170 			newbits1 = CLOCK_CTRL_ALTCLK;
4171 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4172 		}
4173 
4174 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4175 			    40);
4176 
4177 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4178 			    40);
4179 
4180 		if (!tg3_flag(tp, 5705_PLUS)) {
4181 			u32 newbits3;
4182 
4183 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4185 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4186 					    CLOCK_CTRL_TXCLK_DISABLE |
4187 					    CLOCK_CTRL_44MHZ_CORE);
4188 			} else {
4189 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4190 			}
4191 
4192 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4193 				    tp->pci_clock_ctrl | newbits3, 40);
4194 		}
4195 	}
4196 
4197 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4198 		tg3_power_down_phy(tp, do_low_power);
4199 
4200 	tg3_frob_aux_power(tp, true);
4201 
4202 	/* Workaround for unstable PLL clock */
4203 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4204 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4205 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4206 		u32 val = tr32(0x7d00);
4207 
4208 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4209 		tw32(0x7d00, val);
4210 		if (!tg3_flag(tp, ENABLE_ASF)) {
4211 			int err;
4212 
4213 			err = tg3_nvram_lock(tp);
4214 			tg3_halt_cpu(tp, RX_CPU_BASE);
4215 			if (!err)
4216 				tg3_nvram_unlock(tp);
4217 		}
4218 	}
4219 
4220 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4221 
4222 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4223 
4224 	return 0;
4225 }
4226 
4227 static void tg3_power_down(struct tg3 *tp)
4228 {
4229 	tg3_power_down_prepare(tp);
4230 
4231 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4232 	pci_set_power_state(tp->pdev, PCI_D3hot);
4233 }
4234 
4235 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4236 {
4237 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4238 	case MII_TG3_AUX_STAT_10HALF:
4239 		*speed = SPEED_10;
4240 		*duplex = DUPLEX_HALF;
4241 		break;
4242 
4243 	case MII_TG3_AUX_STAT_10FULL:
4244 		*speed = SPEED_10;
4245 		*duplex = DUPLEX_FULL;
4246 		break;
4247 
4248 	case MII_TG3_AUX_STAT_100HALF:
4249 		*speed = SPEED_100;
4250 		*duplex = DUPLEX_HALF;
4251 		break;
4252 
4253 	case MII_TG3_AUX_STAT_100FULL:
4254 		*speed = SPEED_100;
4255 		*duplex = DUPLEX_FULL;
4256 		break;
4257 
4258 	case MII_TG3_AUX_STAT_1000HALF:
4259 		*speed = SPEED_1000;
4260 		*duplex = DUPLEX_HALF;
4261 		break;
4262 
4263 	case MII_TG3_AUX_STAT_1000FULL:
4264 		*speed = SPEED_1000;
4265 		*duplex = DUPLEX_FULL;
4266 		break;
4267 
4268 	default:
4269 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4270 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4271 				 SPEED_10;
4272 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4273 				  DUPLEX_HALF;
4274 			break;
4275 		}
4276 		*speed = SPEED_UNKNOWN;
4277 		*duplex = DUPLEX_UNKNOWN;
4278 		break;
4279 	}
4280 }
4281 
4282 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4283 {
4284 	int err = 0;
4285 	u32 val, new_adv;
4286 
4287 	new_adv = ADVERTISE_CSMA;
4288 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4289 	new_adv |= mii_advertise_flowctrl(flowctrl);
4290 
4291 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4292 	if (err)
4293 		goto done;
4294 
4295 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4296 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4297 
4298 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4299 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4300 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4301 
4302 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4303 		if (err)
4304 			goto done;
4305 	}
4306 
4307 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4308 		goto done;
4309 
4310 	tw32(TG3_CPMU_EEE_MODE,
4311 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4312 
4313 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4314 	if (!err) {
4315 		u32 err2;
4316 
4317 		val = 0;
4318 		/* Advertise 100-BaseTX EEE ability */
4319 		if (advertise & ADVERTISED_100baseT_Full)
4320 			val |= MDIO_AN_EEE_ADV_100TX;
4321 		/* Advertise 1000-BaseT EEE ability */
4322 		if (advertise & ADVERTISED_1000baseT_Full)
4323 			val |= MDIO_AN_EEE_ADV_1000T;
4324 
4325 		if (!tp->eee.eee_enabled) {
4326 			val = 0;
4327 			tp->eee.advertised = 0;
4328 		} else {
4329 			tp->eee.advertised = advertise &
4330 					     (ADVERTISED_100baseT_Full |
4331 					      ADVERTISED_1000baseT_Full);
4332 		}
4333 
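		/* The EEE advertisement register lives in the clause-45
		 * AN MMD (register 7.60); it is reached through the
		 * driver's indirect clause-45 access helper.
		 */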
4334 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4335 		if (err)
4336 			val = 0;
4337 
4338 		switch (tg3_asic_rev(tp)) {
4339 		case ASIC_REV_5717:
4340 		case ASIC_REV_57765:
4341 		case ASIC_REV_57766:
4342 		case ASIC_REV_5719:
4343 			/* If we advertised any EEE abilities above... */
4344 			if (val)
4345 				val = MII_TG3_DSP_TAP26_ALNOKO |
4346 				      MII_TG3_DSP_TAP26_RMRXSTO |
4347 				      MII_TG3_DSP_TAP26_OPCSINPT;
4348 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4349 			/* Fall through */
4350 		case ASIC_REV_5720:
4351 		case ASIC_REV_5762:
4352 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4353 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4354 						 MII_TG3_DSP_CH34TP2_HIBW01);
4355 		}
4356 
4357 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4358 		if (!err)
4359 			err = err2;
4360 	}
4361 
4362 done:
4363 	return err;
4364 }
4365 
4366 static void tg3_phy_copper_begin(struct tg3 *tp)
4367 {
4368 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4369 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4370 		u32 adv, fc;
4371 
4372 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4373 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4374 			adv = ADVERTISED_10baseT_Half |
4375 			      ADVERTISED_10baseT_Full;
4376 			if (tg3_flag(tp, WOL_SPEED_100MB))
4377 				adv |= ADVERTISED_100baseT_Half |
4378 				       ADVERTISED_100baseT_Full;
4379 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4380 				adv |= ADVERTISED_1000baseT_Half |
4381 				       ADVERTISED_1000baseT_Full;
4382 
4383 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4384 		} else {
4385 			adv = tp->link_config.advertising;
4386 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4387 				adv &= ~(ADVERTISED_1000baseT_Half |
4388 					 ADVERTISED_1000baseT_Full);
4389 
4390 			fc = tp->link_config.flowctrl;
4391 		}
4392 
4393 		tg3_phy_autoneg_cfg(tp, adv, fc);
4394 
4395 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4396 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4397 			/* Normally during power down we want to autonegotiate
4398 			 * the lowest possible speed for WOL. However, to avoid
4399 			 * link flap, we leave it untouched.
4400 			 */
4401 			return;
4402 		}
4403 
4404 		tg3_writephy(tp, MII_BMCR,
4405 			     BMCR_ANENABLE | BMCR_ANRESTART);
4406 	} else {
4407 		int i;
4408 		u32 bmcr, orig_bmcr;
4409 
4410 		tp->link_config.active_speed = tp->link_config.speed;
4411 		tp->link_config.active_duplex = tp->link_config.duplex;
4412 
4413 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4414 			/* With autoneg disabled, 5715 only links up when the
4415 			 * advertisement register has the configured speed
4416 			 * enabled.
4417 			 */
4418 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4419 		}
4420 
4421 		bmcr = 0;
4422 		switch (tp->link_config.speed) {
4423 		default:
4424 		case SPEED_10:
4425 			break;
4426 
4427 		case SPEED_100:
4428 			bmcr |= BMCR_SPEED100;
4429 			break;
4430 
4431 		case SPEED_1000:
4432 			bmcr |= BMCR_SPEED1000;
4433 			break;
4434 		}
4435 
4436 		if (tp->link_config.duplex == DUPLEX_FULL)
4437 			bmcr |= BMCR_FULLDPLX;
4438 
4439 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4440 		    (bmcr != orig_bmcr)) {
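			/* Knock the link down by putting the PHY in
			 * loopback, then wait (up to ~15 ms) for BMSR to
			 * report link loss before programming the new
			 * forced settings.
			 */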
4441 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4442 			for (i = 0; i < 1500; i++) {
4443 				u32 tmp;
4444 
4445 				udelay(10);
4446 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4447 				    tg3_readphy(tp, MII_BMSR, &tmp))
4448 					continue;
4449 				if (!(tmp & BMSR_LSTATUS)) {
4450 					udelay(40);
4451 					break;
4452 				}
4453 			}
4454 			tg3_writephy(tp, MII_BMCR, bmcr);
4455 			udelay(40);
4456 		}
4457 	}
4458 }
4459 
4460 static int tg3_phy_pull_config(struct tg3 *tp)
4461 {
4462 	int err;
4463 	u32 val;
4464 
4465 	err = tg3_readphy(tp, MII_BMCR, &val);
4466 	if (err)
4467 		goto done;
4468 
4469 	if (!(val & BMCR_ANENABLE)) {
4470 		tp->link_config.autoneg = AUTONEG_DISABLE;
4471 		tp->link_config.advertising = 0;
4472 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4473 
4474 		err = -EIO;
4475 
4476 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4477 		case 0:
4478 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4479 				goto done;
4480 
4481 			tp->link_config.speed = SPEED_10;
4482 			break;
4483 		case BMCR_SPEED100:
4484 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4485 				goto done;
4486 
4487 			tp->link_config.speed = SPEED_100;
4488 			break;
4489 		case BMCR_SPEED1000:
4490 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4491 				tp->link_config.speed = SPEED_1000;
4492 				break;
4493 			}
4494 			/* Fall through */
4495 		default:
4496 			goto done;
4497 		}
4498 
4499 		if (val & BMCR_FULLDPLX)
4500 			tp->link_config.duplex = DUPLEX_FULL;
4501 		else
4502 			tp->link_config.duplex = DUPLEX_HALF;
4503 
4504 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4505 
4506 		err = 0;
4507 		goto done;
4508 	}
4509 
4510 	tp->link_config.autoneg = AUTONEG_ENABLE;
4511 	tp->link_config.advertising = ADVERTISED_Autoneg;
4512 	tg3_flag_set(tp, PAUSE_AUTONEG);
4513 
4514 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4515 		u32 adv;
4516 
4517 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4518 		if (err)
4519 			goto done;
4520 
4521 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4522 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4523 
4524 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4525 	} else {
4526 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4527 	}
4528 
4529 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4530 		u32 adv;
4531 
4532 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4533 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4534 			if (err)
4535 				goto done;
4536 
4537 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4538 		} else {
4539 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4540 			if (err)
4541 				goto done;
4542 
4543 			adv = tg3_decode_flowctrl_1000X(val);
4544 			tp->link_config.flowctrl = adv;
4545 
4546 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4547 			adv = mii_adv_to_ethtool_adv_x(val);
4548 		}
4549 
4550 		tp->link_config.advertising |= adv;
4551 	}
4552 
4553 done:
4554 	return err;
4555 }
4556 
4557 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4558 {
4559 	int err;
4560 
4561 	/* Turn off tap power management and set the
4562 	 * extended packet length bit. */
4563 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4564 
4565 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4566 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4567 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4568 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4569 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4570 
4571 	udelay(40);
4572 
4573 	return err;
4574 }
4575 
4576 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4577 {
4578 	struct ethtool_eee eee;
4579 
4580 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4581 		return true;
4582 
4583 	tg3_eee_pull_config(tp, &eee);
4584 
4585 	if (tp->eee.eee_enabled) {
4586 		if (tp->eee.advertised != eee.advertised ||
4587 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4588 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4589 			return false;
4590 	} else {
4591 		/* EEE is disabled but we're advertising */
4592 		if (eee.advertised)
4593 			return false;
4594 	}
4595 
4596 	return true;
4597 }
4598 
4599 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4600 {
4601 	u32 advmsk, tgtadv, advertising;
4602 
4603 	advertising = tp->link_config.advertising;
4604 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4605 
4606 	advmsk = ADVERTISE_ALL;
4607 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4608 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4609 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4610 	}
4611 
4612 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4613 		return false;
4614 
4615 	if ((*lcladv & advmsk) != tgtadv)
4616 		return false;
4617 
4618 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4619 		u32 tg3_ctrl;
4620 
4621 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4622 
4623 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4624 			return false;
4625 
4626 		if (tgtadv &&
4627 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4628 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4629 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4630 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4631 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4632 		} else {
4633 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4634 		}
4635 
4636 		if (tg3_ctrl != tgtadv)
4637 			return false;
4638 	}
4639 
4640 	return true;
4641 }
4642 
4643 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4644 {
4645 	u32 lpeth = 0;
4646 
4647 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4648 		u32 val;
4649 
4650 		if (tg3_readphy(tp, MII_STAT1000, &val))
4651 			return false;
4652 
4653 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4654 	}
4655 
4656 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4657 		return false;
4658 
4659 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4660 	tp->link_config.rmt_adv = lpeth;
4661 
4662 	return true;
4663 }
4664 
4665 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4666 {
4667 	if (curr_link_up != tp->link_up) {
4668 		if (curr_link_up) {
4669 			netif_carrier_on(tp->dev);
4670 		} else {
4671 			netif_carrier_off(tp->dev);
4672 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4673 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4674 		}
4675 
4676 		tg3_link_report(tp);
4677 		return true;
4678 	}
4679 
4680 	return false;
4681 }
4682 
4683 static void tg3_clear_mac_status(struct tg3 *tp)
4684 {
4685 	tw32(MAC_EVENT, 0);
4686 
4687 	tw32_f(MAC_STATUS,
4688 	       MAC_STATUS_SYNC_CHANGED |
4689 	       MAC_STATUS_CFG_CHANGED |
4690 	       MAC_STATUS_MI_COMPLETION |
4691 	       MAC_STATUS_LNKSTATE_CHANGED);
4692 	udelay(40);
4693 }
4694 
4695 static void tg3_setup_eee(struct tg3 *tp)
4696 {
4697 	u32 val;
4698 
4699 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4700 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4701 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4702 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4703 
4704 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4705 
4706 	tw32_f(TG3_CPMU_EEE_CTRL,
4707 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4708 
4709 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4710 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4711 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4712 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4713 
4714 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4715 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4716 
4717 	if (tg3_flag(tp, ENABLE_APE))
4718 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4719 
4720 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4721 
4722 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4723 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4724 	       (tp->eee.tx_lpi_timer & 0xffff));
4725 
4726 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4727 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4728 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4729 }
4730 
4731 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4732 {
4733 	bool current_link_up;
4734 	u32 bmsr, val;
4735 	u32 lcl_adv, rmt_adv;
4736 	u16 current_speed;
4737 	u8 current_duplex;
4738 	int i, err;
4739 
4740 	tg3_clear_mac_status(tp);
4741 
4742 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4743 		tw32_f(MAC_MI_MODE,
4744 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4745 		udelay(80);
4746 	}
4747 
4748 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4749 
4750 	/* Some third-party PHYs need to be reset on link going
4751 	 * down.
4752 	 */
4753 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4754 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4755 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4756 	    tp->link_up) {
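		/* BMSR latches link-down events; read it twice so that
		 * the second read reflects the current link state.
		 */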
4757 		tg3_readphy(tp, MII_BMSR, &bmsr);
4758 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4759 		    !(bmsr & BMSR_LSTATUS))
4760 			force_reset = true;
4761 	}
4762 	if (force_reset)
4763 		tg3_phy_reset(tp);
4764 
4765 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4766 		tg3_readphy(tp, MII_BMSR, &bmsr);
4767 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4768 		    !tg3_flag(tp, INIT_COMPLETE))
4769 			bmsr = 0;
4770 
4771 		if (!(bmsr & BMSR_LSTATUS)) {
4772 			err = tg3_init_5401phy_dsp(tp);
4773 			if (err)
4774 				return err;
4775 
4776 			tg3_readphy(tp, MII_BMSR, &bmsr);
4777 			for (i = 0; i < 1000; i++) {
4778 				udelay(10);
4779 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4780 				    (bmsr & BMSR_LSTATUS)) {
4781 					udelay(40);
4782 					break;
4783 				}
4784 			}
4785 
4786 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4787 			    TG3_PHY_REV_BCM5401_B0 &&
4788 			    !(bmsr & BMSR_LSTATUS) &&
4789 			    tp->link_config.active_speed == SPEED_1000) {
4790 				err = tg3_phy_reset(tp);
4791 				if (!err)
4792 					err = tg3_init_5401phy_dsp(tp);
4793 				if (err)
4794 					return err;
4795 			}
4796 		}
4797 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4798 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4799 		/* 5701 {A0,B0} CRC bug workaround */
4800 		tg3_writephy(tp, 0x15, 0x0a75);
4801 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4802 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4803 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4804 	}
4805 
4806 	/* Clear pending interrupts... */
4807 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4808 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4809 
4810 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4811 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4812 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4813 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4814 
4815 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4816 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4817 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4818 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4819 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4820 		else
4821 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4822 	}
4823 
4824 	current_link_up = false;
4825 	current_speed = SPEED_UNKNOWN;
4826 	current_duplex = DUPLEX_UNKNOWN;
4827 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4828 	tp->link_config.rmt_adv = 0;
4829 
4830 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4831 		err = tg3_phy_auxctl_read(tp,
4832 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4833 					  &val);
4834 		if (!err && !(val & (1 << 10))) {
4835 			tg3_phy_auxctl_write(tp,
4836 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4837 					     val | (1 << 10));
4838 			goto relink;
4839 		}
4840 	}
4841 
4842 	bmsr = 0;
4843 	for (i = 0; i < 100; i++) {
4844 		tg3_readphy(tp, MII_BMSR, &bmsr);
4845 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4846 		    (bmsr & BMSR_LSTATUS))
4847 			break;
4848 		udelay(40);
4849 	}
4850 
4851 	if (bmsr & BMSR_LSTATUS) {
4852 		u32 aux_stat, bmcr;
4853 
4854 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4855 		for (i = 0; i < 2000; i++) {
4856 			udelay(10);
4857 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4858 			    aux_stat)
4859 				break;
4860 		}
4861 
4862 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4863 					     &current_speed,
4864 					     &current_duplex);
4865 
4866 		bmcr = 0;
4867 		for (i = 0; i < 200; i++) {
4868 			tg3_readphy(tp, MII_BMCR, &bmcr);
4869 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4870 				continue;
4871 			if (bmcr && bmcr != 0x7fff)
4872 				break;
4873 			udelay(10);
4874 		}
4875 
4876 		lcl_adv = 0;
4877 		rmt_adv = 0;
4878 
4879 		tp->link_config.active_speed = current_speed;
4880 		tp->link_config.active_duplex = current_duplex;
4881 
4882 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4883 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4884 
4885 			if ((bmcr & BMCR_ANENABLE) &&
4886 			    eee_config_ok &&
4887 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4888 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4889 				current_link_up = true;
4890 
4891 			/* EEE settings changes take effect only after a phy
4892 			 * reset.  If we have skipped a reset due to Link Flap
4893 			 * Avoidance being enabled, do it now.
4894 			 */
4895 			if (!eee_config_ok &&
4896 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4897 			    !force_reset) {
4898 				tg3_setup_eee(tp);
4899 				tg3_phy_reset(tp);
4900 			}
4901 		} else {
4902 			if (!(bmcr & BMCR_ANENABLE) &&
4903 			    tp->link_config.speed == current_speed &&
4904 			    tp->link_config.duplex == current_duplex) {
4905 				current_link_up = true;
4906 			}
4907 		}
4908 
4909 		if (current_link_up &&
4910 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4911 			u32 reg, bit;
4912 
4913 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4914 				reg = MII_TG3_FET_GEN_STAT;
4915 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4916 			} else {
4917 				reg = MII_TG3_EXT_STAT;
4918 				bit = MII_TG3_EXT_STAT_MDIX;
4919 			}
4920 
4921 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4922 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4923 
4924 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4925 		}
4926 	}
4927 
4928 relink:
4929 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4930 		tg3_phy_copper_begin(tp);
4931 
4932 		if (tg3_flag(tp, ROBOSWITCH)) {
4933 			current_link_up = true;
4934 			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4935 			current_speed = SPEED_1000;
4936 			current_duplex = DUPLEX_FULL;
4937 			tp->link_config.active_speed = current_speed;
4938 			tp->link_config.active_duplex = current_duplex;
4939 		}
4940 
4941 		tg3_readphy(tp, MII_BMSR, &bmsr);
4942 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4943 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4944 			current_link_up = true;
4945 	}
4946 
4947 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4948 	if (current_link_up) {
4949 		if (tp->link_config.active_speed == SPEED_100 ||
4950 		    tp->link_config.active_speed == SPEED_10)
4951 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4952 		else
4953 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4954 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4955 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4956 	else
4957 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4958 
4959 	/* For the 5750 core in the BCM4785 chip to work properly
4960 	 * in RGMII mode, the LED Control Register must be set up.
4961 	 */
4962 	if (tg3_flag(tp, RGMII_MODE)) {
4963 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4964 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4965 
4966 		if (tp->link_config.active_speed == SPEED_10)
4967 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4968 		else if (tp->link_config.active_speed == SPEED_100)
4969 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4970 				     LED_CTRL_100MBPS_ON);
4971 		else if (tp->link_config.active_speed == SPEED_1000)
4972 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4973 				     LED_CTRL_1000MBPS_ON);
4974 
4975 		tw32(MAC_LED_CTRL, led_ctrl);
4976 		udelay(40);
4977 	}
4978 
4979 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4980 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4981 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4982 
4983 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4984 		if (current_link_up &&
4985 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4986 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4987 		else
4988 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4989 	}
4990 
4991 	/* Without this setting the Netgear GA302T PHY does not
4992 	 * send/receive packets, for reasons unknown...
4993 	 */
4994 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4995 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4996 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4997 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4998 		udelay(80);
4999 	}
5000 
5001 	tw32_f(MAC_MODE, tp->mac_mode);
5002 	udelay(40);
5003 
5004 	tg3_phy_eee_adjust(tp, current_link_up);
5005 
5006 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5007 		/* Polled via timer. */
5008 		tw32_f(MAC_EVENT, 0);
5009 	} else {
5010 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5011 	}
5012 	udelay(40);
5013 
5014 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5015 	    current_link_up &&
5016 	    tp->link_config.active_speed == SPEED_1000 &&
5017 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5018 		udelay(120);
5019 		tw32_f(MAC_STATUS,
5020 		     (MAC_STATUS_SYNC_CHANGED |
5021 		      MAC_STATUS_CFG_CHANGED));
5022 		udelay(40);
5023 		tg3_write_mem(tp,
5024 			      NIC_SRAM_FIRMWARE_MBOX,
5025 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5026 	}
5027 
5028 	/* Prevent send BD corruption. */
5029 	if (tg3_flag(tp, CLKREQ_BUG)) {
5030 		if (tp->link_config.active_speed == SPEED_100 ||
5031 		    tp->link_config.active_speed == SPEED_10)
5032 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5033 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5034 		else
5035 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5036 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5037 	}
5038 
5039 	tg3_test_and_report_link_chg(tp, current_link_up);
5040 
5041 	return 0;
5042 }
5043 
5044 struct tg3_fiber_aneginfo {
5045 	int state;
5046 #define ANEG_STATE_UNKNOWN		0
5047 #define ANEG_STATE_AN_ENABLE		1
5048 #define ANEG_STATE_RESTART_INIT		2
5049 #define ANEG_STATE_RESTART		3
5050 #define ANEG_STATE_DISABLE_LINK_OK	4
5051 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5052 #define ANEG_STATE_ABILITY_DETECT	6
5053 #define ANEG_STATE_ACK_DETECT_INIT	7
5054 #define ANEG_STATE_ACK_DETECT		8
5055 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5056 #define ANEG_STATE_COMPLETE_ACK		10
5057 #define ANEG_STATE_IDLE_DETECT_INIT	11
5058 #define ANEG_STATE_IDLE_DETECT		12
5059 #define ANEG_STATE_LINK_OK		13
5060 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5061 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5062 
5063 	u32 flags;
5064 #define MR_AN_ENABLE		0x00000001
5065 #define MR_RESTART_AN		0x00000002
5066 #define MR_AN_COMPLETE		0x00000004
5067 #define MR_PAGE_RX		0x00000008
5068 #define MR_NP_LOADED		0x00000010
5069 #define MR_TOGGLE_TX		0x00000020
5070 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5071 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5072 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5073 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5074 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5075 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5076 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5077 #define MR_TOGGLE_RX		0x00002000
5078 #define MR_NP_RX		0x00004000
5079 
5080 #define MR_LINK_OK		0x80000000
5081 
5082 	unsigned long link_time, cur_time;
5083 
5084 	u32 ability_match_cfg;
5085 	int ability_match_count;
5086 
5087 	char ability_match, idle_match, ack_match;
5088 
5089 	u32 txconfig, rxconfig;
5090 #define ANEG_CFG_NP		0x00000080
5091 #define ANEG_CFG_ACK		0x00000040
5092 #define ANEG_CFG_RF2		0x00000020
5093 #define ANEG_CFG_RF1		0x00000010
5094 #define ANEG_CFG_PS2		0x00000001
5095 #define ANEG_CFG_PS1		0x00008000
5096 #define ANEG_CFG_HD		0x00004000
5097 #define ANEG_CFG_FD		0x00002000
5098 #define ANEG_CFG_INVAL		0x00001f06
5099 
5100 };
5101 #define ANEG_OK		0
5102 #define ANEG_DONE	1
5103 #define ANEG_TIMER_ENAB	2
5104 #define ANEG_FAILED	-1
5105 
5106 #define ANEG_STATE_SETTLE_TIME	10000
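
/* The state machine is clocked by its caller: fiber_autoneg() below
 * steps it roughly once per microsecond, so ANEG_STATE_SETTLE_TIME
 * corresponds to about 10 ms of settle time.
 */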
5107 
5108 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5109 				   struct tg3_fiber_aneginfo *ap)
5110 {
5111 	u16 flowctrl;
5112 	unsigned long delta;
5113 	u32 rx_cfg_reg;
5114 	int ret;
5115 
5116 	if (ap->state == ANEG_STATE_UNKNOWN) {
5117 		ap->rxconfig = 0;
5118 		ap->link_time = 0;
5119 		ap->cur_time = 0;
5120 		ap->ability_match_cfg = 0;
5121 		ap->ability_match_count = 0;
5122 		ap->ability_match = 0;
5123 		ap->idle_match = 0;
5124 		ap->ack_match = 0;
5125 	}
5126 	ap->cur_time++;
5127 
5128 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5129 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5130 
5131 		if (rx_cfg_reg != ap->ability_match_cfg) {
5132 			ap->ability_match_cfg = rx_cfg_reg;
5133 			ap->ability_match = 0;
5134 			ap->ability_match_count = 0;
5135 		} else {
5136 			if (++ap->ability_match_count > 1) {
5137 				ap->ability_match = 1;
5138 				ap->ability_match_cfg = rx_cfg_reg;
5139 			}
5140 		}
5141 		if (rx_cfg_reg & ANEG_CFG_ACK)
5142 			ap->ack_match = 1;
5143 		else
5144 			ap->ack_match = 0;
5145 
5146 		ap->idle_match = 0;
5147 	} else {
5148 		ap->idle_match = 1;
5149 		ap->ability_match_cfg = 0;
5150 		ap->ability_match_count = 0;
5151 		ap->ability_match = 0;
5152 		ap->ack_match = 0;
5153 
5154 		rx_cfg_reg = 0;
5155 	}
5156 
5157 	ap->rxconfig = rx_cfg_reg;
5158 	ret = ANEG_OK;
5159 
5160 	switch (ap->state) {
5161 	case ANEG_STATE_UNKNOWN:
5162 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5163 			ap->state = ANEG_STATE_AN_ENABLE;
5164 
5165 		/* Fall through */
5166 	case ANEG_STATE_AN_ENABLE:
5167 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5168 		if (ap->flags & MR_AN_ENABLE) {
5169 			ap->link_time = 0;
5170 			ap->cur_time = 0;
5171 			ap->ability_match_cfg = 0;
5172 			ap->ability_match_count = 0;
5173 			ap->ability_match = 0;
5174 			ap->idle_match = 0;
5175 			ap->ack_match = 0;
5176 
5177 			ap->state = ANEG_STATE_RESTART_INIT;
5178 		} else {
5179 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5180 		}
5181 		break;
5182 
5183 	case ANEG_STATE_RESTART_INIT:
5184 		ap->link_time = ap->cur_time;
5185 		ap->flags &= ~(MR_NP_LOADED);
5186 		ap->txconfig = 0;
5187 		tw32(MAC_TX_AUTO_NEG, 0);
5188 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5189 		tw32_f(MAC_MODE, tp->mac_mode);
5190 		udelay(40);
5191 
5192 		ret = ANEG_TIMER_ENAB;
5193 		ap->state = ANEG_STATE_RESTART;
5194 
5195 		/* Fall through */
5196 	case ANEG_STATE_RESTART:
5197 		delta = ap->cur_time - ap->link_time;
5198 		if (delta > ANEG_STATE_SETTLE_TIME)
5199 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5200 		else
5201 			ret = ANEG_TIMER_ENAB;
5202 		break;
5203 
5204 	case ANEG_STATE_DISABLE_LINK_OK:
5205 		ret = ANEG_DONE;
5206 		break;
5207 
5208 	case ANEG_STATE_ABILITY_DETECT_INIT:
5209 		ap->flags &= ~(MR_TOGGLE_TX);
5210 		ap->txconfig = ANEG_CFG_FD;
5211 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5212 		if (flowctrl & ADVERTISE_1000XPAUSE)
5213 			ap->txconfig |= ANEG_CFG_PS1;
5214 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5215 			ap->txconfig |= ANEG_CFG_PS2;
5216 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5217 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5218 		tw32_f(MAC_MODE, tp->mac_mode);
5219 		udelay(40);
5220 
5221 		ap->state = ANEG_STATE_ABILITY_DETECT;
5222 		break;
5223 
5224 	case ANEG_STATE_ABILITY_DETECT:
5225 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5226 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5227 		break;
5228 
5229 	case ANEG_STATE_ACK_DETECT_INIT:
5230 		ap->txconfig |= ANEG_CFG_ACK;
5231 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5232 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5233 		tw32_f(MAC_MODE, tp->mac_mode);
5234 		udelay(40);
5235 
5236 		ap->state = ANEG_STATE_ACK_DETECT;
5237 
5238 		/* Fall through */
5239 	case ANEG_STATE_ACK_DETECT:
5240 		if (ap->ack_match != 0) {
5241 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5242 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5243 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5244 			} else {
5245 				ap->state = ANEG_STATE_AN_ENABLE;
5246 			}
5247 		} else if (ap->ability_match != 0 &&
5248 			   ap->rxconfig == 0) {
5249 			ap->state = ANEG_STATE_AN_ENABLE;
5250 		}
5251 		break;
5252 
5253 	case ANEG_STATE_COMPLETE_ACK_INIT:
5254 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5255 			ret = ANEG_FAILED;
5256 			break;
5257 		}
5258 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5259 			       MR_LP_ADV_HALF_DUPLEX |
5260 			       MR_LP_ADV_SYM_PAUSE |
5261 			       MR_LP_ADV_ASYM_PAUSE |
5262 			       MR_LP_ADV_REMOTE_FAULT1 |
5263 			       MR_LP_ADV_REMOTE_FAULT2 |
5264 			       MR_LP_ADV_NEXT_PAGE |
5265 			       MR_TOGGLE_RX |
5266 			       MR_NP_RX);
5267 		if (ap->rxconfig & ANEG_CFG_FD)
5268 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5269 		if (ap->rxconfig & ANEG_CFG_HD)
5270 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5271 		if (ap->rxconfig & ANEG_CFG_PS1)
5272 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5273 		if (ap->rxconfig & ANEG_CFG_PS2)
5274 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5275 		if (ap->rxconfig & ANEG_CFG_RF1)
5276 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5277 		if (ap->rxconfig & ANEG_CFG_RF2)
5278 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5279 		if (ap->rxconfig & ANEG_CFG_NP)
5280 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5281 
5282 		ap->link_time = ap->cur_time;
5283 
5284 		ap->flags ^= (MR_TOGGLE_TX);
5285 		if (ap->rxconfig & 0x0008)
5286 			ap->flags |= MR_TOGGLE_RX;
5287 		if (ap->rxconfig & ANEG_CFG_NP)
5288 			ap->flags |= MR_NP_RX;
5289 		ap->flags |= MR_PAGE_RX;
5290 
5291 		ap->state = ANEG_STATE_COMPLETE_ACK;
5292 		ret = ANEG_TIMER_ENAB;
5293 		break;
5294 
5295 	case ANEG_STATE_COMPLETE_ACK:
5296 		if (ap->ability_match != 0 &&
5297 		    ap->rxconfig == 0) {
5298 			ap->state = ANEG_STATE_AN_ENABLE;
5299 			break;
5300 		}
5301 		delta = ap->cur_time - ap->link_time;
5302 		if (delta > ANEG_STATE_SETTLE_TIME) {
5303 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5304 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5305 			} else {
5306 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5307 				    !(ap->flags & MR_NP_RX)) {
5308 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5309 				} else {
5310 					ret = ANEG_FAILED;
5311 				}
5312 			}
5313 		}
5314 		break;
5315 
5316 	case ANEG_STATE_IDLE_DETECT_INIT:
5317 		ap->link_time = ap->cur_time;
5318 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5319 		tw32_f(MAC_MODE, tp->mac_mode);
5320 		udelay(40);
5321 
5322 		ap->state = ANEG_STATE_IDLE_DETECT;
5323 		ret = ANEG_TIMER_ENAB;
5324 		break;
5325 
5326 	case ANEG_STATE_IDLE_DETECT:
5327 		if (ap->ability_match != 0 &&
5328 		    ap->rxconfig == 0) {
5329 			ap->state = ANEG_STATE_AN_ENABLE;
5330 			break;
5331 		}
5332 		delta = ap->cur_time - ap->link_time;
5333 		if (delta > ANEG_STATE_SETTLE_TIME) {
5334 			/* XXX another gem from the Broadcom driver :( */
5335 			ap->state = ANEG_STATE_LINK_OK;
5336 		}
5337 		break;
5338 
5339 	case ANEG_STATE_LINK_OK:
5340 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5341 		ret = ANEG_DONE;
5342 		break;
5343 
5344 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5345 		/* ??? unimplemented */
5346 		break;
5347 
5348 	case ANEG_STATE_NEXT_PAGE_WAIT:
5349 		/* ??? unimplemented */
5350 		break;
5351 
5352 	default:
5353 		ret = ANEG_FAILED;
5354 		break;
5355 	}
5356 
5357 	return ret;
5358 }
5359 
5360 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5361 {
5362 	int res = 0;
5363 	struct tg3_fiber_aneginfo aninfo;
5364 	int status = ANEG_FAILED;
5365 	unsigned int tick;
5366 	u32 tmp;
5367 
5368 	tw32_f(MAC_TX_AUTO_NEG, 0);
5369 
5370 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5371 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5372 	udelay(40);
5373 
5374 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5375 	udelay(40);
5376 
5377 	memset(&aninfo, 0, sizeof(aninfo));
5378 	aninfo.flags |= MR_AN_ENABLE;
5379 	aninfo.state = ANEG_STATE_UNKNOWN;
5380 	aninfo.cur_time = 0;
5381 	tick = 0;
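	/* Step the state machine about once per microsecond, giving the
	 * exchange a budget of roughly 195 ms to complete or fail.
	 */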
5382 	while (++tick < 195000) {
5383 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5384 		if (status == ANEG_DONE || status == ANEG_FAILED)
5385 			break;
5386 
5387 		udelay(1);
5388 	}
5389 
5390 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5391 	tw32_f(MAC_MODE, tp->mac_mode);
5392 	udelay(40);
5393 
5394 	*txflags = aninfo.txconfig;
5395 	*rxflags = aninfo.flags;
5396 
5397 	if (status == ANEG_DONE &&
5398 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5399 			     MR_LP_ADV_FULL_DUPLEX)))
5400 		res = 1;
5401 
5402 	return res;
5403 }
5404 
5405 static void tg3_init_bcm8002(struct tg3 *tp)
5406 {
5407 	u32 mac_status = tr32(MAC_STATUS);
5408 	int i;
5409 
5410 	/* Reset when initializing for the first time or when we have a link. */
5411 	if (tg3_flag(tp, INIT_COMPLETE) &&
5412 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5413 		return;
5414 
5415 	/* Set PLL lock range. */
5416 	tg3_writephy(tp, 0x16, 0x8007);
5417 
5418 	/* SW reset */
5419 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5420 
5421 	/* Wait for reset to complete. */
5422 	/* XXX schedule_timeout() ... */
5423 	for (i = 0; i < 500; i++)
5424 		udelay(10);
5425 
5426 	/* Config mode; select PMA/Ch 1 regs. */
5427 	tg3_writephy(tp, 0x10, 0x8411);
5428 
5429 	/* Enable auto-lock and comdet, select txclk for tx. */
5430 	tg3_writephy(tp, 0x11, 0x0a10);
5431 
5432 	tg3_writephy(tp, 0x18, 0x00a0);
5433 	tg3_writephy(tp, 0x16, 0x41ff);
5434 
5435 	/* Assert and deassert POR. */
5436 	tg3_writephy(tp, 0x13, 0x0400);
5437 	udelay(40);
5438 	tg3_writephy(tp, 0x13, 0x0000);
5439 
5440 	tg3_writephy(tp, 0x11, 0x0a50);
5441 	udelay(40);
5442 	tg3_writephy(tp, 0x11, 0x0a10);
5443 
5444 	/* Wait for signal to stabilize */
5445 	/* XXX schedule_timeout() ... */
5446 	for (i = 0; i < 15000; i++)
5447 		udelay(10);
5448 
5449 	/* Deselect the channel register so we can read the PHYID
5450 	 * later.
5451 	 */
5452 	tg3_writephy(tp, 0x10, 0x8011);
5453 }
5454 
5455 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5456 {
5457 	u16 flowctrl;
5458 	bool current_link_up;
5459 	u32 sg_dig_ctrl, sg_dig_status;
5460 	u32 serdes_cfg, expected_sg_dig_ctrl;
5461 	int workaround, port_a;
5462 
5463 	serdes_cfg = 0;
5464 	expected_sg_dig_ctrl = 0;
5465 	workaround = 0;
5466 	port_a = 1;
5467 	current_link_up = false;
5468 
5469 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5470 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5471 		workaround = 1;
5472 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5473 			port_a = 0;
5474 
5475 		/* Preserve bits 0-11, 13, 14 for signal pre-emphasis
5476 		 * and bits 20-23 for the voltage regulator. */
5477 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5478 	}
5479 
5480 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5481 
5482 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5483 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5484 			if (workaround) {
5485 				u32 val = serdes_cfg;
5486 
5487 				if (port_a)
5488 					val |= 0xc010000;
5489 				else
5490 					val |= 0x4010000;
5491 				tw32_f(MAC_SERDES_CFG, val);
5492 			}
5493 
5494 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5495 		}
5496 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5497 			tg3_setup_flow_control(tp, 0, 0);
5498 			current_link_up = true;
5499 		}
5500 		goto out;
5501 	}
5502 
5503 	/* Want auto-negotiation.  */
5504 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5505 
5506 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5507 	if (flowctrl & ADVERTISE_1000XPAUSE)
5508 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5509 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5510 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5511 
5512 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5513 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5514 		    tp->serdes_counter &&
5515 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5516 				    MAC_STATUS_RCVD_CFG)) ==
5517 		     MAC_STATUS_PCS_SYNCED)) {
5518 			tp->serdes_counter--;
5519 			current_link_up = true;
5520 			goto out;
5521 		}
5522 restart_autoneg:
5523 		if (workaround)
5524 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5525 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5526 		udelay(5);
5527 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5528 
5529 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5530 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5531 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5532 				 MAC_STATUS_SIGNAL_DET)) {
5533 		sg_dig_status = tr32(SG_DIG_STATUS);
5534 		mac_status = tr32(MAC_STATUS);
5535 
5536 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5537 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5538 			u32 local_adv = 0, remote_adv = 0;
5539 
5540 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5541 				local_adv |= ADVERTISE_1000XPAUSE;
5542 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5543 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5544 
5545 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5546 				remote_adv |= LPA_1000XPAUSE;
5547 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5548 				remote_adv |= LPA_1000XPAUSE_ASYM;
5549 
5550 			tp->link_config.rmt_adv =
5551 					   mii_adv_to_ethtool_adv_x(remote_adv);
5552 
5553 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5554 			current_link_up = true;
5555 			tp->serdes_counter = 0;
5556 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5557 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5558 			if (tp->serdes_counter)
5559 				tp->serdes_counter--;
5560 			else {
5561 				if (workaround) {
5562 					u32 val = serdes_cfg;
5563 
5564 					if (port_a)
5565 						val |= 0xc010000;
5566 					else
5567 						val |= 0x4010000;
5568 
5569 					tw32_f(MAC_SERDES_CFG, val);
5570 				}
5571 
5572 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5573 				udelay(40);
5574 
5575 				/* Link parallel detection - link is up
5576 				 * only if we have PCS_SYNC and are not
5577 				 * receiving config code words. */
5578 				mac_status = tr32(MAC_STATUS);
5579 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5580 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5581 					tg3_setup_flow_control(tp, 0, 0);
5582 					current_link_up = true;
5583 					tp->phy_flags |=
5584 						TG3_PHYFLG_PARALLEL_DETECT;
5585 					tp->serdes_counter =
5586 						SERDES_PARALLEL_DET_TIMEOUT;
5587 				} else
5588 					goto restart_autoneg;
5589 			}
5590 		}
5591 	} else {
5592 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5593 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5594 	}
5595 
5596 out:
5597 	return current_link_up;
5598 }
5599 
5600 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5601 {
5602 	bool current_link_up = false;
5603 
5604 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5605 		goto out;
5606 
5607 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5608 		u32 txflags, rxflags;
5609 		int i;
5610 
5611 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5612 			u32 local_adv = 0, remote_adv = 0;
5613 
5614 			if (txflags & ANEG_CFG_PS1)
5615 				local_adv |= ADVERTISE_1000XPAUSE;
5616 			if (txflags & ANEG_CFG_PS2)
5617 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5618 
5619 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5620 				remote_adv |= LPA_1000XPAUSE;
5621 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5622 				remote_adv |= LPA_1000XPAUSE_ASYM;
5623 
5624 			tp->link_config.rmt_adv =
5625 					   mii_adv_to_ethtool_adv_x(remote_adv);
5626 
5627 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5628 
5629 			current_link_up = true;
5630 		}
5631 		for (i = 0; i < 30; i++) {
5632 			udelay(20);
5633 			tw32_f(MAC_STATUS,
5634 			       (MAC_STATUS_SYNC_CHANGED |
5635 				MAC_STATUS_CFG_CHANGED));
5636 			udelay(40);
5637 			if ((tr32(MAC_STATUS) &
5638 			     (MAC_STATUS_SYNC_CHANGED |
5639 			      MAC_STATUS_CFG_CHANGED)) == 0)
5640 				break;
5641 		}
5642 
5643 		mac_status = tr32(MAC_STATUS);
5644 		if (!current_link_up &&
5645 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5646 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5647 			current_link_up = true;
5648 	} else {
5649 		tg3_setup_flow_control(tp, 0, 0);
5650 
5651 		/* Forcing 1000FD link up. */
5652 		current_link_up = true;
5653 
5654 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5655 		udelay(40);
5656 
5657 		tw32_f(MAC_MODE, tp->mac_mode);
5658 		udelay(40);
5659 	}
5660 
5661 out:
5662 	return current_link_up;
5663 }
5664 
5665 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5666 {
5667 	u32 orig_pause_cfg;
5668 	u16 orig_active_speed;
5669 	u8 orig_active_duplex;
5670 	u32 mac_status;
5671 	bool current_link_up;
5672 	int i;
5673 
5674 	orig_pause_cfg = tp->link_config.active_flowctrl;
5675 	orig_active_speed = tp->link_config.active_speed;
5676 	orig_active_duplex = tp->link_config.active_duplex;
5677 
5678 	if (!tg3_flag(tp, HW_AUTONEG) &&
5679 	    tp->link_up &&
5680 	    tg3_flag(tp, INIT_COMPLETE)) {
5681 		mac_status = tr32(MAC_STATUS);
5682 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5683 			       MAC_STATUS_SIGNAL_DET |
5684 			       MAC_STATUS_CFG_CHANGED |
5685 			       MAC_STATUS_RCVD_CFG);
5686 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5687 				   MAC_STATUS_SIGNAL_DET)) {
5688 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5689 					    MAC_STATUS_CFG_CHANGED));
5690 			return 0;
5691 		}
5692 	}
5693 
5694 	tw32_f(MAC_TX_AUTO_NEG, 0);
5695 
5696 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5697 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5698 	tw32_f(MAC_MODE, tp->mac_mode);
5699 	udelay(40);
5700 
5701 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5702 		tg3_init_bcm8002(tp);
5703 
5704 	/* Enable link change events even when polling the serdes.  */
5705 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5706 	udelay(40);
5707 
5708 	current_link_up = false;
5709 	tp->link_config.rmt_adv = 0;
5710 	mac_status = tr32(MAC_STATUS);
5711 
5712 	if (tg3_flag(tp, HW_AUTONEG))
5713 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5714 	else
5715 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5716 
5717 	tp->napi[0].hw_status->status =
5718 		(SD_STATUS_UPDATED |
5719 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5720 
5721 	for (i = 0; i < 100; i++) {
5722 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5723 				    MAC_STATUS_CFG_CHANGED));
5724 		udelay(5);
5725 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5726 					 MAC_STATUS_CFG_CHANGED |
5727 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5728 			break;
5729 	}
5730 
5731 	mac_status = tr32(MAC_STATUS);
5732 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5733 		current_link_up = false;
5734 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5735 		    tp->serdes_counter == 0) {
5736 			tw32_f(MAC_MODE, (tp->mac_mode |
5737 					  MAC_MODE_SEND_CONFIGS));
5738 			udelay(1);
5739 			tw32_f(MAC_MODE, tp->mac_mode);
5740 		}
5741 	}
5742 
5743 	if (current_link_up) {
5744 		tp->link_config.active_speed = SPEED_1000;
5745 		tp->link_config.active_duplex = DUPLEX_FULL;
5746 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5747 				    LED_CTRL_LNKLED_OVERRIDE |
5748 				    LED_CTRL_1000MBPS_ON));
5749 	} else {
5750 		tp->link_config.active_speed = SPEED_UNKNOWN;
5751 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5752 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5753 				    LED_CTRL_LNKLED_OVERRIDE |
5754 				    LED_CTRL_TRAFFIC_OVERRIDE));
5755 	}
5756 
5757 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5758 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5759 		if (orig_pause_cfg != now_pause_cfg ||
5760 		    orig_active_speed != tp->link_config.active_speed ||
5761 		    orig_active_duplex != tp->link_config.active_duplex)
5762 			tg3_link_report(tp);
5763 	}
5764 
5765 	return 0;
5766 }
5767 
5768 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5769 {
5770 	int err = 0;
5771 	u32 bmsr, bmcr;
5772 	u16 current_speed = SPEED_UNKNOWN;
5773 	u8 current_duplex = DUPLEX_UNKNOWN;
5774 	bool current_link_up = false;
5775 	u32 local_adv, remote_adv, sgsr;
5776 
5777 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5778 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5779 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5780 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5781 
5782 		if (force_reset)
5783 			tg3_phy_reset(tp);
5784 
5785 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5786 
5787 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5788 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5789 		} else {
5790 			current_link_up = true;
5791 			if (sgsr & SERDES_TG3_SPEED_1000) {
5792 				current_speed = SPEED_1000;
5793 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5794 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5795 				current_speed = SPEED_100;
5796 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5797 			} else {
5798 				current_speed = SPEED_10;
5799 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5800 			}
5801 
5802 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5803 				current_duplex = DUPLEX_FULL;
5804 			else
5805 				current_duplex = DUPLEX_HALF;
5806 		}
5807 
5808 		tw32_f(MAC_MODE, tp->mac_mode);
5809 		udelay(40);
5810 
5811 		tg3_clear_mac_status(tp);
5812 
5813 		goto fiber_setup_done;
5814 	}
5815 
5816 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5817 	tw32_f(MAC_MODE, tp->mac_mode);
5818 	udelay(40);
5819 
5820 	tg3_clear_mac_status(tp);
5821 
5822 	if (force_reset)
5823 		tg3_phy_reset(tp);
5824 
5825 	tp->link_config.rmt_adv = 0;
5826 
5827 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5828 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5829 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5830 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5831 			bmsr |= BMSR_LSTATUS;
5832 		else
5833 			bmsr &= ~BMSR_LSTATUS;
5834 	}
5835 
5836 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5837 
5838 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5839 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5840 		/* do nothing, just check for link up at the end */
5841 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5842 		u32 adv, newadv;
5843 
5844 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5845 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5846 				 ADVERTISE_1000XPAUSE |
5847 				 ADVERTISE_1000XPSE_ASYM |
5848 				 ADVERTISE_SLCT);
5849 
5850 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5851 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5852 
5853 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5854 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5855 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5856 			tg3_writephy(tp, MII_BMCR, bmcr);
5857 
5858 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5859 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5860 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5861 
5862 			return err;
5863 		}
5864 	} else {
5865 		u32 new_bmcr;
5866 
5867 		bmcr &= ~BMCR_SPEED1000;
5868 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5869 
5870 		if (tp->link_config.duplex == DUPLEX_FULL)
5871 			new_bmcr |= BMCR_FULLDPLX;
5872 
5873 		if (new_bmcr != bmcr) {
5874 			/* BMCR_SPEED1000 is a reserved bit that needs
5875 			 * to be set on write.
5876 			 */
5877 			new_bmcr |= BMCR_SPEED1000;
5878 
5879 			/* Force a linkdown */
5880 			if (tp->link_up) {
5881 				u32 adv;
5882 
5883 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5884 				adv &= ~(ADVERTISE_1000XFULL |
5885 					 ADVERTISE_1000XHALF |
5886 					 ADVERTISE_SLCT);
5887 				tg3_writephy(tp, MII_ADVERTISE, adv);
5888 				tg3_writephy(tp, MII_BMCR, bmcr |
5889 							   BMCR_ANRESTART |
5890 							   BMCR_ANENABLE);
5891 				udelay(10);
5892 				tg3_carrier_off(tp);
5893 			}
5894 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5895 			bmcr = new_bmcr;
5896 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5897 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5898 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5899 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5900 					bmsr |= BMSR_LSTATUS;
5901 				else
5902 					bmsr &= ~BMSR_LSTATUS;
5903 			}
5904 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5905 		}
5906 	}
5907 
5908 	if (bmsr & BMSR_LSTATUS) {
5909 		current_speed = SPEED_1000;
5910 		current_link_up = true;
5911 		if (bmcr & BMCR_FULLDPLX)
5912 			current_duplex = DUPLEX_FULL;
5913 		else
5914 			current_duplex = DUPLEX_HALF;
5915 
5916 		local_adv = 0;
5917 		remote_adv = 0;
5918 
5919 		if (bmcr & BMCR_ANENABLE) {
5920 			u32 common;
5921 
5922 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5923 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5924 			common = local_adv & remote_adv;
5925 			if (common & (ADVERTISE_1000XHALF |
5926 				      ADVERTISE_1000XFULL)) {
5927 				if (common & ADVERTISE_1000XFULL)
5928 					current_duplex = DUPLEX_FULL;
5929 				else
5930 					current_duplex = DUPLEX_HALF;
5931 
5932 				tp->link_config.rmt_adv =
5933 					   mii_adv_to_ethtool_adv_x(remote_adv);
5934 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5935 				/* Link is up via parallel detect */
5936 			} else {
5937 				current_link_up = false;
5938 			}
5939 		}
5940 	}
5941 
5942 fiber_setup_done:
5943 	if (current_link_up && current_duplex == DUPLEX_FULL)
5944 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5945 
5946 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5947 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5948 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5949 
5950 	tw32_f(MAC_MODE, tp->mac_mode);
5951 	udelay(40);
5952 
5953 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5954 
5955 	tp->link_config.active_speed = current_speed;
5956 	tp->link_config.active_duplex = current_duplex;
5957 
5958 	tg3_test_and_report_link_chg(tp, current_link_up);
5959 	return err;
5960 }
5961 
5962 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5963 {
5964 	if (tp->serdes_counter) {
5965 		/* Give autoneg time to complete. */
5966 		tp->serdes_counter--;
5967 		return;
5968 	}
5969 
5970 	if (!tp->link_up &&
5971 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5972 		u32 bmcr;
5973 
5974 		tg3_readphy(tp, MII_BMCR, &bmcr);
5975 		if (bmcr & BMCR_ANENABLE) {
5976 			u32 phy1, phy2;
5977 
5978 			/* Select shadow register 0x1f */
5979 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5980 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5981 
5982 			/* Select expansion interrupt status register */
5983 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5984 					 MII_TG3_DSP_EXP1_INT_STAT);
5985 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5986 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5987 
5988 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5989 				/* We have signal detect and are not receiving
5990 				 * config code words, so the link is up by
5991 				 * parallel detection.
5992 				 */
5993 
5994 				bmcr &= ~BMCR_ANENABLE;
5995 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5996 				tg3_writephy(tp, MII_BMCR, bmcr);
5997 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5998 			}
5999 		}
6000 	} else if (tp->link_up &&
6001 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6002 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6003 		u32 phy2;
6004 
6005 		/* Select expansion interrupt status register */
6006 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6007 				 MII_TG3_DSP_EXP1_INT_STAT);
6008 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6009 		if (phy2 & 0x20) {
6010 			u32 bmcr;
6011 
6012 			/* Config code words received, turn on autoneg. */
6013 			tg3_readphy(tp, MII_BMCR, &bmcr);
6014 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6015 
6016 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6017 
6018 		}
6019 	}
6020 }
6021 
6022 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6023 {
6024 	u32 val;
6025 	int err;
6026 
6027 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6028 		err = tg3_setup_fiber_phy(tp, force_reset);
6029 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6030 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6031 	else
6032 		err = tg3_setup_copper_phy(tp, force_reset);
6033 
6034 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6035 		u32 scale;
6036 
6037 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6038 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6039 			scale = 65;
6040 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6041 			scale = 6;
6042 		else
6043 			scale = 12;
6044 
6045 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6046 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6047 		tw32(GRC_MISC_CFG, val);
6048 	}
6049 
6050 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6051 	      (6 << TX_LENGTHS_IPG_SHIFT);
6052 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6053 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6054 		val |= tr32(MAC_TX_LENGTHS) &
6055 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6056 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6057 
6058 	if (tp->link_config.active_speed == SPEED_1000 &&
6059 	    tp->link_config.active_duplex == DUPLEX_HALF)
6060 		tw32(MAC_TX_LENGTHS, val |
6061 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6062 	else
6063 		tw32(MAC_TX_LENGTHS, val |
6064 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6065 
6066 	if (!tg3_flag(tp, 5705_PLUS)) {
6067 		if (tp->link_up) {
6068 			tw32(HOSTCC_STAT_COAL_TICKS,
6069 			     tp->coal.stats_block_coalesce_usecs);
6070 		} else {
6071 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6072 		}
6073 	}
6074 
6075 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6076 		val = tr32(PCIE_PWR_MGMT_THRESH);
6077 		if (!tp->link_up)
6078 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6079 			      tp->pwrmgmt_thresh;
6080 		else
6081 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6082 		tw32(PCIE_PWR_MGMT_THRESH, val);
6083 	}
6084 
6085 	return err;
6086 }
6087 
6088 /* tp->lock must be held */
6089 static u64 tg3_refclk_read(struct tg3 *tp)
6090 {
6091 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6092 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6093 }
6094 
6095 /* tp->lock must be held */
6096 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6097 {
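	/* Stop the reference clock while the two 32-bit halves are
	 * written so the counter cannot advance between the writes.
	 */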
6098 	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6099 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6100 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6101 	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6102 }
6103 
6104 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6105 static inline void tg3_full_unlock(struct tg3 *tp);
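
/* Reported to user space via the ETHTOOL_GET_TS_INFO request, e.g.
 * "ethtool -T eth0" (illustrative device name).
 */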
6106 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6107 {
6108 	struct tg3 *tp = netdev_priv(dev);
6109 
6110 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6111 				SOF_TIMESTAMPING_RX_SOFTWARE |
6112 				SOF_TIMESTAMPING_SOFTWARE;
6113 
6114 	if (tg3_flag(tp, PTP_CAPABLE)) {
6115 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6116 					SOF_TIMESTAMPING_RX_HARDWARE |
6117 					SOF_TIMESTAMPING_RAW_HARDWARE;
6118 	}
6119 
6120 	if (tp->ptp_clock)
6121 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6122 	else
6123 		info->phc_index = -1;
6124 
6125 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6126 
6127 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6128 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6129 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6130 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6131 	return 0;
6132 }
6133 
6134 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6135 {
6136 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6137 	bool neg_adj = false;
6138 	u32 correction = 0;
6139 
6140 	if (ppb < 0) {
6141 		neg_adj = true;
6142 		ppb = -ppb;
6143 	}
6144 
6145 	/* Frequency adjustment is performed using hardware with a 24 bit
6146 	 * accumulator and a programmable correction value. On each clk, the
6147 	 * correction value gets added to the accumulator and when it
6148 	 * overflows, the time counter is incremented/decremented.
6149 	 *
6150 	 * So conversion from ppb to correction value is
6151 	 *		ppb * (1 << 24) / 1000000000
6152 	 */
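	/* Worked example: ppb = 1000000 (+1000 ppm) gives
	 * correction = 1000000 * 16777216 / 1000000000 = 16777, so the
	 * 24-bit accumulator overflows about once every 1000 clocks.
	 */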
6153 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6154 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6155 
6156 	tg3_full_lock(tp, 0);
6157 
6158 	if (correction)
6159 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6160 		     TG3_EAV_REF_CLK_CORRECT_EN |
6161 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6162 	else
6163 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6164 
6165 	tg3_full_unlock(tp);
6166 
6167 	return 0;
6168 }
6169 
6170 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6171 {
6172 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6173 
6174 	tg3_full_lock(tp, 0);
6175 	tp->ptp_adjust += delta;
6176 	tg3_full_unlock(tp);
6177 
6178 	return 0;
6179 }
6180 
6181 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6182 {
6183 	u64 ns;
6184 	u32 remainder;
6185 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186 
6187 	tg3_full_lock(tp, 0);
6188 	ns = tg3_refclk_read(tp);
6189 	ns += tp->ptp_adjust;
6190 	tg3_full_unlock(tp);
6191 
6192 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6193 	ts->tv_nsec = remainder;
6194 
6195 	return 0;
6196 }
6197 
6198 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6199 			   const struct timespec *ts)
6200 {
6201 	u64 ns;
6202 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6203 
6204 	ns = timespec_to_ns(ts);
6205 
6206 	tg3_full_lock(tp, 0);
6207 	tg3_refclk_write(tp, ns);
6208 	tp->ptp_adjust = 0;
6209 	tg3_full_unlock(tp);
6210 
6211 	return 0;
6212 }
6213 
6214 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6215 			  struct ptp_clock_request *rq, int on)
6216 {
6217 	return -EOPNOTSUPP;
6218 }
6219 
6220 static const struct ptp_clock_info tg3_ptp_caps = {
6221 	.owner		= THIS_MODULE,
6222 	.name		= "tg3 clock",
6223 	.max_adj	= 250000000,
6224 	.n_alarm	= 0,
6225 	.n_ext_ts	= 0,
6226 	.n_per_out	= 0,
6227 	.pps		= 0,
6228 	.adjfreq	= tg3_ptp_adjfreq,
6229 	.adjtime	= tg3_ptp_adjtime,
6230 	.gettime	= tg3_ptp_gettime,
6231 	.settime	= tg3_ptp_settime,
6232 	.enable		= tg3_ptp_enable,
6233 };
6234 
6235 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6236 				     struct skb_shared_hwtstamps *timestamp)
6237 {
6238 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6239 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6240 					   tp->ptp_adjust);
6241 }
6242 
6243 /* tp->lock must be held */
6244 static void tg3_ptp_init(struct tg3 *tp)
6245 {
6246 	if (!tg3_flag(tp, PTP_CAPABLE))
6247 		return;
6248 
6249 	/* Initialize the hardware clock to the system time. */
6250 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6251 	tp->ptp_adjust = 0;
6252 	tp->ptp_info = tg3_ptp_caps;
6253 }
6254 
6255 /* tp->lock must be held */
6256 static void tg3_ptp_resume(struct tg3 *tp)
6257 {
6258 	if (!tg3_flag(tp, PTP_CAPABLE))
6259 		return;
6260 
6261 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6262 	tp->ptp_adjust = 0;
6263 }
6264 
6265 static void tg3_ptp_fini(struct tg3 *tp)
6266 {
6267 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6268 		return;
6269 
6270 	ptp_clock_unregister(tp->ptp_clock);
6271 	tp->ptp_clock = NULL;
6272 	tp->ptp_adjust = 0;
6273 }
6274 
6275 static inline int tg3_irq_sync(struct tg3 *tp)
6276 {
6277 	return tp->irq_sync;
6278 }
6279 
6280 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6281 {
6282 	int i;
6283 
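	/* Bias dst by the register offset so each value lands at its
	 * register-offset position within the caller's dump buffer.
	 */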
6284 	dst = (u32 *)((u8 *)dst + off);
6285 	for (i = 0; i < len; i += sizeof(u32))
6286 		*dst++ = tr32(off + i);
6287 }
6288 
6289 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6290 {
6291 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6292 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6293 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6294 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6295 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6296 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6297 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6298 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6299 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6300 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6301 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6302 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6303 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6304 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6305 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6306 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6307 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6308 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6309 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6310 
6311 	if (tg3_flag(tp, SUPPORT_MSIX))
6312 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6313 
6314 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6315 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6316 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6317 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6318 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6319 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6320 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6321 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6322 
6323 	if (!tg3_flag(tp, 5705_PLUS)) {
6324 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6325 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6326 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6327 	}
6328 
6329 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6330 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6331 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6332 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6333 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6334 
6335 	if (tg3_flag(tp, NVRAM))
6336 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6337 }
6338 
6339 static void tg3_dump_state(struct tg3 *tp)
6340 {
6341 	int i;
6342 	u32 *regs;
6343 
6344 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6345 	if (!regs)
6346 		return;
6347 
6348 	if (tg3_flag(tp, PCI_EXPRESS)) {
6349 		/* Read up to but not including private PCI registers */
6350 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6351 			regs[i / sizeof(u32)] = tr32(i);
6352 	} else
6353 		tg3_dump_legacy_regs(tp, regs);
6354 
6355 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6356 		if (!regs[i + 0] && !regs[i + 1] &&
6357 		    !regs[i + 2] && !regs[i + 3])
6358 			continue;
6359 
6360 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6361 			   i * 4,
6362 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6363 	}
6364 
6365 	kfree(regs);
6366 
6367 	for (i = 0; i < tp->irq_cnt; i++) {
6368 		struct tg3_napi *tnapi = &tp->napi[i];
6369 
6370 		/* SW status block */
6371 		netdev_err(tp->dev,
6372 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6373 			   i,
6374 			   tnapi->hw_status->status,
6375 			   tnapi->hw_status->status_tag,
6376 			   tnapi->hw_status->rx_jumbo_consumer,
6377 			   tnapi->hw_status->rx_consumer,
6378 			   tnapi->hw_status->rx_mini_consumer,
6379 			   tnapi->hw_status->idx[0].rx_producer,
6380 			   tnapi->hw_status->idx[0].tx_consumer);
6381 
6382 		netdev_err(tp->dev,
6383 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6384 			   i,
6385 			   tnapi->last_tag, tnapi->last_irq_tag,
6386 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6387 			   tnapi->rx_rcb_ptr,
6388 			   tnapi->prodring.rx_std_prod_idx,
6389 			   tnapi->prodring.rx_std_cons_idx,
6390 			   tnapi->prodring.rx_jmb_prod_idx,
6391 			   tnapi->prodring.rx_jmb_cons_idx);
6392 	}
6393 }
6394 
6395 /* This is called whenever we suspect that the system chipset is re-
6396  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6397  * is bogus tx completions. We try to recover by setting the
6398  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6399  * in the workqueue.
6400  */
6401 static void tg3_tx_recover(struct tg3 *tp)
6402 {
6403 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6404 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6405 
6406 	netdev_warn(tp->dev,
6407 		    "The system may be re-ordering memory-mapped I/O "
6408 		    "cycles to the network device, attempting to recover. "
6409 		    "Please report the problem to the driver maintainer "
6410 		    "and include system chipset information.\n");
6411 
6412 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6413 }
6414 
6415 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6416 {
6417 	/* Tell compiler to fetch tx indices from memory. */
6418 	barrier();
6419 	return tnapi->tx_pending -
6420 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6421 }
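
/* Ring-index sketch, assuming TG3_TX_RING_SIZE == 512: the masked
 * difference handles wraparound, e.g. tx_prod = 5 and tx_cons = 500
 * give (5 - 500) & 511 = 17 descriptors in flight, so tg3_tx_avail()
 * returns tx_pending - 17.
 */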
6422 
6423 /* Tigon3 never reports partial packet sends.  So we do not
6424  * need special logic to handle SKBs that have not had all
6425  * of their frags sent yet, like SunGEM does.
6426  */
6427 static void tg3_tx(struct tg3_napi *tnapi)
6428 {
6429 	struct tg3 *tp = tnapi->tp;
6430 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6431 	u32 sw_idx = tnapi->tx_cons;
6432 	struct netdev_queue *txq;
6433 	int index = tnapi - tp->napi;
6434 	unsigned int pkts_compl = 0, bytes_compl = 0;
6435 
6436 	if (tg3_flag(tp, ENABLE_TSS))
6437 		index--;
6438 
6439 	txq = netdev_get_tx_queue(tp->dev, index);
6440 
6441 	while (sw_idx != hw_idx) {
6442 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6443 		struct sk_buff *skb = ri->skb;
6444 		int i, tx_bug = 0;
6445 
6446 		if (unlikely(skb == NULL)) {
6447 			tg3_tx_recover(tp);
6448 			return;
6449 		}
6450 
6451 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6452 			struct skb_shared_hwtstamps timestamp;
6453 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6454 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6455 
6456 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6457 
6458 			skb_tstamp_tx(skb, &timestamp);
6459 		}
6460 
6461 		pci_unmap_single(tp->pdev,
6462 				 dma_unmap_addr(ri, mapping),
6463 				 skb_headlen(skb),
6464 				 PCI_DMA_TODEVICE);
6465 
6466 		ri->skb = NULL;
6467 
6468 		while (ri->fragmented) {
6469 			ri->fragmented = false;
6470 			sw_idx = NEXT_TX(sw_idx);
6471 			ri = &tnapi->tx_buffers[sw_idx];
6472 		}
6473 
6474 		sw_idx = NEXT_TX(sw_idx);
6475 
6476 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6477 			ri = &tnapi->tx_buffers[sw_idx];
6478 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6479 				tx_bug = 1;
6480 
6481 			pci_unmap_page(tp->pdev,
6482 				       dma_unmap_addr(ri, mapping),
6483 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6484 				       PCI_DMA_TODEVICE);
6485 
6486 			while (ri->fragmented) {
6487 				ri->fragmented = false;
6488 				sw_idx = NEXT_TX(sw_idx);
6489 				ri = &tnapi->tx_buffers[sw_idx];
6490 			}
6491 
6492 			sw_idx = NEXT_TX(sw_idx);
6493 		}
6494 
6495 		pkts_compl++;
6496 		bytes_compl += skb->len;
6497 
6498 		dev_kfree_skb(skb);
6499 
6500 		if (unlikely(tx_bug)) {
6501 			tg3_tx_recover(tp);
6502 			return;
6503 		}
6504 	}
6505 
6506 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6507 
6508 	tnapi->tx_cons = sw_idx;
6509 
6510 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6511 	 * before checking for netif_queue_stopped().  Without the
6512 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6513 	 * will miss it and cause the queue to be stopped forever.
6514 	 */
6515 	smp_mb();
6516 
6517 	if (unlikely(netif_tx_queue_stopped(txq) &&
6518 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6519 		__netif_tx_lock(txq, smp_processor_id());
6520 		if (netif_tx_queue_stopped(txq) &&
6521 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6522 			netif_tx_wake_queue(txq);
6523 		__netif_tx_unlock(txq);
6524 	}
6525 }
6526 
6527 static void tg3_frag_free(bool is_frag, void *data)
6528 {
6529 	if (is_frag)
6530 		put_page(virt_to_head_page(data));
6531 	else
6532 		kfree(data);
6533 }
6534 
6535 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6536 {
6537 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6538 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6539 
6540 	if (!ri->data)
6541 		return;
6542 
6543 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6544 			 map_sz, PCI_DMA_FROMDEVICE);
6545 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6546 	ri->data = NULL;
6547 }
6548 
6549 
6550 /* Returns size of skb allocated or < 0 on error.
6551  *
6552  * We only need to fill in the address because the other members
6553  * of the RX descriptor are invariant, see tg3_init_rings.
6554  *
6555  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6556  * posting buffers we only dirty the first cache line of the RX
6557  * descriptor (containing the address).  Whereas for the RX status
6558  * buffers the cpu only reads the last cacheline of the RX descriptor
6559  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6560  */
6561 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6562 			     u32 opaque_key, u32 dest_idx_unmasked,
6563 			     unsigned int *frag_size)
6564 {
6565 	struct tg3_rx_buffer_desc *desc;
6566 	struct ring_info *map;
6567 	u8 *data;
6568 	dma_addr_t mapping;
6569 	int skb_size, data_size, dest_idx;
6570 
6571 	switch (opaque_key) {
6572 	case RXD_OPAQUE_RING_STD:
6573 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6574 		desc = &tpr->rx_std[dest_idx];
6575 		map = &tpr->rx_std_buffers[dest_idx];
6576 		data_size = tp->rx_pkt_map_sz;
6577 		break;
6578 
6579 	case RXD_OPAQUE_RING_JUMBO:
6580 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6581 		desc = &tpr->rx_jmb[dest_idx].std;
6582 		map = &tpr->rx_jmb_buffers[dest_idx];
6583 		data_size = TG3_RX_JMB_MAP_SZ;
6584 		break;
6585 
6586 	default:
6587 		return -EINVAL;
6588 	}
6589 
6590 	/* Do not overwrite any of the map or rp information
6591 	 * until we are sure we can commit to a new buffer.
6592 	 *
6593 	 * Callers depend upon this behavior and assume that
6594 	 * we leave everything unchanged if we fail.
6595 	 */
6596 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6597 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6598 	if (skb_size <= PAGE_SIZE) {
6599 		data = netdev_alloc_frag(skb_size);
6600 		*frag_size = skb_size;
6601 	} else {
6602 		data = kmalloc(skb_size, GFP_ATOMIC);
6603 		*frag_size = 0;
6604 	}
6605 	if (!data)
6606 		return -ENOMEM;
6607 
6608 	mapping = pci_map_single(tp->pdev,
6609 				 data + TG3_RX_OFFSET(tp),
6610 				 data_size,
6611 				 PCI_DMA_FROMDEVICE);
6612 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6613 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6614 		return -EIO;
6615 	}
6616 
6617 	map->data = data;
6618 	dma_unmap_addr_set(map, mapping, mapping);
6619 
6620 	desc->addr_hi = ((u64)mapping >> 32);
6621 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6622 
6623 	return data_size;
6624 }
6625 
6626 /* We only need to move over in the address because the other
6627  * members of the RX descriptor are invariant.  See notes above
6628  * tg3_alloc_rx_data for full details.
6629  */
6630 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6631 			   struct tg3_rx_prodring_set *dpr,
6632 			   u32 opaque_key, int src_idx,
6633 			   u32 dest_idx_unmasked)
6634 {
6635 	struct tg3 *tp = tnapi->tp;
6636 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6637 	struct ring_info *src_map, *dest_map;
6638 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6639 	int dest_idx;
6640 
6641 	switch (opaque_key) {
6642 	case RXD_OPAQUE_RING_STD:
6643 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6644 		dest_desc = &dpr->rx_std[dest_idx];
6645 		dest_map = &dpr->rx_std_buffers[dest_idx];
6646 		src_desc = &spr->rx_std[src_idx];
6647 		src_map = &spr->rx_std_buffers[src_idx];
6648 		break;
6649 
6650 	case RXD_OPAQUE_RING_JUMBO:
6651 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6652 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6653 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6654 		src_desc = &spr->rx_jmb[src_idx].std;
6655 		src_map = &spr->rx_jmb_buffers[src_idx];
6656 		break;
6657 
6658 	default:
6659 		return;
6660 	}
6661 
6662 	dest_map->data = src_map->data;
6663 	dma_unmap_addr_set(dest_map, mapping,
6664 			   dma_unmap_addr(src_map, mapping));
6665 	dest_desc->addr_hi = src_desc->addr_hi;
6666 	dest_desc->addr_lo = src_desc->addr_lo;
6667 
6668 	/* Ensure that the update to the skb happens after the physical
6669 	 * addresses have been transferred to the new BD location.
6670 	 */
6671 	smp_wmb();
6672 
6673 	src_map->data = NULL;
6674 }
6675 
6676 /* The RX ring scheme is composed of multiple rings which post fresh
6677  * buffers to the chip, and one special ring the chip uses to report
6678  * status back to the host.
6679  *
6680  * The special ring reports the status of received packets to the
6681  * host.  The chip does not write into the original descriptor the
6682  * RX buffer was obtained from.  The chip simply takes the original
6683  * descriptor as provided by the host, updates the status and length
6684  * field, then writes this into the next status ring entry.
6685  *
6686  * Each ring the host uses to post buffers to the chip is described
6687  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6688  * it is first placed into the on-chip ram.  When the packet's length
6689  * is known, it walks down the TG3_BDINFO entries to select the ring.
6690  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6691  * which is within the range of the new packet's length is chosen.
6692  *
6693  * The "separate ring for rx status" scheme may sound queer, but it makes
6694  * sense from a cache coherency perspective.  If only the host writes
6695  * to the buffer post rings, and only the chip writes to the rx status
6696  * rings, then cache lines never move beyond shared-modified state.
6697  * If both the host and chip were to write into the same ring, cache line
6698  * eviction could occur since both entities want it in an exclusive state.
6699  */
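
/* Simplified sketch of the scheme described above (illustrative only):
 *
 *   host:  posts fresh buffers ---------------> rx_std / rx_jmb rings
 *   chip:  DMAs packet, writes status + len --> status (return) ring
 *   host:  tg3_rx() walks the status ring, builds skbs, reposts buffers
 */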
6700 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6701 {
6702 	struct tg3 *tp = tnapi->tp;
6703 	u32 work_mask, rx_std_posted = 0;
6704 	u32 std_prod_idx, jmb_prod_idx;
6705 	u32 sw_idx = tnapi->rx_rcb_ptr;
6706 	u16 hw_idx;
6707 	int received;
6708 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6709 
6710 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6711 	/*
6712 	 * We need to order the read of hw_idx and the read of
6713 	 * the opaque cookie.
6714 	 */
6715 	rmb();
6716 	work_mask = 0;
6717 	received = 0;
6718 	std_prod_idx = tpr->rx_std_prod_idx;
6719 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6720 	while (sw_idx != hw_idx && budget > 0) {
6721 		struct ring_info *ri;
6722 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6723 		unsigned int len;
6724 		struct sk_buff *skb;
6725 		dma_addr_t dma_addr;
6726 		u32 opaque_key, desc_idx, *post_ptr;
6727 		u8 *data;
6728 		u64 tstamp = 0;
6729 
6730 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6731 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6732 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6733 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6734 			dma_addr = dma_unmap_addr(ri, mapping);
6735 			data = ri->data;
6736 			post_ptr = &std_prod_idx;
6737 			rx_std_posted++;
6738 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6739 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6740 			dma_addr = dma_unmap_addr(ri, mapping);
6741 			data = ri->data;
6742 			post_ptr = &jmb_prod_idx;
6743 		} else
6744 			goto next_pkt_nopost;
6745 
6746 		work_mask |= opaque_key;
6747 
6748 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6749 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6750 		drop_it:
6751 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6752 				       desc_idx, *post_ptr);
6753 		drop_it_no_recycle:
6754 			/* Other statistics kept track of by card. */
6755 			tp->rx_dropped++;
6756 			goto next_pkt;
6757 		}
6758 
6759 		prefetch(data + TG3_RX_OFFSET(tp));
6760 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6761 		      ETH_FCS_LEN;
6762 
6763 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6764 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6765 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6766 		     RXD_FLAG_PTPSTAT_PTPV2) {
6767 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6768 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6769 		}
6770 
6771 		if (len > TG3_RX_COPY_THRESH(tp)) {
6772 			int skb_size;
6773 			unsigned int frag_size;
6774 
6775 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6776 						    *post_ptr, &frag_size);
6777 			if (skb_size < 0)
6778 				goto drop_it;
6779 
6780 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6781 					 PCI_DMA_FROMDEVICE);
6782 
6783 			skb = build_skb(data, frag_size);
6784 			if (!skb) {
6785 				tg3_frag_free(frag_size != 0, data);
6786 				goto drop_it_no_recycle;
6787 			}
6788 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6789 			/* Ensure that the update to the data happens
6790 			 * after the usage of the old DMA mapping.
6791 			 */
6792 			smp_wmb();
6793 
6794 			ri->data = NULL;
6795 
6796 		} else {
6797 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6798 				       desc_idx, *post_ptr);
6799 
6800 			skb = netdev_alloc_skb(tp->dev,
6801 					       len + TG3_RAW_IP_ALIGN);
6802 			if (skb == NULL)
6803 				goto drop_it_no_recycle;
6804 
6805 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6806 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6807 			memcpy(skb->data,
6808 			       data + TG3_RX_OFFSET(tp),
6809 			       len);
6810 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6811 		}
6812 
6813 		skb_put(skb, len);
6814 		if (tstamp)
6815 			tg3_hwclock_to_timestamp(tp, tstamp,
6816 						 skb_hwtstamps(skb));
6817 
6818 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6819 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6820 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6821 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6822 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6823 		else
6824 			skb_checksum_none_assert(skb);
6825 
6826 		skb->protocol = eth_type_trans(skb, tp->dev);
6827 
6828 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6829 		    skb->protocol != htons(ETH_P_8021Q)) {
6830 			dev_kfree_skb(skb);
6831 			goto drop_it_no_recycle;
6832 		}
6833 
6834 		if (desc->type_flags & RXD_FLAG_VLAN &&
6835 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6836 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6837 					       desc->err_vlan & RXD_VLAN_MASK);
6838 
6839 		napi_gro_receive(&tnapi->napi, skb);
6840 
6841 		received++;
6842 		budget--;
6843 
6844 next_pkt:
6845 		(*post_ptr)++;
6846 
6847 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6848 			tpr->rx_std_prod_idx = std_prod_idx &
6849 					       tp->rx_std_ring_mask;
6850 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6851 				     tpr->rx_std_prod_idx);
6852 			work_mask &= ~RXD_OPAQUE_RING_STD;
6853 			rx_std_posted = 0;
6854 		}
6855 next_pkt_nopost:
6856 		sw_idx++;
6857 		sw_idx &= tp->rx_ret_ring_mask;
6858 
6859 		/* Refresh hw_idx to see if there is new work */
6860 		if (sw_idx == hw_idx) {
6861 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6862 			rmb();
6863 		}
6864 	}
6865 
6866 	/* ACK the status ring. */
6867 	tnapi->rx_rcb_ptr = sw_idx;
6868 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6869 
6870 	/* Refill RX ring(s). */
6871 	if (!tg3_flag(tp, ENABLE_RSS)) {
6872 		/* Sync BD data before updating mailbox */
6873 		wmb();
6874 
6875 		if (work_mask & RXD_OPAQUE_RING_STD) {
6876 			tpr->rx_std_prod_idx = std_prod_idx &
6877 					       tp->rx_std_ring_mask;
6878 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6879 				     tpr->rx_std_prod_idx);
6880 		}
6881 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6882 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6883 					       tp->rx_jmb_ring_mask;
6884 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6885 				     tpr->rx_jmb_prod_idx);
6886 		}
6887 		mmiowb();
6888 	} else if (work_mask) {
6889 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6890 		 * updated before the producer indices can be updated.
6891 		 */
6892 		smp_wmb();
6893 
6894 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6895 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6896 
6897 		if (tnapi != &tp->napi[1]) {
6898 			tp->rx_refill = true;
6899 			napi_schedule(&tp->napi[1].napi);
6900 		}
6901 	}
6902 
6903 	return received;
6904 }
6905 
6906 static void tg3_poll_link(struct tg3 *tp)
6907 {
6908 	/* handle link change and other phy events */
6909 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6910 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6911 
6912 		if (sblk->status & SD_STATUS_LINK_CHG) {
6913 			sblk->status = SD_STATUS_UPDATED |
6914 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6915 			spin_lock(&tp->lock);
6916 			if (tg3_flag(tp, USE_PHYLIB)) {
6917 				tw32_f(MAC_STATUS,
6918 				     (MAC_STATUS_SYNC_CHANGED |
6919 				      MAC_STATUS_CFG_CHANGED |
6920 				      MAC_STATUS_MI_COMPLETION |
6921 				      MAC_STATUS_LNKSTATE_CHANGED));
6922 				udelay(40);
6923 			} else
6924 				tg3_setup_phy(tp, false);
6925 			spin_unlock(&tp->lock);
6926 		}
6927 	}
6928 }
6929 
6930 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6931 				struct tg3_rx_prodring_set *dpr,
6932 				struct tg3_rx_prodring_set *spr)
6933 {
6934 	u32 si, di, cpycnt, src_prod_idx;
6935 	int i, err = 0;
6936 
6937 	while (1) {
6938 		src_prod_idx = spr->rx_std_prod_idx;
6939 
6940 		/* Make sure updates to the rx_std_buffers[] entries and the
6941 		 * standard producer index are seen in the correct order.
6942 		 */
6943 		smp_rmb();
6944 
6945 		if (spr->rx_std_cons_idx == src_prod_idx)
6946 			break;
6947 
6948 		if (spr->rx_std_cons_idx < src_prod_idx)
6949 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6950 		else
6951 			cpycnt = tp->rx_std_ring_mask + 1 -
6952 				 spr->rx_std_cons_idx;
6953 
6954 		cpycnt = min(cpycnt,
6955 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6956 
6957 		si = spr->rx_std_cons_idx;
6958 		di = dpr->rx_std_prod_idx;
6959 
6960 		for (i = di; i < di + cpycnt; i++) {
6961 			if (dpr->rx_std_buffers[i].data) {
6962 				cpycnt = i - di;
6963 				err = -ENOSPC;
6964 				break;
6965 			}
6966 		}
6967 
6968 		if (!cpycnt)
6969 			break;
6970 
6971 		/* Ensure that updates to the rx_std_buffers ring and the
6972 		 * shadowed hardware producer ring from tg3_recycle_skb() are
6973 		 * ordered correctly WRT the skb check above.
6974 		 */
6975 		smp_rmb();
6976 
6977 		memcpy(&dpr->rx_std_buffers[di],
6978 		       &spr->rx_std_buffers[si],
6979 		       cpycnt * sizeof(struct ring_info));
6980 
6981 		for (i = 0; i < cpycnt; i++, di++, si++) {
6982 			struct tg3_rx_buffer_desc *sbd, *dbd;
6983 			sbd = &spr->rx_std[si];
6984 			dbd = &dpr->rx_std[di];
6985 			dbd->addr_hi = sbd->addr_hi;
6986 			dbd->addr_lo = sbd->addr_lo;
6987 		}
6988 
6989 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6990 				       tp->rx_std_ring_mask;
6991 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6992 				       tp->rx_std_ring_mask;
6993 	}
6994 
6995 	while (1) {
6996 		src_prod_idx = spr->rx_jmb_prod_idx;
6997 
6998 		/* Make sure updates to the rx_jmb_buffers[] entries and
6999 		 * the jumbo producer index are seen in the correct order.
7000 		 */
7001 		smp_rmb();
7002 
7003 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7004 			break;
7005 
7006 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7007 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7008 		else
7009 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7010 				 spr->rx_jmb_cons_idx;
7011 
7012 		cpycnt = min(cpycnt,
7013 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7014 
7015 		si = spr->rx_jmb_cons_idx;
7016 		di = dpr->rx_jmb_prod_idx;
7017 
7018 		for (i = di; i < di + cpycnt; i++) {
7019 			if (dpr->rx_jmb_buffers[i].data) {
7020 				cpycnt = i - di;
7021 				err = -ENOSPC;
7022 				break;
7023 			}
7024 		}
7025 
7026 		if (!cpycnt)
7027 			break;
7028 
7029 		/* Ensure that updates to the rx_jmb_buffers ring and the
7030 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7031 		 * ordered correctly WRT the skb check above.
7032 		 */
7033 		smp_rmb();
7034 
7035 		memcpy(&dpr->rx_jmb_buffers[di],
7036 		       &spr->rx_jmb_buffers[si],
7037 		       cpycnt * sizeof(struct ring_info));
7038 
7039 		for (i = 0; i < cpycnt; i++, di++, si++) {
7040 			struct tg3_rx_buffer_desc *sbd, *dbd;
7041 			sbd = &spr->rx_jmb[si].std;
7042 			dbd = &dpr->rx_jmb[di].std;
7043 			dbd->addr_hi = sbd->addr_hi;
7044 			dbd->addr_lo = sbd->addr_lo;
7045 		}
7046 
7047 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7048 				       tp->rx_jmb_ring_mask;
7049 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7050 				       tp->rx_jmb_ring_mask;
7051 	}
7052 
7053 	return err;
7054 }
7055 
7056 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7057 {
7058 	struct tg3 *tp = tnapi->tp;
7059 
7060 	/* run TX completion thread */
7061 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7062 		tg3_tx(tnapi);
7063 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7064 			return work_done;
7065 	}
7066 
7067 	if (!tnapi->rx_rcb_prod_idx)
7068 		return work_done;
7069 
7070 	/* run RX thread, within the bounds set by NAPI.
7071 	 * All RX "locking" is done by ensuring outside
7072 	 * code synchronizes with tg3->napi.poll()
7073 	 */
7074 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7075 		work_done += tg3_rx(tnapi, budget - work_done);
7076 
7077 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7078 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7079 		int i, err = 0;
7080 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7081 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7082 
7083 		tp->rx_refill = false;
7084 		for (i = 1; i <= tp->rxq_cnt; i++)
7085 			err |= tg3_rx_prodring_xfer(tp, dpr,
7086 						    &tp->napi[i].prodring);
7087 
7088 		wmb();
7089 
7090 		if (std_prod_idx != dpr->rx_std_prod_idx)
7091 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7092 				     dpr->rx_std_prod_idx);
7093 
7094 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7095 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7096 				     dpr->rx_jmb_prod_idx);
7097 
7098 		mmiowb();
7099 
7100 		if (err)
7101 			tw32_f(HOSTCC_MODE, tp->coal_now);
7102 	}
7103 
7104 	return work_done;
7105 }
7106 
7107 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7108 {
7109 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7110 		schedule_work(&tp->reset_task);
7111 }
7112 
7113 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7114 {
7115 	cancel_work_sync(&tp->reset_task);
7116 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7117 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7118 }
7119 
7120 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7121 {
7122 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7123 	struct tg3 *tp = tnapi->tp;
7124 	int work_done = 0;
7125 	struct tg3_hw_status *sblk = tnapi->hw_status;
7126 
7127 	while (1) {
7128 		work_done = tg3_poll_work(tnapi, work_done, budget);
7129 
7130 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7131 			goto tx_recovery;
7132 
7133 		if (unlikely(work_done >= budget))
7134 			break;
7135 
7136 		/* tnapi->last_tag is used when re-enabling interrupts
7137 		 * below to tell the hw how much work has been processed,
7138 		 * so we must read it before checking for more work.
7139 		 */
7140 		tnapi->last_tag = sblk->status_tag;
7141 		tnapi->last_irq_tag = tnapi->last_tag;
7142 		rmb();
7143 
7144 		/* check for RX/TX work to do */
7145 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7146 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7147 
7148 			/* This test is not race free, but looping again
7149 			 * reduces the number of interrupts.
7150 			 */
7151 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7152 				continue;
7153 
7154 			napi_complete(napi);
7155 			/* Reenable interrupts. */
7156 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7157 
7158 			/* This test is synchronized by napi_schedule()
7159 			 * and napi_complete() to close the race condition.
7160 			 */
7161 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7162 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7163 						  HOSTCC_MODE_ENABLE |
7164 						  tnapi->coal_now);
7165 			}
7166 			mmiowb();
7167 			break;
7168 		}
7169 	}
7170 
7171 	return work_done;
7172 
7173 tx_recovery:
7174 	/* work_done is guaranteed to be less than budget. */
7175 	napi_complete(napi);
7176 	tg3_reset_task_schedule(tp);
7177 	return work_done;
7178 }
7179 
7180 static void tg3_process_error(struct tg3 *tp)
7181 {
7182 	u32 val;
7183 	bool real_error = false;
7184 
7185 	if (tg3_flag(tp, ERROR_PROCESSED))
7186 		return;
7187 
7188 	/* Check Flow Attention register */
7189 	val = tr32(HOSTCC_FLOW_ATTN);
7190 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7191 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7192 		real_error = true;
7193 	}
7194 
7195 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7196 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7197 		real_error = true;
7198 	}
7199 
7200 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7201 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7202 		real_error = true;
7203 	}
7204 
7205 	if (!real_error)
7206 		return;
7207 
7208 	tg3_dump_state(tp);
7209 
7210 	tg3_flag_set(tp, ERROR_PROCESSED);
7211 	tg3_reset_task_schedule(tp);
7212 }
7213 
7214 static int tg3_poll(struct napi_struct *napi, int budget)
7215 {
7216 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7217 	struct tg3 *tp = tnapi->tp;
7218 	int work_done = 0;
7219 	struct tg3_hw_status *sblk = tnapi->hw_status;
7220 
7221 	while (1) {
7222 		if (sblk->status & SD_STATUS_ERROR)
7223 			tg3_process_error(tp);
7224 
7225 		tg3_poll_link(tp);
7226 
7227 		work_done = tg3_poll_work(tnapi, work_done, budget);
7228 
7229 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7230 			goto tx_recovery;
7231 
7232 		if (unlikely(work_done >= budget))
7233 			break;
7234 
7235 		if (tg3_flag(tp, TAGGED_STATUS)) {
7236 			/* tnapi->last_tag is used in tg3_int_reenable() below
7237 			 * to tell the hw how much work has been processed,
7238 			 * so we must read it before checking for more work.
7239 			 */
7240 			tnapi->last_tag = sblk->status_tag;
7241 			tnapi->last_irq_tag = tnapi->last_tag;
7242 			rmb();
7243 		} else
7244 			sblk->status &= ~SD_STATUS_UPDATED;
7245 
7246 		if (likely(!tg3_has_work(tnapi))) {
7247 			napi_complete(napi);
7248 			tg3_int_reenable(tnapi);
7249 			break;
7250 		}
7251 	}
7252 
7253 	return work_done;
7254 
7255 tx_recovery:
7256 	/* work_done is guaranteed to be less than budget. */
7257 	napi_complete(napi);
7258 	tg3_reset_task_schedule(tp);
7259 	return work_done;
7260 }
7261 
7262 static void tg3_napi_disable(struct tg3 *tp)
7263 {
7264 	int i;
7265 
7266 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7267 		napi_disable(&tp->napi[i].napi);
7268 }
7269 
7270 static void tg3_napi_enable(struct tg3 *tp)
7271 {
7272 	int i;
7273 
7274 	for (i = 0; i < tp->irq_cnt; i++)
7275 		napi_enable(&tp->napi[i].napi);
7276 }
7277 
7278 static void tg3_napi_init(struct tg3 *tp)
7279 {
7280 	int i;
7281 
7282 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7283 	for (i = 1; i < tp->irq_cnt; i++)
7284 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7285 }
7286 
7287 static void tg3_napi_fini(struct tg3 *tp)
7288 {
7289 	int i;
7290 
7291 	for (i = 0; i < tp->irq_cnt; i++)
7292 		netif_napi_del(&tp->napi[i].napi);
7293 }
7294 
7295 static inline void tg3_netif_stop(struct tg3 *tp)
7296 {
7297 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7298 	tg3_napi_disable(tp);
7299 	netif_carrier_off(tp->dev);
7300 	netif_tx_disable(tp->dev);
7301 }
7302 
7303 /* tp->lock must be held */
7304 static inline void tg3_netif_start(struct tg3 *tp)
7305 {
7306 	tg3_ptp_resume(tp);
7307 
7308 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7309 	 * appropriate so long as all callers are assured to
7310 	 * have free tx slots (such as after tg3_init_hw)
7311 	 */
7312 	netif_tx_wake_all_queues(tp->dev);
7313 
7314 	if (tp->link_up)
7315 		netif_carrier_on(tp->dev);
7316 
7317 	tg3_napi_enable(tp);
7318 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7319 	tg3_enable_ints(tp);
7320 }
7321 
7322 static void tg3_irq_quiesce(struct tg3 *tp)
7323 {
7324 	int i;
7325 
7326 	BUG_ON(tp->irq_sync);
7327 
7328 	tp->irq_sync = 1;
7329 	smp_mb();
7330 
7331 	for (i = 0; i < tp->irq_cnt; i++)
7332 		synchronize_irq(tp->napi[i].irq_vec);
7333 }
7334 
7335 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7336  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7337  * with as well.  Most of the time, this is not necessary except when
7338  * shutting down the device.
7339  */
7340 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7341 {
7342 	spin_lock_bh(&tp->lock);
7343 	if (irq_sync)
7344 		tg3_irq_quiesce(tp);
7345 }
7346 
7347 static inline void tg3_full_unlock(struct tg3 *tp)
7348 {
7349 	spin_unlock_bh(&tp->lock);
7350 }
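
/* Typical pairing (illustrative):
 *
 *	tg3_full_lock(tp, 1);	(nonzero irq_sync also quiesces IRQs)
 *	... reconfigure the hardware ...
 *	tg3_full_unlock(tp);
 */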
7351 
7352 /* One-shot MSI handler - Chip automatically disables interrupt
7353  * after sending MSI so driver doesn't have to do it.
7354  */
7355 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7356 {
7357 	struct tg3_napi *tnapi = dev_id;
7358 	struct tg3 *tp = tnapi->tp;
7359 
7360 	prefetch(tnapi->hw_status);
7361 	if (tnapi->rx_rcb)
7362 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7363 
7364 	if (likely(!tg3_irq_sync(tp)))
7365 		napi_schedule(&tnapi->napi);
7366 
7367 	return IRQ_HANDLED;
7368 }
7369 
7370 /* MSI ISR - No need to check for interrupt sharing and no need to
7371  * flush status block and interrupt mailbox. PCI ordering rules
7372  * guarantee that MSI will arrive after the status block.
7373  */
7374 static irqreturn_t tg3_msi(int irq, void *dev_id)
7375 {
7376 	struct tg3_napi *tnapi = dev_id;
7377 	struct tg3 *tp = tnapi->tp;
7378 
7379 	prefetch(tnapi->hw_status);
7380 	if (tnapi->rx_rcb)
7381 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7382 	/*
7383 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7384 	 * chip-internal interrupt pending events.
7385 	 * Writing non-zero to intr-mbox-0 additionally tells the
7386 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7387 	 * event coalescing.
7388 	 */
7389 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7390 	if (likely(!tg3_irq_sync(tp)))
7391 		napi_schedule(&tnapi->napi);
7392 
7393 	return IRQ_RETVAL(1);
7394 }
7395 
7396 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7397 {
7398 	struct tg3_napi *tnapi = dev_id;
7399 	struct tg3 *tp = tnapi->tp;
7400 	struct tg3_hw_status *sblk = tnapi->hw_status;
7401 	unsigned int handled = 1;
7402 
7403 	/* In INTx mode, it is possible for the interrupt to arrive at
7404 	 * the CPU before the status block that was posted prior to it.
7405 	 * Reading the PCI State register will confirm whether the
7406 	 * interrupt is ours and will flush the status block.
7407 	 */
7408 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7409 		if (tg3_flag(tp, CHIP_RESETTING) ||
7410 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7411 			handled = 0;
7412 			goto out;
7413 		}
7414 	}
7415 
7416 	/*
7417 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7418 	 * chip-internal interrupt pending events.
7419 	 * Writing non-zero to intr-mbox-0 additionally tells the
7420 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7421 	 * event coalescing.
7422 	 *
7423 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7424 	 * spurious interrupts.  The flush impacts performance but
7425 	 * excessive spurious interrupts can be worse in some cases.
7426 	 */
7427 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7428 	if (tg3_irq_sync(tp))
7429 		goto out;
7430 	sblk->status &= ~SD_STATUS_UPDATED;
7431 	if (likely(tg3_has_work(tnapi))) {
7432 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7433 		napi_schedule(&tnapi->napi);
7434 	} else {
7435 		/* No work, shared interrupt perhaps?  Re-enable
7436 		 * interrupts and flush that PCI write.
7437 		 */
7438 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7439 			       0x00000000);
7440 	}
7441 out:
7442 	return IRQ_RETVAL(handled);
7443 }
7444 
7445 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7446 {
7447 	struct tg3_napi *tnapi = dev_id;
7448 	struct tg3 *tp = tnapi->tp;
7449 	struct tg3_hw_status *sblk = tnapi->hw_status;
7450 	unsigned int handled = 1;
7451 
7452 	/* In INTx mode, it is possible for the interrupt to arrive at
7453 	 * the CPU before the status block write that preceded it is
7454 	 * visible.  Reading the PCI State register both confirms whether
7455 	 * the interrupt is ours and flushes the status block.
7456 	 */
7457 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7458 		if (tg3_flag(tp, CHIP_RESETTING) ||
7459 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7460 			handled = 0;
7461 			goto out;
7462 		}
7463 	}
7464 
7465 	/*
7466 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7467 	 * chip-internal interrupt pending events.
7468 	 * Writing non-zero to intr-mbox-0 additionally tells the
7469 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7470 	 * event coalescing.
7471 	 *
7472 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7473 	 * spurious interrupts.  The flush impacts performance but
7474 	 * excessive spurious interrupts can be worse in some cases.
7475 	 */
7476 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7477 
7478 	/*
7479 	 * In a shared interrupt configuration, sometimes other devices'
7480 	 * interrupts will scream.  We record the current status tag here
7481 	 * so that the above check can report that the screaming interrupts
7482 	 * are unhandled.  Eventually they will be silenced.
7483 	 */
7484 	tnapi->last_irq_tag = sblk->status_tag;
7485 
7486 	if (tg3_irq_sync(tp))
7487 		goto out;
7488 
7489 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7490 
7491 	napi_schedule(&tnapi->napi);
7492 
7493 out:
7494 	return IRQ_RETVAL(handled);
7495 }
7496 
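/* A walk-through of the tag scheme above, with illustrative values:
 *
 *	1) chip posts a status block with status_tag = 5, asserts INTA#
 *	2) 5 != last_irq_tag, so the interrupt is ours; after the mailbox
 *	   write, last_irq_tag is set to 5
 *	3) a device sharing the line fires: status_tag is still 5 and
 *	   PCISTATE shows our INTA# deasserted, so handled = 0 and the
 *	   kernel can attribute the screaming IRQ elsewhere
 */
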
7497 /* ISR for interrupt test */
7498 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7499 {
7500 	struct tg3_napi *tnapi = dev_id;
7501 	struct tg3 *tp = tnapi->tp;
7502 	struct tg3_hw_status *sblk = tnapi->hw_status;
7503 
7504 	if ((sblk->status & SD_STATUS_UPDATED) ||
7505 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7506 		tg3_disable_ints(tp);
7507 		return IRQ_RETVAL(1);
7508 	}
7509 	return IRQ_RETVAL(0);
7510 }
7511 
7512 #ifdef CONFIG_NET_POLL_CONTROLLER
7513 static void tg3_poll_controller(struct net_device *dev)
7514 {
7515 	int i;
7516 	struct tg3 *tp = netdev_priv(dev);
7517 
7518 	if (tg3_irq_sync(tp))
7519 		return;
7520 
7521 	for (i = 0; i < tp->irq_cnt; i++)
7522 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7523 }
7524 #endif
7525 
7526 static void tg3_tx_timeout(struct net_device *dev)
7527 {
7528 	struct tg3 *tp = netdev_priv(dev);
7529 
7530 	if (netif_msg_tx_err(tp)) {
7531 		netdev_err(dev, "transmit timed out, resetting\n");
7532 		tg3_dump_state(tp);
7533 	}
7534 
7535 	tg3_reset_task_schedule(tp);
7536 }
7537 
7538 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7539 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7540 {
7541 	u32 base = (u32) mapping & 0xffffffff;
7542 
7543 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7544 }
7545 
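/* Worked example for the test above (values chosen for illustration):
 * mapping = 0xffffff00 and len = 0x200 give base + len + 8 =
 * 0x100000108, which truncates to 0x108 in 32 bits, i.e. less than
 * base, so the buffer straddles a 4GB boundary.  The base > 0xffffdcc0
 * pre-filter cheaply rejects bases more than ~9KB below a boundary,
 * beyond the reach of any max-sized frame.  The TSO variant below
 * widens the window by up to one MSS.
 */
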
7546 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7547  * of any 4GB boundaries: 4G, 8G, etc
7548  */
7549 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7550 					   u32 len, u32 mss)
7551 {
7552 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7553 		u32 base = (u32) mapping & 0xffffffff;
7554 
7555 		return ((base + len + (mss & 0x3fff)) < base);
7556 	}
7557 	return 0;
7558 }
7559 
7560 /* Test for DMA addresses > 40-bit */
7561 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7562 					  int len)
7563 {
7564 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7565 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7566 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7567 	return 0;
7568 #else
7569 	return 0;
7570 #endif
7571 }
7572 
7573 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7574 				 dma_addr_t mapping, u32 len, u32 flags,
7575 				 u32 mss, u32 vlan)
7576 {
7577 	txbd->addr_hi = ((u64) mapping >> 32);
7578 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7579 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7580 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7581 }
7582 
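/* Expansion of the packing above with example values (not from real
 * traffic): mapping = 0x123456000, len = 1514, flags = TXD_FLAG_END:
 *
 *	addr_hi   = 0x00000001
 *	addr_lo   = 0x23456000
 *	len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *	vlan_tag  = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT)
 */
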
7583 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7584 			    dma_addr_t map, u32 len, u32 flags,
7585 			    u32 mss, u32 vlan)
7586 {
7587 	struct tg3 *tp = tnapi->tp;
7588 	bool hwbug = false;
7589 
7590 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7591 		hwbug = true;
7592 
7593 	if (tg3_4g_overflow_test(map, len))
7594 		hwbug = true;
7595 
7596 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7597 		hwbug = true;
7598 
7599 	if (tg3_40bit_overflow_test(tp, map, len))
7600 		hwbug = true;
7601 
7602 	if (tp->dma_limit) {
7603 		u32 prvidx = *entry;
7604 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7605 		while (len > tp->dma_limit && *budget) {
7606 			u32 frag_len = tp->dma_limit;
7607 			len -= tp->dma_limit;
7608 
7609 			/* Avoid the 8-byte DMA problem */
7610 			if (len <= 8) {
7611 				len += tp->dma_limit / 2;
7612 				frag_len = tp->dma_limit / 2;
7613 			}
7614 
7615 			tnapi->tx_buffers[*entry].fragmented = true;
7616 
7617 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7618 				      frag_len, tmp_flag, mss, vlan);
7619 			*budget -= 1;
7620 			prvidx = *entry;
7621 			*entry = NEXT_TX(*entry);
7622 
7623 			map += frag_len;
7624 		}
7625 
7626 		if (len) {
7627 			if (*budget) {
7628 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7629 					      len, flags, mss, vlan);
7630 				*budget -= 1;
7631 				*entry = NEXT_TX(*entry);
7632 			} else {
7633 				hwbug = true;
7634 				tnapi->tx_buffers[prvidx].fragmented = false;
7635 			}
7636 		}
7637 	} else {
7638 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7639 			      len, flags, mss, vlan);
7640 		*entry = NEXT_TX(*entry);
7641 	}
7642 
7643 	return hwbug;
7644 }
7645 
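/* Worked example of the dma_limit splitting above (assumed numbers):
 * dma_limit = 4096, len = 8200.  The loop emits a 4096-byte BD (4104
 * left); the next 4096-byte BD would leave just 8 bytes - the short-DMA
 * problem - so that pass emits 2048 bytes instead (2056 left); the
 * remainder is then written by the if (len) block, keeping the caller's
 * TXD_FLAG_END on the final BD.  Every BD emitted costs one *budget.
 */
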
7646 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7647 {
7648 	int i;
7649 	struct sk_buff *skb;
7650 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7651 
7652 	skb = txb->skb;
7653 	txb->skb = NULL;
7654 
7655 	pci_unmap_single(tnapi->tp->pdev,
7656 			 dma_unmap_addr(txb, mapping),
7657 			 skb_headlen(skb),
7658 			 PCI_DMA_TODEVICE);
7659 
7660 	while (txb->fragmented) {
7661 		txb->fragmented = false;
7662 		entry = NEXT_TX(entry);
7663 		txb = &tnapi->tx_buffers[entry];
7664 	}
7665 
7666 	for (i = 0; i <= last; i++) {
7667 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7668 
7669 		entry = NEXT_TX(entry);
7670 		txb = &tnapi->tx_buffers[entry];
7671 
7672 		pci_unmap_page(tnapi->tp->pdev,
7673 			       dma_unmap_addr(txb, mapping),
7674 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7675 
7676 		while (txb->fragmented) {
7677 			txb->fragmented = false;
7678 			entry = NEXT_TX(entry);
7679 			txb = &tnapi->tx_buffers[entry];
7680 		}
7681 	}
7682 }
7683 
7684 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7685 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7686 				       struct sk_buff **pskb,
7687 				       u32 *entry, u32 *budget,
7688 				       u32 base_flags, u32 mss, u32 vlan)
7689 {
7690 	struct tg3 *tp = tnapi->tp;
7691 	struct sk_buff *new_skb, *skb = *pskb;
7692 	dma_addr_t new_addr = 0;
7693 	int ret = 0;
7694 
7695 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7696 		new_skb = skb_copy(skb, GFP_ATOMIC);
7697 	else {
7698 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7699 
7700 		new_skb = skb_copy_expand(skb,
7701 					  skb_headroom(skb) + more_headroom,
7702 					  skb_tailroom(skb), GFP_ATOMIC);
7703 	}
7704 
7705 	if (!new_skb) {
7706 		ret = -1;
7707 	} else {
7708 		/* New SKB is guaranteed to be linear. */
7709 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7710 					  PCI_DMA_TODEVICE);
7711 		/* Make sure the mapping succeeded */
7712 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7713 			dev_kfree_skb(new_skb);
7714 			ret = -1;
7715 		} else {
7716 			u32 save_entry = *entry;
7717 
7718 			base_flags |= TXD_FLAG_END;
7719 
7720 			tnapi->tx_buffers[*entry].skb = new_skb;
7721 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7722 					   mapping, new_addr);
7723 
7724 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7725 					    new_skb->len, base_flags,
7726 					    mss, vlan)) {
7727 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7728 				dev_kfree_skb(new_skb);
7729 				ret = -1;
7730 			}
7731 		}
7732 	}
7733 
7734 	dev_kfree_skb(skb);
7735 	*pskb = new_skb;
7736 	return ret;
7737 }
7738 
7739 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7740 
7741 /* Use GSO to work around a rare TSO bug that may be triggered when the
7742  * TSO header is greater than 80 bytes.
7743  */
7744 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7745 {
7746 	struct sk_buff *segs, *nskb;
7747 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7748 
7749 	/* Estimate the number of fragments in the worst case */
7750 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7751 		netif_stop_queue(tp->dev);
7752 
7753 		/* netif_tx_stop_queue() must be done before checking
7754 		 * tx index in tg3_tx_avail() below, because in
7755 		 * tg3_tx(), we update tx index before checking for
7756 		 * netif_tx_queue_stopped().
7757 		 */
7758 		smp_mb();
7759 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7760 			return NETDEV_TX_BUSY;
7761 
7762 		netif_wake_queue(tp->dev);
7763 	}
7764 
7765 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7766 	if (IS_ERR(segs))
7767 		goto tg3_tso_bug_end;
7768 
7769 	do {
7770 		nskb = segs;
7771 		segs = segs->next;
7772 		nskb->next = NULL;
7773 		tg3_start_xmit(nskb, tp->dev);
7774 	} while (segs);
7775 
7776 tg3_tso_bug_end:
7777 	dev_kfree_skb(skb);
7778 
7779 	return NETDEV_TX_OK;
7780 }
7781 
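/* In short: the skb is resegmented in software by skb_gso_segment()
 * with TSO masked out of the feature set, and each resulting skb (no
 * longer GSO, so it bypasses the TSO path) is fed back through
 * tg3_start_xmit().  For example, a 64KB TSO skb with an MSS of 1448
 * comes back as roughly 45 plain segments.
 */
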
7782 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7783  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7784  */
7785 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7786 {
7787 	struct tg3 *tp = netdev_priv(dev);
7788 	u32 len, entry, base_flags, mss, vlan = 0;
7789 	u32 budget;
7790 	int i = -1, would_hit_hwbug;
7791 	dma_addr_t mapping;
7792 	struct tg3_napi *tnapi;
7793 	struct netdev_queue *txq;
7794 	unsigned int last;
7795 
7796 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7797 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7798 	if (tg3_flag(tp, ENABLE_TSS))
7799 		tnapi++;
7800 
7801 	budget = tg3_tx_avail(tnapi);
7802 
7803 	/* We are running in BH disabled context with netif_tx_lock
7804 	 * and TX reclaim runs via tp->napi.poll inside of a software
7805 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7806 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7807 	 */
7808 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7809 		if (!netif_tx_queue_stopped(txq)) {
7810 			netif_tx_stop_queue(txq);
7811 
7812 			/* This is a hard error, log it. */
7813 			netdev_err(dev,
7814 				   "BUG! Tx Ring full when queue awake!\n");
7815 		}
7816 		return NETDEV_TX_BUSY;
7817 	}
7818 
7819 	entry = tnapi->tx_prod;
7820 	base_flags = 0;
7821 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7822 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7823 
7824 	mss = skb_shinfo(skb)->gso_size;
7825 	if (mss) {
7826 		struct iphdr *iph;
7827 		u32 tcp_opt_len, hdr_len;
7828 
7829 		if (skb_header_cloned(skb) &&
7830 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7831 			goto drop;
7832 
7833 		iph = ip_hdr(skb);
7834 		tcp_opt_len = tcp_optlen(skb);
7835 
7836 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7837 
7838 		if (!skb_is_gso_v6(skb)) {
7839 			iph->check = 0;
7840 			iph->tot_len = htons(mss + hdr_len);
7841 		}
7842 
7843 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7844 		    tg3_flag(tp, TSO_BUG))
7845 			return tg3_tso_bug(tp, skb);
7846 
7847 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7848 			       TXD_FLAG_CPU_POST_DMA);
7849 
7850 		if (tg3_flag(tp, HW_TSO_1) ||
7851 		    tg3_flag(tp, HW_TSO_2) ||
7852 		    tg3_flag(tp, HW_TSO_3)) {
7853 			tcp_hdr(skb)->check = 0;
7854 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7855 		} else
7856 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7857 								 iph->daddr, 0,
7858 								 IPPROTO_TCP,
7859 								 0);
7860 
7861 		if (tg3_flag(tp, HW_TSO_3)) {
7862 			mss |= (hdr_len & 0xc) << 12;
7863 			if (hdr_len & 0x10)
7864 				base_flags |= 0x00000010;
7865 			base_flags |= (hdr_len & 0x3e0) << 5;
7866 		} else if (tg3_flag(tp, HW_TSO_2))
7867 			mss |= hdr_len << 9;
7868 		else if (tg3_flag(tp, HW_TSO_1) ||
7869 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7870 			if (tcp_opt_len || iph->ihl > 5) {
7871 				int tsflags;
7872 
7873 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7874 				mss |= (tsflags << 11);
7875 			}
7876 		} else {
7877 			if (tcp_opt_len || iph->ihl > 5) {
7878 				int tsflags;
7879 
7880 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7881 				base_flags |= tsflags << 12;
7882 			}
7883 		}
7884 	}
7885 
7886 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7887 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7888 		base_flags |= TXD_FLAG_JMB_PKT;
7889 
7890 	if (vlan_tx_tag_present(skb)) {
7891 		base_flags |= TXD_FLAG_VLAN;
7892 		vlan = vlan_tx_tag_get(skb);
7893 	}
7894 
7895 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7896 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7897 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7898 		base_flags |= TXD_FLAG_HWTSTAMP;
7899 	}
7900 
7901 	len = skb_headlen(skb);
7902 
7903 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7904 	if (pci_dma_mapping_error(tp->pdev, mapping))
7905 		goto drop;
7906 
7907 
7908 	tnapi->tx_buffers[entry].skb = skb;
7909 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7910 
7911 	would_hit_hwbug = 0;
7912 
7913 	if (tg3_flag(tp, 5701_DMA_BUG))
7914 		would_hit_hwbug = 1;
7915 
7916 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7917 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7918 			    mss, vlan)) {
7919 		would_hit_hwbug = 1;
7920 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7921 		u32 tmp_mss = mss;
7922 
7923 		if (!tg3_flag(tp, HW_TSO_1) &&
7924 		    !tg3_flag(tp, HW_TSO_2) &&
7925 		    !tg3_flag(tp, HW_TSO_3))
7926 			tmp_mss = 0;
7927 
7928 		/* Now loop through additional data
7929 		 * fragments, and queue them.
7930 		 */
7931 		last = skb_shinfo(skb)->nr_frags - 1;
7932 		for (i = 0; i <= last; i++) {
7933 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7934 
7935 			len = skb_frag_size(frag);
7936 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7937 						   len, DMA_TO_DEVICE);
7938 
7939 			tnapi->tx_buffers[entry].skb = NULL;
7940 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7941 					   mapping);
7942 			if (dma_mapping_error(&tp->pdev->dev, mapping))
7943 				goto dma_error;
7944 
7945 			if (!budget ||
7946 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7947 					    len, base_flags |
7948 					    ((i == last) ? TXD_FLAG_END : 0),
7949 					    tmp_mss, vlan)) {
7950 				would_hit_hwbug = 1;
7951 				break;
7952 			}
7953 		}
7954 	}
7955 
7956 	if (would_hit_hwbug) {
7957 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7958 
7959 		/* If the workaround fails due to memory/mapping
7960 		 * failure, silently drop this packet.
7961 		 */
7962 		entry = tnapi->tx_prod;
7963 		budget = tg3_tx_avail(tnapi);
7964 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7965 						base_flags, mss, vlan))
7966 			goto drop_nofree;
7967 	}
7968 
7969 	skb_tx_timestamp(skb);
7970 	netdev_tx_sent_queue(txq, skb->len);
7971 
7972 	/* Sync BD data before updating mailbox */
7973 	wmb();
7974 
7975 	/* Packets are ready, update Tx producer idx local and on card. */
7976 	tw32_tx_mbox(tnapi->prodmbox, entry);
7977 
7978 	tnapi->tx_prod = entry;
7979 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7980 		netif_tx_stop_queue(txq);
7981 
7982 		/* netif_tx_stop_queue() must be done before checking
7983 		 * tx index in tg3_tx_avail() below, because in
7984 		 * tg3_tx(), we update tx index before checking for
7985 		 * netif_tx_queue_stopped().
7986 		 */
7987 		smp_mb();
7988 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7989 			netif_tx_wake_queue(txq);
7990 	}
7991 
7992 	mmiowb();
7993 	return NETDEV_TX_OK;
7994 
7995 dma_error:
7996 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7997 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7998 drop:
7999 	dev_kfree_skb(skb);
8000 drop_nofree:
8001 	tp->tx_dropped++;
8002 	return NETDEV_TX_OK;
8003 }
8004 
8005 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8006 {
8007 	if (enable) {
8008 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8009 				  MAC_MODE_PORT_MODE_MASK);
8010 
8011 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8012 
8013 		if (!tg3_flag(tp, 5705_PLUS))
8014 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8015 
8016 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8017 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8018 		else
8019 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8020 	} else {
8021 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8022 
8023 		if (tg3_flag(tp, 5705_PLUS) ||
8024 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8025 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8026 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8027 	}
8028 
8029 	tw32(MAC_MODE, tp->mac_mode);
8030 	udelay(40);
8031 }
8032 
8033 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8034 {
8035 	u32 val, bmcr, mac_mode, ptest = 0;
8036 
8037 	tg3_phy_toggle_apd(tp, false);
8038 	tg3_phy_toggle_automdix(tp, false);
8039 
8040 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8041 		return -EIO;
8042 
8043 	bmcr = BMCR_FULLDPLX;
8044 	switch (speed) {
8045 	case SPEED_10:
8046 		break;
8047 	case SPEED_100:
8048 		bmcr |= BMCR_SPEED100;
8049 		break;
8050 	case SPEED_1000:
8051 	default:
8052 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8053 			speed = SPEED_100;
8054 			bmcr |= BMCR_SPEED100;
8055 		} else {
8056 			speed = SPEED_1000;
8057 			bmcr |= BMCR_SPEED1000;
8058 		}
8059 	}
8060 
8061 	if (extlpbk) {
8062 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8063 			tg3_readphy(tp, MII_CTRL1000, &val);
8064 			val |= CTL1000_AS_MASTER |
8065 			       CTL1000_ENABLE_MASTER;
8066 			tg3_writephy(tp, MII_CTRL1000, val);
8067 		} else {
8068 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8069 				MII_TG3_FET_PTEST_TRIM_2;
8070 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8071 		}
8072 	} else
8073 		bmcr |= BMCR_LOOPBACK;
8074 
8075 	tg3_writephy(tp, MII_BMCR, bmcr);
8076 
8077 	/* The write needs to be flushed for the FETs */
8078 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8079 		tg3_readphy(tp, MII_BMCR, &bmcr);
8080 
8081 	udelay(40);
8082 
8083 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8084 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8085 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8086 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8087 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8088 
8089 		/* The write needs to be flushed for the AC131 */
8090 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8091 	}
8092 
8093 	/* Reset to prevent intermittently losing the 1st rx packet */
8094 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8095 	    tg3_flag(tp, 5780_CLASS)) {
8096 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8097 		udelay(10);
8098 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8099 	}
8100 
8101 	mac_mode = tp->mac_mode &
8102 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8103 	if (speed == SPEED_1000)
8104 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8105 	else
8106 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8107 
8108 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8109 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8110 
8111 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8112 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8113 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8114 			mac_mode |= MAC_MODE_LINK_POLARITY;
8115 
8116 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8117 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8118 	}
8119 
8120 	tw32(MAC_MODE, mac_mode);
8121 	udelay(40);
8122 
8123 	return 0;
8124 }
8125 
8126 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8127 {
8128 	struct tg3 *tp = netdev_priv(dev);
8129 
8130 	if (features & NETIF_F_LOOPBACK) {
8131 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8132 			return;
8133 
8134 		spin_lock_bh(&tp->lock);
8135 		tg3_mac_loopback(tp, true);
8136 		netif_carrier_on(tp->dev);
8137 		spin_unlock_bh(&tp->lock);
8138 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8139 	} else {
8140 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8141 			return;
8142 
8143 		spin_lock_bh(&tp->lock);
8144 		tg3_mac_loopback(tp, false);
8145 		/* Force link status check */
8146 		tg3_setup_phy(tp, true);
8147 		spin_unlock_bh(&tp->lock);
8148 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8149 	}
8150 }
8151 
8152 static netdev_features_t tg3_fix_features(struct net_device *dev,
8153 	netdev_features_t features)
8154 {
8155 	struct tg3 *tp = netdev_priv(dev);
8156 
8157 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8158 		features &= ~NETIF_F_ALL_TSO;
8159 
8160 	return features;
8161 }
8162 
8163 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8164 {
8165 	netdev_features_t changed = dev->features ^ features;
8166 
8167 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8168 		tg3_set_loopback(dev, features);
8169 
8170 	return 0;
8171 }
8172 
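/* These two hooks back the netdev feature machinery; assuming the
 * standard "loopback" feature string, internal MAC loopback can be
 * toggled from userspace with, e.g.:
 *
 *	ethtool -K eth0 loopback on
 *	ethtool -K eth0 loopback off
 */
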
8173 static void tg3_rx_prodring_free(struct tg3 *tp,
8174 				 struct tg3_rx_prodring_set *tpr)
8175 {
8176 	int i;
8177 
8178 	if (tpr != &tp->napi[0].prodring) {
8179 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8180 		     i = (i + 1) & tp->rx_std_ring_mask)
8181 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8182 					tp->rx_pkt_map_sz);
8183 
8184 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8185 			for (i = tpr->rx_jmb_cons_idx;
8186 			     i != tpr->rx_jmb_prod_idx;
8187 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8188 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8189 						TG3_RX_JMB_MAP_SZ);
8190 			}
8191 		}
8192 
8193 		return;
8194 	}
8195 
8196 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8197 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8198 				tp->rx_pkt_map_sz);
8199 
8200 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8201 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8202 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8203 					TG3_RX_JMB_MAP_SZ);
8204 	}
8205 }
8206 
8207 /* Initialize rx rings for packet processing.
8208  *
8209  * The chip has been shut down and the driver detached from
8210  * the networking stack, so no interrupts or new tx packets will
8211  * end up in the driver.  tp->{tx,}lock are held and thus
8212  * we may not sleep.
8213  */
8214 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8215 				 struct tg3_rx_prodring_set *tpr)
8216 {
8217 	u32 i, rx_pkt_dma_sz;
8218 
8219 	tpr->rx_std_cons_idx = 0;
8220 	tpr->rx_std_prod_idx = 0;
8221 	tpr->rx_jmb_cons_idx = 0;
8222 	tpr->rx_jmb_prod_idx = 0;
8223 
8224 	if (tpr != &tp->napi[0].prodring) {
8225 		memset(&tpr->rx_std_buffers[0], 0,
8226 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8227 		if (tpr->rx_jmb_buffers)
8228 			memset(&tpr->rx_jmb_buffers[0], 0,
8229 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8230 		goto done;
8231 	}
8232 
8233 	/* Zero out all descriptors. */
8234 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8235 
8236 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8237 	if (tg3_flag(tp, 5780_CLASS) &&
8238 	    tp->dev->mtu > ETH_DATA_LEN)
8239 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8240 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8241 
8242 	/* Initialize invariants of the rings; we only set this
8243 	 * stuff once.  This works because the card does not
8244 	 * write into the rx buffer posting rings.
8245 	 */
8246 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8247 		struct tg3_rx_buffer_desc *rxd;
8248 
8249 		rxd = &tpr->rx_std[i];
8250 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8251 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8252 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8253 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8254 	}
8255 
8256 	/* Now allocate fresh SKBs for each rx ring. */
8257 	for (i = 0; i < tp->rx_pending; i++) {
8258 		unsigned int frag_size;
8259 
8260 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8261 				      &frag_size) < 0) {
8262 			netdev_warn(tp->dev,
8263 				    "Using a smaller RX standard ring. Only "
8264 				    "%d out of %d buffers were allocated "
8265 				    "successfully\n", i, tp->rx_pending);
8266 			if (i == 0)
8267 				goto initfail;
8268 			tp->rx_pending = i;
8269 			break;
8270 		}
8271 	}
8272 
8273 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8274 		goto done;
8275 
8276 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8277 
8278 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8279 		goto done;
8280 
8281 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8282 		struct tg3_rx_buffer_desc *rxd;
8283 
8284 		rxd = &tpr->rx_jmb[i].std;
8285 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8286 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8287 				  RXD_FLAG_JUMBO;
8288 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8289 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8290 	}
8291 
8292 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8293 		unsigned int frag_size;
8294 
8295 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8296 				      &frag_size) < 0) {
8297 			netdev_warn(tp->dev,
8298 				    "Using a smaller RX jumbo ring. Only %d "
8299 				    "out of %d buffers were allocated "
8300 				    "successfully\n", i, tp->rx_jumbo_pending);
8301 			if (i == 0)
8302 				goto initfail;
8303 			tp->rx_jumbo_pending = i;
8304 			break;
8305 		}
8306 	}
8307 
8308 done:
8309 	return 0;
8310 
8311 initfail:
8312 	tg3_rx_prodring_free(tp, tpr);
8313 	return -ENOMEM;
8314 }
8315 
8316 static void tg3_rx_prodring_fini(struct tg3 *tp,
8317 				 struct tg3_rx_prodring_set *tpr)
8318 {
8319 	kfree(tpr->rx_std_buffers);
8320 	tpr->rx_std_buffers = NULL;
8321 	kfree(tpr->rx_jmb_buffers);
8322 	tpr->rx_jmb_buffers = NULL;
8323 	if (tpr->rx_std) {
8324 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8325 				  tpr->rx_std, tpr->rx_std_mapping);
8326 		tpr->rx_std = NULL;
8327 	}
8328 	if (tpr->rx_jmb) {
8329 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8330 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8331 		tpr->rx_jmb = NULL;
8332 	}
8333 }
8334 
8335 static int tg3_rx_prodring_init(struct tg3 *tp,
8336 				struct tg3_rx_prodring_set *tpr)
8337 {
8338 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8339 				      GFP_KERNEL);
8340 	if (!tpr->rx_std_buffers)
8341 		return -ENOMEM;
8342 
8343 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8344 					 TG3_RX_STD_RING_BYTES(tp),
8345 					 &tpr->rx_std_mapping,
8346 					 GFP_KERNEL);
8347 	if (!tpr->rx_std)
8348 		goto err_out;
8349 
8350 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8351 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8352 					      GFP_KERNEL);
8353 		if (!tpr->rx_jmb_buffers)
8354 			goto err_out;
8355 
8356 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8357 						 TG3_RX_JMB_RING_BYTES(tp),
8358 						 &tpr->rx_jmb_mapping,
8359 						 GFP_KERNEL);
8360 		if (!tpr->rx_jmb)
8361 			goto err_out;
8362 	}
8363 
8364 	return 0;
8365 
8366 err_out:
8367 	tg3_rx_prodring_fini(tp, tpr);
8368 	return -ENOMEM;
8369 }
8370 
8371 /* Free up pending packets in all rx/tx rings.
8372  *
8373  * The chip has been shut down and the driver detached from
8374  * the networking stack, so no interrupts or new tx packets will
8375  * end up in the driver.  tp->{tx,}lock is not held and we are not
8376  * in an interrupt context and thus may sleep.
8377  */
8378 static void tg3_free_rings(struct tg3 *tp)
8379 {
8380 	int i, j;
8381 
8382 	for (j = 0; j < tp->irq_cnt; j++) {
8383 		struct tg3_napi *tnapi = &tp->napi[j];
8384 
8385 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8386 
8387 		if (!tnapi->tx_buffers)
8388 			continue;
8389 
8390 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8391 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8392 
8393 			if (!skb)
8394 				continue;
8395 
8396 			tg3_tx_skb_unmap(tnapi, i,
8397 					 skb_shinfo(skb)->nr_frags - 1);
8398 
8399 			dev_kfree_skb_any(skb);
8400 		}
8401 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8402 	}
8403 }
8404 
8405 /* Initialize tx/rx rings for packet processing.
8406  *
8407  * The chip has been shut down and the driver detached from
8408  * the networking stack, so no interrupts or new tx packets will
8409  * end up in the driver.  tp->{tx,}lock are held and thus
8410  * we may not sleep.
8411  */
8412 static int tg3_init_rings(struct tg3 *tp)
8413 {
8414 	int i;
8415 
8416 	/* Free up all the SKBs. */
8417 	tg3_free_rings(tp);
8418 
8419 	for (i = 0; i < tp->irq_cnt; i++) {
8420 		struct tg3_napi *tnapi = &tp->napi[i];
8421 
8422 		tnapi->last_tag = 0;
8423 		tnapi->last_irq_tag = 0;
8424 		tnapi->hw_status->status = 0;
8425 		tnapi->hw_status->status_tag = 0;
8426 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8427 
8428 		tnapi->tx_prod = 0;
8429 		tnapi->tx_cons = 0;
8430 		if (tnapi->tx_ring)
8431 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8432 
8433 		tnapi->rx_rcb_ptr = 0;
8434 		if (tnapi->rx_rcb)
8435 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8436 
8437 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8438 			tg3_free_rings(tp);
8439 			return -ENOMEM;
8440 		}
8441 	}
8442 
8443 	return 0;
8444 }
8445 
8446 static void tg3_mem_tx_release(struct tg3 *tp)
8447 {
8448 	int i;
8449 
8450 	for (i = 0; i < tp->irq_max; i++) {
8451 		struct tg3_napi *tnapi = &tp->napi[i];
8452 
8453 		if (tnapi->tx_ring) {
8454 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8455 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8456 			tnapi->tx_ring = NULL;
8457 		}
8458 
8459 		kfree(tnapi->tx_buffers);
8460 		tnapi->tx_buffers = NULL;
8461 	}
8462 }
8463 
8464 static int tg3_mem_tx_acquire(struct tg3 *tp)
8465 {
8466 	int i;
8467 	struct tg3_napi *tnapi = &tp->napi[0];
8468 
8469 	/* If multivector TSS is enabled, vector 0 does not handle
8470 	 * tx interrupts.  Don't allocate any resources for it.
8471 	 */
8472 	if (tg3_flag(tp, ENABLE_TSS))
8473 		tnapi++;
8474 
8475 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8476 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8477 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8478 		if (!tnapi->tx_buffers)
8479 			goto err_out;
8480 
8481 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8482 						    TG3_TX_RING_BYTES,
8483 						    &tnapi->tx_desc_mapping,
8484 						    GFP_KERNEL);
8485 		if (!tnapi->tx_ring)
8486 			goto err_out;
8487 	}
8488 
8489 	return 0;
8490 
8491 err_out:
8492 	tg3_mem_tx_release(tp);
8493 	return -ENOMEM;
8494 }
8495 
8496 static void tg3_mem_rx_release(struct tg3 *tp)
8497 {
8498 	int i;
8499 
8500 	for (i = 0; i < tp->irq_max; i++) {
8501 		struct tg3_napi *tnapi = &tp->napi[i];
8502 
8503 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8504 
8505 		if (!tnapi->rx_rcb)
8506 			continue;
8507 
8508 		dma_free_coherent(&tp->pdev->dev,
8509 				  TG3_RX_RCB_RING_BYTES(tp),
8510 				  tnapi->rx_rcb,
8511 				  tnapi->rx_rcb_mapping);
8512 		tnapi->rx_rcb = NULL;
8513 	}
8514 }
8515 
8516 static int tg3_mem_rx_acquire(struct tg3 *tp)
8517 {
8518 	unsigned int i, limit;
8519 
8520 	limit = tp->rxq_cnt;
8521 
8522 	/* If RSS is enabled, we need a (dummy) producer ring
8523 	 * set on vector zero.  This is the true hw prodring.
8524 	 */
8525 	if (tg3_flag(tp, ENABLE_RSS))
8526 		limit++;
8527 
8528 	for (i = 0; i < limit; i++) {
8529 		struct tg3_napi *tnapi = &tp->napi[i];
8530 
8531 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8532 			goto err_out;
8533 
8534 		/* If multivector RSS is enabled, vector 0
8535 		 * does not handle rx or tx interrupts.
8536 		 * Don't allocate any resources for it.
8537 		 */
8538 		if (!i && tg3_flag(tp, ENABLE_RSS))
8539 			continue;
8540 
8541 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8542 						   TG3_RX_RCB_RING_BYTES(tp),
8543 						   &tnapi->rx_rcb_mapping,
8544 						   GFP_KERNEL | __GFP_ZERO);
8545 		if (!tnapi->rx_rcb)
8546 			goto err_out;
8547 	}
8548 
8549 	return 0;
8550 
8551 err_out:
8552 	tg3_mem_rx_release(tp);
8553 	return -ENOMEM;
8554 }
8555 
8556 /*
8557  * Must not be invoked with interrupt sources disabled and
8558  * the hardware shut down.
8559  */
8560 static void tg3_free_consistent(struct tg3 *tp)
8561 {
8562 	int i;
8563 
8564 	for (i = 0; i < tp->irq_cnt; i++) {
8565 		struct tg3_napi *tnapi = &tp->napi[i];
8566 
8567 		if (tnapi->hw_status) {
8568 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8569 					  tnapi->hw_status,
8570 					  tnapi->status_mapping);
8571 			tnapi->hw_status = NULL;
8572 		}
8573 	}
8574 
8575 	tg3_mem_rx_release(tp);
8576 	tg3_mem_tx_release(tp);
8577 
8578 	if (tp->hw_stats) {
8579 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8580 				  tp->hw_stats, tp->stats_mapping);
8581 		tp->hw_stats = NULL;
8582 	}
8583 }
8584 
8585 /*
8586  * Must not be invoked with interrupt sources disabled and
8587  * the hardware shut down.  Can sleep.
8588  */
8589 static int tg3_alloc_consistent(struct tg3 *tp)
8590 {
8591 	int i;
8592 
8593 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8594 					  sizeof(struct tg3_hw_stats),
8595 					  &tp->stats_mapping,
8596 					  GFP_KERNEL | __GFP_ZERO);
8597 	if (!tp->hw_stats)
8598 		goto err_out;
8599 
8600 	for (i = 0; i < tp->irq_cnt; i++) {
8601 		struct tg3_napi *tnapi = &tp->napi[i];
8602 		struct tg3_hw_status *sblk;
8603 
8604 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8605 						      TG3_HW_STATUS_SIZE,
8606 						      &tnapi->status_mapping,
8607 						      GFP_KERNEL | __GFP_ZERO);
8608 		if (!tnapi->hw_status)
8609 			goto err_out;
8610 
8611 		sblk = tnapi->hw_status;
8612 
8613 		if (tg3_flag(tp, ENABLE_RSS)) {
8614 			u16 *prodptr = NULL;
8615 
8616 			/*
8617 			 * When RSS is enabled, the status block format changes
8618 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8619 			 * and "rx_mini_consumer" members get mapped to the
8620 			 * other three rx return ring producer indexes.
8621 			 */
8622 			switch (i) {
8623 			case 1:
8624 				prodptr = &sblk->idx[0].rx_producer;
8625 				break;
8626 			case 2:
8627 				prodptr = &sblk->rx_jumbo_consumer;
8628 				break;
8629 			case 3:
8630 				prodptr = &sblk->reserved;
8631 				break;
8632 			case 4:
8633 				prodptr = &sblk->rx_mini_consumer;
8634 				break;
8635 			}
8636 			tnapi->rx_rcb_prod_idx = prodptr;
8637 		} else {
8638 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8639 		}
8640 	}
8641 
8642 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8643 		goto err_out;
8644 
8645 	return 0;
8646 
8647 err_out:
8648 	tg3_free_consistent(tp);
8649 	return -ENOMEM;
8650 }
8651 
8652 #define MAX_WAIT_CNT 1000
8653 
8654 /* To stop a block, clear the enable bit and poll till it
8655  * clears.  tp->lock is held.
8656  */
8657 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8658 {
8659 	unsigned int i;
8660 	u32 val;
8661 
8662 	if (tg3_flag(tp, 5705_PLUS)) {
8663 		switch (ofs) {
8664 		case RCVLSC_MODE:
8665 		case DMAC_MODE:
8666 		case MBFREE_MODE:
8667 		case BUFMGR_MODE:
8668 		case MEMARB_MODE:
8669 			/* We can't enable/disable these bits of the
8670 			 * 5705/5750, so just say success.
8671 			 */
8672 			return 0;
8673 
8674 		default:
8675 			break;
8676 		}
8677 	}
8678 
8679 	val = tr32(ofs);
8680 	val &= ~enable_bit;
8681 	tw32_f(ofs, val);
8682 
8683 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8684 		if (pci_channel_offline(tp->pdev)) {
8685 			dev_err(&tp->pdev->dev,
8686 				"tg3_stop_block device offline, "
8687 				"ofs=%lx enable_bit=%x\n",
8688 				ofs, enable_bit);
8689 			return -ENODEV;
8690 		}
8691 
8692 		udelay(100);
8693 		val = tr32(ofs);
8694 		if ((val & enable_bit) == 0)
8695 			break;
8696 	}
8697 
8698 	if (i == MAX_WAIT_CNT && !silent) {
8699 		dev_err(&tp->pdev->dev,
8700 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8701 			ofs, enable_bit);
8702 		return -ENODEV;
8703 	}
8704 
8705 	return 0;
8706 }
8707 
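/* Note on the poll loop above: MAX_WAIT_CNT iterations of udelay(100)
 * bound the wait at 1000 * 100us = 100ms per block.  Also note that
 * with silent set, a timeout is swallowed completely: nothing is
 * printed and 0 is returned instead of -ENODEV.
 */
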
8708 /* tp->lock is held. */
8709 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8710 {
8711 	int i, err;
8712 
8713 	tg3_disable_ints(tp);
8714 
8715 	if (pci_channel_offline(tp->pdev)) {
8716 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8717 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8718 		err = -ENODEV;
8719 		goto err_no_dev;
8720 	}
8721 
8722 	tp->rx_mode &= ~RX_MODE_ENABLE;
8723 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8724 	udelay(10);
8725 
8726 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8727 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8728 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8729 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8730 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8731 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8732 
8733 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8734 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8735 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8736 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8737 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8738 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8739 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8740 
8741 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8742 	tw32_f(MAC_MODE, tp->mac_mode);
8743 	udelay(40);
8744 
8745 	tp->tx_mode &= ~TX_MODE_ENABLE;
8746 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8747 
8748 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8749 		udelay(100);
8750 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8751 			break;
8752 	}
8753 	if (i >= MAX_WAIT_CNT) {
8754 		dev_err(&tp->pdev->dev,
8755 			"%s timed out, TX_MODE_ENABLE will not clear "
8756 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8757 		err |= -ENODEV;
8758 	}
8759 
8760 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8761 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8762 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8763 
8764 	tw32(FTQ_RESET, 0xffffffff);
8765 	tw32(FTQ_RESET, 0x00000000);
8766 
8767 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8768 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8769 
8770 err_no_dev:
8771 	for (i = 0; i < tp->irq_cnt; i++) {
8772 		struct tg3_napi *tnapi = &tp->napi[i];
8773 		if (tnapi->hw_status)
8774 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8775 	}
8776 
8777 	return err;
8778 }
8779 
8780 /* Save PCI command register before chip reset */
8781 static void tg3_save_pci_state(struct tg3 *tp)
8782 {
8783 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8784 }
8785 
8786 /* Restore PCI state after chip reset */
8787 static void tg3_restore_pci_state(struct tg3 *tp)
8788 {
8789 	u32 val;
8790 
8791 	/* Re-enable indirect register accesses. */
8792 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8793 			       tp->misc_host_ctrl);
8794 
8795 	/* Set MAX PCI retry to zero. */
8796 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8797 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8798 	    tg3_flag(tp, PCIX_MODE))
8799 		val |= PCISTATE_RETRY_SAME_DMA;
8800 	/* Allow reads and writes to the APE register and memory space. */
8801 	if (tg3_flag(tp, ENABLE_APE))
8802 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8803 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8804 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8805 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8806 
8807 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8808 
8809 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8810 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8811 				      tp->pci_cacheline_sz);
8812 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8813 				      tp->pci_lat_timer);
8814 	}
8815 
8816 	/* Make sure PCI-X relaxed ordering bit is clear. */
8817 	if (tg3_flag(tp, PCIX_MODE)) {
8818 		u16 pcix_cmd;
8819 
8820 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8821 				     &pcix_cmd);
8822 		pcix_cmd &= ~PCI_X_CMD_ERO;
8823 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8824 				      pcix_cmd);
8825 	}
8826 
8827 	if (tg3_flag(tp, 5780_CLASS)) {
8828 
8829 		/* Chip reset on the 5780 will clear the MSI enable bit,
8830 		 * so we need to restore it.
8831 		 */
8832 		if (tg3_flag(tp, USING_MSI)) {
8833 			u16 ctrl;
8834 
8835 			pci_read_config_word(tp->pdev,
8836 					     tp->msi_cap + PCI_MSI_FLAGS,
8837 					     &ctrl);
8838 			pci_write_config_word(tp->pdev,
8839 					      tp->msi_cap + PCI_MSI_FLAGS,
8840 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8841 			val = tr32(MSGINT_MODE);
8842 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8843 		}
8844 	}
8845 }
8846 
8847 /* tp->lock is held. */
8848 static int tg3_chip_reset(struct tg3 *tp)
8849 {
8850 	u32 val;
8851 	void (*write_op)(struct tg3 *, u32, u32);
8852 	int i, err;
8853 
8854 	tg3_nvram_lock(tp);
8855 
8856 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8857 
8858 	/* No matching tg3_nvram_unlock() after this because
8859 	 * chip reset below will undo the nvram lock.
8860 	 */
8861 	tp->nvram_lock_cnt = 0;
8862 
8863 	/* GRC_MISC_CFG core clock reset will clear the memory
8864 	 * enable bit in PCI register 4 and the MSI enable bit
8865 	 * on some chips, so we save relevant registers here.
8866 	 */
8867 	tg3_save_pci_state(tp);
8868 
8869 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8870 	    tg3_flag(tp, 5755_PLUS))
8871 		tw32(GRC_FASTBOOT_PC, 0);
8872 
8873 	/*
8874 	 * We must avoid the readl() that normally takes place.
8875 	 * It locks machines, causes machine checks, and other
8876 	 * fun things.  So, temporarily disable the 5701
8877 	 * hardware workaround, while we do the reset.
8878 	 */
8879 	write_op = tp->write32;
8880 	if (write_op == tg3_write_flush_reg32)
8881 		tp->write32 = tg3_write32;
8882 
8883 	/* Prevent the irq handler from reading or writing PCI registers
8884 	 * during chip reset when the memory enable bit in the PCI command
8885 	 * register may be cleared.  The chip does not generate interrupts
8886 	 * at this time, but the irq handler may still be called due to irq
8887 	 * sharing or irqpoll.
8888 	 */
8889 	tg3_flag_set(tp, CHIP_RESETTING);
8890 	for (i = 0; i < tp->irq_cnt; i++) {
8891 		struct tg3_napi *tnapi = &tp->napi[i];
8892 		if (tnapi->hw_status) {
8893 			tnapi->hw_status->status = 0;
8894 			tnapi->hw_status->status_tag = 0;
8895 		}
8896 		tnapi->last_tag = 0;
8897 		tnapi->last_irq_tag = 0;
8898 	}
8899 	smp_mb();
8900 
8901 	for (i = 0; i < tp->irq_cnt; i++)
8902 		synchronize_irq(tp->napi[i].irq_vec);
8903 
8904 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8905 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8906 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8907 	}
8908 
8909 	/* do the reset */
8910 	val = GRC_MISC_CFG_CORECLK_RESET;
8911 
8912 	if (tg3_flag(tp, PCI_EXPRESS)) {
8913 		/* Force PCIe 1.0a mode */
8914 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8915 		    !tg3_flag(tp, 57765_PLUS) &&
8916 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
8917 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8918 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8919 
8920 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8921 			tw32(GRC_MISC_CFG, (1 << 29));
8922 			val |= (1 << 29);
8923 		}
8924 	}
8925 
8926 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8927 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8928 		tw32(GRC_VCPU_EXT_CTRL,
8929 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8930 	}
8931 
8932 	/* Manage gphy power for all CPMU-absent PCIe devices. */
8933 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8934 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8935 
8936 	tw32(GRC_MISC_CFG, val);
8937 
8938 	/* restore 5701 hardware bug workaround write method */
8939 	tp->write32 = write_op;
8940 
8941 	/* Unfortunately, we have to delay before the PCI read back.
8942 	 * Some 575X chips will not even respond to a PCI cfg access
8943 	 * when the reset command is given to the chip.
8944 	 *
8945 	 * How do these hardware designers expect things to work
8946 	 * properly if the PCI write is posted for a long period
8947 	 * of time?  It is always necessary to have some method by
8948 	 * which a register read back can occur to push the write
8949 	 * out which does the reset.
8950 	 *
8951 	 * For most tg3 variants the trick below was working.
8952 	 * Ho hum...
8953 	 */
8954 	udelay(120);
8955 
8956 	/* Flush PCI posted writes.  The normal MMIO registers
8957 	 * are inaccessible at this time so this is the only
8958 	 * way to do this reliably (actually, this is no longer
8959 	 * the case, see above).  I tried to use indirect
8960 	 * register read/write but this upset some 5701 variants.
8961 	 */
8962 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8963 
8964 	udelay(120);
8965 
8966 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8967 		u16 val16;
8968 
8969 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8970 			int j;
8971 			u32 cfg_val;
8972 
8973 			/* Wait for link training to complete.  */
8974 			for (j = 0; j < 5000; j++)
8975 				udelay(100);
8976 
8977 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8978 			pci_write_config_dword(tp->pdev, 0xc4,
8979 					       cfg_val | (1 << 15));
8980 		}
8981 
8982 		/* Clear the "no snoop" and "relaxed ordering" bits. */
8983 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8984 		/*
8985 		 * Older PCIe devices only support the 128 byte
8986 		 * MPS setting.  Enforce the restriction.
8987 		 */
8988 		if (!tg3_flag(tp, CPMU_PRESENT))
8989 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8990 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8991 
8992 		/* Clear error status */
8993 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8994 				      PCI_EXP_DEVSTA_CED |
8995 				      PCI_EXP_DEVSTA_NFED |
8996 				      PCI_EXP_DEVSTA_FED |
8997 				      PCI_EXP_DEVSTA_URD);
8998 	}
8999 
9000 	tg3_restore_pci_state(tp);
9001 
9002 	tg3_flag_clear(tp, CHIP_RESETTING);
9003 	tg3_flag_clear(tp, ERROR_PROCESSED);
9004 
9005 	val = 0;
9006 	if (tg3_flag(tp, 5780_CLASS))
9007 		val = tr32(MEMARB_MODE);
9008 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9009 
9010 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9011 		tg3_stop_fw(tp);
9012 		tw32(0x5000, 0x400);
9013 	}
9014 
9015 	if (tg3_flag(tp, IS_SSB_CORE)) {
9016 		/*
9017 		 * BCM4785: In order to avoid repercussions from using
9018 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9019 		 * which is not required for normal operation.
9020 		 */
9021 		tg3_stop_fw(tp);
9022 		tg3_halt_cpu(tp, RX_CPU_BASE);
9023 	}
9024 
9025 	err = tg3_poll_fw(tp);
9026 	if (err)
9027 		return err;
9028 
9029 	tw32(GRC_MODE, tp->grc_mode);
9030 
9031 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9032 		val = tr32(0xc4);
9033 
9034 		tw32(0xc4, val | (1 << 15));
9035 	}
9036 
9037 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9038 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9039 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9040 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9041 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9042 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9043 	}
9044 
9045 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9046 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9047 		val = tp->mac_mode;
9048 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9049 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9050 		val = tp->mac_mode;
9051 	} else
9052 		val = 0;
9053 
9054 	tw32_f(MAC_MODE, val);
9055 	udelay(40);
9056 
9057 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9058 
9059 	tg3_mdio_start(tp);
9060 
9061 	if (tg3_flag(tp, PCI_EXPRESS) &&
9062 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9063 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9064 	    !tg3_flag(tp, 57765_PLUS)) {
9065 		val = tr32(0x7c00);
9066 
9067 		tw32(0x7c00, val | (1 << 25));
9068 	}
9069 
9070 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9071 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9072 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9073 	}
9074 
9075 	/* Reprobe ASF enable state.  */
9076 	tg3_flag_clear(tp, ENABLE_ASF);
9077 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9078 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9079 
9080 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9081 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9082 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9083 		u32 nic_cfg;
9084 
9085 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9086 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9087 			tg3_flag_set(tp, ENABLE_ASF);
9088 			tp->last_event_jiffies = jiffies;
9089 			if (tg3_flag(tp, 5750_PLUS))
9090 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9091 
9092 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9093 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9094 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9095 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9096 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9097 		}
9098 	}
9099 
9100 	return 0;
9101 }
9102 
9103 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9104 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9105 
9106 /* tp->lock is held. */
9107 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9108 {
9109 	int err;
9110 
9111 	tg3_stop_fw(tp);
9112 
9113 	tg3_write_sig_pre_reset(tp, kind);
9114 
9115 	tg3_abort_hw(tp, silent);
9116 	err = tg3_chip_reset(tp);
9117 
9118 	__tg3_set_mac_addr(tp, false);
9119 
9120 	tg3_write_sig_legacy(tp, kind);
9121 	tg3_write_sig_post_reset(tp, kind);
9122 
9123 	if (tp->hw_stats) {
9124 		/* Save the stats across chip resets... */
9125 		tg3_get_nstats(tp, &tp->net_stats_prev);
9126 		tg3_get_estats(tp, &tp->estats_prev);
9127 
9128 		/* And make sure the next sample is new data */
9129 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9130 	}
9131 
9132 	if (err)
9133 		return err;
9134 
9135 	return 0;
9136 }
9137 
9138 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9139 {
9140 	struct tg3 *tp = netdev_priv(dev);
9141 	struct sockaddr *addr = p;
9142 	int err = 0;
9143 	bool skip_mac_1 = false;
9144 
9145 	if (!is_valid_ether_addr(addr->sa_data))
9146 		return -EADDRNOTAVAIL;
9147 
9148 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9149 
9150 	if (!netif_running(dev))
9151 		return 0;
9152 
9153 	if (tg3_flag(tp, ENABLE_ASF)) {
9154 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9155 
9156 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9157 		addr0_low = tr32(MAC_ADDR_0_LOW);
9158 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9159 		addr1_low = tr32(MAC_ADDR_1_LOW);
9160 
9161 		/* Skip MAC addr 1 if ASF is using it. */
9162 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9163 		    !(addr1_high == 0 && addr1_low == 0))
9164 			skip_mac_1 = true;
9165 	}
9166 	spin_lock_bh(&tp->lock);
9167 	__tg3_set_mac_addr(tp, skip_mac_1);
9168 	spin_unlock_bh(&tp->lock);
9169 
9170 	return err;
9171 }
9172 
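/* tg3_set_mac_addr() is wired up as the ndo_set_mac_address hook, so
 * it is what ultimately services, for example:
 *
 *	ip link set dev eth0 address 00:10:18:aa:bb:cc
 */
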
9173 /* tp->lock is held. */
9174 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9175 			   dma_addr_t mapping, u32 maxlen_flags,
9176 			   u32 nic_addr)
9177 {
9178 	tg3_write_mem(tp,
9179 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9180 		      ((u64) mapping >> 32));
9181 	tg3_write_mem(tp,
9182 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9183 		      ((u64) mapping & 0xffffffff));
9184 	tg3_write_mem(tp,
9185 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9186 		       maxlen_flags);
9187 
9188 	if (!tg3_flag(tp, 5705_PLUS))
9189 		tg3_write_mem(tp,
9190 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9191 			      nic_addr);
9192 }
9193 
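/* For orientation, the TG3_BDINFO_* offsets written above lay out one
 * bdinfo slot in NIC memory roughly as follows (see tg3.h for the
 * authoritative definitions):
 *
 *	+0x0	host ring DMA address (64-bit, high word written first)
 *	+0x8	maxlen/flags word
 *	+0xc	NIC-local ring address (pre-5705 chips only)
 */
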
9194 
9195 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9196 {
9197 	int i = 0;
9198 
9199 	if (!tg3_flag(tp, ENABLE_TSS)) {
9200 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9201 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9202 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9203 	} else {
9204 		tw32(HOSTCC_TXCOL_TICKS, 0);
9205 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9206 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9207 
9208 		for (; i < tp->txq_cnt; i++) {
9209 			u32 reg;
9210 
9211 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9212 			tw32(reg, ec->tx_coalesce_usecs);
9213 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9214 			tw32(reg, ec->tx_max_coalesced_frames);
9215 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9216 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9217 		}
9218 	}
9219 
9220 	for (; i < tp->irq_max - 1; i++) {
9221 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9222 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9223 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9224 	}
9225 }
9226 
9227 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9228 {
9229 	int i = 0;
9230 	u32 limit = tp->rxq_cnt;
9231 
9232 	if (!tg3_flag(tp, ENABLE_RSS)) {
9233 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9234 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9235 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9236 		limit--;
9237 	} else {
9238 		tw32(HOSTCC_RXCOL_TICKS, 0);
9239 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9240 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9241 	}
9242 
9243 	for (; i < limit; i++) {
9244 		u32 reg;
9245 
9246 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9247 		tw32(reg, ec->rx_coalesce_usecs);
9248 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9249 		tw32(reg, ec->rx_max_coalesced_frames);
9250 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9251 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9252 	}
9253 
9254 	for (; i < tp->irq_max - 1; i++) {
9255 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9256 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9257 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9258 	}
9259 }
9260 
9261 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9262 {
9263 	tg3_coal_tx_init(tp, ec);
9264 	tg3_coal_rx_init(tp, ec);
9265 
9266 	if (!tg3_flag(tp, 5705_PLUS)) {
9267 		u32 val = ec->stats_block_coalesce_usecs;
9268 
9269 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9270 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9271 
9272 		if (!tp->link_up)
9273 			val = 0;
9274 
9275 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9276 	}
9277 }
9278 
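/* __tg3_set_coalesce() applies an ethtool_coalesce request to the host
 * coalescing engine; the userspace side that eventually lands here
 * looks like, e.g. (parameter values arbitrary):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 */
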
9279 /* tp->lock is held. */
9280 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9281 {
9282 	u32 txrcb, limit;
9283 
9284 	/* Disable all transmit rings but the first. */
9285 	if (!tg3_flag(tp, 5705_PLUS))
9286 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9287 	else if (tg3_flag(tp, 5717_PLUS))
9288 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9289 	else if (tg3_flag(tp, 57765_CLASS) ||
9290 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9291 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9292 	else
9293 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9294 
9295 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9296 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9297 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9298 			      BDINFO_FLAGS_DISABLED);
9299 }
9300 
9301 /* tp->lock is held. */
9302 static void tg3_tx_rcbs_init(struct tg3 *tp)
9303 {
9304 	int i = 0;
9305 	u32 txrcb = NIC_SRAM_SEND_RCB;
9306 
9307 	if (tg3_flag(tp, ENABLE_TSS))
9308 		i++;
9309 
9310 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9311 		struct tg3_napi *tnapi = &tp->napi[i];
9312 
9313 		if (!tnapi->tx_ring)
9314 			continue;
9315 
9316 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9317 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9318 			       NIC_SRAM_TX_BUFFER_DESC);
9319 	}
9320 }
9321 
9322 /* tp->lock is held. */
9323 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9324 {
9325 	u32 rxrcb, limit;
9326 
9327 	/* Disable all receive return rings but the first. */
9328 	if (tg3_flag(tp, 5717_PLUS))
9329 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9330 	else if (!tg3_flag(tp, 5705_PLUS))
9331 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9332 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9333 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9334 		 tg3_flag(tp, 57765_CLASS))
9335 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9336 	else
9337 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9338 
9339 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9340 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9341 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9342 			      BDINFO_FLAGS_DISABLED);
9343 }
9344 
9345 /* tp->lock is held. */
9346 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9347 {
9348 	int i = 0;
9349 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9350 
9351 	if (tg3_flag(tp, ENABLE_RSS))
9352 		i++;
9353 
9354 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9355 		struct tg3_napi *tnapi = &tp->napi[i];
9356 
9357 		if (!tnapi->rx_rcb)
9358 			continue;
9359 
9360 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9361 			       (tp->rx_ret_ring_mask + 1) <<
9362 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9363 	}
9364 }
9365 
9366 /* tp->lock is held. */
9367 static void tg3_rings_reset(struct tg3 *tp)
9368 {
9369 	int i;
9370 	u32 stblk;
9371 	struct tg3_napi *tnapi = &tp->napi[0];
9372 
9373 	tg3_tx_rcbs_disable(tp);
9374 
9375 	tg3_rx_ret_rcbs_disable(tp);
9376 
9377 	/* Disable interrupts */
9378 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9379 	tp->napi[0].chk_msi_cnt = 0;
9380 	tp->napi[0].last_rx_cons = 0;
9381 	tp->napi[0].last_tx_cons = 0;
9382 
9383 	/* Zero mailbox registers. */
9384 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9385 		for (i = 1; i < tp->irq_max; i++) {
9386 			tp->napi[i].tx_prod = 0;
9387 			tp->napi[i].tx_cons = 0;
9388 			if (tg3_flag(tp, ENABLE_TSS))
9389 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9390 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9391 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9392 			tp->napi[i].chk_msi_cnt = 0;
9393 			tp->napi[i].last_rx_cons = 0;
9394 			tp->napi[i].last_tx_cons = 0;
9395 		}
9396 		if (!tg3_flag(tp, ENABLE_TSS))
9397 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9398 	} else {
9399 		tp->napi[0].tx_prod = 0;
9400 		tp->napi[0].tx_cons = 0;
9401 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9402 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9403 	}
9404 
9405 	/* Make sure the NIC-based send BD rings are disabled. */
9406 	if (!tg3_flag(tp, 5705_PLUS)) {
9407 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9408 		for (i = 0; i < 16; i++)
9409 			tw32_tx_mbox(mbox + i * 8, 0);
9410 	}
9411 
9412 	/* Clear status block in ram. */
9413 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9414 
9415 	/* Set status block DMA address */
9416 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9417 	     ((u64) tnapi->status_mapping >> 32));
9418 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9419 	     ((u64) tnapi->status_mapping & 0xffffffff));
9420 
9421 	stblk = HOSTCC_STATBLCK_RING1;
9422 
9423 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9424 		u64 mapping = (u64)tnapi->status_mapping;
9425 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9426 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9427 		stblk += 8;
9428 
9429 		/* Clear status block in ram. */
9430 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9431 	}
9432 
9433 	tg3_tx_rcbs_init(tp);
9434 	tg3_rx_ret_rcbs_init(tp);
9435 }
9436 
9437 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9438 {
9439 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9440 
9441 	if (!tg3_flag(tp, 5750_PLUS) ||
9442 	    tg3_flag(tp, 5780_CLASS) ||
9443 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9444 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9445 	    tg3_flag(tp, 57765_PLUS))
9446 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9447 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9448 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9449 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9450 	else
9451 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9452 
9453 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9454 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9455 
9456 	val = min(nic_rep_thresh, host_rep_thresh);
9457 	tw32(RCVBDI_STD_THRESH, val);
9458 
9459 	if (tg3_flag(tp, 57765_PLUS))
9460 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9461 
9462 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9463 		return;
9464 
9465 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9466 
9467 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9468 
9469 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9470 	tw32(RCVBDI_JUMBO_THRESH, val);
9471 
9472 	if (tg3_flag(tp, 57765_PLUS))
9473 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9474 }
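
/* Worked example for the standard-ring threshold math above: with
 * rx_pending == 200, host_rep_thresh = max(200 / 8, 1) = 25, while
 * nic_rep_thresh = min(bdcache_maxcnt / 2, rx_std_max_post); the value
 * written to RCVBDI_STD_THRESH is the smaller of the two.
 */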
9475 
9476 static inline u32 calc_crc(unsigned char *buf, int len)
9477 {
9478 	u32 reg;
9479 	u32 tmp;
9480 	int j, k;
9481 
9482 	reg = 0xffffffff;
9483 
9484 	for (j = 0; j < len; j++) {
9485 		reg ^= buf[j];
9486 
9487 		for (k = 0; k < 8; k++) {
9488 			tmp = reg & 0x01;
9489 
9490 			reg >>= 1;
9491 
9492 			if (tmp)
9493 				reg ^= 0xedb88320;
9494 		}
9495 	}
9496 
9497 	return ~reg;
9498 }
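
/* calc_crc() is an open-coded little-endian CRC-32 over the Ethernet
 * polynomial (reflected form 0xedb88320) with the usual initial and
 * final inversion.  It appears equivalent to ~ether_crc_le(len, buf)
 * from <linux/crc32.h>, so the multicast hash computed in
 * __tg3_set_rx_mode() below,
 *
 *	bit = ~calc_crc(ha->addr, ETH_ALEN) & 0x7f;
 *
 * reduces to ether_crc_le(ETH_ALEN, ha->addr) & 0x7f.
 */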
9499 
9500 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9501 {
9502 	/* accept or reject all multicast frames */
9503 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9504 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9505 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9506 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9507 }
9508 
9509 static void __tg3_set_rx_mode(struct net_device *dev)
9510 {
9511 	struct tg3 *tp = netdev_priv(dev);
9512 	u32 rx_mode;
9513 
9514 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9515 				  RX_MODE_KEEP_VLAN_TAG);
9516 
9517 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9518 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9519 	 * flag clear.
9520 	 */
9521 	if (!tg3_flag(tp, ENABLE_ASF))
9522 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9523 #endif
9524 
9525 	if (dev->flags & IFF_PROMISC) {
9526 		/* Promiscuous mode. */
9527 		rx_mode |= RX_MODE_PROMISC;
9528 	} else if (dev->flags & IFF_ALLMULTI) {
9529 		/* Accept all multicast. */
9530 		tg3_set_multi(tp, 1);
9531 	} else if (netdev_mc_empty(dev)) {
9532 		/* Reject all multicast. */
9533 		tg3_set_multi(tp, 0);
9534 	} else {
9535 		/* Accept one or more multicast addresses. */
9536 		struct netdev_hw_addr *ha;
9537 		u32 mc_filter[4] = { 0, };
9538 		u32 regidx;
9539 		u32 bit;
9540 		u32 crc;
9541 
9542 		netdev_for_each_mc_addr(ha, dev) {
9543 			crc = calc_crc(ha->addr, ETH_ALEN);
9544 			bit = ~crc & 0x7f;
9545 			regidx = (bit & 0x60) >> 5;
9546 			bit &= 0x1f;
9547 			mc_filter[regidx] |= (1 << bit);
9548 		}
9549 
9550 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9551 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9552 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9553 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9554 	}
9555 
9556 	if (rx_mode != tp->rx_mode) {
9557 		tp->rx_mode = rx_mode;
9558 		tw32_f(MAC_RX_MODE, rx_mode);
9559 		udelay(10);
9560 	}
9561 }
9562 
9563 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9564 {
9565 	int i;
9566 
9567 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9568 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9569 }
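
/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the
 * default table round-robins the TG3_RSS_INDIR_TBL_SIZE slots across
 * the RX queues; with qcnt == 4 the table reads 0, 1, 2, 3, 0, 1, ...
 */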
9570 
9571 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9572 {
9573 	int i;
9574 
9575 	if (!tg3_flag(tp, SUPPORT_MSIX))
9576 		return;
9577 
9578 	if (tp->rxq_cnt == 1) {
9579 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9580 		return;
9581 	}
9582 
9583 	/* Validate table against current IRQ count */
9584 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9585 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9586 			break;
9587 	}
9588 
9589 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9590 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9591 }
9592 
9593 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9594 {
9595 	int i = 0;
9596 	u32 reg = MAC_RSS_INDIR_TBL_0;
9597 
9598 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9599 		u32 val = tp->rss_ind_tbl[i];
9600 		i++;
9601 		for (; i % 8; i++) {
9602 			val <<= 4;
9603 			val |= tp->rss_ind_tbl[i];
9604 		}
9605 		tw32(reg, val);
9606 		reg += 4;
9607 	}
9608 }
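
/* The loop above packs eight 4-bit queue indices into each 32-bit
 * MAC_RSS_INDIR_TBL_* register, first entry in the most significant
 * nibble.  For example, table entries {1, 0, 3, 2, 1, 0, 3, 2} are
 * written out as the single value 0x10321032.
 */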
9609 
9610 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9611 {
9612 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9613 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9614 	else
9615 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9616 }
9617 
9618 /* tp->lock is held. */
9619 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9620 {
9621 	u32 val, rdmac_mode;
9622 	int i, err, limit;
9623 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9624 
9625 	tg3_disable_ints(tp);
9626 
9627 	tg3_stop_fw(tp);
9628 
9629 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9630 
9631 	if (tg3_flag(tp, INIT_COMPLETE))
9632 		tg3_abort_hw(tp, 1);
9633 
9634 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9635 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9636 		tg3_phy_pull_config(tp);
9637 		tg3_eee_pull_config(tp, NULL);
9638 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9639 	}
9640 
9641 	/* Enable MAC control of LPI */
9642 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9643 		tg3_setup_eee(tp);
9644 
9645 	if (reset_phy)
9646 		tg3_phy_reset(tp);
9647 
9648 	err = tg3_chip_reset(tp);
9649 	if (err)
9650 		return err;
9651 
9652 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9653 
9654 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9655 		val = tr32(TG3_CPMU_CTRL);
9656 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9657 		tw32(TG3_CPMU_CTRL, val);
9658 
9659 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9660 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9661 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9662 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9663 
9664 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9665 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9666 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9667 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9668 
9669 		val = tr32(TG3_CPMU_HST_ACC);
9670 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9671 		val |= CPMU_HST_ACC_MACCLK_6_25;
9672 		tw32(TG3_CPMU_HST_ACC, val);
9673 	}
9674 
9675 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9676 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9677 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9678 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9679 		tw32(PCIE_PWR_MGMT_THRESH, val);
9680 
9681 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9682 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9683 
9684 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9685 
9686 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9687 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9688 	}
9689 
9690 	if (tg3_flag(tp, L1PLLPD_EN)) {
9691 		u32 grc_mode = tr32(GRC_MODE);
9692 
9693 		/* Access the lower 1K of PL PCIE block registers. */
9694 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9695 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9696 
9697 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9698 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9699 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9700 
9701 		tw32(GRC_MODE, grc_mode);
9702 	}
9703 
9704 	if (tg3_flag(tp, 57765_CLASS)) {
9705 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9706 			u32 grc_mode = tr32(GRC_MODE);
9707 
9708 			/* Access the lower 1K of PL PCIE block registers. */
9709 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9710 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9711 
9712 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9713 				   TG3_PCIE_PL_LO_PHYCTL5);
9714 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9715 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9716 
9717 			tw32(GRC_MODE, grc_mode);
9718 		}
9719 
9720 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9721 			u32 grc_mode;
9722 
9723 			/* Fix transmit hangs */
9724 			val = tr32(TG3_CPMU_PADRNG_CTL);
9725 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9726 			tw32(TG3_CPMU_PADRNG_CTL, val);
9727 
9728 			grc_mode = tr32(GRC_MODE);
9729 
9730 			/* Access the lower 1K of DL PCIE block registers. */
9731 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9732 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9733 
9734 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9735 				   TG3_PCIE_DL_LO_FTSMAX);
9736 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9737 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9738 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9739 
9740 			tw32(GRC_MODE, grc_mode);
9741 		}
9742 
9743 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9744 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9745 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9746 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9747 	}
9748 
9749 	/* This works around an issue with Athlon chipsets on
9750 	 * B3 tigon3 silicon.  This bit has no effect on any
9751 	 * other revision.  But do not set this on PCI Express
9752 	 * chips and don't even touch the clocks if the CPMU is present.
9753 	 */
9754 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9755 		if (!tg3_flag(tp, PCI_EXPRESS))
9756 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9757 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9758 	}
9759 
9760 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9761 	    tg3_flag(tp, PCIX_MODE)) {
9762 		val = tr32(TG3PCI_PCISTATE);
9763 		val |= PCISTATE_RETRY_SAME_DMA;
9764 		tw32(TG3PCI_PCISTATE, val);
9765 	}
9766 
9767 	if (tg3_flag(tp, ENABLE_APE)) {
9768 		/* Allow reads and writes to the
9769 		 * APE register and memory space.
9770 		 */
9771 		val = tr32(TG3PCI_PCISTATE);
9772 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9773 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9774 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9775 		tw32(TG3PCI_PCISTATE, val);
9776 	}
9777 
9778 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9779 		/* Enable some hw fixes.  */
9780 		val = tr32(TG3PCI_MSI_DATA);
9781 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9782 		tw32(TG3PCI_MSI_DATA, val);
9783 	}
9784 
9785 	/* Descriptor ring init may access the NIC SRAM
9786 	 * area to set up the TX descriptors, so we
9787 	 * can only do this after the hardware has been
9788 	 * successfully reset.
9789 	 */
9790 	err = tg3_init_rings(tp);
9791 	if (err)
9792 		return err;
9793 
9794 	if (tg3_flag(tp, 57765_PLUS)) {
9795 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9796 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9797 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9798 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9799 		if (!tg3_flag(tp, 57765_CLASS) &&
9800 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9801 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9802 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9803 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9804 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9805 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9806 		/* This value is determined during the probe time DMA
9807 		 * engine test, tg3_test_dma.
9808 		 */
9809 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9810 	}
9811 
9812 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9813 			  GRC_MODE_4X_NIC_SEND_RINGS |
9814 			  GRC_MODE_NO_TX_PHDR_CSUM |
9815 			  GRC_MODE_NO_RX_PHDR_CSUM);
9816 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9817 
9818 	/* Pseudo-header checksum is done by hardware logic and not
9819 	 * the offload processors, so make the chip do the pseudo-
9820 	 * header checksums on receive.  For transmit it is more
9821 	 * convenient to do the pseudo-header checksum in software
9822 	 * as Linux does that on transmit for us in all cases.
9823 	 */
9824 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9825 
9826 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9827 	if (tp->rxptpctl)
9828 		tw32(TG3_RX_PTP_CTL,
9829 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9830 
9831 	if (tg3_flag(tp, PTP_CAPABLE))
9832 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9833 
9834 	tw32(GRC_MODE, tp->grc_mode | val);
9835 
9836 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
9837 	val = tr32(GRC_MISC_CFG);
9838 	val &= ~0xff;
9839 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9840 	tw32(GRC_MISC_CFG, val);
9841 
9842 	/* Initialize MBUF/DESC pool. */
9843 	if (tg3_flag(tp, 5750_PLUS)) {
9844 		/* Do nothing.  */
9845 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9846 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9847 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9848 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9849 		else
9850 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9851 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9852 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9853 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9854 		int fw_len;
9855 
9856 		fw_len = tp->fw_len;
9857 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9858 		tw32(BUFMGR_MB_POOL_ADDR,
9859 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9860 		tw32(BUFMGR_MB_POOL_SIZE,
9861 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9862 	}
9863 
9864 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9865 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9866 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9867 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9868 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9869 		tw32(BUFMGR_MB_HIGH_WATER,
9870 		     tp->bufmgr_config.mbuf_high_water);
9871 	} else {
9872 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9873 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9874 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9875 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9876 		tw32(BUFMGR_MB_HIGH_WATER,
9877 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9878 	}
9879 	tw32(BUFMGR_DMA_LOW_WATER,
9880 	     tp->bufmgr_config.dma_low_water);
9881 	tw32(BUFMGR_DMA_HIGH_WATER,
9882 	     tp->bufmgr_config.dma_high_water);
9883 
9884 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9885 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9886 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9887 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9888 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9889 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9890 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9891 	tw32(BUFMGR_MODE, val);
9892 	for (i = 0; i < 2000; i++) {
9893 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9894 			break;
9895 		udelay(10);
9896 	}
9897 	if (i >= 2000) {
9898 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9899 		return -ENODEV;
9900 	}
9901 
9902 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9903 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9904 
9905 	tg3_setup_rxbd_thresholds(tp);
9906 
9907 	/* Initialize TG3_BDINFO's at:
9908 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9909 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9910 	 *  RCVDBDI_MINI_BD:	small frame rx ring (unused; disabled below)
9911 	 *
9912 	 * like so:
9913 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9914 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9915 	 *                              ring attribute flags
9916 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9917 	 *
9918 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9919 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9920 	 *
9921 	 * The size of each ring is fixed in the firmware, but the location is
9922 	 * configurable.
9923 	 */
9924 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9925 	     ((u64) tpr->rx_std_mapping >> 32));
9926 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9927 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
9928 	if (!tg3_flag(tp, 5717_PLUS))
9929 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9930 		     NIC_SRAM_RX_BUFFER_DESC);
9931 
9932 	/* Disable the mini ring */
9933 	if (!tg3_flag(tp, 5705_PLUS))
9934 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9935 		     BDINFO_FLAGS_DISABLED);
9936 
9937 	/* Program the jumbo buffer descriptor ring control
9938 	 * blocks on those devices that have them.
9939 	 */
9940 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9941 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9942 
9943 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9944 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9945 			     ((u64) tpr->rx_jmb_mapping >> 32));
9946 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9947 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9948 			val = TG3_RX_JMB_RING_SIZE(tp) <<
9949 			      BDINFO_FLAGS_MAXLEN_SHIFT;
9950 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9951 			     val | BDINFO_FLAGS_USE_EXT_RECV);
9952 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9953 			    tg3_flag(tp, 57765_CLASS) ||
9954 			    tg3_asic_rev(tp) == ASIC_REV_5762)
9955 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9956 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9957 		} else {
9958 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9959 			     BDINFO_FLAGS_DISABLED);
9960 		}
9961 
9962 		if (tg3_flag(tp, 57765_PLUS)) {
9963 			val = TG3_RX_STD_RING_SIZE(tp);
9964 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9965 			val |= (TG3_RX_STD_DMA_SZ << 2);
9966 		} else
9967 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9968 	} else
9969 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9970 
9971 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9972 
9973 	tpr->rx_std_prod_idx = tp->rx_pending;
9974 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9975 
9976 	tpr->rx_jmb_prod_idx =
9977 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9978 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9979 
9980 	tg3_rings_reset(tp);
9981 
9982 	/* Initialize MAC address and backoff seed. */
9983 	__tg3_set_mac_addr(tp, false);
9984 
9985 	/* MTU + ethernet header + FCS + optional VLAN tag */
9986 	tw32(MAC_RX_MTU_SIZE,
9987 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
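	/* e.g. with the default 1500-byte MTU: 1500 + 14 + 4 + 4 = 1522 */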
9988 
9989 	/* The slot time is changed by tg3_setup_phy if we
9990 	 * run at gigabit with half duplex.
9991 	 */
9992 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9993 	      (6 << TX_LENGTHS_IPG_SHIFT) |
9994 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9995 
9996 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9997 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9998 		val |= tr32(MAC_TX_LENGTHS) &
9999 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10000 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10001 
10002 	tw32(MAC_TX_LENGTHS, val);
10003 
10004 	/* Receive rules. */
10005 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10006 	tw32(RCVLPC_CONFIG, 0x0181);
10007 
10008 	/* Calculate RDMAC_MODE setting early, we need it to determine
10009 	 * the RCVLPC_STATE_ENABLE mask.
10010 	 */
10011 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10012 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10013 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10014 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10015 		      RDMAC_MODE_LNGREAD_ENAB);
10016 
10017 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10018 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10019 
10020 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10021 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10022 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10023 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10024 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10025 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10026 
10027 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10028 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10029 		if (tg3_flag(tp, TSO_CAPABLE) &&
10030 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10031 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10032 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10033 			   !tg3_flag(tp, IS_5788)) {
10034 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10035 		}
10036 	}
10037 
10038 	if (tg3_flag(tp, PCI_EXPRESS))
10039 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10040 
10041 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10042 		tp->dma_limit = 0;
10043 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10044 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10045 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10046 		}
10047 	}
10048 
10049 	if (tg3_flag(tp, HW_TSO_1) ||
10050 	    tg3_flag(tp, HW_TSO_2) ||
10051 	    tg3_flag(tp, HW_TSO_3))
10052 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10053 
10054 	if (tg3_flag(tp, 57765_PLUS) ||
10055 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10056 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10057 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10058 
10059 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10060 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10061 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10062 
10063 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10064 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10065 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10066 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10067 	    tg3_flag(tp, 57765_PLUS)) {
10068 		u32 tgtreg;
10069 
10070 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10071 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10072 		else
10073 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10074 
10075 		val = tr32(tgtreg);
10076 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10077 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10078 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10079 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10080 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10081 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10082 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10083 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10084 		}
10085 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10086 	}
10087 
10088 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10089 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10090 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10091 		u32 tgtreg;
10092 
10093 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10094 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10095 		else
10096 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10097 
10098 		val = tr32(tgtreg);
10099 		tw32(tgtreg, val |
10100 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10101 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10102 	}
10103 
10104 	/* Receive/send statistics. */
10105 	if (tg3_flag(tp, 5750_PLUS)) {
10106 		val = tr32(RCVLPC_STATS_ENABLE);
10107 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10108 		tw32(RCVLPC_STATS_ENABLE, val);
10109 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10110 		   tg3_flag(tp, TSO_CAPABLE)) {
10111 		val = tr32(RCVLPC_STATS_ENABLE);
10112 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10113 		tw32(RCVLPC_STATS_ENABLE, val);
10114 	} else {
10115 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10116 	}
10117 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10118 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10119 	tw32(SNDDATAI_STATSCTRL,
10120 	     (SNDDATAI_SCTRL_ENABLE |
10121 	      SNDDATAI_SCTRL_FASTUPD));
10122 
10123 	/* Setup host coalescing engine. */
10124 	tw32(HOSTCC_MODE, 0);
10125 	for (i = 0; i < 2000; i++) {
10126 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10127 			break;
10128 		udelay(10);
10129 	}
10130 
10131 	__tg3_set_coalesce(tp, &tp->coal);
10132 
10133 	if (!tg3_flag(tp, 5705_PLUS)) {
10134 		/* Status/statistics block address.  See tg3_timer,
10135 		 * the tg3_periodic_fetch_stats call there, and
10136 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10137 		 */
10138 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10139 		     ((u64) tp->stats_mapping >> 32));
10140 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10141 		     ((u64) tp->stats_mapping & 0xffffffff));
10142 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10143 
10144 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10145 
10146 		/* Clear statistics and status block memory areas */
10147 		for (i = NIC_SRAM_STATS_BLK;
10148 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10149 		     i += sizeof(u32)) {
10150 			tg3_write_mem(tp, i, 0);
10151 			udelay(40);
10152 		}
10153 	}
10154 
10155 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10156 
10157 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10158 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10159 	if (!tg3_flag(tp, 5705_PLUS))
10160 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10161 
10162 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10163 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10164 		/* reset to prevent losing 1st rx packet intermittently */
10165 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10166 		udelay(10);
10167 	}
10168 
10169 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10170 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10171 			MAC_MODE_FHDE_ENABLE;
10172 	if (tg3_flag(tp, ENABLE_APE))
10173 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10174 	if (!tg3_flag(tp, 5705_PLUS) &&
10175 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10176 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10177 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10178 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10179 	udelay(40);
10180 
10181 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10182 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10183 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10184 	 * whether used as inputs or outputs, are set by boot code after
10185 	 * reset.
10186 	 */
10187 	if (!tg3_flag(tp, IS_NIC)) {
10188 		u32 gpio_mask;
10189 
10190 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10191 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10192 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10193 
10194 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10195 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10196 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10197 
10198 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10199 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10200 
10201 		tp->grc_local_ctrl &= ~gpio_mask;
10202 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10203 
10204 		/* GPIO1 must be driven high for eeprom write protect */
10205 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10206 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10207 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10208 	}
10209 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10210 	udelay(100);
10211 
10212 	if (tg3_flag(tp, USING_MSIX)) {
10213 		val = tr32(MSGINT_MODE);
10214 		val |= MSGINT_MODE_ENABLE;
10215 		if (tp->irq_cnt > 1)
10216 			val |= MSGINT_MODE_MULTIVEC_EN;
10217 		if (!tg3_flag(tp, 1SHOT_MSI))
10218 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10219 		tw32(MSGINT_MODE, val);
10220 	}
10221 
10222 	if (!tg3_flag(tp, 5705_PLUS)) {
10223 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10224 		udelay(40);
10225 	}
10226 
10227 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10228 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10229 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10230 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10231 	       WDMAC_MODE_LNGREAD_ENAB);
10232 
10233 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10234 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10235 		if (tg3_flag(tp, TSO_CAPABLE) &&
10236 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10237 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10238 			/* nothing */
10239 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10240 			   !tg3_flag(tp, IS_5788)) {
10241 			val |= WDMAC_MODE_RX_ACCEL;
10242 		}
10243 	}
10244 
10245 	/* Enable host coalescing bug fix */
10246 	if (tg3_flag(tp, 5755_PLUS))
10247 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10248 
10249 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10250 		val |= WDMAC_MODE_BURST_ALL_DATA;
10251 
10252 	tw32_f(WDMAC_MODE, val);
10253 	udelay(40);
10254 
10255 	if (tg3_flag(tp, PCIX_MODE)) {
10256 		u16 pcix_cmd;
10257 
10258 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10259 				     &pcix_cmd);
10260 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10261 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10262 			pcix_cmd |= PCI_X_CMD_READ_2K;
10263 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10264 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10265 			pcix_cmd |= PCI_X_CMD_READ_2K;
10266 		}
10267 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10268 				      pcix_cmd);
10269 	}
10270 
10271 	tw32_f(RDMAC_MODE, rdmac_mode);
10272 	udelay(40);
10273 
10274 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10275 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10276 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10277 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10278 				break;
10279 		}
10280 		if (i < TG3_NUM_RDMA_CHANNELS) {
10281 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10282 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10283 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10284 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10285 		}
10286 	}
10287 
10288 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10289 	if (!tg3_flag(tp, 5705_PLUS))
10290 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10291 
10292 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10293 		tw32(SNDDATAC_MODE,
10294 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10295 	else
10296 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10297 
10298 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10299 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10300 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10301 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10302 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10303 	tw32(RCVDBDI_MODE, val);
10304 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10305 	if (tg3_flag(tp, HW_TSO_1) ||
10306 	    tg3_flag(tp, HW_TSO_2) ||
10307 	    tg3_flag(tp, HW_TSO_3))
10308 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10309 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10310 	if (tg3_flag(tp, ENABLE_TSS))
10311 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10312 	tw32(SNDBDI_MODE, val);
10313 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10314 
10315 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10316 		err = tg3_load_5701_a0_firmware_fix(tp);
10317 		if (err)
10318 			return err;
10319 	}
10320 
10321 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10322 		/* Ignore any errors from the firmware download.  If it
10323 		 * fails, the device will operate with EEE disabled.
10324 		 */
10325 		tg3_load_57766_firmware(tp);
10326 	}
10327 
10328 	if (tg3_flag(tp, TSO_CAPABLE)) {
10329 		err = tg3_load_tso_firmware(tp);
10330 		if (err)
10331 			return err;
10332 	}
10333 
10334 	tp->tx_mode = TX_MODE_ENABLE;
10335 
10336 	if (tg3_flag(tp, 5755_PLUS) ||
10337 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10338 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10339 
10340 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10341 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10342 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10343 		tp->tx_mode &= ~val;
10344 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10345 	}
10346 
10347 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10348 	udelay(100);
10349 
10350 	if (tg3_flag(tp, ENABLE_RSS)) {
10351 		tg3_rss_write_indir_tbl(tp);
10352 
10353 		/* Setup the "secret" hash key. */
10354 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10355 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10356 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10357 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10358 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10359 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10360 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10361 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10362 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10363 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10364 	}
10365 
10366 	tp->rx_mode = RX_MODE_ENABLE;
10367 	if (tg3_flag(tp, 5755_PLUS))
10368 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10369 
10370 	if (tg3_flag(tp, ENABLE_RSS))
10371 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10372 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10373 			       RX_MODE_RSS_IPV6_HASH_EN |
10374 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10375 			       RX_MODE_RSS_IPV4_HASH_EN |
10376 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10377 
10378 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10379 	udelay(10);
10380 
10381 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10382 
10383 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10384 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10385 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10386 		udelay(10);
10387 	}
10388 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10389 	udelay(10);
10390 
10391 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10392 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10393 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10394 			/* Set drive transmission level to 1.2V only if
10395 			 * the signal pre-emphasis bit is not set. */
10396 			val = tr32(MAC_SERDES_CFG);
10397 			val &= 0xfffff000;
10398 			val |= 0x880;
10399 			tw32(MAC_SERDES_CFG, val);
10400 		}
10401 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10402 			tw32(MAC_SERDES_CFG, 0x616000);
10403 	}
10404 
10405 	/* Prevent chip from dropping frames when flow control
10406 	 * is enabled.
10407 	 */
10408 	if (tg3_flag(tp, 57765_CLASS))
10409 		val = 1;
10410 	else
10411 		val = 2;
10412 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10413 
10414 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10415 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10416 		/* Use hardware link auto-negotiation */
10417 		tg3_flag_set(tp, HW_AUTONEG);
10418 	}
10419 
10420 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10421 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10422 		u32 tmp;
10423 
10424 		tmp = tr32(SERDES_RX_CTRL);
10425 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10426 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10427 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10428 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10429 	}
10430 
10431 	if (!tg3_flag(tp, USE_PHYLIB)) {
10432 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10433 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10434 
10435 		err = tg3_setup_phy(tp, false);
10436 		if (err)
10437 			return err;
10438 
10439 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10440 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10441 			u32 tmp;
10442 
10443 			/* Clear CRC stats. */
10444 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10445 				tg3_writephy(tp, MII_TG3_TEST1,
10446 					     tmp | MII_TG3_TEST1_CRC_EN);
10447 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10448 			}
10449 		}
10450 	}
10451 
10452 	__tg3_set_rx_mode(tp->dev);
10453 
10454 	/* Initialize receive rules. */
10455 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10456 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10457 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10458 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10459 
10460 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10461 		limit = 8;
10462 	else
10463 		limit = 16;
10464 	if (tg3_flag(tp, ENABLE_ASF))
10465 		limit -= 4;
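
	/* The switch below relies on deliberate fall-through: entering at
	 * case 'limit' clears receive rules limit-1 down through 4.  Rules
	 * 3-0 are left alone (0 and 1 were programmed above), and when ASF
	 * is enabled the top four rules are reserved for the firmware.
	 */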
10466 	switch (limit) {
10467 	case 16:
10468 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10469 	case 15:
10470 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10471 	case 14:
10472 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10473 	case 13:
10474 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10475 	case 12:
10476 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10477 	case 11:
10478 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10479 	case 10:
10480 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10481 	case 9:
10482 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10483 	case 8:
10484 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10485 	case 7:
10486 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10487 	case 6:
10488 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10489 	case 5:
10490 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10491 	case 4:
10492 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10493 	case 3:
10494 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10495 	case 2:
10496 	case 1:
10497 
10498 	default:
10499 		break;
10500 	}
10501 
10502 	if (tg3_flag(tp, ENABLE_APE))
10503 		/* Write our heartbeat update interval to APE. */
10504 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10505 				APE_HOST_HEARTBEAT_INT_DISABLE);
10506 
10507 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10508 
10509 	return 0;
10510 }
10511 
10512 /* Called at device open time to get the chip ready for
10513  * packet processing.  Invoked with tp->lock held.
10514  */
10515 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10516 {
10517 	/* Chip may have been just powered on. If so, the boot code may still
10518 	 * be running initialization. Wait for it to finish to avoid races in
10519 	 * accessing the hardware.
10520 	 */
10521 	tg3_enable_register_access(tp);
10522 	tg3_poll_fw(tp);
10523 
10524 	tg3_switch_clocks(tp);
10525 
10526 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10527 
10528 	return tg3_reset_hw(tp, reset_phy);
10529 }
10530 
10531 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10532 {
10533 	int i;
10534 
10535 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10536 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10537 
10538 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10539 		off += len;
10540 
10541 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10542 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10543 			memset(ocir, 0, TG3_OCIR_LEN);
10544 	}
10545 }
10546 
10547 /* sysfs attributes for hwmon */
10548 static ssize_t tg3_show_temp(struct device *dev,
10549 			     struct device_attribute *devattr, char *buf)
10550 {
10551 	struct pci_dev *pdev = to_pci_dev(dev);
10552 	struct net_device *netdev = pci_get_drvdata(pdev);
10553 	struct tg3 *tp = netdev_priv(netdev);
10554 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10555 	u32 temperature;
10556 
10557 	spin_lock_bh(&tp->lock);
10558 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10559 				sizeof(temperature));
10560 	spin_unlock_bh(&tp->lock);
10561 	return sprintf(buf, "%u\n", temperature * 1000); /* millidegrees C */
10562 }
10563 
10564 
10565 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10566 			  TG3_TEMP_SENSOR_OFFSET);
10567 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10568 			  TG3_TEMP_CAUTION_OFFSET);
10569 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10570 			  TG3_TEMP_MAX_OFFSET);
10571 
10572 static struct attribute *tg3_attributes[] = {
10573 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10574 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10575 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10576 	NULL
10577 };
10578 
10579 static const struct attribute_group tg3_group = {
10580 	.attrs = tg3_attributes,
10581 };
10582 
10583 static void tg3_hwmon_close(struct tg3 *tp)
10584 {
10585 	if (tp->hwmon_dev) {
10586 		hwmon_device_unregister(tp->hwmon_dev);
10587 		tp->hwmon_dev = NULL;
10588 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10589 	}
10590 }
10591 
10592 static void tg3_hwmon_open(struct tg3 *tp)
10593 {
10594 	int i, err;
10595 	u32 size = 0;
10596 	struct pci_dev *pdev = tp->pdev;
10597 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10598 
10599 	tg3_sd_scan_scratchpad(tp, ocirs);
10600 
10601 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10602 		if (!ocirs[i].src_data_length)
10603 			continue;
10604 
10605 		size += ocirs[i].src_hdr_length;
10606 		size += ocirs[i].src_data_length;
10607 	}
10608 
10609 	if (!size)
10610 		return;
10611 
10612 	/* Register hwmon sysfs hooks */
10613 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10614 	if (err) {
10615 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10616 		return;
10617 	}
10618 
10619 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10620 	if (IS_ERR(tp->hwmon_dev)) {
10621 		tp->hwmon_dev = NULL;
10622 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10623 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10624 	}
10625 }
10626 
10627 
10628 #define TG3_STAT_ADD32(PSTAT, REG) \
10629 do {	u32 __val = tr32(REG); \
10630 	(PSTAT)->low += __val; \
10631 	if ((PSTAT)->low < __val) \
10632 		(PSTAT)->high += 1; \
10633 } while (0)
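
/* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit
 * software accumulator: if the new low word is smaller than the value
 * just added, the addition wrapped, so carry into the high word.
 * Example: low == 0xfffffff0 plus a register read of 0x20 leaves
 * low == 0x10 (< 0x20) and bumps high by one.
 */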
10634 
10635 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10636 {
10637 	struct tg3_hw_stats *sp = tp->hw_stats;
10638 
10639 	if (!tp->link_up)
10640 		return;
10641 
10642 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10643 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10644 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10645 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10646 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10647 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10648 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10649 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10650 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10651 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10652 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10653 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10654 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10655 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10656 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10657 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10658 		u32 val;
10659 
10660 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10661 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10662 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10663 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10664 	}
10665 
10666 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10667 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10668 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10669 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10670 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10671 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10672 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10673 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10674 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10675 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10676 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10677 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10678 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10679 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10680 
10681 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10682 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10683 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10684 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10685 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10686 	} else {
10687 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10688 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10689 		if (val) {
10690 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10691 			sp->rx_discards.low += val;
10692 			if (sp->rx_discards.low < val)
10693 				sp->rx_discards.high += 1;
10694 		}
10695 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10696 	}
10697 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10698 }
10699 
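/* Some chips (5717 and the 57765 class; see tg3_timer) can miss an
 * MSI.  If a vector reports pending work but its consumer indices have
 * not moved since the last poll, give it one grace period and then
 * re-drive the interrupt handler by calling tg3_msi() directly.
 */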
10700 static void tg3_chk_missed_msi(struct tg3 *tp)
10701 {
10702 	u32 i;
10703 
10704 	for (i = 0; i < tp->irq_cnt; i++) {
10705 		struct tg3_napi *tnapi = &tp->napi[i];
10706 
10707 		if (tg3_has_work(tnapi)) {
10708 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10709 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10710 				if (tnapi->chk_msi_cnt < 1) {
10711 					tnapi->chk_msi_cnt++;
10712 					return;
10713 				}
10714 				tg3_msi(0, tnapi);
10715 			}
10716 		}
10717 		tnapi->chk_msi_cnt = 0;
10718 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10719 		tnapi->last_tx_cons = tnapi->tx_cons;
10720 	}
10721 }
10722 
10723 static void tg3_timer(unsigned long __opaque)
10724 {
10725 	struct tg3 *tp = (struct tg3 *) __opaque;
10726 
10727 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10728 		goto restart_timer;
10729 
10730 	spin_lock(&tp->lock);
10731 
10732 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10733 	    tg3_flag(tp, 57765_CLASS))
10734 		tg3_chk_missed_msi(tp);
10735 
10736 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10737 		/* BCM4785: Flush posted writes from GbE to host memory. */
10738 		tr32(HOSTCC_MODE);
10739 	}
10740 
10741 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10742 		/* All of this garbage is because, when using non-tagged
10743 		 * IRQ status, the mailbox/status_block protocol the chip
10744 		 * uses with the CPU is race prone.
10745 		 */
10746 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10747 			tw32(GRC_LOCAL_CTRL,
10748 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10749 		} else {
10750 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10751 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10752 		}
10753 
10754 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10755 			spin_unlock(&tp->lock);
10756 			tg3_reset_task_schedule(tp);
10757 			goto restart_timer;
10758 		}
10759 	}
10760 
10761 	/* This part only runs once per second. */
10762 	if (!--tp->timer_counter) {
10763 		if (tg3_flag(tp, 5705_PLUS))
10764 			tg3_periodic_fetch_stats(tp);
10765 
10766 		if (tp->setlpicnt && !--tp->setlpicnt)
10767 			tg3_phy_eee_enable(tp);
10768 
10769 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10770 			u32 mac_stat;
10771 			int phy_event;
10772 
10773 			mac_stat = tr32(MAC_STATUS);
10774 
10775 			phy_event = 0;
10776 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10777 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10778 					phy_event = 1;
10779 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10780 				phy_event = 1;
10781 
10782 			if (phy_event)
10783 				tg3_setup_phy(tp, false);
10784 		} else if (tg3_flag(tp, POLL_SERDES)) {
10785 			u32 mac_stat = tr32(MAC_STATUS);
10786 			int need_setup = 0;
10787 
10788 			if (tp->link_up &&
10789 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10790 				need_setup = 1;
10791 			}
10792 			if (!tp->link_up &&
10793 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10794 					 MAC_STATUS_SIGNAL_DET))) {
10795 				need_setup = 1;
10796 			}
10797 			if (need_setup) {
10798 				if (!tp->serdes_counter) {
10799 					tw32_f(MAC_MODE,
10800 					     (tp->mac_mode &
10801 					      ~MAC_MODE_PORT_MODE_MASK));
10802 					udelay(40);
10803 					tw32_f(MAC_MODE, tp->mac_mode);
10804 					udelay(40);
10805 				}
10806 				tg3_setup_phy(tp, false);
10807 			}
10808 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10809 			   tg3_flag(tp, 5780_CLASS)) {
10810 			tg3_serdes_parallel_detect(tp);
10811 		}
10812 
10813 		tp->timer_counter = tp->timer_multiplier;
10814 	}
10815 
10816 	/* Heartbeat is only sent once every 2 seconds.
10817 	 *
10818 	 * The heartbeat is to tell the ASF firmware that the host
10819 	 * driver is still alive.  In the event that the OS crashes,
10820 	 * ASF needs to reset the hardware to free up the FIFO space
10821 	 * that may be filled with rx packets destined for the host.
10822 	 * If the FIFO is full, ASF will no longer function properly.
10823 	 *
10824 	 * Unintended resets have been reported on real time kernels
10825 	 * where the timer doesn't run on time.  Netpoll will also have
10826 	 * same problem.
10827 	 *
10828 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10829 	 * to check the ring condition when the heartbeat is expiring
10830 	 * before doing the reset.  This will prevent most unintended
10831 	 * resets.
10832 	 */
10833 	if (!--tp->asf_counter) {
10834 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10835 			tg3_wait_for_event_ack(tp);
10836 
10837 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10838 				      FWCMD_NICDRV_ALIVE3);
10839 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10840 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10841 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10842 
10843 			tg3_generate_fw_event(tp);
10844 		}
10845 		tp->asf_counter = tp->asf_multiplier;
10846 	}
10847 
10848 	spin_unlock(&tp->lock);
10849 
10850 restart_timer:
10851 	tp->timer.expires = jiffies + tp->timer_offset;
10852 	add_timer(&tp->timer);
10853 }
10854 
10855 static void tg3_timer_init(struct tg3 *tp)
10856 {
10857 	if (tg3_flag(tp, TAGGED_STATUS) &&
10858 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10859 	    !tg3_flag(tp, 57765_CLASS))
10860 		tp->timer_offset = HZ;
10861 	else
10862 		tp->timer_offset = HZ / 10;
10863 
10864 	BUG_ON(tp->timer_offset > HZ);
10865 
10866 	tp->timer_multiplier = (HZ / tp->timer_offset);
10867 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10868 			     TG3_FW_UPDATE_FREQ_SEC;
10869 
10870 	init_timer(&tp->timer);
10871 	tp->timer.data = (unsigned long) tp;
10872 	tp->timer.function = tg3_timer;
10873 }
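
/* With the above, the timer ticks every timer_offset jiffies: once a
 * second for tagged-status chips, ten times a second otherwise.  Since
 * timer_multiplier == HZ / timer_offset, the "once per second" block
 * in tg3_timer() runs at 1 Hz either way, and asf_multiplier stretches
 * that to one ASF heartbeat every TG3_FW_UPDATE_FREQ_SEC seconds.
 */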
10874 
10875 static void tg3_timer_start(struct tg3 *tp)
10876 {
10877 	tp->asf_counter   = tp->asf_multiplier;
10878 	tp->timer_counter = tp->timer_multiplier;
10879 
10880 	tp->timer.expires = jiffies + tp->timer_offset;
10881 	add_timer(&tp->timer);
10882 }
10883 
10884 static void tg3_timer_stop(struct tg3 *tp)
10885 {
10886 	del_timer_sync(&tp->timer);
10887 }
10888 
10889 /* Restart hardware after configuration changes, self-test, etc.
10890  * Invoked with tp->lock held.
10891  */
10892 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10893 	__releases(tp->lock)
10894 	__acquires(tp->lock)
10895 {
10896 	int err;
10897 
10898 	err = tg3_init_hw(tp, reset_phy);
10899 	if (err) {
10900 		netdev_err(tp->dev,
10901 			   "Failed to re-initialize device, aborting\n");
10902 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10903 		tg3_full_unlock(tp);
10904 		tg3_timer_stop(tp);
10905 		tp->irq_sync = 0;
10906 		tg3_napi_enable(tp);
10907 		dev_close(tp->dev);
10908 		tg3_full_lock(tp, 0);
10909 	}
10910 	return err;
10911 }
10912 
10913 static void tg3_reset_task(struct work_struct *work)
10914 {
10915 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10916 	int err;
10917 
10918 	tg3_full_lock(tp, 0);
10919 
10920 	if (!netif_running(tp->dev)) {
10921 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10922 		tg3_full_unlock(tp);
10923 		return;
10924 	}
10925 
10926 	tg3_full_unlock(tp);
10927 
10928 	tg3_phy_stop(tp);
10929 
10930 	tg3_netif_stop(tp);
10931 
10932 	tg3_full_lock(tp, 1);
10933 
10934 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10935 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
10936 		tp->write32_rx_mbox = tg3_write_flush_reg32;
10937 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
10938 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10939 	}
10940 
10941 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10942 	err = tg3_init_hw(tp, true);
10943 	if (err)
10944 		goto out;
10945 
10946 	tg3_netif_start(tp);
10947 
10948 out:
10949 	tg3_full_unlock(tp);
10950 
10951 	if (!err)
10952 		tg3_phy_start(tp);
10953 
10954 	tg3_flag_clear(tp, RESET_TASK_PENDING);
10955 }
10956 
10957 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10958 {
10959 	irq_handler_t fn;
10960 	unsigned long flags;
10961 	char *name;
10962 	struct tg3_napi *tnapi = &tp->napi[irq_num];
10963 
10964 	if (tp->irq_cnt == 1)
10965 		name = tp->dev->name;
10966 	else {
10967 		name = &tnapi->irq_lbl[0];
10968 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10969 		name[IFNAMSIZ-1] = 0;
10970 	}
10971 
10972 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10973 		fn = tg3_msi;
10974 		if (tg3_flag(tp, 1SHOT_MSI))
10975 			fn = tg3_msi_1shot;
10976 		flags = 0;
10977 	} else {
10978 		fn = tg3_interrupt;
10979 		if (tg3_flag(tp, TAGGED_STATUS))
10980 			fn = tg3_interrupt_tagged;
10981 		flags = IRQF_SHARED;
10982 	}
10983 
10984 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10985 }
10986 
10987 static int tg3_test_interrupt(struct tg3 *tp)
10988 {
10989 	struct tg3_napi *tnapi = &tp->napi[0];
10990 	struct net_device *dev = tp->dev;
10991 	int err, i, intr_ok = 0;
10992 	u32 val;
10993 
10994 	if (!netif_running(dev))
10995 		return -ENODEV;
10996 
10997 	tg3_disable_ints(tp);
10998 
10999 	free_irq(tnapi->irq_vec, tnapi);
11000 
11001 	/*
11002 	 * Turn off MSI one shot mode.  Otherwise this test has no
11003 	 * observable way to know whether the interrupt was delivered.
11004 	 */
11005 	if (tg3_flag(tp, 57765_PLUS)) {
11006 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11007 		tw32(MSGINT_MODE, val);
11008 	}
11009 
11010 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11011 			  IRQF_SHARED, dev->name, tnapi);
11012 	if (err)
11013 		return err;
11014 
11015 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11016 	tg3_enable_ints(tp);
11017 
11018 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11019 	       tnapi->coal_now);
11020 
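	/* Poll for up to ~50ms for evidence that the test interrupt was
	 * delivered: either the interrupt mailbox was updated, or the PCI
	 * interrupt shows up as masked in MISC_HOST_CTRL.
	 */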
11021 	for (i = 0; i < 5; i++) {
11022 		u32 int_mbox, misc_host_ctrl;
11023 
11024 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11025 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11026 
11027 		if ((int_mbox != 0) ||
11028 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11029 			intr_ok = 1;
11030 			break;
11031 		}
11032 
11033 		if (tg3_flag(tp, 57765_PLUS) &&
11034 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11035 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11036 
11037 		msleep(10);
11038 	}
11039 
11040 	tg3_disable_ints(tp);
11041 
11042 	free_irq(tnapi->irq_vec, tnapi);
11043 
11044 	err = tg3_request_irq(tp, 0);
11045 
11046 	if (err)
11047 		return err;
11048 
11049 	if (intr_ok) {
11050 		/* Reenable MSI one shot mode. */
11051 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11052 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11053 			tw32(MSGINT_MODE, val);
11054 		}
11055 		return 0;
11056 	}
11057 
11058 	return -EIO;
11059 }
11060 
11061 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
11062  * is successfully restored.
11063  */
11064 static int tg3_test_msi(struct tg3 *tp)
11065 {
11066 	int err;
11067 	u16 pci_cmd;
11068 
11069 	if (!tg3_flag(tp, USING_MSI))
11070 		return 0;
11071 
11072 	/* Turn off SERR reporting in case MSI terminates with Master
11073 	 * Abort.
11074 	 */
11075 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11076 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11077 			      pci_cmd & ~PCI_COMMAND_SERR);
11078 
11079 	err = tg3_test_interrupt(tp);
11080 
11081 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11082 
11083 	if (!err)
11084 		return 0;
11085 
11086 	/* other failures */
11087 	if (err != -EIO)
11088 		return err;
11089 
11090 	/* MSI test failed, go back to INTx mode */
11091 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11092 		    "to INTx mode. Please report this failure to the PCI "
11093 		    "maintainer and include system chipset information\n");
11094 
11095 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11096 
11097 	pci_disable_msi(tp->pdev);
11098 
11099 	tg3_flag_clear(tp, USING_MSI);
11100 	tp->napi[0].irq_vec = tp->pdev->irq;
11101 
11102 	err = tg3_request_irq(tp, 0);
11103 	if (err)
11104 		return err;
11105 
11106 	/* Need to reset the chip because the MSI cycle may have terminated
11107 	 * with Master Abort.
11108 	 */
11109 	tg3_full_lock(tp, 1);
11110 
11111 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11112 	err = tg3_init_hw(tp, true);
11113 
11114 	tg3_full_unlock(tp);
11115 
11116 	if (err)
11117 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11118 
11119 	return err;
11120 }
11121 
11122 static int tg3_request_firmware(struct tg3 *tp)
11123 {
11124 	const struct tg3_firmware_hdr *fw_hdr;
11125 
11126 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11127 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11128 			   tp->fw_needed);
11129 		return -ENOENT;
11130 	}
11131 
11132 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11133 
11134 	/* Firmware blob starts with version numbers, followed by
11135 	 * start address and _full_ length including BSS sections
11136 	 * (which must be longer than the actual data, of course).
11137 	 */
11138 
11139 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11140 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11141 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11142 			   tp->fw_len, tp->fw_needed);
11143 		release_firmware(tp->fw);
11144 		tp->fw = NULL;
11145 		return -EINVAL;
11146 	}
11147 
11148 	/* We no longer need firmware; we have it. */
11149 	tp->fw_needed = NULL;
11150 	return 0;
11151 }
11152 
11153 static u32 tg3_irq_count(struct tg3 *tp)
11154 {
11155 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11156 
11157 	if (irq_cnt > 1) {
11158 		/* We want as many rx rings enabled as there are cpus.
11159 		 * In multiqueue MSI-X mode, the first MSI-X vector
11160 		 * only deals with link interrupts, etc, so we add
11161 		 * one to the number of vectors we are requesting.
11162 		 */
11163 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11164 	}
11165 
11166 	return irq_cnt;
11167 }
11168 
11169 static bool tg3_enable_msix(struct tg3 *tp)
11170 {
11171 	int i, rc;
11172 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11173 
11174 	tp->txq_cnt = tp->txq_req;
11175 	tp->rxq_cnt = tp->rxq_req;
11176 	if (!tp->rxq_cnt)
11177 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11178 	if (tp->rxq_cnt > tp->rxq_max)
11179 		tp->rxq_cnt = tp->rxq_max;
11180 
11181 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11182 	 * scheduling of the TX rings can cause starvation of rings with
11183 	 * small packets when other rings have TSO or jumbo packets.
11184 	 */
11185 	if (!tp->txq_req)
11186 		tp->txq_cnt = 1;
11187 
11188 	tp->irq_cnt = tg3_irq_count(tp);
11189 
11190 	for (i = 0; i < tp->irq_max; i++) {
11191 		msix_ent[i].entry  = i;
11192 		msix_ent[i].vector = 0;
11193 	}
11194 
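	/* pci_enable_msix() returns a positive count when fewer vectors are
	 * available than requested; retry with that count and shrink the
	 * queue configuration to match.
	 */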
11195 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11196 	if (rc < 0) {
11197 		return false;
11198 	} else if (rc != 0) {
11199 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
11200 			return false;
11201 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11202 			      tp->irq_cnt, rc);
11203 		tp->irq_cnt = rc;
11204 		tp->rxq_cnt = max(rc - 1, 1);
11205 		if (tp->txq_cnt)
11206 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11207 	}
11208 
11209 	for (i = 0; i < tp->irq_max; i++)
11210 		tp->napi[i].irq_vec = msix_ent[i].vector;
11211 
11212 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11213 		pci_disable_msix(tp->pdev);
11214 		return false;
11215 	}
11216 
11217 	if (tp->irq_cnt == 1)
11218 		return true;
11219 
11220 	tg3_flag_set(tp, ENABLE_RSS);
11221 
11222 	if (tp->txq_cnt > 1)
11223 		tg3_flag_set(tp, ENABLE_TSS);
11224 
11225 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11226 
11227 	return true;
11228 }
11229 
11230 static void tg3_ints_init(struct tg3 *tp)
11231 {
11232 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11233 	    !tg3_flag(tp, TAGGED_STATUS)) {
11234 		/* All MSI supporting chips should support tagged
11235 		 * status.  Assert that this is the case.
11236 		 */
11237 		netdev_warn(tp->dev,
11238 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11239 		goto defcfg;
11240 	}
11241 
11242 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11243 		tg3_flag_set(tp, USING_MSIX);
11244 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11245 		tg3_flag_set(tp, USING_MSI);
11246 
11247 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11248 		u32 msi_mode = tr32(MSGINT_MODE);
11249 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11250 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11251 		if (!tg3_flag(tp, 1SHOT_MSI))
11252 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11253 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11254 	}
11255 defcfg:
11256 	if (!tg3_flag(tp, USING_MSIX)) {
11257 		tp->irq_cnt = 1;
11258 		tp->napi[0].irq_vec = tp->pdev->irq;
11259 	}
11260 
11261 	if (tp->irq_cnt == 1) {
11262 		tp->txq_cnt = 1;
11263 		tp->rxq_cnt = 1;
11264 		netif_set_real_num_tx_queues(tp->dev, 1);
11265 		netif_set_real_num_rx_queues(tp->dev, 1);
11266 	}
11267 }
11268 
11269 static void tg3_ints_fini(struct tg3 *tp)
11270 {
11271 	if (tg3_flag(tp, USING_MSIX))
11272 		pci_disable_msix(tp->pdev);
11273 	else if (tg3_flag(tp, USING_MSI))
11274 		pci_disable_msi(tp->pdev);
11275 	tg3_flag_clear(tp, USING_MSI);
11276 	tg3_flag_clear(tp, USING_MSIX);
11277 	tg3_flag_clear(tp, ENABLE_RSS);
11278 	tg3_flag_clear(tp, ENABLE_TSS);
11279 }
11280 
11281 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11282 		     bool init)
11283 {
11284 	struct net_device *dev = tp->dev;
11285 	int i, err;
11286 
11287 	/*
11288 	 * Setup interrupts first so we know how
11289 	 * many NAPI resources to allocate
11290 	 */
11291 	tg3_ints_init(tp);
11292 
11293 	tg3_rss_check_indir_tbl(tp);
11294 
11295 	/* The placement of this call is tied
11296 	 * to the setup and use of Host TX descriptors.
11297 	 */
11298 	err = tg3_alloc_consistent(tp);
11299 	if (err)
11300 		goto out_ints_fini;
11301 
11302 	tg3_napi_init(tp);
11303 
11304 	tg3_napi_enable(tp);
11305 
11306 	for (i = 0; i < tp->irq_cnt; i++) {
11307 		struct tg3_napi *tnapi = &tp->napi[i];
11308 		err = tg3_request_irq(tp, i);
11309 		if (err) {
11310 			for (i--; i >= 0; i--) {
11311 				tnapi = &tp->napi[i];
11312 				free_irq(tnapi->irq_vec, tnapi);
11313 			}
11314 			goto out_napi_fini;
11315 		}
11316 	}
11317 
11318 	tg3_full_lock(tp, 0);
11319 
11320 	if (init)
11321 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11322 
11323 	err = tg3_init_hw(tp, reset_phy);
11324 	if (err) {
11325 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11326 		tg3_free_rings(tp);
11327 	}
11328 
11329 	tg3_full_unlock(tp);
11330 
11331 	if (err)
11332 		goto out_free_irq;
11333 
11334 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11335 		err = tg3_test_msi(tp);
11336 
11337 		if (err) {
11338 			tg3_full_lock(tp, 0);
11339 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11340 			tg3_free_rings(tp);
11341 			tg3_full_unlock(tp);
11342 
11343 			goto out_napi_fini;
11344 		}
11345 
11346 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11347 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11348 
11349 			tw32(PCIE_TRANSACTION_CFG,
11350 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11351 		}
11352 	}
11353 
11354 	tg3_phy_start(tp);
11355 
11356 	tg3_hwmon_open(tp);
11357 
11358 	tg3_full_lock(tp, 0);
11359 
11360 	tg3_timer_start(tp);
11361 	tg3_flag_set(tp, INIT_COMPLETE);
11362 	tg3_enable_ints(tp);
11363 
11364 	if (init)
11365 		tg3_ptp_init(tp);
11366 	else
11367 		tg3_ptp_resume(tp);
11368
11370 	tg3_full_unlock(tp);
11371 
11372 	netif_tx_start_all_queues(dev);
11373 
11374 	/*
11375 	 * Reset the loopback feature if it was turned on while the device
11376 	 * was down; make sure that it's installed properly now.
11377 	 */
11378 	if (dev->features & NETIF_F_LOOPBACK)
11379 		tg3_set_loopback(dev, dev->features);
11380 
11381 	return 0;
11382 
11383 out_free_irq:
11384 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11385 		struct tg3_napi *tnapi = &tp->napi[i];
11386 		free_irq(tnapi->irq_vec, tnapi);
11387 	}
11388 
11389 out_napi_fini:
11390 	tg3_napi_disable(tp);
11391 	tg3_napi_fini(tp);
11392 	tg3_free_consistent(tp);
11393 
11394 out_ints_fini:
11395 	tg3_ints_fini(tp);
11396 
11397 	return err;
11398 }
11399 
11400 static void tg3_stop(struct tg3 *tp)
11401 {
11402 	int i;
11403 
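	/* Teardown order matters: cancel any queued reset task and stop the
	 * timer before halting the chip under the lock, and free the
	 * per-vector IRQs only after interrupts have been disabled.
	 */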
11404 	tg3_reset_task_cancel(tp);
11405 	tg3_netif_stop(tp);
11406 
11407 	tg3_timer_stop(tp);
11408 
11409 	tg3_hwmon_close(tp);
11410 
11411 	tg3_phy_stop(tp);
11412 
11413 	tg3_full_lock(tp, 1);
11414 
11415 	tg3_disable_ints(tp);
11416 
11417 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11418 	tg3_free_rings(tp);
11419 	tg3_flag_clear(tp, INIT_COMPLETE);
11420 
11421 	tg3_full_unlock(tp);
11422 
11423 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11424 		struct tg3_napi *tnapi = &tp->napi[i];
11425 		free_irq(tnapi->irq_vec, tnapi);
11426 	}
11427 
11428 	tg3_ints_fini(tp);
11429 
11430 	tg3_napi_fini(tp);
11431 
11432 	tg3_free_consistent(tp);
11433 }
11434 
11435 static int tg3_open(struct net_device *dev)
11436 {
11437 	struct tg3 *tp = netdev_priv(dev);
11438 	int err;
11439 
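	/* A firmware load failure is treated as fatal only on 5701 A0,
	 * whose firmware patch is presumably required for basic operation;
	 * elsewhere we merely lose TSO (or EEE, on the 57766) and carry on.
	 */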
11440 	if (tp->fw_needed) {
11441 		err = tg3_request_firmware(tp);
11442 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11443 			if (err) {
11444 				netdev_warn(tp->dev, "EEE capability disabled\n");
11445 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11446 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11447 				netdev_warn(tp->dev, "EEE capability restored\n");
11448 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11449 			}
11450 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11451 			if (err)
11452 				return err;
11453 		} else if (err) {
11454 			netdev_warn(tp->dev, "TSO capability disabled\n");
11455 			tg3_flag_clear(tp, TSO_CAPABLE);
11456 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11457 			netdev_notice(tp->dev, "TSO capability restored\n");
11458 			tg3_flag_set(tp, TSO_CAPABLE);
11459 		}
11460 	}
11461 
11462 	tg3_carrier_off(tp);
11463 
11464 	err = tg3_power_up(tp);
11465 	if (err)
11466 		return err;
11467 
11468 	tg3_full_lock(tp, 0);
11469 
11470 	tg3_disable_ints(tp);
11471 	tg3_flag_clear(tp, INIT_COMPLETE);
11472 
11473 	tg3_full_unlock(tp);
11474 
11475 	err = tg3_start(tp,
11476 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11477 			true, true);
11478 	if (err) {
11479 		tg3_frob_aux_power(tp, false);
11480 		pci_set_power_state(tp->pdev, PCI_D3hot);
11481 	}
11482 
11483 	if (tg3_flag(tp, PTP_CAPABLE)) {
11484 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11485 						   &tp->pdev->dev);
11486 		if (IS_ERR(tp->ptp_clock))
11487 			tp->ptp_clock = NULL;
11488 	}
11489 
11490 	return err;
11491 }
11492 
11493 static int tg3_close(struct net_device *dev)
11494 {
11495 	struct tg3 *tp = netdev_priv(dev);
11496 
11497 	tg3_ptp_fini(tp);
11498 
11499 	tg3_stop(tp);
11500 
11501 	/* Clear stats across close / open calls */
11502 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11503 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11504 
11505 	tg3_power_down(tp);
11506 
11507 	tg3_carrier_off(tp);
11508 
11509 	return 0;
11510 }
11511 
11512 static inline u64 get_stat64(tg3_stat64_t *val)
11513 {
11514 	return ((u64)val->high << 32) | ((u64)val->low);
11515 }
11516 
11517 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11518 {
11519 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11520 
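	/* On 5700/5701 copper devices, read CRC errors from the PHY's test
	 * counter register instead of the MAC statistics block; the MAC
	 * FCS counter is presumably unreliable on those chips.
	 */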
11521 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11522 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11523 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11524 		u32 val;
11525 
11526 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11527 			tg3_writephy(tp, MII_TG3_TEST1,
11528 				     val | MII_TG3_TEST1_CRC_EN);
11529 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11530 		} else
11531 			val = 0;
11532 
11533 		tp->phy_crc_errors += val;
11534 
11535 		return tp->phy_crc_errors;
11536 	}
11537 
11538 	return get_stat64(&hw_stats->rx_fcs_errors);
11539 }
11540 
11541 #define ESTAT_ADD(member) \
11542 	estats->member =	old_estats->member + \
11543 				get_stat64(&hw_stats->member)
11544 
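/* ESTAT_ADD() adds the live hardware counter to the previously accumulated
 * snapshot (estats_prev) so totals survive chip resets, which clear the
 * hardware statistics block; tg3_close() zeroes the snapshots.
 */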
11545 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11546 {
11547 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11548 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11549 
11550 	ESTAT_ADD(rx_octets);
11551 	ESTAT_ADD(rx_fragments);
11552 	ESTAT_ADD(rx_ucast_packets);
11553 	ESTAT_ADD(rx_mcast_packets);
11554 	ESTAT_ADD(rx_bcast_packets);
11555 	ESTAT_ADD(rx_fcs_errors);
11556 	ESTAT_ADD(rx_align_errors);
11557 	ESTAT_ADD(rx_xon_pause_rcvd);
11558 	ESTAT_ADD(rx_xoff_pause_rcvd);
11559 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11560 	ESTAT_ADD(rx_xoff_entered);
11561 	ESTAT_ADD(rx_frame_too_long_errors);
11562 	ESTAT_ADD(rx_jabbers);
11563 	ESTAT_ADD(rx_undersize_packets);
11564 	ESTAT_ADD(rx_in_length_errors);
11565 	ESTAT_ADD(rx_out_length_errors);
11566 	ESTAT_ADD(rx_64_or_less_octet_packets);
11567 	ESTAT_ADD(rx_65_to_127_octet_packets);
11568 	ESTAT_ADD(rx_128_to_255_octet_packets);
11569 	ESTAT_ADD(rx_256_to_511_octet_packets);
11570 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11571 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11572 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11573 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11574 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11575 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11576 
11577 	ESTAT_ADD(tx_octets);
11578 	ESTAT_ADD(tx_collisions);
11579 	ESTAT_ADD(tx_xon_sent);
11580 	ESTAT_ADD(tx_xoff_sent);
11581 	ESTAT_ADD(tx_flow_control);
11582 	ESTAT_ADD(tx_mac_errors);
11583 	ESTAT_ADD(tx_single_collisions);
11584 	ESTAT_ADD(tx_mult_collisions);
11585 	ESTAT_ADD(tx_deferred);
11586 	ESTAT_ADD(tx_excessive_collisions);
11587 	ESTAT_ADD(tx_late_collisions);
11588 	ESTAT_ADD(tx_collide_2times);
11589 	ESTAT_ADD(tx_collide_3times);
11590 	ESTAT_ADD(tx_collide_4times);
11591 	ESTAT_ADD(tx_collide_5times);
11592 	ESTAT_ADD(tx_collide_6times);
11593 	ESTAT_ADD(tx_collide_7times);
11594 	ESTAT_ADD(tx_collide_8times);
11595 	ESTAT_ADD(tx_collide_9times);
11596 	ESTAT_ADD(tx_collide_10times);
11597 	ESTAT_ADD(tx_collide_11times);
11598 	ESTAT_ADD(tx_collide_12times);
11599 	ESTAT_ADD(tx_collide_13times);
11600 	ESTAT_ADD(tx_collide_14times);
11601 	ESTAT_ADD(tx_collide_15times);
11602 	ESTAT_ADD(tx_ucast_packets);
11603 	ESTAT_ADD(tx_mcast_packets);
11604 	ESTAT_ADD(tx_bcast_packets);
11605 	ESTAT_ADD(tx_carrier_sense_errors);
11606 	ESTAT_ADD(tx_discards);
11607 	ESTAT_ADD(tx_errors);
11608 
11609 	ESTAT_ADD(dma_writeq_full);
11610 	ESTAT_ADD(dma_write_prioq_full);
11611 	ESTAT_ADD(rxbds_empty);
11612 	ESTAT_ADD(rx_discards);
11613 	ESTAT_ADD(rx_errors);
11614 	ESTAT_ADD(rx_threshold_hit);
11615 
11616 	ESTAT_ADD(dma_readq_full);
11617 	ESTAT_ADD(dma_read_prioq_full);
11618 	ESTAT_ADD(tx_comp_queue_full);
11619 
11620 	ESTAT_ADD(ring_set_send_prod_index);
11621 	ESTAT_ADD(ring_status_update);
11622 	ESTAT_ADD(nic_irqs);
11623 	ESTAT_ADD(nic_avoided_irqs);
11624 	ESTAT_ADD(nic_tx_threshold_hit);
11625 
11626 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11627 }
11628 
11629 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11630 {
11631 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11632 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11633 
11634 	stats->rx_packets = old_stats->rx_packets +
11635 		get_stat64(&hw_stats->rx_ucast_packets) +
11636 		get_stat64(&hw_stats->rx_mcast_packets) +
11637 		get_stat64(&hw_stats->rx_bcast_packets);
11638 
11639 	stats->tx_packets = old_stats->tx_packets +
11640 		get_stat64(&hw_stats->tx_ucast_packets) +
11641 		get_stat64(&hw_stats->tx_mcast_packets) +
11642 		get_stat64(&hw_stats->tx_bcast_packets);
11643 
11644 	stats->rx_bytes = old_stats->rx_bytes +
11645 		get_stat64(&hw_stats->rx_octets);
11646 	stats->tx_bytes = old_stats->tx_bytes +
11647 		get_stat64(&hw_stats->tx_octets);
11648 
11649 	stats->rx_errors = old_stats->rx_errors +
11650 		get_stat64(&hw_stats->rx_errors);
11651 	stats->tx_errors = old_stats->tx_errors +
11652 		get_stat64(&hw_stats->tx_errors) +
11653 		get_stat64(&hw_stats->tx_mac_errors) +
11654 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11655 		get_stat64(&hw_stats->tx_discards);
11656 
11657 	stats->multicast = old_stats->multicast +
11658 		get_stat64(&hw_stats->rx_mcast_packets);
11659 	stats->collisions = old_stats->collisions +
11660 		get_stat64(&hw_stats->tx_collisions);
11661 
11662 	stats->rx_length_errors = old_stats->rx_length_errors +
11663 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11664 		get_stat64(&hw_stats->rx_undersize_packets);
11665 
11666 	stats->rx_over_errors = old_stats->rx_over_errors +
11667 		get_stat64(&hw_stats->rxbds_empty);
11668 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11669 		get_stat64(&hw_stats->rx_align_errors);
11670 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11671 		get_stat64(&hw_stats->tx_discards);
11672 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11673 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11674 
11675 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11676 		tg3_calc_crc_errors(tp);
11677 
11678 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11679 		get_stat64(&hw_stats->rx_discards);
11680 
11681 	stats->rx_dropped = tp->rx_dropped;
11682 	stats->tx_dropped = tp->tx_dropped;
11683 }
11684 
11685 static int tg3_get_regs_len(struct net_device *dev)
11686 {
11687 	return TG3_REG_BLK_SIZE;
11688 }
11689 
11690 static void tg3_get_regs(struct net_device *dev,
11691 		struct ethtool_regs *regs, void *_p)
11692 {
11693 	struct tg3 *tp = netdev_priv(dev);
11694 
11695 	regs->version = 0;
11696 
11697 	memset(_p, 0, TG3_REG_BLK_SIZE);
11698 
11699 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11700 		return;
11701 
11702 	tg3_full_lock(tp, 0);
11703 
11704 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11705 
11706 	tg3_full_unlock(tp);
11707 }
11708 
11709 static int tg3_get_eeprom_len(struct net_device *dev)
11710 {
11711 	struct tg3 *tp = netdev_priv(dev);
11712 
11713 	return tp->nvram_size;
11714 }
11715 
11716 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11717 {
11718 	struct tg3 *tp = netdev_priv(dev);
11719 	int ret;
11720 	u8  *pd;
11721 	u32 i, offset, len, b_offset, b_count;
11722 	__be32 val;
11723 
11724 	if (tg3_flag(tp, NO_NVRAM))
11725 		return -EINVAL;
11726 
11727 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11728 		return -EAGAIN;
11729 
11730 	offset = eeprom->offset;
11731 	len = eeprom->len;
11732 	eeprom->len = 0;
11733 
11734 	eeprom->magic = TG3_EEPROM_MAGIC;
11735 
11736 	if (offset & 3) {
11737 		/* adjustments to start on required 4 byte boundary */
11738 		b_offset = offset & 3;
11739 		b_count = 4 - b_offset;
11740 		if (b_count > len) {
11741 			/* i.e. offset=1 len=2 */
11742 			b_count = len;
11743 		}
11744 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11745 		if (ret)
11746 			return ret;
11747 		memcpy(data, ((char *)&val) + b_offset, b_count);
11748 		len -= b_count;
11749 		offset += b_count;
11750 		eeprom->len += b_count;
11751 	}
11752 
11753 	/* read bytes up to the last 4 byte boundary */
11754 	pd = &data[eeprom->len];
11755 	for (i = 0; i < (len - (len & 3)); i += 4) {
11756 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11757 		if (ret) {
11758 			eeprom->len += i;
11759 			return ret;
11760 		}
11761 		memcpy(pd + i, &val, 4);
11762 	}
11763 	eeprom->len += i;
11764 
11765 	if (len & 3) {
11766 		/* read last bytes not ending on 4 byte boundary */
11767 		pd = &data[eeprom->len];
11768 		b_count = len & 3;
11769 		b_offset = offset + len - b_count;
11770 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11771 		if (ret)
11772 			return ret;
11773 		memcpy(pd, &val, b_count);
11774 		eeprom->len += b_count;
11775 	}
11776 	return 0;
11777 }
11778 
11779 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11780 {
11781 	struct tg3 *tp = netdev_priv(dev);
11782 	int ret;
11783 	u32 offset, len, b_offset, odd_len;
11784 	u8 *buf;
11785 	__be32 start, end;
11786 
11787 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11788 		return -EAGAIN;
11789 
11790 	if (tg3_flag(tp, NO_NVRAM) ||
11791 	    eeprom->magic != TG3_EEPROM_MAGIC)
11792 		return -EINVAL;
11793 
11794 	offset = eeprom->offset;
11795 	len = eeprom->len;
11796 
11797 	if ((b_offset = (offset & 3))) {
11798 		/* adjustments to start on required 4 byte boundary */
11799 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11800 		if (ret)
11801 			return ret;
11802 		len += b_offset;
11803 		offset &= ~3;
11804 		if (len < 4)
11805 			len = 4;
11806 	}
11807 
11808 	odd_len = 0;
11809 	if (len & 3) {
11810 		/* adjustments to end on required 4 byte boundary */
11811 		odd_len = 1;
11812 		len = (len + 3) & ~3;
11813 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11814 		if (ret)
11815 			return ret;
11816 	}
11817 
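	/* NVRAM writes must cover whole 4-byte words.  For an unaligned
	 * request, build a bounce buffer padded at both ends with the
	 * existing NVRAM contents read above.
	 */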
11818 	buf = data;
11819 	if (b_offset || odd_len) {
11820 		buf = kmalloc(len, GFP_KERNEL);
11821 		if (!buf)
11822 			return -ENOMEM;
11823 		if (b_offset)
11824 			memcpy(buf, &start, 4);
11825 		if (odd_len)
11826 			memcpy(buf+len-4, &end, 4);
11827 		memcpy(buf + b_offset, data, eeprom->len);
11828 	}
11829 
11830 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11831 
11832 	if (buf != data)
11833 		kfree(buf);
11834 
11835 	return ret;
11836 }
11837 
11838 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11839 {
11840 	struct tg3 *tp = netdev_priv(dev);
11841 
11842 	if (tg3_flag(tp, USE_PHYLIB)) {
11843 		struct phy_device *phydev;
11844 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11845 			return -EAGAIN;
11846 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11847 		return phy_ethtool_gset(phydev, cmd);
11848 	}
11849 
11850 	cmd->supported = (SUPPORTED_Autoneg);
11851 
11852 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11853 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11854 				   SUPPORTED_1000baseT_Full);
11855 
11856 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11857 		cmd->supported |= (SUPPORTED_100baseT_Half |
11858 				  SUPPORTED_100baseT_Full |
11859 				  SUPPORTED_10baseT_Half |
11860 				  SUPPORTED_10baseT_Full |
11861 				  SUPPORTED_TP);
11862 		cmd->port = PORT_TP;
11863 	} else {
11864 		cmd->supported |= SUPPORTED_FIBRE;
11865 		cmd->port = PORT_FIBRE;
11866 	}
11867 
11868 	cmd->advertising = tp->link_config.advertising;
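	/* Map the flow-control configuration onto 802.3 pause advertisement
	 * bits: RX+TX -> Pause, RX-only -> Pause|Asym_Pause, and
	 * TX-only -> Asym_Pause.
	 */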
11869 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11870 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11871 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11872 				cmd->advertising |= ADVERTISED_Pause;
11873 			} else {
11874 				cmd->advertising |= ADVERTISED_Pause |
11875 						    ADVERTISED_Asym_Pause;
11876 			}
11877 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11878 			cmd->advertising |= ADVERTISED_Asym_Pause;
11879 		}
11880 	}
11881 	if (netif_running(dev) && tp->link_up) {
11882 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11883 		cmd->duplex = tp->link_config.active_duplex;
11884 		cmd->lp_advertising = tp->link_config.rmt_adv;
11885 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11886 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11887 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11888 			else
11889 				cmd->eth_tp_mdix = ETH_TP_MDI;
11890 		}
11891 	} else {
11892 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11893 		cmd->duplex = DUPLEX_UNKNOWN;
11894 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11895 	}
11896 	cmd->phy_address = tp->phy_addr;
11897 	cmd->transceiver = XCVR_INTERNAL;
11898 	cmd->autoneg = tp->link_config.autoneg;
11899 	cmd->maxtxpkt = 0;
11900 	cmd->maxrxpkt = 0;
11901 	return 0;
11902 }
11903 
11904 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11905 {
11906 	struct tg3 *tp = netdev_priv(dev);
11907 	u32 speed = ethtool_cmd_speed(cmd);
11908 
11909 	if (tg3_flag(tp, USE_PHYLIB)) {
11910 		struct phy_device *phydev;
11911 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11912 			return -EAGAIN;
11913 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11914 		return phy_ethtool_sset(phydev, cmd);
11915 	}
11916 
11917 	if (cmd->autoneg != AUTONEG_ENABLE &&
11918 	    cmd->autoneg != AUTONEG_DISABLE)
11919 		return -EINVAL;
11920 
11921 	if (cmd->autoneg == AUTONEG_DISABLE &&
11922 	    cmd->duplex != DUPLEX_FULL &&
11923 	    cmd->duplex != DUPLEX_HALF)
11924 		return -EINVAL;
11925 
11926 	if (cmd->autoneg == AUTONEG_ENABLE) {
11927 		u32 mask = ADVERTISED_Autoneg |
11928 			   ADVERTISED_Pause |
11929 			   ADVERTISED_Asym_Pause;
11930 
11931 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11932 			mask |= ADVERTISED_1000baseT_Half |
11933 				ADVERTISED_1000baseT_Full;
11934 
11935 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11936 			mask |= ADVERTISED_100baseT_Half |
11937 				ADVERTISED_100baseT_Full |
11938 				ADVERTISED_10baseT_Half |
11939 				ADVERTISED_10baseT_Full |
11940 				ADVERTISED_TP;
11941 		else
11942 			mask |= ADVERTISED_FIBRE;
11943 
11944 		if (cmd->advertising & ~mask)
11945 			return -EINVAL;
11946 
11947 		mask &= (ADVERTISED_1000baseT_Half |
11948 			 ADVERTISED_1000baseT_Full |
11949 			 ADVERTISED_100baseT_Half |
11950 			 ADVERTISED_100baseT_Full |
11951 			 ADVERTISED_10baseT_Half |
11952 			 ADVERTISED_10baseT_Full);
11953 
11954 		cmd->advertising &= mask;
11955 	} else {
11956 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11957 			if (speed != SPEED_1000)
11958 				return -EINVAL;
11959 
11960 			if (cmd->duplex != DUPLEX_FULL)
11961 				return -EINVAL;
11962 		} else {
11963 			if (speed != SPEED_100 &&
11964 			    speed != SPEED_10)
11965 				return -EINVAL;
11966 		}
11967 	}
11968 
11969 	tg3_full_lock(tp, 0);
11970 
11971 	tp->link_config.autoneg = cmd->autoneg;
11972 	if (cmd->autoneg == AUTONEG_ENABLE) {
11973 		tp->link_config.advertising = (cmd->advertising |
11974 					      ADVERTISED_Autoneg);
11975 		tp->link_config.speed = SPEED_UNKNOWN;
11976 		tp->link_config.duplex = DUPLEX_UNKNOWN;
11977 	} else {
11978 		tp->link_config.advertising = 0;
11979 		tp->link_config.speed = speed;
11980 		tp->link_config.duplex = cmd->duplex;
11981 	}
11982 
11983 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11984 
11985 	tg3_warn_mgmt_link_flap(tp);
11986 
11987 	if (netif_running(dev))
11988 		tg3_setup_phy(tp, true);
11989 
11990 	tg3_full_unlock(tp);
11991 
11992 	return 0;
11993 }
11994 
11995 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11996 {
11997 	struct tg3 *tp = netdev_priv(dev);
11998 
11999 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12000 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12001 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12002 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12003 }
12004 
12005 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12006 {
12007 	struct tg3 *tp = netdev_priv(dev);
12008 
12009 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12010 		wol->supported = WAKE_MAGIC;
12011 	else
12012 		wol->supported = 0;
12013 	wol->wolopts = 0;
12014 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12015 		wol->wolopts = WAKE_MAGIC;
12016 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12017 }
12018 
12019 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12020 {
12021 	struct tg3 *tp = netdev_priv(dev);
12022 	struct device *dp = &tp->pdev->dev;
12023 
12024 	if (wol->wolopts & ~WAKE_MAGIC)
12025 		return -EINVAL;
12026 	if ((wol->wolopts & WAKE_MAGIC) &&
12027 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12028 		return -EINVAL;
12029 
12030 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12031 
12032 	spin_lock_bh(&tp->lock);
12033 	if (device_may_wakeup(dp))
12034 		tg3_flag_set(tp, WOL_ENABLE);
12035 	else
12036 		tg3_flag_clear(tp, WOL_ENABLE);
12037 	spin_unlock_bh(&tp->lock);
12038 
12039 	return 0;
12040 }
12041 
12042 static u32 tg3_get_msglevel(struct net_device *dev)
12043 {
12044 	struct tg3 *tp = netdev_priv(dev);
12045 	return tp->msg_enable;
12046 }
12047 
12048 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12049 {
12050 	struct tg3 *tp = netdev_priv(dev);
12051 	tp->msg_enable = value;
12052 }
12053 
12054 static int tg3_nway_reset(struct net_device *dev)
12055 {
12056 	struct tg3 *tp = netdev_priv(dev);
12057 	int r;
12058 
12059 	if (!netif_running(dev))
12060 		return -EAGAIN;
12061 
12062 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12063 		return -EINVAL;
12064 
12065 	tg3_warn_mgmt_link_flap(tp);
12066 
12067 	if (tg3_flag(tp, USE_PHYLIB)) {
12068 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12069 			return -EAGAIN;
12070 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12071 	} else {
12072 		u32 bmcr;
12073 
12074 		spin_lock_bh(&tp->lock);
12075 		r = -EINVAL;
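		/* The first read looks like a dummy; presumably it flushes a
		 * stale latched value before the read whose result we test.
		 */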
12076 		tg3_readphy(tp, MII_BMCR, &bmcr);
12077 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12078 		    ((bmcr & BMCR_ANENABLE) ||
12079 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12080 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12081 						   BMCR_ANENABLE);
12082 			r = 0;
12083 		}
12084 		spin_unlock_bh(&tp->lock);
12085 	}
12086 
12087 	return r;
12088 }
12089 
12090 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12091 {
12092 	struct tg3 *tp = netdev_priv(dev);
12093 
12094 	ering->rx_max_pending = tp->rx_std_ring_mask;
12095 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12096 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12097 	else
12098 		ering->rx_jumbo_max_pending = 0;
12099 
12100 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12101 
12102 	ering->rx_pending = tp->rx_pending;
12103 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12104 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12105 	else
12106 		ering->rx_jumbo_pending = 0;
12107 
12108 	ering->tx_pending = tp->napi[0].tx_pending;
12109 }
12110 
12111 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12112 {
12113 	struct tg3 *tp = netdev_priv(dev);
12114 	int i, irq_sync = 0, err = 0;
12115 
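	/* The TX ring must hold at least one maximally fragmented skb, and
	 * roughly three times that on chips with the TSO bug workaround,
	 * hence the MAX_SKB_FRAGS lower bounds below.
	 */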
12116 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12117 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12118 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12119 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12120 	    (tg3_flag(tp, TSO_BUG) &&
12121 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12122 		return -EINVAL;
12123 
12124 	if (netif_running(dev)) {
12125 		tg3_phy_stop(tp);
12126 		tg3_netif_stop(tp);
12127 		irq_sync = 1;
12128 	}
12129 
12130 	tg3_full_lock(tp, irq_sync);
12131 
12132 	tp->rx_pending = ering->rx_pending;
12133 
12134 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12135 	    tp->rx_pending > 63)
12136 		tp->rx_pending = 63;
12137 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12138 
12139 	for (i = 0; i < tp->irq_max; i++)
12140 		tp->napi[i].tx_pending = ering->tx_pending;
12141 
12142 	if (netif_running(dev)) {
12143 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12144 		err = tg3_restart_hw(tp, false);
12145 		if (!err)
12146 			tg3_netif_start(tp);
12147 	}
12148 
12149 	tg3_full_unlock(tp);
12150 
12151 	if (irq_sync && !err)
12152 		tg3_phy_start(tp);
12153 
12154 	return err;
12155 }
12156 
12157 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12158 {
12159 	struct tg3 *tp = netdev_priv(dev);
12160 
12161 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12162 
12163 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12164 		epause->rx_pause = 1;
12165 	else
12166 		epause->rx_pause = 0;
12167 
12168 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12169 		epause->tx_pause = 1;
12170 	else
12171 		epause->tx_pause = 0;
12172 }
12173 
12174 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12175 {
12176 	struct tg3 *tp = netdev_priv(dev);
12177 	int err = 0;
12178 
12179 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12180 		tg3_warn_mgmt_link_flap(tp);
12181 
12182 	if (tg3_flag(tp, USE_PHYLIB)) {
12183 		u32 newadv;
12184 		struct phy_device *phydev;
12185 
12186 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12187 
12188 		if (!(phydev->supported & SUPPORTED_Pause) ||
12189 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12190 		     (epause->rx_pause != epause->tx_pause)))
12191 			return -EINVAL;
12192 
12193 		tp->link_config.flowctrl = 0;
12194 		if (epause->rx_pause) {
12195 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12196 
12197 			if (epause->tx_pause) {
12198 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12199 				newadv = ADVERTISED_Pause;
12200 			} else
12201 				newadv = ADVERTISED_Pause |
12202 					 ADVERTISED_Asym_Pause;
12203 		} else if (epause->tx_pause) {
12204 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12205 			newadv = ADVERTISED_Asym_Pause;
12206 		} else
12207 			newadv = 0;
12208 
12209 		if (epause->autoneg)
12210 			tg3_flag_set(tp, PAUSE_AUTONEG);
12211 		else
12212 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12213 
12214 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12215 			u32 oldadv = phydev->advertising &
12216 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12217 			if (oldadv != newadv) {
12218 				phydev->advertising &=
12219 					~(ADVERTISED_Pause |
12220 					  ADVERTISED_Asym_Pause);
12221 				phydev->advertising |= newadv;
12222 				if (phydev->autoneg) {
12223 					/*
12224 					 * Always renegotiate the link to
12225 					 * inform our link partner of our
12226 					 * flow control settings, even if the
12227 					 * flow control is forced.  Let
12228 					 * tg3_adjust_link() do the final
12229 					 * flow control setup.
12230 					 */
12231 					return phy_start_aneg(phydev);
12232 				}
12233 			}
12234 
12235 			if (!epause->autoneg)
12236 				tg3_setup_flow_control(tp, 0, 0);
12237 		} else {
12238 			tp->link_config.advertising &=
12239 					~(ADVERTISED_Pause |
12240 					  ADVERTISED_Asym_Pause);
12241 			tp->link_config.advertising |= newadv;
12242 		}
12243 	} else {
12244 		int irq_sync = 0;
12245 
12246 		if (netif_running(dev)) {
12247 			tg3_netif_stop(tp);
12248 			irq_sync = 1;
12249 		}
12250 
12251 		tg3_full_lock(tp, irq_sync);
12252 
12253 		if (epause->autoneg)
12254 			tg3_flag_set(tp, PAUSE_AUTONEG);
12255 		else
12256 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12257 		if (epause->rx_pause)
12258 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12259 		else
12260 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12261 		if (epause->tx_pause)
12262 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12263 		else
12264 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12265 
12266 		if (netif_running(dev)) {
12267 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12268 			err = tg3_restart_hw(tp, false);
12269 			if (!err)
12270 				tg3_netif_start(tp);
12271 		}
12272 
12273 		tg3_full_unlock(tp);
12274 	}
12275 
12276 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12277 
12278 	return err;
12279 }
12280 
12281 static int tg3_get_sset_count(struct net_device *dev, int sset)
12282 {
12283 	switch (sset) {
12284 	case ETH_SS_TEST:
12285 		return TG3_NUM_TEST;
12286 	case ETH_SS_STATS:
12287 		return TG3_NUM_STATS;
12288 	default:
12289 		return -EOPNOTSUPP;
12290 	}
12291 }
12292 
12293 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12294 			 u32 *rules __always_unused)
12295 {
12296 	struct tg3 *tp = netdev_priv(dev);
12297 
12298 	if (!tg3_flag(tp, SUPPORT_MSIX))
12299 		return -EOPNOTSUPP;
12300 
12301 	switch (info->cmd) {
12302 	case ETHTOOL_GRXRINGS:
12303 		if (netif_running(tp->dev))
12304 			info->data = tp->rxq_cnt;
12305 		else {
12306 			info->data = num_online_cpus();
12307 			if (info->data > TG3_RSS_MAX_NUM_QS)
12308 				info->data = TG3_RSS_MAX_NUM_QS;
12309 		}
12310 
12311 		/* The first interrupt vector only
12312 		 * handles link interrupts.
12313 		 */
12314 		info->data -= 1;
12315 		return 0;
12316 
12317 	default:
12318 		return -EOPNOTSUPP;
12319 	}
12320 }
12321 
12322 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12323 {
12324 	u32 size = 0;
12325 	struct tg3 *tp = netdev_priv(dev);
12326 
12327 	if (tg3_flag(tp, SUPPORT_MSIX))
12328 		size = TG3_RSS_INDIR_TBL_SIZE;
12329 
12330 	return size;
12331 }
12332 
12333 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12334 {
12335 	struct tg3 *tp = netdev_priv(dev);
12336 	int i;
12337 
12338 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12339 		indir[i] = tp->rss_ind_tbl[i];
12340 
12341 	return 0;
12342 }
12343 
12344 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12345 {
12346 	struct tg3 *tp = netdev_priv(dev);
12347 	size_t i;
12348 
12349 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12350 		tp->rss_ind_tbl[i] = indir[i];
12351 
12352 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12353 		return 0;
12354 
12355 	/* It is legal to write the indirection
12356 	 * table while the device is running.
12357 	 */
12358 	tg3_full_lock(tp, 0);
12359 	tg3_rss_write_indir_tbl(tp);
12360 	tg3_full_unlock(tp);
12361 
12362 	return 0;
12363 }
12364 
12365 static void tg3_get_channels(struct net_device *dev,
12366 			     struct ethtool_channels *channel)
12367 {
12368 	struct tg3 *tp = netdev_priv(dev);
12369 	u32 deflt_qs = netif_get_num_default_rss_queues();
12370 
12371 	channel->max_rx = tp->rxq_max;
12372 	channel->max_tx = tp->txq_max;
12373 
12374 	if (netif_running(dev)) {
12375 		channel->rx_count = tp->rxq_cnt;
12376 		channel->tx_count = tp->txq_cnt;
12377 	} else {
12378 		if (tp->rxq_req)
12379 			channel->rx_count = tp->rxq_req;
12380 		else
12381 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12382 
12383 		if (tp->txq_req)
12384 			channel->tx_count = tp->txq_req;
12385 		else
12386 			channel->tx_count = min(deflt_qs, tp->txq_max);
12387 	}
12388 }
12389 
12390 static int tg3_set_channels(struct net_device *dev,
12391 			    struct ethtool_channels *channel)
12392 {
12393 	struct tg3 *tp = netdev_priv(dev);
12394 
12395 	if (!tg3_flag(tp, SUPPORT_MSIX))
12396 		return -EOPNOTSUPP;
12397 
12398 	if (channel->rx_count > tp->rxq_max ||
12399 	    channel->tx_count > tp->txq_max)
12400 		return -EINVAL;
12401 
12402 	tp->rxq_req = channel->rx_count;
12403 	tp->txq_req = channel->tx_count;
12404 
12405 	if (!netif_running(dev))
12406 		return 0;
12407 
12408 	tg3_stop(tp);
12409 
12410 	tg3_carrier_off(tp);
12411 
12412 	tg3_start(tp, true, false, false);
12413 
12414 	return 0;
12415 }
12416 
12417 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12418 {
12419 	switch (stringset) {
12420 	case ETH_SS_STATS:
12421 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12422 		break;
12423 	case ETH_SS_TEST:
12424 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12425 		break;
12426 	default:
12427 		WARN_ON(1);	/* we need a WARN() */
12428 		break;
12429 	}
12430 }
12431 
12432 static int tg3_set_phys_id(struct net_device *dev,
12433 			    enum ethtool_phys_id_state state)
12434 {
12435 	struct tg3 *tp = netdev_priv(dev);
12436 
12437 	if (!netif_running(tp->dev))
12438 		return -EAGAIN;
12439 
12440 	switch (state) {
12441 	case ETHTOOL_ID_ACTIVE:
12442 		return 1;	/* cycle on/off once per second */
12443 
12444 	case ETHTOOL_ID_ON:
12445 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12446 		     LED_CTRL_1000MBPS_ON |
12447 		     LED_CTRL_100MBPS_ON |
12448 		     LED_CTRL_10MBPS_ON |
12449 		     LED_CTRL_TRAFFIC_OVERRIDE |
12450 		     LED_CTRL_TRAFFIC_BLINK |
12451 		     LED_CTRL_TRAFFIC_LED);
12452 		break;
12453 
12454 	case ETHTOOL_ID_OFF:
12455 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12456 		     LED_CTRL_TRAFFIC_OVERRIDE);
12457 		break;
12458 
12459 	case ETHTOOL_ID_INACTIVE:
12460 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12461 		break;
12462 	}
12463 
12464 	return 0;
12465 }
12466 
12467 static void tg3_get_ethtool_stats(struct net_device *dev,
12468 				   struct ethtool_stats *estats, u64 *tmp_stats)
12469 {
12470 	struct tg3 *tp = netdev_priv(dev);
12471 
12472 	if (tp->hw_stats)
12473 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12474 	else
12475 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12476 }
12477 
12478 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12479 {
12480 	int i;
12481 	__be32 *buf;
12482 	u32 offset = 0, len = 0;
12483 	u32 magic, val;
12484 
12485 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12486 		return NULL;
12487 
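	/* Look for an extended-VPD entry in the NVRAM directory; if none is
	 * found, fall back to the fixed VPD offset and length below.
	 */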
12488 	if (magic == TG3_EEPROM_MAGIC) {
12489 		for (offset = TG3_NVM_DIR_START;
12490 		     offset < TG3_NVM_DIR_END;
12491 		     offset += TG3_NVM_DIRENT_SIZE) {
12492 			if (tg3_nvram_read(tp, offset, &val))
12493 				return NULL;
12494 
12495 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12496 			    TG3_NVM_DIRTYPE_EXTVPD)
12497 				break;
12498 		}
12499 
12500 		if (offset != TG3_NVM_DIR_END) {
12501 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12502 			if (tg3_nvram_read(tp, offset + 4, &offset))
12503 				return NULL;
12504 
12505 			offset = tg3_nvram_logical_addr(tp, offset);
12506 		}
12507 	}
12508 
12509 	if (!offset || !len) {
12510 		offset = TG3_NVM_VPD_OFF;
12511 		len = TG3_NVM_VPD_LEN;
12512 	}
12513 
12514 	buf = kmalloc(len, GFP_KERNEL);
12515 	if (buf == NULL)
12516 		return NULL;
12517 
12518 	if (magic == TG3_EEPROM_MAGIC) {
12519 		for (i = 0; i < len; i += 4) {
12520 			/* The data is in little-endian format in NVRAM.
12521 			 * Use the big-endian read routines to preserve
12522 			 * the byte order as it exists in NVRAM.
12523 			 */
12524 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12525 				goto error;
12526 		}
12527 	} else {
12528 		u8 *ptr;
12529 		ssize_t cnt;
12530 		unsigned int pos = 0;
12531 
12532 		ptr = (u8 *)&buf[0];
12533 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12534 			cnt = pci_read_vpd(tp->pdev, pos,
12535 					   len - pos, ptr);
12536 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12537 				cnt = 0;
12538 			else if (cnt < 0)
12539 				goto error;
12540 		}
12541 		if (pos != len)
12542 			goto error;
12543 	}
12544 
12545 	*vpdlen = len;
12546 
12547 	return buf;
12548 
12549 error:
12550 	kfree(buf);
12551 	return NULL;
12552 }
12553 
12554 #define NVRAM_TEST_SIZE 0x100
12555 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12556 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12557 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12558 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12559 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12560 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12561 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12562 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12563 
12564 static int tg3_test_nvram(struct tg3 *tp)
12565 {
12566 	u32 csum, magic, len;
12567 	__be32 *buf;
12568 	int i, j, k, err = 0, size;
12569 
12570 	if (tg3_flag(tp, NO_NVRAM))
12571 		return 0;
12572 
12573 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12574 		return -EIO;
12575 
12576 	if (magic == TG3_EEPROM_MAGIC)
12577 		size = NVRAM_TEST_SIZE;
12578 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12579 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12580 		    TG3_EEPROM_SB_FORMAT_1) {
12581 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12582 			case TG3_EEPROM_SB_REVISION_0:
12583 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12584 				break;
12585 			case TG3_EEPROM_SB_REVISION_2:
12586 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12587 				break;
12588 			case TG3_EEPROM_SB_REVISION_3:
12589 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12590 				break;
12591 			case TG3_EEPROM_SB_REVISION_4:
12592 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12593 				break;
12594 			case TG3_EEPROM_SB_REVISION_5:
12595 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12596 				break;
12597 			case TG3_EEPROM_SB_REVISION_6:
12598 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12599 				break;
12600 			default:
12601 				return -EIO;
12602 			}
12603 		} else
12604 			return 0;
12605 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12606 		size = NVRAM_SELFBOOT_HW_SIZE;
12607 	else
12608 		return -EIO;
12609 
12610 	buf = kmalloc(size, GFP_KERNEL);
12611 	if (buf == NULL)
12612 		return -ENOMEM;
12613 
12614 	err = -EIO;
12615 	for (i = 0, j = 0; i < size; i += 4, j++) {
12616 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12617 		if (err)
12618 			break;
12619 	}
12620 	if (i < size)
12621 		goto out;
12622 
12623 	/* Selfboot format */
12624 	magic = be32_to_cpu(buf[0]);
12625 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12626 	    TG3_EEPROM_MAGIC_FW) {
12627 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12628 
12629 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12630 		    TG3_EEPROM_SB_REVISION_2) {
12631 			/* For rev 2, the csum doesn't include the MBA. */
12632 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12633 				csum8 += buf8[i];
12634 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12635 				csum8 += buf8[i];
12636 		} else {
12637 			for (i = 0; i < size; i++)
12638 				csum8 += buf8[i];
12639 		}
12640 
12641 		if (csum8 == 0) {
12642 			err = 0;
12643 			goto out;
12644 		}
12645 
12646 		err = -EIO;
12647 		goto out;
12648 	}
12649 
12650 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12651 	    TG3_EEPROM_MAGIC_HW) {
12652 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12653 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12654 		u8 *buf8 = (u8 *) buf;
12655 
12656 		/* Separate the parity bits and the data bytes.  */
12657 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12658 			if ((i == 0) || (i == 8)) {
12659 				int l;
12660 				u8 msk;
12661 
12662 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12663 					parity[k++] = buf8[i] & msk;
12664 				i++;
12665 			} else if (i == 16) {
12666 				int l;
12667 				u8 msk;
12668 
12669 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12670 					parity[k++] = buf8[i] & msk;
12671 				i++;
12672 
12673 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12674 					parity[k++] = buf8[i] & msk;
12675 				i++;
12676 			}
12677 			data[j++] = buf8[i];
12678 		}
12679 
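		/* Verify odd parity: the stored parity bit must be set exactly
		 * when its data byte contains an even number of one bits.
		 */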
12680 		err = -EIO;
12681 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12682 			u8 hw8 = hweight8(data[i]);
12683 
12684 			if ((hw8 & 0x1) && parity[i])
12685 				goto out;
12686 			else if (!(hw8 & 0x1) && !parity[i])
12687 				goto out;
12688 		}
12689 		err = 0;
12690 		goto out;
12691 	}
12692 
12693 	err = -EIO;
12694 
12695 	/* Bootstrap checksum at offset 0x10 */
12696 	csum = calc_crc((unsigned char *) buf, 0x10);
12697 	if (csum != le32_to_cpu(buf[0x10/4]))
12698 		goto out;
12699 
12700 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12701 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12702 	if (csum != le32_to_cpu(buf[0xfc/4]))
12703 		goto out;
12704 
12705 	kfree(buf);
12706 
12707 	buf = tg3_vpd_readblock(tp, &len);
12708 	if (!buf)
12709 		return -ENOMEM;
12710 
12711 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12712 	if (i > 0) {
12713 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12714 		if (j < 0)
12715 			goto out;
12716 
12717 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12718 			goto out;
12719 
12720 		i += PCI_VPD_LRDT_TAG_SIZE;
12721 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12722 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12723 		if (j > 0) {
12724 			u8 csum8 = 0;
12725 
12726 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12727 
12728 			for (i = 0; i <= j; i++)
12729 				csum8 += ((u8 *)buf)[i];
12730 
12731 			if (csum8)
12732 				goto out;
12733 		}
12734 	}
12735 
12736 	err = 0;
12737 
12738 out:
12739 	kfree(buf);
12740 	return err;
12741 }
12742 
12743 #define TG3_SERDES_TIMEOUT_SEC	2
12744 #define TG3_COPPER_TIMEOUT_SEC	6
12745 
12746 static int tg3_test_link(struct tg3 *tp)
12747 {
12748 	int i, max;
12749 
12750 	if (!netif_running(tp->dev))
12751 		return -ENODEV;
12752 
12753 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12754 		max = TG3_SERDES_TIMEOUT_SEC;
12755 	else
12756 		max = TG3_COPPER_TIMEOUT_SEC;
12757 
12758 	for (i = 0; i < max; i++) {
12759 		if (tp->link_up)
12760 			return 0;
12761 
12762 		if (msleep_interruptible(1000))
12763 			break;
12764 	}
12765 
12766 	return -EIO;
12767 }
12768 
12769 /* Only test the commonly used registers */
12770 static int tg3_test_registers(struct tg3 *tp)
12771 {
12772 	int i, is_5705, is_5750;
12773 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12774 	static struct {
12775 		u16 offset;
12776 		u16 flags;
12777 #define TG3_FL_5705	0x1
12778 #define TG3_FL_NOT_5705	0x2
12779 #define TG3_FL_NOT_5788	0x4
12780 #define TG3_FL_NOT_5750	0x8
12781 		u32 read_mask;
12782 		u32 write_mask;
12783 	} reg_tbl[] = {
12784 		/* MAC Control Registers */
12785 		{ MAC_MODE, TG3_FL_NOT_5705,
12786 			0x00000000, 0x00ef6f8c },
12787 		{ MAC_MODE, TG3_FL_5705,
12788 			0x00000000, 0x01ef6b8c },
12789 		{ MAC_STATUS, TG3_FL_NOT_5705,
12790 			0x03800107, 0x00000000 },
12791 		{ MAC_STATUS, TG3_FL_5705,
12792 			0x03800100, 0x00000000 },
12793 		{ MAC_ADDR_0_HIGH, 0x0000,
12794 			0x00000000, 0x0000ffff },
12795 		{ MAC_ADDR_0_LOW, 0x0000,
12796 			0x00000000, 0xffffffff },
12797 		{ MAC_RX_MTU_SIZE, 0x0000,
12798 			0x00000000, 0x0000ffff },
12799 		{ MAC_TX_MODE, 0x0000,
12800 			0x00000000, 0x00000070 },
12801 		{ MAC_TX_LENGTHS, 0x0000,
12802 			0x00000000, 0x00003fff },
12803 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12804 			0x00000000, 0x000007fc },
12805 		{ MAC_RX_MODE, TG3_FL_5705,
12806 			0x00000000, 0x000007dc },
12807 		{ MAC_HASH_REG_0, 0x0000,
12808 			0x00000000, 0xffffffff },
12809 		{ MAC_HASH_REG_1, 0x0000,
12810 			0x00000000, 0xffffffff },
12811 		{ MAC_HASH_REG_2, 0x0000,
12812 			0x00000000, 0xffffffff },
12813 		{ MAC_HASH_REG_3, 0x0000,
12814 			0x00000000, 0xffffffff },
12815 
12816 		/* Receive Data and Receive BD Initiator Control Registers. */
12817 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12818 			0x00000000, 0xffffffff },
12819 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12820 			0x00000000, 0xffffffff },
12821 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12822 			0x00000000, 0x00000003 },
12823 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12824 			0x00000000, 0xffffffff },
12825 		{ RCVDBDI_STD_BD+0, 0x0000,
12826 			0x00000000, 0xffffffff },
12827 		{ RCVDBDI_STD_BD+4, 0x0000,
12828 			0x00000000, 0xffffffff },
12829 		{ RCVDBDI_STD_BD+8, 0x0000,
12830 			0x00000000, 0xffff0002 },
12831 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12832 			0x00000000, 0xffffffff },
12833 
12834 		/* Receive BD Initiator Control Registers. */
12835 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12836 			0x00000000, 0xffffffff },
12837 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12838 			0x00000000, 0x000003ff },
12839 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12840 			0x00000000, 0xffffffff },
12841 
12842 		/* Host Coalescing Control Registers. */
12843 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12844 			0x00000000, 0x00000004 },
12845 		{ HOSTCC_MODE, TG3_FL_5705,
12846 			0x00000000, 0x000000f6 },
12847 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12848 			0x00000000, 0xffffffff },
12849 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12850 			0x00000000, 0x000003ff },
12851 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12852 			0x00000000, 0xffffffff },
12853 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12854 			0x00000000, 0x000003ff },
12855 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12856 			0x00000000, 0xffffffff },
12857 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12858 			0x00000000, 0x000000ff },
12859 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12860 			0x00000000, 0xffffffff },
12861 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12862 			0x00000000, 0x000000ff },
12863 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12864 			0x00000000, 0xffffffff },
12865 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12866 			0x00000000, 0xffffffff },
12867 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12868 			0x00000000, 0xffffffff },
12869 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12870 			0x00000000, 0x000000ff },
12871 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12872 			0x00000000, 0xffffffff },
12873 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12874 			0x00000000, 0x000000ff },
12875 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12876 			0x00000000, 0xffffffff },
12877 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12878 			0x00000000, 0xffffffff },
12879 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12880 			0x00000000, 0xffffffff },
12881 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12882 			0x00000000, 0xffffffff },
12883 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12884 			0x00000000, 0xffffffff },
12885 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12886 			0xffffffff, 0x00000000 },
12887 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12888 			0xffffffff, 0x00000000 },
12889 
12890 		/* Buffer Manager Control Registers. */
12891 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12892 			0x00000000, 0x007fff80 },
12893 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12894 			0x00000000, 0x007fffff },
12895 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12896 			0x00000000, 0x0000003f },
12897 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12898 			0x00000000, 0x000001ff },
12899 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12900 			0x00000000, 0x000001ff },
12901 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12902 			0xffffffff, 0x00000000 },
12903 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12904 			0xffffffff, 0x00000000 },
12905 
12906 		/* Mailbox Registers */
12907 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12908 			0x00000000, 0x000001ff },
12909 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12910 			0x00000000, 0x000001ff },
12911 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12912 			0x00000000, 0x000007ff },
12913 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12914 			0x00000000, 0x000001ff },
12915 
12916 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
12917 	};
12918 
12919 	is_5705 = is_5750 = 0;
12920 	if (tg3_flag(tp, 5705_PLUS)) {
12921 		is_5705 = 1;
12922 		if (tg3_flag(tp, 5750_PLUS))
12923 			is_5750 = 1;
12924 	}
12925 
12926 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12927 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12928 			continue;
12929 
12930 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12931 			continue;
12932 
12933 		if (tg3_flag(tp, IS_5788) &&
12934 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
12935 			continue;
12936 
12937 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12938 			continue;
12939 
12940 		offset = (u32) reg_tbl[i].offset;
12941 		read_mask = reg_tbl[i].read_mask;
12942 		write_mask = reg_tbl[i].write_mask;
12943 
12944 		/* Save the original register content */
12945 		save_val = tr32(offset);
12946 
12947 		/* Determine the read-only value. */
12948 		read_val = save_val & read_mask;
12949 
12950 		/* Write zero to the register, then make sure the read-only bits
12951 		 * are not changed and the read/write bits are all zeros.
12952 		 */
12953 		tw32(offset, 0);
12954 
12955 		val = tr32(offset);
12956 
12957 		/* Test the read-only and read/write bits. */
12958 		if (((val & read_mask) != read_val) || (val & write_mask))
12959 			goto out;
12960 
12961 		/* Write ones to all the bits defined by RdMask and WrMask, then
12962 		 * make sure the read-only bits are not changed and the
12963 		 * read/write bits are all ones.
12964 		 */
12965 		tw32(offset, read_mask | write_mask);
12966 
12967 		val = tr32(offset);
12968 
12969 		/* Test the read-only bits. */
12970 		if ((val & read_mask) != read_val)
12971 			goto out;
12972 
12973 		/* Test the read/write bits. */
12974 		if ((val & write_mask) != write_mask)
12975 			goto out;
12976 
12977 		tw32(offset, save_val);
12978 	}
12979 
12980 	return 0;
12981 
12982 out:
12983 	if (netif_msg_hw(tp))
12984 		netdev_err(tp->dev,
12985 			   "Register test failed at offset %x\n", offset);
12986 	tw32(offset, save_val);
12987 	return -EIO;
12988 }
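
/* A worked example of the mask scheme driving the test above: for
 * RCVDBDI_STD_BD+8 the table gives read_mask 0x00000000 and write_mask
 * 0xffff0002, so writing 0 must read back as 0 and writing 0xffff0002
 * must read back exactly 0xffff0002, while bits outside both masks are
 * ignored in either direction.  Entries like HOSTCC_STATS_BLK_NIC_ADDR
 * (read_mask 0xffffffff, write_mask 0) are purely read-only and must
 * survive both writes unchanged.
 */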
12989 
12990 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12991 {
12992 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12993 	int i;
12994 	u32 j;
12995 
12996 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12997 		for (j = 0; j < len; j += 4) {
12998 			u32 val;
12999 
13000 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13001 			tg3_read_mem(tp, offset + j, &val);
13002 			if (val != test_pattern[i])
13003 				return -EIO;
13004 		}
13005 	}
13006 	return 0;
13007 }
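
/* The patterns exercised above -- all-zeros, all-ones and the
 * alternating 0xaa55a55a -- are a quick screen for stuck-at and
 * coupling faults; every word in the window is written and read back
 * before moving on to the next pattern.
 */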
13008 
13009 static int tg3_test_memory(struct tg3 *tp)
13010 {
13011 	static struct mem_entry {
13012 		u32 offset;
13013 		u32 len;
13014 	} mem_tbl_570x[] = {
13015 		{ 0x00000000, 0x00b50},
13016 		{ 0x00002000, 0x1c000},
13017 		{ 0xffffffff, 0x00000}
13018 	}, mem_tbl_5705[] = {
13019 		{ 0x00000100, 0x0000c},
13020 		{ 0x00000200, 0x00008},
13021 		{ 0x00004000, 0x00800},
13022 		{ 0x00006000, 0x01000},
13023 		{ 0x00008000, 0x02000},
13024 		{ 0x00010000, 0x0e000},
13025 		{ 0xffffffff, 0x00000}
13026 	}, mem_tbl_5755[] = {
13027 		{ 0x00000200, 0x00008},
13028 		{ 0x00004000, 0x00800},
13029 		{ 0x00006000, 0x00800},
13030 		{ 0x00008000, 0x02000},
13031 		{ 0x00010000, 0x0c000},
13032 		{ 0xffffffff, 0x00000}
13033 	}, mem_tbl_5906[] = {
13034 		{ 0x00000200, 0x00008},
13035 		{ 0x00004000, 0x00400},
13036 		{ 0x00006000, 0x00400},
13037 		{ 0x00008000, 0x01000},
13038 		{ 0x00010000, 0x01000},
13039 		{ 0xffffffff, 0x00000}
13040 	}, mem_tbl_5717[] = {
13041 		{ 0x00000200, 0x00008},
13042 		{ 0x00010000, 0x0a000},
13043 		{ 0x00020000, 0x13c00},
13044 		{ 0xffffffff, 0x00000}
13045 	}, mem_tbl_57765[] = {
13046 		{ 0x00000200, 0x00008},
13047 		{ 0x00004000, 0x00800},
13048 		{ 0x00006000, 0x09800},
13049 		{ 0x00010000, 0x0a000},
13050 		{ 0xffffffff, 0x00000}
13051 	};
13052 	struct mem_entry *mem_tbl;
13053 	int err = 0;
13054 	int i;
13055 
13056 	if (tg3_flag(tp, 5717_PLUS))
13057 		mem_tbl = mem_tbl_5717;
13058 	else if (tg3_flag(tp, 57765_CLASS) ||
13059 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13060 		mem_tbl = mem_tbl_57765;
13061 	else if (tg3_flag(tp, 5755_PLUS))
13062 		mem_tbl = mem_tbl_5755;
13063 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13064 		mem_tbl = mem_tbl_5906;
13065 	else if (tg3_flag(tp, 5705_PLUS))
13066 		mem_tbl = mem_tbl_5705;
13067 	else
13068 		mem_tbl = mem_tbl_570x;
13069 
13070 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13071 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13072 		if (err)
13073 			break;
13074 	}
13075 
13076 	return err;
13077 }
13078 
13079 #define TG3_TSO_MSS		500
13080 
13081 #define TG3_TSO_IP_HDR_LEN	20
13082 #define TG3_TSO_TCP_HDR_LEN	20
13083 #define TG3_TSO_TCP_OPT_LEN	12
13084 
13085 static const u8 tg3_tso_header[] = {
13086 0x08, 0x00,
13087 0x45, 0x00, 0x00, 0x00,
13088 0x00, 0x00, 0x40, 0x00,
13089 0x40, 0x06, 0x00, 0x00,
13090 0x0a, 0x00, 0x00, 0x01,
13091 0x0a, 0x00, 0x00, 0x02,
13092 0x0d, 0x00, 0xe0, 0x00,
13093 0x00, 0x00, 0x01, 0x00,
13094 0x00, 0x00, 0x02, 0x00,
13095 0x80, 0x10, 0x10, 0x00,
13096 0x14, 0x09, 0x00, 0x00,
13097 0x01, 0x01, 0x08, 0x0a,
13098 0x11, 0x11, 0x11, 0x11,
13099 0x11, 0x11, 0x11, 0x11,
13100 };
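
/* The canned header above starts at the ethertype (the MAC addresses
 * are filled in separately): 0x0800 (IPv4), then a 20-byte IPv4 header
 * with DF set, TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2 and tot_len
 * patched in at runtime, then a 32-byte TCP header (data offset 8, ACK
 * set) whose final 12 bytes -- two NOPs plus a timestamp option --
 * account for TG3_TSO_TCP_OPT_LEN.
 */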
13101 
13102 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13103 {
13104 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13105 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13106 	u32 budget;
13107 	struct sk_buff *skb;
13108 	u8 *tx_data, *rx_data;
13109 	dma_addr_t map;
13110 	int num_pkts, tx_len, rx_len, i, err;
13111 	struct tg3_rx_buffer_desc *desc;
13112 	struct tg3_napi *tnapi, *rnapi;
13113 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13114 
13115 	tnapi = &tp->napi[0];
13116 	rnapi = &tp->napi[0];
13117 	if (tp->irq_cnt > 1) {
13118 		if (tg3_flag(tp, ENABLE_RSS))
13119 			rnapi = &tp->napi[1];
13120 		if (tg3_flag(tp, ENABLE_TSS))
13121 			tnapi = &tp->napi[1];
13122 	}
13123 	coal_now = tnapi->coal_now | rnapi->coal_now;
13124 
13125 	err = -EIO;
13126 
13127 	tx_len = pktsz;
13128 	skb = netdev_alloc_skb(tp->dev, tx_len);
13129 	if (!skb)
13130 		return -ENOMEM;
13131 
13132 	tx_data = skb_put(skb, tx_len);
13133 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13134 	memset(tx_data + ETH_ALEN, 0x0, 8);
13135 
13136 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13137 
13138 	if (tso_loopback) {
13139 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13140 
13141 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13142 			      TG3_TSO_TCP_OPT_LEN;
13143 
13144 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13145 		       sizeof(tg3_tso_header));
13146 		mss = TG3_TSO_MSS;
13147 
13148 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13149 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13150 
13151 		/* Set the total length field in the IP header */
13152 		iph->tot_len = htons((u16)(mss + hdr_len));
13153 
13154 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13155 			      TXD_FLAG_CPU_POST_DMA);
13156 
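		/* Each TSO hardware generation encodes the LSO header
		 * length into the mss word and base_flags differently;
		 * the chain below mimics what the transmit path programs
		 * for real LSO traffic.
		 */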
13157 		if (tg3_flag(tp, HW_TSO_1) ||
13158 		    tg3_flag(tp, HW_TSO_2) ||
13159 		    tg3_flag(tp, HW_TSO_3)) {
13160 			struct tcphdr *th;
13161 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13162 			th = (struct tcphdr *)&tx_data[val];
13163 			th->check = 0;
13164 		} else
13165 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13166 
13167 		if (tg3_flag(tp, HW_TSO_3)) {
13168 			mss |= (hdr_len & 0xc) << 12;
13169 			if (hdr_len & 0x10)
13170 				base_flags |= 0x00000010;
13171 			base_flags |= (hdr_len & 0x3e0) << 5;
13172 		} else if (tg3_flag(tp, HW_TSO_2))
13173 			mss |= hdr_len << 9;
13174 		else if (tg3_flag(tp, HW_TSO_1) ||
13175 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13176 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13177 		} else {
13178 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13179 		}
13180 
13181 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13182 	} else {
13183 		num_pkts = 1;
13184 		data_off = ETH_HLEN;
13185 
13186 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13187 		    tx_len > VLAN_ETH_FRAME_LEN)
13188 			base_flags |= TXD_FLAG_JMB_PKT;
13189 	}
13190 
13191 	for (i = data_off; i < tx_len; i++)
13192 		tx_data[i] = (u8) (i & 0xff);
13193 
13194 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13195 	if (pci_dma_mapping_error(tp->pdev, map)) {
13196 		dev_kfree_skb(skb);
13197 		return -EIO;
13198 	}
13199 
13200 	val = tnapi->tx_prod;
13201 	tnapi->tx_buffers[val].skb = skb;
13202 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13203 
13204 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13205 	       rnapi->coal_now);
13206 
13207 	udelay(10);
13208 
13209 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13210 
13211 	budget = tg3_tx_avail(tnapi);
13212 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13213 			    base_flags | TXD_FLAG_END, mss, 0)) {
13214 		tnapi->tx_buffers[val].skb = NULL;
13215 		dev_kfree_skb(skb);
13216 		return -EIO;
13217 	}
13218 
13219 	tnapi->tx_prod++;
13220 
13221 	/* Sync BD data before updating mailbox */
13222 	wmb();
13223 
13224 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13225 	tr32_mailbox(tnapi->prodmbox);
13226 
13227 	udelay(10);
13228 
13229 	/* Poll up to 350 usec (35 iterations of 10 usec) to allow enough time on some 10/100 Mbps devices. */
13230 	for (i = 0; i < 35; i++) {
13231 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13232 		       coal_now);
13233 
13234 		udelay(10);
13235 
13236 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13237 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13238 		if ((tx_idx == tnapi->tx_prod) &&
13239 		    (rx_idx == (rx_start_idx + num_pkts)))
13240 			break;
13241 	}
13242 
13243 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13244 	dev_kfree_skb(skb);
13245 
13246 	if (tx_idx != tnapi->tx_prod)
13247 		goto out;
13248 
13249 	if (rx_idx != rx_start_idx + num_pkts)
13250 		goto out;
13251 
13252 	val = data_off;
13253 	while (rx_idx != rx_start_idx) {
13254 		desc = &rnapi->rx_rcb[rx_start_idx++];
13255 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13256 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13257 
13258 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13259 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13260 			goto out;
13261 
13262 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13263 			 - ETH_FCS_LEN;
13264 
13265 		if (!tso_loopback) {
13266 			if (rx_len != tx_len)
13267 				goto out;
13268 
13269 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13270 				if (opaque_key != RXD_OPAQUE_RING_STD)
13271 					goto out;
13272 			} else {
13273 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13274 					goto out;
13275 			}
13276 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13277 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13278 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13279 			goto out;
13280 		}
13281 
13282 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13283 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13284 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13285 					     mapping);
13286 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13287 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13288 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13289 					     mapping);
13290 		} else
13291 			goto out;
13292 
13293 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13294 					    PCI_DMA_FROMDEVICE);
13295 
13296 		rx_data += TG3_RX_OFFSET(tp);
13297 		for (i = data_off; i < rx_len; i++, val++) {
13298 			if (*(rx_data + i) != (u8) (val & 0xff))
13299 				goto out;
13300 		}
13301 	}
13302 
13303 	err = 0;
13304 
13305 	/* tg3_free_rings will unmap and free the rx_data */
13306 out:
13307 	return err;
13308 }
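
/* tg3_run_loopback() in brief: build a frame addressed to ourselves,
 * post it as a single transmit BD, kick the coalescing engine so the
 * status block updates, poll the tx consumer / rx producer indices for
 * up to 350 usec, and finally verify the received payload against the
 * (i & 0xff) fill pattern written above.
 */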
13309 
13310 #define TG3_STD_LOOPBACK_FAILED		1
13311 #define TG3_JMB_LOOPBACK_FAILED		2
13312 #define TG3_TSO_LOOPBACK_FAILED		4
13313 #define TG3_LOOPBACK_FAILED \
13314 	(TG3_STD_LOOPBACK_FAILED | \
13315 	 TG3_JMB_LOOPBACK_FAILED | \
13316 	 TG3_TSO_LOOPBACK_FAILED)
13317 
13318 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13319 {
13320 	int err = -EIO;
13321 	u32 eee_cap;
13322 	u32 jmb_pkt_sz = 9000;
13323 
13324 	if (tp->dma_limit)
13325 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13326 
13327 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13328 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13329 
13330 	if (!netif_running(tp->dev)) {
13331 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13332 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13333 		if (do_extlpbk)
13334 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13335 		goto done;
13336 	}
13337 
13338 	err = tg3_reset_hw(tp, true);
13339 	if (err) {
13340 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13341 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13342 		if (do_extlpbk)
13343 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13344 		goto done;
13345 	}
13346 
13347 	if (tg3_flag(tp, ENABLE_RSS)) {
13348 		int i;
13349 
13350 		/* Reroute all rx packets to the 1st queue */
13351 		for (i = MAC_RSS_INDIR_TBL_0;
13352 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13353 			tw32(i, 0x0);
13354 	}
13355 
13356 	/* HW erratum - MAC loopback fails in some cases on 5780.
13357 	 * Normal traffic and PHY loopback are not affected by the
13358 	 * erratum.  Also, the MAC loopback test is deprecated for
13359 	 * all newer ASIC revisions.
13360 	 */
13361 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13362 	    !tg3_flag(tp, CPMU_PRESENT)) {
13363 		tg3_mac_loopback(tp, true);
13364 
13365 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13366 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13367 
13368 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13369 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13370 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13371 
13372 		tg3_mac_loopback(tp, false);
13373 	}
13374 
13375 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13376 	    !tg3_flag(tp, USE_PHYLIB)) {
13377 		int i;
13378 
13379 		tg3_phy_lpbk_set(tp, 0, false);
13380 
13381 		/* Wait for link */
13382 		for (i = 0; i < 100; i++) {
13383 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13384 				break;
13385 			mdelay(1);
13386 		}
13387 
13388 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13389 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13390 		if (tg3_flag(tp, TSO_CAPABLE) &&
13391 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13392 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13393 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13394 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13395 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13396 
13397 		if (do_extlpbk) {
13398 			tg3_phy_lpbk_set(tp, 0, true);
13399 
13400 			/* All link indications report up, but the hardware
13401 			 * isn't really ready for about 20 msec.  Double it
13402 			 * to be sure.
13403 			 */
13404 			mdelay(40);
13405 
13406 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13407 				data[TG3_EXT_LOOPB_TEST] |=
13408 							TG3_STD_LOOPBACK_FAILED;
13409 			if (tg3_flag(tp, TSO_CAPABLE) &&
13410 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13411 				data[TG3_EXT_LOOPB_TEST] |=
13412 							TG3_TSO_LOOPBACK_FAILED;
13413 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13414 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13415 				data[TG3_EXT_LOOPB_TEST] |=
13416 							TG3_JMB_LOOPBACK_FAILED;
13417 		}
13418 
13419 		/* Re-enable gphy autopowerdown. */
13420 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13421 			tg3_phy_toggle_apd(tp, true);
13422 	}
13423 
13424 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13425 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13426 
13427 done:
13428 	tp->phy_flags |= eee_cap;
13429 
13430 	return err;
13431 }
13432 
13433 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13434 			  u64 *data)
13435 {
13436 	struct tg3 *tp = netdev_priv(dev);
13437 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13438 
13439 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13440 		if (tg3_power_up(tp)) {
13441 			etest->flags |= ETH_TEST_FL_FAILED;
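			/* Any nonzero value marks a test failed; memset
			 * fills each byte, so the u64s read back as
			 * 0x0101010101010101 rather than 1, which
			 * userspace treats as failure all the same.
			 */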
13442 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13443 			return;
13444 		}
13445 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13446 	}
13447 
13448 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13449 
13450 	if (tg3_test_nvram(tp) != 0) {
13451 		etest->flags |= ETH_TEST_FL_FAILED;
13452 		data[TG3_NVRAM_TEST] = 1;
13453 	}
13454 	if (!doextlpbk && tg3_test_link(tp)) {
13455 		etest->flags |= ETH_TEST_FL_FAILED;
13456 		data[TG3_LINK_TEST] = 1;
13457 	}
13458 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13459 		int err, err2 = 0, irq_sync = 0;
13460 
13461 		if (netif_running(dev)) {
13462 			tg3_phy_stop(tp);
13463 			tg3_netif_stop(tp);
13464 			irq_sync = 1;
13465 		}
13466 
13467 		tg3_full_lock(tp, irq_sync);
13468 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13469 		err = tg3_nvram_lock(tp);
13470 		tg3_halt_cpu(tp, RX_CPU_BASE);
13471 		if (!tg3_flag(tp, 5705_PLUS))
13472 			tg3_halt_cpu(tp, TX_CPU_BASE);
13473 		if (!err)
13474 			tg3_nvram_unlock(tp);
13475 
13476 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13477 			tg3_phy_reset(tp);
13478 
13479 		if (tg3_test_registers(tp) != 0) {
13480 			etest->flags |= ETH_TEST_FL_FAILED;
13481 			data[TG3_REGISTER_TEST] = 1;
13482 		}
13483 
13484 		if (tg3_test_memory(tp) != 0) {
13485 			etest->flags |= ETH_TEST_FL_FAILED;
13486 			data[TG3_MEMORY_TEST] = 1;
13487 		}
13488 
13489 		if (doextlpbk)
13490 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13491 
13492 		if (tg3_test_loopback(tp, data, doextlpbk))
13493 			etest->flags |= ETH_TEST_FL_FAILED;
13494 
13495 		tg3_full_unlock(tp);
13496 
13497 		if (tg3_test_interrupt(tp) != 0) {
13498 			etest->flags |= ETH_TEST_FL_FAILED;
13499 			data[TG3_INTERRUPT_TEST] = 1;
13500 		}
13501 
13502 		tg3_full_lock(tp, 0);
13503 
13504 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13505 		if (netif_running(dev)) {
13506 			tg3_flag_set(tp, INIT_COMPLETE);
13507 			err2 = tg3_restart_hw(tp, true);
13508 			if (!err2)
13509 				tg3_netif_start(tp);
13510 		}
13511 
13512 		tg3_full_unlock(tp);
13513 
13514 		if (irq_sync && !err2)
13515 			tg3_phy_start(tp);
13516 	}
13517 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13518 		tg3_power_down(tp);
13519 
13520 }
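
/* These hooks are driven from userspace through ethtool; as a usage
 * sketch (interface name illustrative), "ethtool -t eth0 offline" runs
 * the full register/memory/loopback/interrupt suite above, while
 * "ethtool -t eth0 online" skips the offline block.  The external
 * loopback leg (ETH_TEST_FL_EXTERNAL_LB) is requested by newer ethtool
 * binaries via "ethtool -t eth0 external_lb".
 */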
13521 
13522 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13523 			      struct ifreq *ifr, int cmd)
13524 {
13525 	struct tg3 *tp = netdev_priv(dev);
13526 	struct hwtstamp_config stmpconf;
13527 
13528 	if (!tg3_flag(tp, PTP_CAPABLE))
13529 		return -EINVAL;
13530 
13531 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13532 		return -EFAULT;
13533 
13534 	if (stmpconf.flags)
13535 		return -EINVAL;
13536 
13537 	switch (stmpconf.tx_type) {
13538 	case HWTSTAMP_TX_ON:
13539 		tg3_flag_set(tp, TX_TSTAMP_EN);
13540 		break;
13541 	case HWTSTAMP_TX_OFF:
13542 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13543 		break;
13544 	default:
13545 		return -ERANGE;
13546 	}
13547 
13548 	switch (stmpconf.rx_filter) {
13549 	case HWTSTAMP_FILTER_NONE:
13550 		tp->rxptpctl = 0;
13551 		break;
13552 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13553 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13554 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13555 		break;
13556 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13557 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13558 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13559 		break;
13560 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13561 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13562 			       TG3_RX_PTP_CTL_DELAY_REQ;
13563 		break;
13564 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13565 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13566 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13567 		break;
13568 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13569 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13570 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13571 		break;
13572 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13573 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13574 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13575 		break;
13576 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13577 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13578 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13579 		break;
13580 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13581 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13582 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13583 		break;
13584 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13585 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13586 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13587 		break;
13588 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13589 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13590 			       TG3_RX_PTP_CTL_DELAY_REQ;
13591 		break;
13592 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13593 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13594 			       TG3_RX_PTP_CTL_DELAY_REQ;
13595 		break;
13596 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13597 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13598 			       TG3_RX_PTP_CTL_DELAY_REQ;
13599 		break;
13600 	default:
13601 		return -ERANGE;
13602 	}
13603 
13604 	if (netif_running(dev) && tp->rxptpctl)
13605 		tw32(TG3_RX_PTP_CTL,
13606 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13607 
13608 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13609 		-EFAULT : 0;
13610 }
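
/* A minimal userspace sketch of exercising this ioctl; the interface
 * name, socket descriptor (sock_fd) and chosen filter are illustrative
 * only:
 *
 *	#include <linux/net_tstamp.h>	// struct hwtstamp_config
 *	#include <linux/sockios.h>	// SIOCSHWTSTAMP
 *	#include <net/if.h>		// struct ifreq, IFNAMSIZ
 *	#include <sys/ioctl.h>
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */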
13611 
13612 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13613 {
13614 	struct mii_ioctl_data *data = if_mii(ifr);
13615 	struct tg3 *tp = netdev_priv(dev);
13616 	int err;
13617 
13618 	if (tg3_flag(tp, USE_PHYLIB)) {
13619 		struct phy_device *phydev;
13620 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13621 			return -EAGAIN;
13622 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13623 		return phy_mii_ioctl(phydev, ifr, cmd);
13624 	}
13625 
13626 	switch (cmd) {
13627 	case SIOCGMIIPHY:
13628 		data->phy_id = tp->phy_addr;
13629 
13630 		/* fall through */
13631 	case SIOCGMIIREG: {
13632 		u32 mii_regval;
13633 
13634 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13635 			break;			/* We have no PHY */
13636 
13637 		if (!netif_running(dev))
13638 			return -EAGAIN;
13639 
13640 		spin_lock_bh(&tp->lock);
13641 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13642 				    data->reg_num & 0x1f, &mii_regval);
13643 		spin_unlock_bh(&tp->lock);
13644 
13645 		data->val_out = mii_regval;
13646 
13647 		return err;
13648 	}
13649 
13650 	case SIOCSMIIREG:
13651 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13652 			break;			/* We have no PHY */
13653 
13654 		if (!netif_running(dev))
13655 			return -EAGAIN;
13656 
13657 		spin_lock_bh(&tp->lock);
13658 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13659 				     data->reg_num & 0x1f, data->val_in);
13660 		spin_unlock_bh(&tp->lock);
13661 
13662 		return err;
13663 
13664 	case SIOCSHWTSTAMP:
13665 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13666 
13667 	default:
13668 		/* do nothing */
13669 		break;
13670 	}
13671 	return -EOPNOTSUPP;
13672 }
13673 
13674 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13675 {
13676 	struct tg3 *tp = netdev_priv(dev);
13677 
13678 	memcpy(ec, &tp->coal, sizeof(*ec));
13679 	return 0;
13680 }
13681 
13682 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13683 {
13684 	struct tg3 *tp = netdev_priv(dev);
13685 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13686 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13687 
13688 	if (!tg3_flag(tp, 5705_PLUS)) {
13689 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13690 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13691 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13692 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13693 	}
13694 
13695 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13696 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13697 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13698 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13699 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13700 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13701 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13702 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13703 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13704 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13705 		return -EINVAL;
13706 
13707 	/* No rx interrupts will be generated if both are zero */
13708 	if ((ec->rx_coalesce_usecs == 0) &&
13709 	    (ec->rx_max_coalesced_frames == 0))
13710 		return -EINVAL;
13711 
13712 	/* No tx interrupts will be generated if both are zero */
13713 	if ((ec->tx_coalesce_usecs == 0) &&
13714 	    (ec->tx_max_coalesced_frames == 0))
13715 		return -EINVAL;
13716 
13717 	/* Only copy relevant parameters, ignore all others. */
13718 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13719 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13720 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13721 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13722 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13723 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13724 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13725 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13726 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13727 
13728 	if (netif_running(dev)) {
13729 		tg3_full_lock(tp, 0);
13730 		__tg3_set_coalesce(tp, &tp->coal);
13731 		tg3_full_unlock(tp);
13732 	}
13733 	return 0;
13734 }
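
/* The fields validated above correspond one-to-one with the standard
 * ethtool coalescing knobs, e.g. (values illustrative):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 */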
13735 
13736 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13737 {
13738 	struct tg3 *tp = netdev_priv(dev);
13739 
13740 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13741 		netdev_warn(tp->dev, "Board does not support EEE!\n");
13742 		return -EOPNOTSUPP;
13743 	}
13744 
13745 	if (edata->advertised != tp->eee.advertised) {
13746 		netdev_warn(tp->dev,
13747 			    "Direct manipulation of EEE advertisement is not supported\n");
13748 		return -EINVAL;
13749 	}
13750 
13751 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13752 		netdev_warn(tp->dev,
13753 			    "Maximum supported Tx LPI timer is %#x\n",
13754 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13755 		return -EINVAL;
13756 	}
13757 
13758 	tp->eee = *edata;
13759 
13760 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13761 	tg3_warn_mgmt_link_flap(tp);
13762 
13763 	if (netif_running(tp->dev)) {
13764 		tg3_full_lock(tp, 0);
13765 		tg3_setup_eee(tp);
13766 		tg3_phy_reset(tp);
13767 		tg3_full_unlock(tp);
13768 	}
13769 
13770 	return 0;
13771 }
13772 
13773 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13774 {
13775 	struct tg3 *tp = netdev_priv(dev);
13776 
13777 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13778 		netdev_warn(tp->dev,
13779 			    "Board does not support EEE!\n");
13780 		return -EOPNOTSUPP;
13781 	}
13782 
13783 	*edata = tp->eee;
13784 	return 0;
13785 }
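
/* Both EEE hooks back the standard ethtool interface, e.g.
 * "ethtool --show-eee eth0" and "ethtool --set-eee eth0 eee on"
 * (interface name illustrative); note that tg3_set_eee() above refuses
 * direct changes to the advertisement mask.
 */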
13786 
13787 static const struct ethtool_ops tg3_ethtool_ops = {
13788 	.get_settings		= tg3_get_settings,
13789 	.set_settings		= tg3_set_settings,
13790 	.get_drvinfo		= tg3_get_drvinfo,
13791 	.get_regs_len		= tg3_get_regs_len,
13792 	.get_regs		= tg3_get_regs,
13793 	.get_wol		= tg3_get_wol,
13794 	.set_wol		= tg3_set_wol,
13795 	.get_msglevel		= tg3_get_msglevel,
13796 	.set_msglevel		= tg3_set_msglevel,
13797 	.nway_reset		= tg3_nway_reset,
13798 	.get_link		= ethtool_op_get_link,
13799 	.get_eeprom_len		= tg3_get_eeprom_len,
13800 	.get_eeprom		= tg3_get_eeprom,
13801 	.set_eeprom		= tg3_set_eeprom,
13802 	.get_ringparam		= tg3_get_ringparam,
13803 	.set_ringparam		= tg3_set_ringparam,
13804 	.get_pauseparam		= tg3_get_pauseparam,
13805 	.set_pauseparam		= tg3_set_pauseparam,
13806 	.self_test		= tg3_self_test,
13807 	.get_strings		= tg3_get_strings,
13808 	.set_phys_id		= tg3_set_phys_id,
13809 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13810 	.get_coalesce		= tg3_get_coalesce,
13811 	.set_coalesce		= tg3_set_coalesce,
13812 	.get_sset_count		= tg3_get_sset_count,
13813 	.get_rxnfc		= tg3_get_rxnfc,
13814 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13815 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13816 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13817 	.get_channels		= tg3_get_channels,
13818 	.set_channels		= tg3_set_channels,
13819 	.get_ts_info		= tg3_get_ts_info,
13820 	.get_eee		= tg3_get_eee,
13821 	.set_eee		= tg3_set_eee,
13822 };
13823 
13824 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13825 						struct rtnl_link_stats64 *stats)
13826 {
13827 	struct tg3 *tp = netdev_priv(dev);
13828 
13829 	spin_lock_bh(&tp->lock);
13830 	if (!tp->hw_stats) {
13831 		spin_unlock_bh(&tp->lock);
13832 		return &tp->net_stats_prev;
13833 	}
13834 
13835 	tg3_get_nstats(tp, stats);
13836 	spin_unlock_bh(&tp->lock);
13837 
13838 	return stats;
13839 }
13840 
13841 static void tg3_set_rx_mode(struct net_device *dev)
13842 {
13843 	struct tg3 *tp = netdev_priv(dev);
13844 
13845 	if (!netif_running(dev))
13846 		return;
13847 
13848 	tg3_full_lock(tp, 0);
13849 	__tg3_set_rx_mode(dev);
13850 	tg3_full_unlock(tp);
13851 }
13852 
13853 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13854 			       int new_mtu)
13855 {
13856 	dev->mtu = new_mtu;
13857 
13858 	if (new_mtu > ETH_DATA_LEN) {
13859 		if (tg3_flag(tp, 5780_CLASS)) {
13860 			netdev_update_features(dev);
13861 			tg3_flag_clear(tp, TSO_CAPABLE);
13862 		} else {
13863 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13864 		}
13865 	} else {
13866 		if (tg3_flag(tp, 5780_CLASS)) {
13867 			tg3_flag_set(tp, TSO_CAPABLE);
13868 			netdev_update_features(dev);
13869 		}
13870 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13871 	}
13872 }
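
/* On 5780-class devices TSO and jumbo frames are mutually exclusive
 * (apparently because the firmware-based TSO cannot handle jumbo MTUs),
 * which is why tg3_set_mtu() toggles TSO_CAPABLE around the features
 * update instead of simply enabling the jumbo ring.
 */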
13873 
13874 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13875 {
13876 	struct tg3 *tp = netdev_priv(dev);
13877 	int err;
13878 	bool reset_phy = false;
13879 
13880 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13881 		return -EINVAL;
13882 
13883 	if (!netif_running(dev)) {
13884 		/* We'll just catch it later when the
13885 		 * device is brought up.
13886 		 */
13887 		tg3_set_mtu(dev, tp, new_mtu);
13888 		return 0;
13889 	}
13890 
13891 	tg3_phy_stop(tp);
13892 
13893 	tg3_netif_stop(tp);
13894 
13895 	tg3_full_lock(tp, 1);
13896 
13897 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13898 
13899 	tg3_set_mtu(dev, tp, new_mtu);
13900 
13901 	/* Reset the PHY, otherwise the read DMA engine will be left in a
13902 	 * mode that limits all requests to 256 bytes.
13903 	 */
13904 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13905 		reset_phy = true;
13906 
13907 	err = tg3_restart_hw(tp, reset_phy);
13908 
13909 	if (!err)
13910 		tg3_netif_start(tp);
13911 
13912 	tg3_full_unlock(tp);
13913 
13914 	if (!err)
13915 		tg3_phy_start(tp);
13916 
13917 	return err;
13918 }
13919 
13920 static const struct net_device_ops tg3_netdev_ops = {
13921 	.ndo_open		= tg3_open,
13922 	.ndo_stop		= tg3_close,
13923 	.ndo_start_xmit		= tg3_start_xmit,
13924 	.ndo_get_stats64	= tg3_get_stats64,
13925 	.ndo_validate_addr	= eth_validate_addr,
13926 	.ndo_set_rx_mode	= tg3_set_rx_mode,
13927 	.ndo_set_mac_address	= tg3_set_mac_addr,
13928 	.ndo_do_ioctl		= tg3_ioctl,
13929 	.ndo_tx_timeout		= tg3_tx_timeout,
13930 	.ndo_change_mtu		= tg3_change_mtu,
13931 	.ndo_fix_features	= tg3_fix_features,
13932 	.ndo_set_features	= tg3_set_features,
13933 #ifdef CONFIG_NET_POLL_CONTROLLER
13934 	.ndo_poll_controller	= tg3_poll_controller,
13935 #endif
13936 };
13937 
13938 static void tg3_get_eeprom_size(struct tg3 *tp)
13939 {
13940 	u32 cursize, val, magic;
13941 
13942 	tp->nvram_size = EEPROM_CHIP_SIZE;
13943 
13944 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13945 		return;
13946 
13947 	if ((magic != TG3_EEPROM_MAGIC) &&
13948 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13949 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13950 		return;
13951 
13952 	/*
13953 	 * Size the chip by reading offsets at increasing powers of two.
13954 	 * When we encounter our validation signature, we know the addressing
13955 	 * has wrapped around, and thus have our chip size.
13956 	 */
13957 	cursize = 0x10;
13958 
13959 	while (cursize < tp->nvram_size) {
13960 		if (tg3_nvram_read(tp, cursize, &val) != 0)
13961 			return;
13962 
13963 		if (val == magic)
13964 			break;
13965 
13966 		cursize <<= 1;
13967 	}
13968 
13969 	tp->nvram_size = cursize;
13970 }
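
/* Worked example of the wrap-around sizing: on a hypothetical 512-byte
 * part the reads at 0x10, 0x20, ... 0x100 return ordinary data, but the
 * read at 0x200 wraps to offset 0 and returns the magic signature, so
 * the loop exits with cursize -- and hence nvram_size -- equal to 0x200.
 */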
13971 
13972 static void tg3_get_nvram_size(struct tg3 *tp)
13973 {
13974 	u32 val;
13975 
13976 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13977 		return;
13978 
13979 	/* Selfboot format */
13980 	if (val != TG3_EEPROM_MAGIC) {
13981 		tg3_get_eeprom_size(tp);
13982 		return;
13983 	}
13984 
13985 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13986 		if (val != 0) {
13987 			/* This is confusing.  We want to operate on the
13988 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13989 			 * call will read from NVRAM and byteswap the data
13990 			 * according to the byteswapping settings for all
13991 			 * other register accesses.  This ensures the data we
13992 			 * want will always reside in the lower 16-bits.
13993 			 * However, the data in NVRAM is in LE format, which
13994 			 * means the data from the NVRAM read will always be
13995 			 * opposite the endianness of the CPU.  The 16-bit
13996 			 * byteswap then brings the data to CPU endianness.
13997 			 */
13998 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13999 			return;
14000 		}
14001 	}
14002 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14003 }
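
/* Concrete example of the swab16() above, assuming a little-endian CPU:
 * if the size stored at NVRAM offset 0xf2 is 256 (bytes 0x00 0x01 in LE
 * order), the byteswapped register read leaves 0x0001 in the low 16 bits
 * of val, swab16() turns that back into 0x0100 == 256, and nvram_size
 * becomes 256 * 1024.
 */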
14004 
14005 static void tg3_get_nvram_info(struct tg3 *tp)
14006 {
14007 	u32 nvcfg1;
14008 
14009 	nvcfg1 = tr32(NVRAM_CFG1);
14010 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14011 		tg3_flag_set(tp, FLASH);
14012 	} else {
14013 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14014 		tw32(NVRAM_CFG1, nvcfg1);
14015 	}
14016 
14017 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14018 	    tg3_flag(tp, 5780_CLASS)) {
14019 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14020 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14021 			tp->nvram_jedecnum = JEDEC_ATMEL;
14022 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14023 			tg3_flag_set(tp, NVRAM_BUFFERED);
14024 			break;
14025 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14026 			tp->nvram_jedecnum = JEDEC_ATMEL;
14027 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14028 			break;
14029 		case FLASH_VENDOR_ATMEL_EEPROM:
14030 			tp->nvram_jedecnum = JEDEC_ATMEL;
14031 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14032 			tg3_flag_set(tp, NVRAM_BUFFERED);
14033 			break;
14034 		case FLASH_VENDOR_ST:
14035 			tp->nvram_jedecnum = JEDEC_ST;
14036 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14037 			tg3_flag_set(tp, NVRAM_BUFFERED);
14038 			break;
14039 		case FLASH_VENDOR_SAIFUN:
14040 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14041 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14042 			break;
14043 		case FLASH_VENDOR_SST_SMALL:
14044 		case FLASH_VENDOR_SST_LARGE:
14045 			tp->nvram_jedecnum = JEDEC_SST;
14046 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14047 			break;
14048 		}
14049 	} else {
14050 		tp->nvram_jedecnum = JEDEC_ATMEL;
14051 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14052 		tg3_flag_set(tp, NVRAM_BUFFERED);
14053 	}
14054 }
14055 
14056 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14057 {
14058 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14059 	case FLASH_5752PAGE_SIZE_256:
14060 		tp->nvram_pagesize = 256;
14061 		break;
14062 	case FLASH_5752PAGE_SIZE_512:
14063 		tp->nvram_pagesize = 512;
14064 		break;
14065 	case FLASH_5752PAGE_SIZE_1K:
14066 		tp->nvram_pagesize = 1024;
14067 		break;
14068 	case FLASH_5752PAGE_SIZE_2K:
14069 		tp->nvram_pagesize = 2048;
14070 		break;
14071 	case FLASH_5752PAGE_SIZE_4K:
14072 		tp->nvram_pagesize = 4096;
14073 		break;
14074 	case FLASH_5752PAGE_SIZE_264:
14075 		tp->nvram_pagesize = 264;
14076 		break;
14077 	case FLASH_5752PAGE_SIZE_528:
14078 		tp->nvram_pagesize = 528;
14079 		break;
14080 	}
14081 }
14082 
14083 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14084 {
14085 	u32 nvcfg1;
14086 
14087 	nvcfg1 = tr32(NVRAM_CFG1);
14088 
14089 	/* NVRAM protection for TPM */
14090 	if (nvcfg1 & (1 << 27))
14091 		tg3_flag_set(tp, PROTECTED_NVRAM);
14092 
14093 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14094 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14095 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14096 		tp->nvram_jedecnum = JEDEC_ATMEL;
14097 		tg3_flag_set(tp, NVRAM_BUFFERED);
14098 		break;
14099 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14100 		tp->nvram_jedecnum = JEDEC_ATMEL;
14101 		tg3_flag_set(tp, NVRAM_BUFFERED);
14102 		tg3_flag_set(tp, FLASH);
14103 		break;
14104 	case FLASH_5752VENDOR_ST_M45PE10:
14105 	case FLASH_5752VENDOR_ST_M45PE20:
14106 	case FLASH_5752VENDOR_ST_M45PE40:
14107 		tp->nvram_jedecnum = JEDEC_ST;
14108 		tg3_flag_set(tp, NVRAM_BUFFERED);
14109 		tg3_flag_set(tp, FLASH);
14110 		break;
14111 	}
14112 
14113 	if (tg3_flag(tp, FLASH)) {
14114 		tg3_nvram_get_pagesize(tp, nvcfg1);
14115 	} else {
14116 		/* For eeprom, set pagesize to maximum eeprom size */
14117 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14118 
14119 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14120 		tw32(NVRAM_CFG1, nvcfg1);
14121 	}
14122 }
14123 
14124 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14125 {
14126 	u32 nvcfg1, protect = 0;
14127 
14128 	nvcfg1 = tr32(NVRAM_CFG1);
14129 
14130 	/* NVRAM protection for TPM */
14131 	if (nvcfg1 & (1 << 27)) {
14132 		tg3_flag_set(tp, PROTECTED_NVRAM);
14133 		protect = 1;
14134 	}
14135 
14136 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14137 	switch (nvcfg1) {
14138 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14139 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14140 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14141 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14142 		tp->nvram_jedecnum = JEDEC_ATMEL;
14143 		tg3_flag_set(tp, NVRAM_BUFFERED);
14144 		tg3_flag_set(tp, FLASH);
14145 		tp->nvram_pagesize = 264;
14146 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14147 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14148 			tp->nvram_size = (protect ? 0x3e200 :
14149 					  TG3_NVRAM_SIZE_512KB);
14150 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14151 			tp->nvram_size = (protect ? 0x1f200 :
14152 					  TG3_NVRAM_SIZE_256KB);
14153 		else
14154 			tp->nvram_size = (protect ? 0x1f200 :
14155 					  TG3_NVRAM_SIZE_128KB);
14156 		break;
14157 	case FLASH_5752VENDOR_ST_M45PE10:
14158 	case FLASH_5752VENDOR_ST_M45PE20:
14159 	case FLASH_5752VENDOR_ST_M45PE40:
14160 		tp->nvram_jedecnum = JEDEC_ST;
14161 		tg3_flag_set(tp, NVRAM_BUFFERED);
14162 		tg3_flag_set(tp, FLASH);
14163 		tp->nvram_pagesize = 256;
14164 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14165 			tp->nvram_size = (protect ?
14166 					  TG3_NVRAM_SIZE_64KB :
14167 					  TG3_NVRAM_SIZE_128KB);
14168 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14169 			tp->nvram_size = (protect ?
14170 					  TG3_NVRAM_SIZE_64KB :
14171 					  TG3_NVRAM_SIZE_256KB);
14172 		else
14173 			tp->nvram_size = (protect ?
14174 					  TG3_NVRAM_SIZE_128KB :
14175 					  TG3_NVRAM_SIZE_512KB);
14176 		break;
14177 	}
14178 }
14179 
14180 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14181 {
14182 	u32 nvcfg1;
14183 
14184 	nvcfg1 = tr32(NVRAM_CFG1);
14185 
14186 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14187 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14188 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14189 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14190 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14191 		tp->nvram_jedecnum = JEDEC_ATMEL;
14192 		tg3_flag_set(tp, NVRAM_BUFFERED);
14193 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14194 
14195 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14196 		tw32(NVRAM_CFG1, nvcfg1);
14197 		break;
14198 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14199 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14200 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14201 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14202 		tp->nvram_jedecnum = JEDEC_ATMEL;
14203 		tg3_flag_set(tp, NVRAM_BUFFERED);
14204 		tg3_flag_set(tp, FLASH);
14205 		tp->nvram_pagesize = 264;
14206 		break;
14207 	case FLASH_5752VENDOR_ST_M45PE10:
14208 	case FLASH_5752VENDOR_ST_M45PE20:
14209 	case FLASH_5752VENDOR_ST_M45PE40:
14210 		tp->nvram_jedecnum = JEDEC_ST;
14211 		tg3_flag_set(tp, NVRAM_BUFFERED);
14212 		tg3_flag_set(tp, FLASH);
14213 		tp->nvram_pagesize = 256;
14214 		break;
14215 	}
14216 }
14217 
14218 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14219 {
14220 	u32 nvcfg1, protect = 0;
14221 
14222 	nvcfg1 = tr32(NVRAM_CFG1);
14223 
14224 	/* NVRAM protection for TPM */
14225 	if (nvcfg1 & (1 << 27)) {
14226 		tg3_flag_set(tp, PROTECTED_NVRAM);
14227 		protect = 1;
14228 	}
14229 
14230 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14231 	switch (nvcfg1) {
14232 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14233 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14234 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14235 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14236 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14237 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14238 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14239 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14240 		tp->nvram_jedecnum = JEDEC_ATMEL;
14241 		tg3_flag_set(tp, NVRAM_BUFFERED);
14242 		tg3_flag_set(tp, FLASH);
14243 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14244 		tp->nvram_pagesize = 256;
14245 		break;
14246 	case FLASH_5761VENDOR_ST_A_M45PE20:
14247 	case FLASH_5761VENDOR_ST_A_M45PE40:
14248 	case FLASH_5761VENDOR_ST_A_M45PE80:
14249 	case FLASH_5761VENDOR_ST_A_M45PE16:
14250 	case FLASH_5761VENDOR_ST_M_M45PE20:
14251 	case FLASH_5761VENDOR_ST_M_M45PE40:
14252 	case FLASH_5761VENDOR_ST_M_M45PE80:
14253 	case FLASH_5761VENDOR_ST_M_M45PE16:
14254 		tp->nvram_jedecnum = JEDEC_ST;
14255 		tg3_flag_set(tp, NVRAM_BUFFERED);
14256 		tg3_flag_set(tp, FLASH);
14257 		tp->nvram_pagesize = 256;
14258 		break;
14259 	}
14260 
14261 	if (protect) {
14262 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14263 	} else {
14264 		switch (nvcfg1) {
14265 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14266 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14267 		case FLASH_5761VENDOR_ST_A_M45PE16:
14268 		case FLASH_5761VENDOR_ST_M_M45PE16:
14269 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14270 			break;
14271 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14272 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14273 		case FLASH_5761VENDOR_ST_A_M45PE80:
14274 		case FLASH_5761VENDOR_ST_M_M45PE80:
14275 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14276 			break;
14277 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14278 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14279 		case FLASH_5761VENDOR_ST_A_M45PE40:
14280 		case FLASH_5761VENDOR_ST_M_M45PE40:
14281 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14282 			break;
14283 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14284 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14285 		case FLASH_5761VENDOR_ST_A_M45PE20:
14286 		case FLASH_5761VENDOR_ST_M_M45PE20:
14287 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14288 			break;
14289 		}
14290 	}
14291 }
14292 
14293 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14294 {
14295 	tp->nvram_jedecnum = JEDEC_ATMEL;
14296 	tg3_flag_set(tp, NVRAM_BUFFERED);
14297 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14298 }
14299 
14300 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14301 {
14302 	u32 nvcfg1;
14303 
14304 	nvcfg1 = tr32(NVRAM_CFG1);
14305 
14306 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14307 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14308 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14309 		tp->nvram_jedecnum = JEDEC_ATMEL;
14310 		tg3_flag_set(tp, NVRAM_BUFFERED);
14311 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14312 
14313 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14314 		tw32(NVRAM_CFG1, nvcfg1);
14315 		return;
14316 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14317 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14318 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14319 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14320 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14321 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14322 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14323 		tp->nvram_jedecnum = JEDEC_ATMEL;
14324 		tg3_flag_set(tp, NVRAM_BUFFERED);
14325 		tg3_flag_set(tp, FLASH);
14326 
14327 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14328 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14329 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14330 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14331 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14332 			break;
14333 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14334 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14335 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14336 			break;
14337 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14338 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14339 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14340 			break;
14341 		}
14342 		break;
14343 	case FLASH_5752VENDOR_ST_M45PE10:
14344 	case FLASH_5752VENDOR_ST_M45PE20:
14345 	case FLASH_5752VENDOR_ST_M45PE40:
14346 		tp->nvram_jedecnum = JEDEC_ST;
14347 		tg3_flag_set(tp, NVRAM_BUFFERED);
14348 		tg3_flag_set(tp, FLASH);
14349 
14350 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14351 		case FLASH_5752VENDOR_ST_M45PE10:
14352 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14353 			break;
14354 		case FLASH_5752VENDOR_ST_M45PE20:
14355 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14356 			break;
14357 		case FLASH_5752VENDOR_ST_M45PE40:
14358 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14359 			break;
14360 		}
14361 		break;
14362 	default:
14363 		tg3_flag_set(tp, NO_NVRAM);
14364 		return;
14365 	}
14366 
14367 	tg3_nvram_get_pagesize(tp, nvcfg1);
14368 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14369 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14370 }
14371 
14372 
14373 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14374 {
14375 	u32 nvcfg1;
14376 
14377 	nvcfg1 = tr32(NVRAM_CFG1);
14378 
14379 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14380 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14381 	case FLASH_5717VENDOR_MICRO_EEPROM:
14382 		tp->nvram_jedecnum = JEDEC_ATMEL;
14383 		tg3_flag_set(tp, NVRAM_BUFFERED);
14384 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14385 
14386 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14387 		tw32(NVRAM_CFG1, nvcfg1);
14388 		return;
14389 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14390 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14391 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14392 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14393 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14394 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14395 	case FLASH_5717VENDOR_ATMEL_45USPT:
14396 		tp->nvram_jedecnum = JEDEC_ATMEL;
14397 		tg3_flag_set(tp, NVRAM_BUFFERED);
14398 		tg3_flag_set(tp, FLASH);
14399 
14400 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14401 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14402 			/* Detect size with tg3_get_nvram_size() */
14403 			break;
14404 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14405 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14406 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14407 			break;
14408 		default:
14409 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14410 			break;
14411 		}
14412 		break;
14413 	case FLASH_5717VENDOR_ST_M_M25PE10:
14414 	case FLASH_5717VENDOR_ST_A_M25PE10:
14415 	case FLASH_5717VENDOR_ST_M_M45PE10:
14416 	case FLASH_5717VENDOR_ST_A_M45PE10:
14417 	case FLASH_5717VENDOR_ST_M_M25PE20:
14418 	case FLASH_5717VENDOR_ST_A_M25PE20:
14419 	case FLASH_5717VENDOR_ST_M_M45PE20:
14420 	case FLASH_5717VENDOR_ST_A_M45PE20:
14421 	case FLASH_5717VENDOR_ST_25USPT:
14422 	case FLASH_5717VENDOR_ST_45USPT:
14423 		tp->nvram_jedecnum = JEDEC_ST;
14424 		tg3_flag_set(tp, NVRAM_BUFFERED);
14425 		tg3_flag_set(tp, FLASH);
14426 
14427 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14428 		case FLASH_5717VENDOR_ST_M_M25PE20:
14429 		case FLASH_5717VENDOR_ST_M_M45PE20:
14430 			/* Detect size with tg3_get_nvram_size() */
14431 			break;
14432 		case FLASH_5717VENDOR_ST_A_M25PE20:
14433 		case FLASH_5717VENDOR_ST_A_M45PE20:
14434 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14435 			break;
14436 		default:
14437 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14438 			break;
14439 		}
14440 		break;
14441 	default:
14442 		tg3_flag_set(tp, NO_NVRAM);
14443 		return;
14444 	}
14445 
14446 	tg3_nvram_get_pagesize(tp, nvcfg1);
14447 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14448 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14449 }
14450 
14451 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14452 {
14453 	u32 nvcfg1, nvmpinstrp;
14454 
14455 	nvcfg1 = tr32(NVRAM_CFG1);
14456 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14457 
14458 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14459 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14460 			tg3_flag_set(tp, NO_NVRAM);
14461 			return;
14462 		}
14463 
14464 		switch (nvmpinstrp) {
14465 		case FLASH_5762_EEPROM_HD:
14466 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14467 			break;
14468 		case FLASH_5762_EEPROM_LD:
14469 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14470 			break;
14471 		case FLASH_5720VENDOR_M_ST_M45PE20:
14472 			/* This pinstrap supports multiple sizes, so force it
14473 			 * to read the actual size from location 0xf0.
14474 			 */
14475 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14476 			break;
14477 		}
14478 	}
14479 
14480 	switch (nvmpinstrp) {
14481 	case FLASH_5720_EEPROM_HD:
14482 	case FLASH_5720_EEPROM_LD:
14483 		tp->nvram_jedecnum = JEDEC_ATMEL;
14484 		tg3_flag_set(tp, NVRAM_BUFFERED);
14485 
14486 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14487 		tw32(NVRAM_CFG1, nvcfg1);
14488 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14489 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14490 		else
14491 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14492 		return;
14493 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14494 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14495 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14496 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14497 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14498 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14499 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14500 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14501 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14502 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14503 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14504 	case FLASH_5720VENDOR_ATMEL_45USPT:
14505 		tp->nvram_jedecnum = JEDEC_ATMEL;
14506 		tg3_flag_set(tp, NVRAM_BUFFERED);
14507 		tg3_flag_set(tp, FLASH);
14508 
14509 		switch (nvmpinstrp) {
14510 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14511 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14512 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14513 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14514 			break;
14515 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14516 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14517 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14518 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14519 			break;
14520 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14521 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14522 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14523 			break;
14524 		default:
14525 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14526 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14527 			break;
14528 		}
14529 		break;
14530 	case FLASH_5720VENDOR_M_ST_M25PE10:
14531 	case FLASH_5720VENDOR_M_ST_M45PE10:
14532 	case FLASH_5720VENDOR_A_ST_M25PE10:
14533 	case FLASH_5720VENDOR_A_ST_M45PE10:
14534 	case FLASH_5720VENDOR_M_ST_M25PE20:
14535 	case FLASH_5720VENDOR_M_ST_M45PE20:
14536 	case FLASH_5720VENDOR_A_ST_M25PE20:
14537 	case FLASH_5720VENDOR_A_ST_M45PE20:
14538 	case FLASH_5720VENDOR_M_ST_M25PE40:
14539 	case FLASH_5720VENDOR_M_ST_M45PE40:
14540 	case FLASH_5720VENDOR_A_ST_M25PE40:
14541 	case FLASH_5720VENDOR_A_ST_M45PE40:
14542 	case FLASH_5720VENDOR_M_ST_M25PE80:
14543 	case FLASH_5720VENDOR_M_ST_M45PE80:
14544 	case FLASH_5720VENDOR_A_ST_M25PE80:
14545 	case FLASH_5720VENDOR_A_ST_M45PE80:
14546 	case FLASH_5720VENDOR_ST_25USPT:
14547 	case FLASH_5720VENDOR_ST_45USPT:
14548 		tp->nvram_jedecnum = JEDEC_ST;
14549 		tg3_flag_set(tp, NVRAM_BUFFERED);
14550 		tg3_flag_set(tp, FLASH);
14551 
14552 		switch (nvmpinstrp) {
14553 		case FLASH_5720VENDOR_M_ST_M25PE20:
14554 		case FLASH_5720VENDOR_M_ST_M45PE20:
14555 		case FLASH_5720VENDOR_A_ST_M25PE20:
14556 		case FLASH_5720VENDOR_A_ST_M45PE20:
14557 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14558 			break;
14559 		case FLASH_5720VENDOR_M_ST_M25PE40:
14560 		case FLASH_5720VENDOR_M_ST_M45PE40:
14561 		case FLASH_5720VENDOR_A_ST_M25PE40:
14562 		case FLASH_5720VENDOR_A_ST_M45PE40:
14563 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14564 			break;
14565 		case FLASH_5720VENDOR_M_ST_M25PE80:
14566 		case FLASH_5720VENDOR_M_ST_M45PE80:
14567 		case FLASH_5720VENDOR_A_ST_M25PE80:
14568 		case FLASH_5720VENDOR_A_ST_M45PE80:
14569 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14570 			break;
14571 		default:
14572 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14573 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14574 			break;
14575 		}
14576 		break;
14577 	default:
14578 		tg3_flag_set(tp, NO_NVRAM);
14579 		return;
14580 	}
14581 
14582 	tg3_nvram_get_pagesize(tp, nvcfg1);
14583 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14584 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14585 
14586 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14587 		u32 val;
14588 
14589 		if (tg3_nvram_read(tp, 0, &val))
14590 			return;
14591 
14592 		if (val != TG3_EEPROM_MAGIC &&
14593 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14594 			tg3_flag_set(tp, NO_NVRAM);
14595 	}
14596 }
14597 
14598 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14599 static void tg3_nvram_init(struct tg3 *tp)
14600 {
14601 	if (tg3_flag(tp, IS_SSB_CORE)) {
14602 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14603 		tg3_flag_clear(tp, NVRAM);
14604 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14605 		tg3_flag_set(tp, NO_NVRAM);
14606 		return;
14607 	}
14608 
14609 	tw32_f(GRC_EEPROM_ADDR,
14610 	     (EEPROM_ADDR_FSM_RESET |
14611 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14612 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14613 
14614 	msleep(1);
14615 
14616 	/* Enable serial EEPROM accesses. */
14617 	tw32_f(GRC_LOCAL_CTRL,
14618 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14619 	udelay(100);
14620 
14621 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14622 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14623 		tg3_flag_set(tp, NVRAM);
14624 
14625 		if (tg3_nvram_lock(tp)) {
14626 			netdev_warn(tp->dev,
14627 				    "Cannot get nvram lock, %s failed\n",
14628 				    __func__);
14629 			return;
14630 		}
14631 		tg3_enable_nvram_access(tp);
14632 
14633 		tp->nvram_size = 0;
14634 
14635 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14636 			tg3_get_5752_nvram_info(tp);
14637 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14638 			tg3_get_5755_nvram_info(tp);
14639 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14640 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14641 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14642 			tg3_get_5787_nvram_info(tp);
14643 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14644 			tg3_get_5761_nvram_info(tp);
14645 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14646 			tg3_get_5906_nvram_info(tp);
14647 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14648 			 tg3_flag(tp, 57765_CLASS))
14649 			tg3_get_57780_nvram_info(tp);
14650 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14651 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14652 			tg3_get_5717_nvram_info(tp);
14653 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14654 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14655 			tg3_get_5720_nvram_info(tp);
14656 		else
14657 			tg3_get_nvram_info(tp);
14658 
14659 		if (tp->nvram_size == 0)
14660 			tg3_get_nvram_size(tp);
14661 
14662 		tg3_disable_nvram_access(tp);
14663 		tg3_nvram_unlock(tp);
14664 
14665 	} else {
14666 		tg3_flag_clear(tp, NVRAM);
14667 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14668 
14669 		tg3_get_eeprom_size(tp);
14670 	}
14671 }
14672 
14673 struct subsys_tbl_ent {
14674 	u16 subsys_vendor, subsys_devid;
14675 	u32 phy_id;
14676 };
14677 
14678 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14679 	/* Broadcom boards. */
14680 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14681 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14682 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14683 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14684 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14685 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14686 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14687 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14688 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14689 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14690 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14691 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14692 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14693 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14694 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14695 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14696 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14697 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14698 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14699 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14700 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14701 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14702 
14703 	/* 3com boards. */
14704 	{ TG3PCI_SUBVENDOR_ID_3COM,
14705 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14706 	{ TG3PCI_SUBVENDOR_ID_3COM,
14707 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14708 	{ TG3PCI_SUBVENDOR_ID_3COM,
14709 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14710 	{ TG3PCI_SUBVENDOR_ID_3COM,
14711 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14712 	{ TG3PCI_SUBVENDOR_ID_3COM,
14713 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14714 
14715 	/* DELL boards. */
14716 	{ TG3PCI_SUBVENDOR_ID_DELL,
14717 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14718 	{ TG3PCI_SUBVENDOR_ID_DELL,
14719 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14720 	{ TG3PCI_SUBVENDOR_ID_DELL,
14721 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14722 	{ TG3PCI_SUBVENDOR_ID_DELL,
14723 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14724 
14725 	/* Compaq boards. */
14726 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14727 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14728 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14729 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14730 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14731 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14732 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14733 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14734 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14735 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14736 
14737 	/* IBM boards. */
14738 	{ TG3PCI_SUBVENDOR_ID_IBM,
14739 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14740 };
14741 
14742 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14743 {
14744 	int i;
14745 
14746 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14747 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14748 		     tp->pdev->subsystem_vendor) &&
14749 		    (subsys_id_to_phy_id[i].subsys_devid ==
14750 		     tp->pdev->subsystem_device))
14751 			return &subsys_id_to_phy_id[i];
14752 	}
14753 	return NULL;
14754 }
14755 
14756 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14757 {
14758 	u32 val;
14759 
14760 	tp->phy_id = TG3_PHY_ID_INVALID;
14761 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14762 
	/* Assume an onboard, WOL-capable device by default. */
14764 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14765 	tg3_flag_set(tp, WOL_CAP);
14766 
14767 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14768 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14769 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14770 			tg3_flag_set(tp, IS_NIC);
14771 		}
14772 		val = tr32(VCPU_CFGSHDW);
14773 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14774 			tg3_flag_set(tp, ASPM_WORKAROUND);
14775 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14776 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14777 			tg3_flag_set(tp, WOL_ENABLE);
14778 			device_set_wakeup_enable(&tp->pdev->dev, true);
14779 		}
14780 		goto done;
14781 	}
14782 
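	/* Look for the bootcode signature in NIC SRAM; without it, the
	 * hardware config block that follows cannot be trusted.
	 */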
14783 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14784 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14785 		u32 nic_cfg, led_cfg;
14786 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14787 		int eeprom_phy_serdes = 0;
14788 
14789 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14790 		tp->nic_sram_data_cfg = nic_cfg;
14791 
14792 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14793 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14794 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14795 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14796 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14797 		    (ver > 0) && (ver < 0x100))
14798 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14799 
14800 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14801 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14802 
14803 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14804 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14805 			eeprom_phy_serdes = 1;
14806 
14807 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14808 		if (nic_phy_id != 0) {
14809 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14810 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14811 
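			/* Fold the two SRAM PHY ID words into the driver's
			 * internal layout (the same packing used for
			 * MII_PHYSID1/2 in tg3_phy_probe()):
			 *   bits 31..26  ID2[15:10]
			 *   bits 25..10  ID1[31:16]
			 *   bits  9..0   ID2[9:0]
			 */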
14812 			eeprom_phy_id  = (id1 >> 16) << 10;
14813 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14814 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14815 		} else
14816 			eeprom_phy_id = 0;
14817 
14818 		tp->phy_id = eeprom_phy_id;
14819 		if (eeprom_phy_serdes) {
14820 			if (!tg3_flag(tp, 5705_PLUS))
14821 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14822 			else
14823 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14824 		}
14825 
14826 		if (tg3_flag(tp, 5750_PLUS))
14827 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14828 				    SHASTA_EXT_LED_MODE_MASK);
14829 		else
14830 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14831 
14832 		switch (led_cfg) {
14833 		default:
14834 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14835 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14836 			break;
14837 
14838 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14839 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14840 			break;
14841 
14842 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14843 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14844 
			/* Default to PHY_1 mode if 0 (MAC_MODE) is read,
			 * as happens with some older 5700/5701 bootcode.
			 */
14848 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14849 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14850 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14851 
14852 			break;
14853 
14854 		case SHASTA_EXT_LED_SHARED:
14855 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14856 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14857 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14858 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14859 						 LED_CTRL_MODE_PHY_2);
14860 			break;
14861 
14862 		case SHASTA_EXT_LED_MAC:
14863 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14864 			break;
14865 
14866 		case SHASTA_EXT_LED_COMBO:
14867 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14868 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14869 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14870 						 LED_CTRL_MODE_PHY_2);
14871 			break;
14872 
14873 		}
14874 
14875 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14876 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14877 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14878 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14879 
14880 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14881 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14882 
14883 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14884 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14885 			if ((tp->pdev->subsystem_vendor ==
14886 			     PCI_VENDOR_ID_ARIMA) &&
14887 			    (tp->pdev->subsystem_device == 0x205a ||
14888 			     tp->pdev->subsystem_device == 0x2063))
14889 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14890 		} else {
14891 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14892 			tg3_flag_set(tp, IS_NIC);
14893 		}
14894 
14895 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14896 			tg3_flag_set(tp, ENABLE_ASF);
14897 			if (tg3_flag(tp, 5750_PLUS))
14898 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14899 		}
14900 
14901 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14902 		    tg3_flag(tp, 5750_PLUS))
14903 			tg3_flag_set(tp, ENABLE_APE);
14904 
14905 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14906 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14907 			tg3_flag_clear(tp, WOL_CAP);
14908 
14909 		if (tg3_flag(tp, WOL_CAP) &&
14910 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14911 			tg3_flag_set(tp, WOL_ENABLE);
14912 			device_set_wakeup_enable(&tp->pdev->dev, true);
14913 		}
14914 
14915 		if (cfg2 & (1 << 17))
14916 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14917 
		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
14920 		if (cfg2 & (1 << 18))
14921 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14922 
14923 		if ((tg3_flag(tp, 57765_PLUS) ||
14924 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14925 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14926 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14927 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14928 
14929 		if (tg3_flag(tp, PCI_EXPRESS)) {
14930 			u32 cfg3;
14931 
14932 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14933 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14934 			    !tg3_flag(tp, 57765_PLUS) &&
14935 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14936 				tg3_flag_set(tp, ASPM_WORKAROUND);
14937 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14938 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14939 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14940 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14941 		}
14942 
14943 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14944 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14945 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14946 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14947 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14948 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14949 	}
14950 done:
14951 	if (tg3_flag(tp, WOL_CAP))
14952 		device_set_wakeup_enable(&tp->pdev->dev,
14953 					 tg3_flag(tp, WOL_ENABLE));
14954 	else
14955 		device_set_wakeup_capable(&tp->pdev->dev, false);
14956 }
14957 
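/* Read one 32-bit word from the chip's OTP area via the APE.  The
 * NVRAM lock serializes this against other NVRAM and OTP users, and
 * command completion is polled for up to 1 ms.
 */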
14958 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14959 {
14960 	int i, err;
14961 	u32 val2, off = offset * 8;
14962 
14963 	err = tg3_nvram_lock(tp);
14964 	if (err)
14965 		return err;
14966 
14967 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14968 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14969 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14970 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14971 	udelay(10);
14972 
14973 	for (i = 0; i < 100; i++) {
14974 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14975 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
14976 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14977 			break;
14978 		}
14979 		udelay(10);
14980 	}
14981 
14982 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14983 
14984 	tg3_nvram_unlock(tp);
14985 	if (val2 & APE_OTP_STATUS_CMD_DONE)
14986 		return 0;
14987 
14988 	return -EBUSY;
14989 }
14990 
14991 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14992 {
14993 	int i;
14994 	u32 val;
14995 
14996 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14997 	tw32(OTP_CTRL, cmd);
14998 
14999 	/* Wait for up to 1 ms for command to execute. */
15000 	for (i = 0; i < 100; i++) {
15001 		val = tr32(OTP_STATUS);
15002 		if (val & OTP_STATUS_CMD_DONE)
15003 			break;
15004 		udelay(10);
15005 	}
15006 
15007 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15008 }
15009 
15010 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15011  * configuration is a 32-bit value that straddles the alignment boundary.
15012  * We do two 32-bit reads and then shift and merge the results.
15013  */
15014 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15015 {
15016 	u32 bhalf_otp, thalf_otp;
15017 
15018 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15019 
15020 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15021 		return 0;
15022 
15023 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15024 
15025 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15026 		return 0;
15027 
15028 	thalf_otp = tr32(OTP_READ_DATA);
15029 
15030 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15031 
15032 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15033 		return 0;
15034 
15035 	bhalf_otp = tr32(OTP_READ_DATA);
15036 
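	/* The low half of the first word supplies the upper 16 bits of
	 * the result, and the high half of the second word supplies the
	 * lower 16 bits.
	 */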
15037 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15038 }
15039 
15040 static void tg3_phy_init_link_config(struct tg3 *tp)
15041 {
15042 	u32 adv = ADVERTISED_Autoneg;
15043 
15044 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15045 		adv |= ADVERTISED_1000baseT_Half |
15046 		       ADVERTISED_1000baseT_Full;
15047 
15048 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15049 		adv |= ADVERTISED_100baseT_Half |
15050 		       ADVERTISED_100baseT_Full |
15051 		       ADVERTISED_10baseT_Half |
15052 		       ADVERTISED_10baseT_Full |
15053 		       ADVERTISED_TP;
15054 	else
15055 		adv |= ADVERTISED_FIBRE;
15056 
15057 	tp->link_config.advertising = adv;
15058 	tp->link_config.speed = SPEED_UNKNOWN;
15059 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15060 	tp->link_config.autoneg = AUTONEG_ENABLE;
15061 	tp->link_config.active_speed = SPEED_UNKNOWN;
15062 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15063 
15064 	tp->old_link = -1;
15065 }
15066 
15067 static int tg3_phy_probe(struct tg3 *tp)
15068 {
15069 	u32 hw_phy_id_1, hw_phy_id_2;
15070 	u32 hw_phy_id, hw_phy_id_masked;
15071 	int err;
15072 
15073 	/* flow control autonegotiation is default behavior */
15074 	tg3_flag_set(tp, PAUSE_AUTONEG);
15075 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15076 
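	/* When APE firmware is present, each PCI function must use its
	 * own APE lock to arbitrate PHY access.
	 */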
15077 	if (tg3_flag(tp, ENABLE_APE)) {
15078 		switch (tp->pci_fn) {
15079 		case 0:
15080 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15081 			break;
15082 		case 1:
15083 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15084 			break;
15085 		case 2:
15086 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15087 			break;
15088 		case 3:
15089 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15090 			break;
15091 		}
15092 	}
15093 
15094 	if (!tg3_flag(tp, ENABLE_ASF) &&
15095 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15096 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15097 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15098 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15099 
15100 	if (tg3_flag(tp, USE_PHYLIB))
15101 		return tg3_phy_init(tp);
15102 
15103 	/* Reading the PHY ID register can conflict with ASF
15104 	 * firmware access to the PHY hardware.
15105 	 */
15106 	err = 0;
15107 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15108 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15109 	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the eeprom area and, failing
		 * that, to the hard-coded subsystem-ID table below.
		 */
15115 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15116 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15117 
15118 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15119 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15120 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15121 
15122 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15123 	}
15124 
15125 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15126 		tp->phy_id = hw_phy_id;
15127 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15128 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15129 		else
15130 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15131 	} else {
15132 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15133 			/* Do nothing, phy ID already set up in
15134 			 * tg3_get_eeprom_hw_cfg().
15135 			 */
15136 		} else {
15137 			struct subsys_tbl_ent *p;
15138 
15139 			/* No eeprom signature?  Try the hardcoded
15140 			 * subsys device table.
15141 			 */
15142 			p = tg3_lookup_by_subsys(tp);
15143 			if (p) {
15144 				tp->phy_id = p->phy_id;
15145 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the PHY is supported whenever it sits
				 * behind an SSB core.
				 */
15153 				return -ENODEV;
15154 			}
15155 
15156 			if (!tp->phy_id ||
15157 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15158 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15159 		}
15160 	}
15161 
15162 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15163 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15164 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15165 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15166 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15167 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15168 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15169 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15170 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15171 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15172 
15173 		tp->eee.supported = SUPPORTED_100baseT_Full |
15174 				    SUPPORTED_1000baseT_Full;
15175 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15176 				     ADVERTISED_1000baseT_Full;
15177 		tp->eee.eee_enabled = 1;
15178 		tp->eee.tx_lpi_enabled = 1;
15179 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15180 	}
15181 
15182 	tg3_phy_init_link_config(tp);
15183 
15184 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15185 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15186 	    !tg3_flag(tp, ENABLE_APE) &&
15187 	    !tg3_flag(tp, ENABLE_ASF)) {
15188 		u32 bmsr, dummy;
15189 
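		/* BMSR latches link-down events, so read it twice; the
		 * second read reflects the current link state.
		 */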
15190 		tg3_readphy(tp, MII_BMSR, &bmsr);
15191 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15192 		    (bmsr & BMSR_LSTATUS))
15193 			goto skip_phy_reset;
15194 
15195 		err = tg3_phy_reset(tp);
15196 		if (err)
15197 			return err;
15198 
15199 		tg3_phy_set_wirespeed(tp);
15200 
15201 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15202 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15203 					    tp->link_config.flowctrl);
15204 
15205 			tg3_writephy(tp, MII_BMCR,
15206 				     BMCR_ANENABLE | BMCR_ANRESTART);
15207 		}
15208 	}
15209 
15210 skip_phy_reset:
15211 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15212 		err = tg3_init_5401phy_dsp(tp);
15213 		if (err)
15214 			return err;
15215 
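		/* Run the DSP init a second time; the repeat appears to
		 * be deliberate, not a typo.
		 */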
15216 		err = tg3_init_5401phy_dsp(tp);
15217 	}
15218 
15219 	return err;
15220 }
15221 
15222 static void tg3_read_vpd(struct tg3 *tp)
15223 {
15224 	u8 *vpd_data;
15225 	unsigned int block_end, rosize, len;
15226 	u32 vpdlen;
15227 	int j, i = 0;
15228 
15229 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15230 	if (!vpd_data)
15231 		goto out_no_vpd;
15232 
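	/* Locate the read-only VPD section.  From it we extract an OEM
	 * firmware version (keyword V0, only when the MFR_ID field is
	 * "1028", Dell's PCI vendor ID in ASCII) and the board part
	 * number.
	 */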
15233 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15234 	if (i < 0)
15235 		goto out_not_found;
15236 
15237 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15238 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15239 	i += PCI_VPD_LRDT_TAG_SIZE;
15240 
15241 	if (block_end > vpdlen)
15242 		goto out_not_found;
15243 
15244 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15245 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15246 	if (j > 0) {
15247 		len = pci_vpd_info_field_size(&vpd_data[j]);
15248 
15249 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15250 		if (j + len > block_end || len != 4 ||
15251 		    memcmp(&vpd_data[j], "1028", 4))
15252 			goto partno;
15253 
15254 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15255 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15256 		if (j < 0)
15257 			goto partno;
15258 
15259 		len = pci_vpd_info_field_size(&vpd_data[j]);
15260 
15261 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15262 		if (j + len > block_end)
15263 			goto partno;
15264 
15265 		if (len >= sizeof(tp->fw_ver))
15266 			len = sizeof(tp->fw_ver) - 1;
15267 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15268 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15269 			 &vpd_data[j]);
15270 	}
15271 
15272 partno:
15273 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15274 				      PCI_VPD_RO_KEYWORD_PARTNO);
15275 	if (i < 0)
15276 		goto out_not_found;
15277 
15278 	len = pci_vpd_info_field_size(&vpd_data[i]);
15279 
15280 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15281 	if (len > TG3_BPN_SIZE ||
15282 	    (len + i) > vpdlen)
15283 		goto out_not_found;
15284 
15285 	memcpy(tp->board_part_number, &vpd_data[i], len);
15286 
15287 out_not_found:
15288 	kfree(vpd_data);
15289 	if (tp->board_part_number[0])
15290 		return;
15291 
15292 out_no_vpd:
15293 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15294 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15295 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15296 			strcpy(tp->board_part_number, "BCM5717");
15297 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15298 			strcpy(tp->board_part_number, "BCM5718");
15299 		else
15300 			goto nomatch;
15301 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15302 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15303 			strcpy(tp->board_part_number, "BCM57780");
15304 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15305 			strcpy(tp->board_part_number, "BCM57760");
15306 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15307 			strcpy(tp->board_part_number, "BCM57790");
15308 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15309 			strcpy(tp->board_part_number, "BCM57788");
15310 		else
15311 			goto nomatch;
15312 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15313 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15314 			strcpy(tp->board_part_number, "BCM57761");
15315 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15316 			strcpy(tp->board_part_number, "BCM57765");
15317 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15318 			strcpy(tp->board_part_number, "BCM57781");
15319 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15320 			strcpy(tp->board_part_number, "BCM57785");
15321 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15322 			strcpy(tp->board_part_number, "BCM57791");
15323 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15324 			strcpy(tp->board_part_number, "BCM57795");
15325 		else
15326 			goto nomatch;
15327 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15328 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15329 			strcpy(tp->board_part_number, "BCM57762");
15330 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15331 			strcpy(tp->board_part_number, "BCM57766");
15332 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15333 			strcpy(tp->board_part_number, "BCM57782");
15334 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15335 			strcpy(tp->board_part_number, "BCM57786");
15336 		else
15337 			goto nomatch;
15338 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15339 		strcpy(tp->board_part_number, "BCM95906");
15340 	} else {
15341 nomatch:
15342 		strcpy(tp->board_part_number, "none");
15343 	}
15344 }
15345 
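/* A firmware image at @offset is considered valid if the top six bits
 * of its first word match the 0x0c000000 signature and the following
 * word is zero.
 */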
15346 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15347 {
15348 	u32 val;
15349 
15350 	if (tg3_nvram_read(tp, offset, &val) ||
15351 	    (val & 0xfc000000) != 0x0c000000 ||
15352 	    tg3_nvram_read(tp, offset + 4, &val) ||
15353 	    val != 0)
15354 		return 0;
15355 
15356 	return 1;
15357 }
15358 
15359 static void tg3_read_bc_ver(struct tg3 *tp)
15360 {
15361 	u32 val, offset, start, ver_offset;
15362 	int i, dst_off;
15363 	bool newver = false;
15364 
15365 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15366 	    tg3_nvram_read(tp, 0x4, &start))
15367 		return;
15368 
15369 	offset = tg3_nvram_logical_addr(tp, offset);
15370 
15371 	if (tg3_nvram_read(tp, offset, &val))
15372 		return;
15373 
15374 	if ((val & 0xfc000000) == 0x0c000000) {
15375 		if (tg3_nvram_read(tp, offset + 4, &val))
15376 			return;
15377 
15378 		if (val == 0)
15379 			newver = true;
15380 	}
15381 
15382 	dst_off = strlen(tp->fw_ver);
15383 
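	/* Newer bootcode images store a 16-byte version string at an
	 * offset recorded in the image header; older images only encode
	 * major/minor numbers at a fixed NVRAM location.
	 */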
15384 	if (newver) {
15385 		if (TG3_VER_SIZE - dst_off < 16 ||
15386 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15387 			return;
15388 
15389 		offset = offset + ver_offset - start;
15390 		for (i = 0; i < 16; i += 4) {
15391 			__be32 v;
15392 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15393 				return;
15394 
15395 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15396 		}
15397 	} else {
15398 		u32 major, minor;
15399 
15400 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15401 			return;
15402 
15403 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15404 			TG3_NVM_BCVER_MAJSFT;
15405 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15406 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15407 			 "v%d.%02d", major, minor);
15408 	}
15409 }
15410 
15411 static void tg3_read_hwsb_ver(struct tg3 *tp)
15412 {
15413 	u32 val, major, minor;
15414 
15415 	/* Use native endian representation */
15416 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15417 		return;
15418 
15419 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15420 		TG3_NVM_HWSB_CFG1_MAJSFT;
15421 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15422 		TG3_NVM_HWSB_CFG1_MINSFT;
15423 
15424 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15425 }
15426 
15427 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15428 {
15429 	u32 offset, major, minor, build;
15430 
15431 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15432 
15433 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15434 		return;
15435 
15436 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15437 	case TG3_EEPROM_SB_REVISION_0:
15438 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15439 		break;
15440 	case TG3_EEPROM_SB_REVISION_2:
15441 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15442 		break;
15443 	case TG3_EEPROM_SB_REVISION_3:
15444 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15445 		break;
15446 	case TG3_EEPROM_SB_REVISION_4:
15447 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15448 		break;
15449 	case TG3_EEPROM_SB_REVISION_5:
15450 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15451 		break;
15452 	case TG3_EEPROM_SB_REVISION_6:
15453 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15454 		break;
15455 	default:
15456 		return;
15457 	}
15458 
15459 	if (tg3_nvram_read(tp, offset, &val))
15460 		return;
15461 
15462 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15463 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15464 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15465 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15466 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15467 
15468 	if (minor > 99 || build > 26)
15469 		return;
15470 
15471 	offset = strlen(tp->fw_ver);
15472 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15473 		 " v%d.%02d", major, minor);
15474 
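	/* Builds 1 through 26 are appended as a single letter suffix,
	 * 'a' through 'z'.
	 */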
15475 	if (build > 0) {
15476 		offset = strlen(tp->fw_ver);
15477 		if (offset < TG3_VER_SIZE - 1)
15478 			tp->fw_ver[offset] = 'a' + build - 1;
15479 	}
15480 }
15481 
15482 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15483 {
15484 	u32 val, offset, start;
15485 	int i, vlen;
15486 
15487 	for (offset = TG3_NVM_DIR_START;
15488 	     offset < TG3_NVM_DIR_END;
15489 	     offset += TG3_NVM_DIRENT_SIZE) {
15490 		if (tg3_nvram_read(tp, offset, &val))
15491 			return;
15492 
15493 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15494 			break;
15495 	}
15496 
15497 	if (offset == TG3_NVM_DIR_END)
15498 		return;
15499 
15500 	if (!tg3_flag(tp, 5705_PLUS))
15501 		start = 0x08000000;
15502 	else if (tg3_nvram_read(tp, offset - 4, &start))
15503 		return;
15504 
15505 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15506 	    !tg3_fw_img_is_valid(tp, offset) ||
15507 	    tg3_nvram_read(tp, offset + 8, &val))
15508 		return;
15509 
15510 	offset += val - start;
15511 
15512 	vlen = strlen(tp->fw_ver);
15513 
15514 	tp->fw_ver[vlen++] = ',';
15515 	tp->fw_ver[vlen++] = ' ';
15516 
15517 	for (i = 0; i < 4; i++) {
15518 		__be32 v;
15519 		if (tg3_nvram_read_be32(tp, offset, &v))
15520 			return;
15521 
15522 		offset += sizeof(v);
15523 
15524 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15525 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15526 			break;
15527 		}
15528 
15529 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15530 		vlen += sizeof(v);
15531 	}
15532 }
15533 
15534 static void tg3_probe_ncsi(struct tg3 *tp)
15535 {
15536 	u32 apedata;
15537 
15538 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15539 	if (apedata != APE_SEG_SIG_MAGIC)
15540 		return;
15541 
15542 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15543 	if (!(apedata & APE_FW_STATUS_READY))
15544 		return;
15545 
15546 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15547 		tg3_flag_set(tp, APE_HAS_NCSI);
15548 }
15549 
15550 static void tg3_read_dash_ver(struct tg3 *tp)
15551 {
15552 	int vlen;
15553 	u32 apedata;
15554 	char *fwtype;
15555 
15556 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15557 
15558 	if (tg3_flag(tp, APE_HAS_NCSI))
15559 		fwtype = "NCSI";
15560 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15561 		fwtype = "SMASH";
15562 	else
15563 		fwtype = "DASH";
15564 
15565 	vlen = strlen(tp->fw_ver);
15566 
15567 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15568 		 fwtype,
15569 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15570 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15571 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15572 		 (apedata & APE_FW_VERSION_BLDMSK));
15573 }
15574 
15575 static void tg3_read_otp_ver(struct tg3 *tp)
15576 {
15577 	u32 val, val2;
15578 
15579 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15580 		return;
15581 
15582 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15583 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15584 	    TG3_OTP_MAGIC0_VALID(val)) {
15585 		u64 val64 = (u64) val << 32 | val2;
15586 		u32 ver = 0;
15587 		int i, vlen;
15588 
15589 		for (i = 0; i < 7; i++) {
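		/* Scan up to seven bytes from the low end; the version is
		 * the last non-zero byte before the first zero byte.
		 */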
15590 			if ((val64 & 0xff) == 0)
15591 				break;
15592 			ver = val64 & 0xff;
15593 			val64 >>= 8;
15594 		}
15595 		vlen = strlen(tp->fw_ver);
15596 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15597 	}
15598 }
15599 
15600 static void tg3_read_fw_ver(struct tg3 *tp)
15601 {
15602 	u32 val;
15603 	bool vpd_vers = false;
15604 
15605 	if (tp->fw_ver[0] != 0)
15606 		vpd_vers = true;
15607 
15608 	if (tg3_flag(tp, NO_NVRAM)) {
15609 		strcat(tp->fw_ver, "sb");
15610 		tg3_read_otp_ver(tp);
15611 		return;
15612 	}
15613 
15614 	if (tg3_nvram_read(tp, 0, &val))
15615 		return;
15616 
15617 	if (val == TG3_EEPROM_MAGIC)
15618 		tg3_read_bc_ver(tp);
15619 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15620 		tg3_read_sb_ver(tp, val);
15621 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15622 		tg3_read_hwsb_ver(tp);
15623 
15624 	if (tg3_flag(tp, ENABLE_ASF)) {
15625 		if (tg3_flag(tp, ENABLE_APE)) {
15626 			tg3_probe_ncsi(tp);
15627 			if (!vpd_vers)
15628 				tg3_read_dash_ver(tp);
15629 		} else if (!vpd_vers) {
15630 			tg3_read_mgmtfw_ver(tp);
15631 		}
15632 	}
15633 
15634 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15635 }
15636 
15637 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15638 {
15639 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15640 		return TG3_RX_RET_MAX_SIZE_5717;
15641 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15642 		return TG3_RX_RET_MAX_SIZE_5700;
15643 	else
15644 		return TG3_RX_RET_MAX_SIZE_5705;
15645 }
15646 
15647 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15648 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15649 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15650 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15651 	{ },
15652 };
15653 
15654 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15655 {
15656 	struct pci_dev *peer;
15657 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15658 
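	/* Both ports of a dual-port device sit in the same slot; mask
	 * off the function bits and probe the other functions for the
	 * mate.
	 */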
15659 	for (func = 0; func < 8; func++) {
15660 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15661 		if (peer && peer != tp->pdev)
15662 			break;
15663 		pci_dev_put(peer);
15664 	}
	/* The 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
15668 	if (!peer) {
15669 		peer = tp->pdev;
15670 		return peer;
15671 	}
15672 
	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
15677 	pci_dev_put(peer);
15678 
15679 	return peer;
15680 }
15681 
15682 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15683 {
15684 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15685 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15686 		u32 reg;
15687 
15688 		/* All devices that use the alternate
15689 		 * ASIC REV location have a CPMU.
15690 		 */
15691 		tg3_flag_set(tp, CPMU_PRESENT);
15692 
15693 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15694 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15695 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15696 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15697 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15698 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15699 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15700 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15701 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15702 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15703 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15704 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15705 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15706 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15707 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15708 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15709 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15710 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15711 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15712 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15713 		else
15714 			reg = TG3PCI_PRODID_ASICREV;
15715 
15716 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15717 	}
15718 
15719 	/* Wrong chip ID in 5752 A0. This code can be removed later
15720 	 * as A0 is not in production.
15721 	 */
15722 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15723 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15724 
15725 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15726 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15727 
15728 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15729 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15730 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15731 		tg3_flag_set(tp, 5717_PLUS);
15732 
15733 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15734 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15735 		tg3_flag_set(tp, 57765_CLASS);
15736 
15737 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15738 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15739 		tg3_flag_set(tp, 57765_PLUS);
15740 
15741 	/* Intentionally exclude ASIC_REV_5906 */
15742 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15743 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15744 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15745 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15746 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15747 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15748 	    tg3_flag(tp, 57765_PLUS))
15749 		tg3_flag_set(tp, 5755_PLUS);
15750 
15751 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15752 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15753 		tg3_flag_set(tp, 5780_CLASS);
15754 
15755 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15756 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15757 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15758 	    tg3_flag(tp, 5755_PLUS) ||
15759 	    tg3_flag(tp, 5780_CLASS))
15760 		tg3_flag_set(tp, 5750_PLUS);
15761 
15762 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15763 	    tg3_flag(tp, 5750_PLUS))
15764 		tg3_flag_set(tp, 5705_PLUS);
15765 }
15766 
15767 static bool tg3_10_100_only_device(struct tg3 *tp,
15768 				   const struct pci_device_id *ent)
15769 {
15770 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15771 
15772 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15773 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15774 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15775 		return true;
15776 
15777 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15778 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15779 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15780 				return true;
15781 		} else {
15782 			return true;
15783 		}
15784 	}
15785 
15786 	return false;
15787 }
15788 
15789 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15790 {
15791 	u32 misc_ctrl_reg;
15792 	u32 pci_state_reg, grc_misc_cfg;
15793 	u32 val;
15794 	u16 pci_cmd;
15795 	int err;
15796 
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never gets
	 * used.  This seems to suggest that the workaround is insufficient.
	 */
15804 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15805 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15806 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15807 
15808 	/* Important! -- Make sure register accesses are byteswapped
15809 	 * correctly.  Also, for those chips that require it, make
15810 	 * sure that indirect register accesses are enabled before
15811 	 * the first operation.
15812 	 */
15813 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15814 			      &misc_ctrl_reg);
15815 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15816 			       MISC_HOST_CTRL_CHIPREV);
15817 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15818 			       tp->misc_host_ctrl);
15819 
15820 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15821 
15822 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15823 	 * we need to disable memory and use config. cycles
15824 	 * only to access all registers. The 5702/03 chips
15825 	 * can mistakenly decode the special cycles from the
15826 	 * ICH chipsets as memory write cycles, causing corruption
15827 	 * of register and memory space. Only certain ICH bridges
15828 	 * will drive special cycles with non-zero data during the
15829 	 * address phase which can fall within the 5703's address
15830 	 * range. This is not an ICH bug as the PCI spec allows
15831 	 * non-zero address during special cycles. However, only
15832 	 * these ICH bridges are known to drive non-zero addresses
15833 	 * during special cycles.
15834 	 *
15835 	 * Since special cycles do not cross PCI bridges, we only
15836 	 * enable this workaround if the 5703 is on the secondary
15837 	 * bus of these ICH bridges.
15838 	 */
15839 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15840 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15841 		static struct tg3_dev_id {
15842 			u32	vendor;
15843 			u32	device;
15844 			u32	rev;
15845 		} ich_chipsets[] = {
15846 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15847 			  PCI_ANY_ID },
15848 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15849 			  PCI_ANY_ID },
15850 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15851 			  0xa },
15852 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15853 			  PCI_ANY_ID },
15854 			{ },
15855 		};
15856 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15857 		struct pci_dev *bridge = NULL;
15858 
15859 		while (pci_id->vendor != 0) {
15860 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15861 						bridge);
15862 			if (!bridge) {
15863 				pci_id++;
15864 				continue;
15865 			}
15866 			if (pci_id->rev != PCI_ANY_ID) {
15867 				if (bridge->revision > pci_id->rev)
15868 					continue;
15869 			}
15870 			if (bridge->subordinate &&
15871 			    (bridge->subordinate->number ==
15872 			     tp->pdev->bus->number)) {
15873 				tg3_flag_set(tp, ICH_WORKAROUND);
15874 				pci_dev_put(bridge);
15875 				break;
15876 			}
15877 		}
15878 	}
15879 
15880 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15881 		static struct tg3_dev_id {
15882 			u32	vendor;
15883 			u32	device;
15884 		} bridge_chipsets[] = {
15885 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15886 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15887 			{ },
15888 		};
15889 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15890 		struct pci_dev *bridge = NULL;
15891 
15892 		while (pci_id->vendor != 0) {
15893 			bridge = pci_get_device(pci_id->vendor,
15894 						pci_id->device,
15895 						bridge);
15896 			if (!bridge) {
15897 				pci_id++;
15898 				continue;
15899 			}
15900 			if (bridge->subordinate &&
15901 			    (bridge->subordinate->number <=
15902 			     tp->pdev->bus->number) &&
15903 			    (bridge->subordinate->busn_res.end >=
15904 			     tp->pdev->bus->number)) {
15905 				tg3_flag_set(tp, 5701_DMA_BUG);
15906 				pci_dev_put(bridge);
15907 				break;
15908 			}
15909 		}
15910 	}
15911 
	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses wider than 40 bits.  This bridge may have other
	 * 57xx devices behind it, in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the
	 * 40-bit DMA workaround.
	 */
15918 	if (tg3_flag(tp, 5780_CLASS)) {
15919 		tg3_flag_set(tp, 40BIT_DMA_BUG);
15920 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15921 	} else {
15922 		struct pci_dev *bridge = NULL;
15923 
15924 		do {
15925 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15926 						PCI_DEVICE_ID_SERVERWORKS_EPB,
15927 						bridge);
15928 			if (bridge && bridge->subordinate &&
15929 			    (bridge->subordinate->number <=
15930 			     tp->pdev->bus->number) &&
15931 			    (bridge->subordinate->busn_res.end >=
15932 			     tp->pdev->bus->number)) {
15933 				tg3_flag_set(tp, 40BIT_DMA_BUG);
15934 				pci_dev_put(bridge);
15935 				break;
15936 			}
15937 		} while (bridge);
15938 	}
15939 
15940 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15941 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15942 		tp->pdev_peer = tg3_find_peer(tp);
15943 
15944 	/* Determine TSO capabilities */
15945 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15946 		; /* Do nothing. HW bug. */
15947 	else if (tg3_flag(tp, 57765_PLUS))
15948 		tg3_flag_set(tp, HW_TSO_3);
15949 	else if (tg3_flag(tp, 5755_PLUS) ||
15950 		 tg3_asic_rev(tp) == ASIC_REV_5906)
15951 		tg3_flag_set(tp, HW_TSO_2);
15952 	else if (tg3_flag(tp, 5750_PLUS)) {
15953 		tg3_flag_set(tp, HW_TSO_1);
15954 		tg3_flag_set(tp, TSO_BUG);
15955 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15956 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15957 			tg3_flag_clear(tp, TSO_BUG);
15958 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15959 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
15960 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15961 		tg3_flag_set(tp, FW_TSO);
15962 		tg3_flag_set(tp, TSO_BUG);
15963 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
15964 			tp->fw_needed = FIRMWARE_TG3TSO5;
15965 		else
15966 			tp->fw_needed = FIRMWARE_TG3TSO;
15967 	}
15968 
15969 	/* Selectively allow TSO based on operating conditions */
15970 	if (tg3_flag(tp, HW_TSO_1) ||
15971 	    tg3_flag(tp, HW_TSO_2) ||
15972 	    tg3_flag(tp, HW_TSO_3) ||
15973 	    tg3_flag(tp, FW_TSO)) {
15974 		/* For firmware TSO, assume ASF is disabled.
15975 		 * We'll disable TSO later if we discover ASF
15976 		 * is enabled in tg3_get_eeprom_hw_cfg().
15977 		 */
15978 		tg3_flag_set(tp, TSO_CAPABLE);
15979 	} else {
15980 		tg3_flag_clear(tp, TSO_CAPABLE);
15981 		tg3_flag_clear(tp, TSO_BUG);
15982 		tp->fw_needed = NULL;
15983 	}
15984 
15985 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15986 		tp->fw_needed = FIRMWARE_TG3;
15987 
15988 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
15989 		tp->fw_needed = FIRMWARE_TG357766;
15990 
15991 	tp->irq_max = 1;
15992 
15993 	if (tg3_flag(tp, 5750_PLUS)) {
15994 		tg3_flag_set(tp, SUPPORT_MSI);
15995 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15996 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15997 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15998 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15999 		     tp->pdev_peer == tp->pdev))
16000 			tg3_flag_clear(tp, SUPPORT_MSI);
16001 
16002 		if (tg3_flag(tp, 5755_PLUS) ||
16003 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16004 			tg3_flag_set(tp, 1SHOT_MSI);
16005 		}
16006 
16007 		if (tg3_flag(tp, 57765_PLUS)) {
16008 			tg3_flag_set(tp, SUPPORT_MSIX);
16009 			tp->irq_max = TG3_IRQ_MAX_VECS;
16010 		}
16011 	}
16012 
16013 	tp->txq_max = 1;
16014 	tp->rxq_max = 1;
16015 	if (tp->irq_max > 1) {
16016 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16017 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16018 
16019 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16020 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16021 			tp->txq_max = tp->irq_max - 1;
16022 	}
16023 
16024 	if (tg3_flag(tp, 5755_PLUS) ||
16025 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16026 		tg3_flag_set(tp, SHORT_DMA_BUG);
16027 
16028 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16029 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16030 
16031 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16032 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16033 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16034 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16035 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16036 
16037 	if (tg3_flag(tp, 57765_PLUS) &&
16038 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16039 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16040 
16041 	if (!tg3_flag(tp, 5705_PLUS) ||
16042 	    tg3_flag(tp, 5780_CLASS) ||
16043 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16044 		tg3_flag_set(tp, JUMBO_CAPABLE);
16045 
16046 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16047 			      &pci_state_reg);
16048 
16049 	if (pci_is_pcie(tp->pdev)) {
16050 		u16 lnkctl;
16051 
16052 		tg3_flag_set(tp, PCI_EXPRESS);
16053 
16054 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16055 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16056 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16057 				tg3_flag_clear(tp, HW_TSO_2);
16058 				tg3_flag_clear(tp, TSO_CAPABLE);
16059 			}
16060 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16061 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16062 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16063 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16064 				tg3_flag_set(tp, CLKREQ_BUG);
16065 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16066 			tg3_flag_set(tp, L1PLLPD_EN);
16067 		}
16068 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16069 		/* BCM5785 devices are effectively PCIe devices, and should
16070 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16071 		 * section.
16072 		 */
16073 		tg3_flag_set(tp, PCI_EXPRESS);
16074 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16075 		   tg3_flag(tp, 5780_CLASS)) {
16076 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16077 		if (!tp->pcix_cap) {
16078 			dev_err(&tp->pdev->dev,
16079 				"Cannot find PCI-X capability, aborting\n");
16080 			return -EIO;
16081 		}
16082 
16083 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16084 			tg3_flag_set(tp, PCIX_MODE);
16085 	}
16086 
	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering of mailbox register writes by the host
	 * controller can cause serious problems.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
16093 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16094 	    !tg3_flag(tp, PCI_EXPRESS))
16095 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16096 
16097 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16098 			     &tp->pci_cacheline_sz);
16099 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16100 			     &tp->pci_lat_timer);
16101 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16102 	    tp->pci_lat_timer < 64) {
16103 		tp->pci_lat_timer = 64;
16104 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16105 				      tp->pci_lat_timer);
16106 	}
16107 
16108 	/* Important! -- It is critical that the PCI-X hw workaround
16109 	 * situation is decided before the first MMIO register access.
16110 	 */
16111 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
		 */
16115 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16116 
16117 		/* If we are in PCI-X mode, enable register write workaround.
16118 		 *
16119 		 * The workaround is to use indirect register accesses
16120 		 * for all chip writes not to mailbox registers.
16121 		 */
16122 		if (tg3_flag(tp, PCIX_MODE)) {
16123 			u32 pm_reg;
16124 
16125 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16126 
			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
16131 			pci_read_config_dword(tp->pdev,
16132 					      tp->pm_cap + PCI_PM_CTRL,
16133 					      &pm_reg);
16134 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16135 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16136 			pci_write_config_dword(tp->pdev,
16137 					       tp->pm_cap + PCI_PM_CTRL,
16138 					       pm_reg);
16139 
16140 			/* Also, force SERR#/PERR# in PCI command. */
16141 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16142 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16143 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16144 		}
16145 	}
16146 
16147 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16148 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16149 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16150 		tg3_flag_set(tp, PCI_32BIT);
16151 
16152 	/* Chip-specific fixup from Broadcom driver */
16153 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16154 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16155 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16156 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16157 	}
16158 
16159 	/* Default fast path register access methods */
16160 	tp->read32 = tg3_read32;
16161 	tp->write32 = tg3_write32;
16162 	tp->read32_mbox = tg3_read32;
16163 	tp->write32_mbox = tg3_write32;
16164 	tp->write32_tx_mbox = tg3_write32;
16165 	tp->write32_rx_mbox = tg3_write32;
16166 
16167 	/* Various workaround register access methods */
16168 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16169 		tp->write32 = tg3_write_indirect_reg32;
16170 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16171 		 (tg3_flag(tp, PCI_EXPRESS) &&
16172 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back-to-back register writes can cause problems on these
		 * chips; the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
16180 		tp->write32 = tg3_write_flush_reg32;
16181 	}
16182 
16183 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16184 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16185 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16186 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16187 	}
16188 
16189 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16190 		tp->read32 = tg3_read_indirect_reg32;
16191 		tp->write32 = tg3_write_indirect_reg32;
16192 		tp->read32_mbox = tg3_read_indirect_mbox;
16193 		tp->write32_mbox = tg3_write_indirect_mbox;
16194 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16195 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16196 
16197 		iounmap(tp->regs);
16198 		tp->regs = NULL;
16199 
16200 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16201 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16202 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16203 	}
16204 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16205 		tp->read32_mbox = tg3_read32_mbox_5906;
16206 		tp->write32_mbox = tg3_write32_mbox_5906;
16207 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16208 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16209 	}
16210 
16211 	if (tp->write32 == tg3_write_indirect_reg32 ||
16212 	    (tg3_flag(tp, PCIX_MODE) &&
16213 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16214 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16215 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16216 
16217 	/* The memory arbiter has to be enabled in order for SRAM accesses
16218 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16219 	 * sure it is enabled, but other entities such as system netboot
16220 	 * code might disable it.
16221 	 */
16222 	val = tr32(MEMARB_MODE);
16223 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16224 
16225 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16226 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16227 	    tg3_flag(tp, 5780_CLASS)) {
16228 		if (tg3_flag(tp, PCIX_MODE)) {
16229 			pci_read_config_dword(tp->pdev,
16230 					      tp->pcix_cap + PCI_X_STATUS,
16231 					      &val);
16232 			tp->pci_fn = val & 0x7;
16233 		}
16234 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16235 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16236 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
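		/* On these chips the PCI function number comes from the
		 * CPMU status word: the NIC SRAM shadow is used when it
		 * carries the expected signature, otherwise the CPMU
		 * status register is read directly.
		 */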
16237 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16238 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16239 			val = tr32(TG3_CPMU_STATUS);
16240 
16241 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16242 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16243 		else
16244 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16245 				     TG3_CPMU_STATUS_FSHFT_5719;
16246 	}
16247 
16248 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16249 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16250 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16251 	}
16252 
	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When TG3_FLAG_EEPROM_WRITE_PROT is set, it means that GPIO1
	 * is used for eeprom write protect, which also implies a LOM
	 * where GPIOs are not used to switch power.
	 */
16261 	tg3_get_eeprom_hw_cfg(tp);
16262 
16263 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16264 		tg3_flag_clear(tp, TSO_CAPABLE);
16265 		tg3_flag_clear(tp, TSO_BUG);
16266 		tp->fw_needed = NULL;
16267 	}
16268 
16269 	if (tg3_flag(tp, ENABLE_APE)) {
16270 		/* Allow reads and writes to the
16271 		 * APE register and memory space.
16272 		 */
16273 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16274 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16275 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16276 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16277 				       pci_state_reg);
16278 
16279 		tg3_ape_lock_init(tp);
16280 	}
16281 
16282 	/* Set up tp->grc_local_ctrl before calling
16283 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16284 	 * will bring 5700's external PHY out of reset.
16285 	 * It is also used as eeprom write protect on LOMs.
16286 	 */
16287 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16288 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16289 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16290 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16291 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16292 	/* Unused GPIO3 must be driven as output on 5752 because there
16293 	 * are no pull-up resistors on unused GPIO pins.
16294 	 */
16295 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16296 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16297 
16298 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16299 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16300 	    tg3_flag(tp, 57765_CLASS))
16301 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16302 
16303 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16304 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16305 		/* Turn off the debug UART. */
16306 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16307 		if (tg3_flag(tp, IS_NIC))
16308 			/* Keep VMain power. */
16309 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16310 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16311 	}
16312 
16313 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16314 		tp->grc_local_ctrl |=
16315 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16316 
16317 	/* Switch out of Vaux if it is a NIC */
16318 	tg3_pwrsrc_switch_to_vmain(tp);
16319 
16320 	/* Derive initial jumbo mode from MTU assigned in
16321 	 * ether_setup() via the alloc_etherdev() call
16322 	 */
16323 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16324 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16325 
16326 	/* Determine WakeOnLan speed to use. */
16327 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16328 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16329 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16330 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16331 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16332 	} else {
16333 		tg3_flag_set(tp, WOL_SPEED_100MB);
16334 	}
16335 
16336 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16337 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16338 
16339 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16340 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16341 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16342 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16343 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16344 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16345 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16346 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16347 
16348 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16349 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16350 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16351 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16352 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16353 
16354 	if (tg3_flag(tp, 5705_PLUS) &&
16355 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16356 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16357 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16358 	    !tg3_flag(tp, 57765_PLUS)) {
16359 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16360 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16361 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16362 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16363 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16364 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16365 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16366 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16367 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16368 		} else
16369 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16370 	}
16371 
16372 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16373 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16374 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16375 		if (tp->phy_otp == 0)
16376 			tp->phy_otp = TG3_OTP_DEFAULT;
16377 	}
16378 
16379 	if (tg3_flag(tp, CPMU_PRESENT))
16380 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16381 	else
16382 		tp->mi_mode = MAC_MI_MODE_BASE;
16383 
16384 	tp->coalesce_mode = 0;
16385 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16386 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16387 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16388 
16389 	/* Set these bits to enable statistics workaround. */
16390 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16391 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16392 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16393 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16394 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16395 	}
16396 
16397 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16398 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16399 		tg3_flag_set(tp, USE_PHYLIB);
16400 
16401 	err = tg3_mdio_init(tp);
16402 	if (err)
16403 		return err;
16404 
16405 	/* Initialize data/descriptor byte/word swapping. */
16406 	val = tr32(GRC_MODE);
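	/* Keep only the swap/stackup bits that may already be set in the
	 * register; everything else comes from tp->grc_mode below.
	 */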
16407 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16408 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16409 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16410 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16411 			GRC_MODE_B2HRX_ENABLE |
16412 			GRC_MODE_HTX2B_ENABLE |
16413 			GRC_MODE_HOST_STACKUP);
16414 	else
16415 		val &= GRC_MODE_HOST_STACKUP;
16416 
16417 	tw32(GRC_MODE, val | tp->grc_mode);
16418 
16419 	tg3_switch_clocks(tp);
16420 
16421 	/* Clear this out for sanity. */
16422 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16423 
16424 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16425 			      &pci_state_reg);
16426 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16427 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16428 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16429 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16430 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16431 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16432 			void __iomem *sram_base;
16433 
16434 			/* Write some dummy words into the SRAM status block
16435 			 * area and see if they read back correctly.  If the return
16436 			 * value is bad, force enable the PCIX workaround.
16437 			 */
16438 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16439 
16440 			writel(0x00000000, sram_base);
16441 			writel(0x00000000, sram_base + 4);
16442 			writel(0xffffffff, sram_base + 4);
16443 			if (readl(sram_base) != 0x00000000)
16444 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16445 		}
16446 	}
16447 
16448 	udelay(50);
16449 	tg3_nvram_init(tp);
16450 
16451 	/* If the device has NVRAM, there is no need to load patch firmware */
16452 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16453 	    !tg3_flag(tp, NO_NVRAM))
16454 		tp->fw_needed = NULL;
16455 
16456 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16457 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16458 
16459 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16460 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16461 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16462 		tg3_flag_set(tp, IS_5788);
16463 
16464 	if (!tg3_flag(tp, IS_5788) &&
16465 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16466 		tg3_flag_set(tp, TAGGED_STATUS);
16467 	if (tg3_flag(tp, TAGGED_STATUS)) {
16468 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16469 				      HOSTCC_MODE_CLRTICK_TXBD);
16470 
16471 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16472 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16473 				       tp->misc_host_ctrl);
16474 	}
16475 
16476 	/* Preserve the APE MAC_MODE bits */
16477 	if (tg3_flag(tp, ENABLE_APE))
16478 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16479 	else
16480 		tp->mac_mode = 0;
16481 
16482 	if (tg3_10_100_only_device(tp, ent))
16483 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16484 
16485 	err = tg3_phy_probe(tp);
16486 	if (err) {
16487 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16488 		/* ... but do not return immediately ... */
16489 		tg3_mdio_fini(tp);
16490 	}
16491 
16492 	tg3_read_vpd(tp);
16493 	tg3_read_fw_ver(tp);
16494 
16495 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16496 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16497 	} else {
16498 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16499 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16500 		else
16501 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16502 	}
16503 
16504 	/* 5700 {AX,BX} chips have a broken status block link
16505 	 * change bit implementation, so we must use the
16506 	 * status register in those cases.
16507 	 */
16508 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16509 		tg3_flag_set(tp, USE_LINKCHG_REG);
16510 	else
16511 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16512 
16513 	/* The led_ctrl is set during tg3_phy_probe; here we might
16514 	 * have to force the link status polling mechanism based
16515 	 * upon subsystem IDs.
16516 	 */
16517 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16518 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16519 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16520 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16521 		tg3_flag_set(tp, USE_LINKCHG_REG);
16522 	}
16523 
16524 	/* For all SERDES we poll the MAC status register. */
16525 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16526 		tg3_flag_set(tp, POLL_SERDES);
16527 	else
16528 		tg3_flag_clear(tp, POLL_SERDES);
16529 
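	/* Reserve headroom so the IP header lands on a 4-byte boundary.
	 * The 5701 in PCI-X mode cannot DMA to 2-byte-offset buffers, so
	 * drop NET_IP_ALIGN there and, on hosts without efficient
	 * unaligned access, copy every received packet instead.
	 */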
16530 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16531 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16532 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16533 	    tg3_flag(tp, PCIX_MODE)) {
16534 		tp->rx_offset = NET_SKB_PAD;
16535 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16536 		tp->rx_copy_thresh = ~(u16)0;
16537 #endif
16538 	}
16539 
16540 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16541 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16542 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16543 
16544 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16545 
16546 	/* Increment the rx prod index on the rx std ring by at most
16547 	 * 8 for these chips to work around a hw erratum.
16548 	 */
16549 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16550 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16551 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16552 		tp->rx_std_max_post = 8;
16553 
16554 	if (tg3_flag(tp, ASPM_WORKAROUND))
16555 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16556 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16557 
16558 	return err;
16559 }
16560 
16561 #ifdef CONFIG_SPARC
16562 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16563 {
16564 	struct net_device *dev = tp->dev;
16565 	struct pci_dev *pdev = tp->pdev;
16566 	struct device_node *dp = pci_device_to_OF_node(pdev);
16567 	const unsigned char *addr;
16568 	int len;
16569 
16570 	addr = of_get_property(dp, "local-mac-address", &len);
16571 	if (addr && len == ETH_ALEN) {
16572 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16573 		return 0;
16574 	}
16575 	return -ENODEV;
16576 }
16577 
16578 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16579 {
16580 	struct net_device *dev = tp->dev;
16581 
16582 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16583 	return 0;
16584 }
16585 #endif
16586 
16587 static int tg3_get_device_address(struct tg3 *tp)
16588 {
16589 	struct net_device *dev = tp->dev;
16590 	u32 hi, lo, mac_offset;
16591 	int addr_ok = 0;
16592 	int err;
16593 
16594 #ifdef CONFIG_SPARC
16595 	if (!tg3_get_macaddr_sparc(tp))
16596 		return 0;
16597 #endif
16598 
16599 	if (tg3_flag(tp, IS_SSB_CORE)) {
16600 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16601 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16602 			return 0;
16603 	}
16604 
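	/* 0x7c is the usual NVRAM offset of the MAC address; dual-MAC and
	 * multi-function chips keep additional addresses at the offsets
	 * selected below.
	 */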
16605 	mac_offset = 0x7c;
16606 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16607 	    tg3_flag(tp, 5780_CLASS)) {
16608 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16609 			mac_offset = 0xcc;
16610 		if (tg3_nvram_lock(tp))
16611 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16612 		else
16613 			tg3_nvram_unlock(tp);
16614 	} else if (tg3_flag(tp, 5717_PLUS)) {
16615 		if (tp->pci_fn & 1)
16616 			mac_offset = 0xcc;
16617 		if (tp->pci_fn > 1)
16618 			mac_offset += 0x18c;
16619 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16620 		mac_offset = 0x10;
16621 
16622 	/* First try the MAC address mailbox (tagged with the 0x484b "HK" signature). */
16623 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16624 	if ((hi >> 16) == 0x484b) {
16625 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16626 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16627 
16628 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16629 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16630 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16631 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16632 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16633 
16634 		/* Some old bootcode may report a 0 MAC address in SRAM */
16635 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16636 	}
16637 	if (!addr_ok) {
16638 		/* Next, try NVRAM. */
16639 		if (!tg3_flag(tp, NO_NVRAM) &&
16640 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16641 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16642 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16643 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16644 		}
16645 		/* Finally just fetch it out of the MAC control regs. */
16646 		else {
16647 			hi = tr32(MAC_ADDR_0_HIGH);
16648 			lo = tr32(MAC_ADDR_0_LOW);
16649 
16650 			dev->dev_addr[5] = lo & 0xff;
16651 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16652 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16653 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16654 			dev->dev_addr[1] = hi & 0xff;
16655 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16656 		}
16657 	}
16658 
16659 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16660 #ifdef CONFIG_SPARC
16661 		if (!tg3_get_default_macaddr_sparc(tp))
16662 			return 0;
16663 #endif
16664 		return -EINVAL;
16665 	}
16666 	return 0;
16667 }
16668 
16669 #define BOUNDARY_SINGLE_CACHELINE	1
16670 #define BOUNDARY_MULTI_CACHELINE	2
16671 
16672 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16673 {
16674 	int cacheline_size;
16675 	u8 byte;
16676 	int goal;
16677 
16678 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
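	/* PCI_CACHE_LINE_SIZE is in 32-bit words; zero means it was never
	 * programmed, so assume a 1024-byte cache line.
	 */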
16679 	if (byte == 0)
16680 		cacheline_size = 1024;
16681 	else
16682 		cacheline_size = (int) byte * 4;
16683 
16684 	/* On 5703 and later chips, the boundary bits have no
16685 	 * effect.
16686 	 */
16687 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16688 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16689 	    !tg3_flag(tp, PCI_EXPRESS))
16690 		goto out;
16691 
16692 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16693 	goal = BOUNDARY_MULTI_CACHELINE;
16694 #else
16695 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16696 	goal = BOUNDARY_SINGLE_CACHELINE;
16697 #else
16698 	goal = 0;
16699 #endif
16700 #endif
16701 
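	/* 57765+ chips have only a single cache-alignment disable bit
	 * rather than separate read/write boundary fields.
	 */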
16702 	if (tg3_flag(tp, 57765_PLUS)) {
16703 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16704 		goto out;
16705 	}
16706 
16707 	if (!goal)
16708 		goto out;
16709 
16710 	/* PCI controllers on most RISC systems tend to disconnect
16711 	 * when a device tries to burst across a cache-line boundary.
16712 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16713 	 *
16714 	 * Unfortunately, for PCI-E there are only limited
16715 	 * write-side controls for this, and thus for reads
16716 	 * we will still get the disconnects.  We'll also waste
16717 	 * these PCI cycles for both read and write for chips
16718 	 * other than 5700 and 5701 which do not implement the
16719 	 * boundary bits.
16720 	 */
16721 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16722 		switch (cacheline_size) {
16723 		case 16:
16724 		case 32:
16725 		case 64:
16726 		case 128:
16727 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16728 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16729 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16730 			} else {
16731 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16732 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16733 			}
16734 			break;
16735 
16736 		case 256:
16737 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16738 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16739 			break;
16740 
16741 		default:
16742 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16743 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16744 			break;
16745 		}
16746 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16747 		switch (cacheline_size) {
16748 		case 16:
16749 		case 32:
16750 		case 64:
16751 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16752 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16753 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16754 				break;
16755 			}
16756 			/* fallthrough */
16757 		case 128:
16758 		default:
16759 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16760 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16761 			break;
16762 		}
16763 	} else {
16764 		switch (cacheline_size) {
16765 		case 16:
16766 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16767 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16768 					DMA_RWCTRL_WRITE_BNDRY_16);
16769 				break;
16770 			}
16771 			/* fallthrough */
16772 		case 32:
16773 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16774 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16775 					DMA_RWCTRL_WRITE_BNDRY_32);
16776 				break;
16777 			}
16778 			/* fallthrough */
16779 		case 64:
16780 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16781 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16782 					DMA_RWCTRL_WRITE_BNDRY_64);
16783 				break;
16784 			}
16785 			/* fallthrough */
16786 		case 128:
16787 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16788 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16789 					DMA_RWCTRL_WRITE_BNDRY_128);
16790 				break;
16791 			}
16792 			/* fallthrough */
16793 		case 256:
16794 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16795 				DMA_RWCTRL_WRITE_BNDRY_256);
16796 			break;
16797 		case 512:
16798 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16799 				DMA_RWCTRL_WRITE_BNDRY_512);
16800 			break;
16801 		case 1024:
16802 		default:
16803 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16804 				DMA_RWCTRL_WRITE_BNDRY_1024);
16805 			break;
16806 		}
16807 	}
16808 
16809 out:
16810 	return val;
16811 }
16812 
16813 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16814 			   int size, bool to_device)
16815 {
16816 	struct tg3_internal_buffer_desc test_desc;
16817 	u32 sram_dma_descs;
16818 	int i, ret;
16819 
16820 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16821 
16822 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16823 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16824 	tw32(RDMAC_STATUS, 0);
16825 	tw32(WDMAC_STATUS, 0);
16826 
16827 	tw32(BUFMGR_MODE, 0);
16828 	tw32(FTQ_RESET, 0);
16829 
16830 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16831 	test_desc.addr_lo = buf_dma & 0xffffffff;
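	/* 0x2100 is the NIC-local SRAM buffer used as the other end of
	 * the test transfer.
	 */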
16832 	test_desc.nic_mbuf = 0x00002100;
16833 	test_desc.len = size;
16834 
16835 	/*
16836 	 * HP ZX1 systems were seeing test failures for 5701 cards running
16837 	 * at 33MHz the *second* time the tg3 driver was loaded after an
16838 	 * initial scan.
16839 	 *
16840 	 * Broadcom tells me:
16841 	 *   ...the DMA engine is connected to the GRC block and a DMA
16842 	 *   reset may affect the GRC block in some unpredictable way...
16843 	 *   The behavior of resets to individual blocks has not been tested.
16844 	 *
16845 	 * Broadcom noted the GRC reset will also reset all sub-components.
16846 	 */
16847 	if (to_device) {
16848 		test_desc.cqid_sqid = (13 << 8) | 2;
16849 
16850 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16851 		udelay(40);
16852 	} else {
16853 		test_desc.cqid_sqid = (16 << 8) | 7;
16854 
16855 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16856 		udelay(40);
16857 	}
16858 	test_desc.flags = 0x00000005;
16859 
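	/* Copy the descriptor into NIC SRAM word-by-word through the PCI
	 * memory window in config space.
	 */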
16860 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16861 		u32 val;
16862 
16863 		val = *(((u32 *)&test_desc) + i);
16864 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16865 				       sram_dma_descs + (i * sizeof(u32)));
16866 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16867 	}
16868 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16869 
16870 	if (to_device)
16871 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16872 	else
16873 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16874 
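	/* Poll up to 4 ms (40 x 100 us) for the DMA completion to show up
	 * in the relevant FTQ.
	 */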
16875 	ret = -ENODEV;
16876 	for (i = 0; i < 40; i++) {
16877 		u32 val;
16878 
16879 		if (to_device)
16880 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16881 		else
16882 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16883 		if ((val & 0xffff) == sram_dma_descs) {
16884 			ret = 0;
16885 			break;
16886 		}
16887 
16888 		udelay(100);
16889 	}
16890 
16891 	return ret;
16892 }
16893 
16894 #define TEST_BUFFER_SIZE	0x2000
16895 
16896 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
16897 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16898 	{ },
16899 };
16900 
16901 static int tg3_test_dma(struct tg3 *tp)
16902 {
16903 	dma_addr_t buf_dma;
16904 	u32 *buf, saved_dma_rwctrl;
16905 	int ret = 0;
16906 
16907 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16908 				 &buf_dma, GFP_KERNEL);
16909 	if (!buf) {
16910 		ret = -ENOMEM;
16911 		goto out_nofree;
16912 	}
16913 
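	/* Default to PCI Memory Read (0x6) and Memory Write (0x7) bus
	 * commands for DMA cycles.
	 */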
16914 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16915 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16916 
16917 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16918 
16919 	if (tg3_flag(tp, 57765_PLUS))
16920 		goto out;
16921 
16922 	if (tg3_flag(tp, PCI_EXPRESS)) {
16923 		/* DMA read watermark not used on PCIE */
16924 		tp->dma_rwctrl |= 0x00180000;
16925 	} else if (!tg3_flag(tp, PCIX_MODE)) {
16926 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16927 		    tg3_asic_rev(tp) == ASIC_REV_5750)
16928 			tp->dma_rwctrl |= 0x003f0000;
16929 		else
16930 			tp->dma_rwctrl |= 0x003f000f;
16931 	} else {
16932 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16933 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
16934 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16935 			u32 read_water = 0x7;
16936 
16937 			/* If the 5704 is behind the EPB bridge, we can
16938 			 * do the less restrictive ONE_DMA workaround for
16939 			 * better performance.
16940 			 */
16941 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16942 			    tg3_asic_rev(tp) == ASIC_REV_5704)
16943 				tp->dma_rwctrl |= 0x8000;
16944 			else if (ccval == 0x6 || ccval == 0x7)
16945 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16946 
16947 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
16948 				read_water = 4;
16949 			/* Set bit 23 to enable PCIX hw bug fix */
16950 			tp->dma_rwctrl |=
16951 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16952 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16953 				(1 << 23);
16954 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16955 			/* 5780 always in PCIX mode */
16956 			tp->dma_rwctrl |= 0x00144000;
16957 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16958 			/* 5714 always in PCIX mode */
16959 			tp->dma_rwctrl |= 0x00148000;
16960 		} else {
16961 			tp->dma_rwctrl |= 0x001b000f;
16962 		}
16963 	}
16964 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16965 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16966 
16967 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16968 	    tg3_asic_rev(tp) == ASIC_REV_5704)
16969 		tp->dma_rwctrl &= 0xfffffff0;
16970 
16971 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16972 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
16973 		/* Remove this if it causes problems for some boards. */
16974 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16975 
16976 		/* On 5700/5701 chips, we need to set this bit.
16977 		 * Otherwise the chip will issue cacheline transactions
16978 		 * to streamable DMA memory with not all the byte
16979 		 * enables turned on.  This is an error on several
16980 		 * RISC PCI controllers, in particular sparc64.
16981 		 *
16982 		 * On 5703/5704 chips, this bit has been reassigned
16983 		 * a different meaning.  In particular, it is used
16984 		 * on those chips to enable a PCI-X workaround.
16985 		 */
16986 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16987 	}
16988 
16989 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16990 
16991 #if 0
16992 	/* Unneeded, already done by tg3_get_invariants.  */
16993 	tg3_switch_clocks(tp);
16994 #endif
16995 
16996 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16997 	    tg3_asic_rev(tp) != ASIC_REV_5701)
16998 		goto out;
16999 
17000 	/* It is best to perform DMA test with maximum write burst size
17001 	 * to expose the 5700/5701 write DMA bug.
17002 	 */
17003 	saved_dma_rwctrl = tp->dma_rwctrl;
17004 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17005 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17006 
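	/* Write a known pattern to the chip, read it back, and verify it.
	 * On a mismatch, retry with the write boundary forced to 16 bytes;
	 * if that also fails, give up.
	 */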
17007 	while (1) {
17008 		u32 *p = buf, i;
17009 
17010 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17011 			p[i] = i;
17012 
17013 		/* Send the buffer to the chip. */
17014 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17015 		if (ret) {
17016 			dev_err(&tp->pdev->dev,
17017 				"%s: Buffer write failed. err = %d\n",
17018 				__func__, ret);
17019 			break;
17020 		}
17021 
17022 #if 0
17023 		/* Validate that the data reached card RAM correctly. */
17024 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17025 			u32 val;
17026 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
17027 			if (le32_to_cpu(val) != p[i]) {
17028 				dev_err(&tp->pdev->dev,
17029 					"%s: Buffer corrupted on device! "
17030 					"(%d != %d)\n", __func__, val, i);
17031 				/* ret = -ENODEV here? */
17032 			}
17033 			p[i] = 0;
17034 		}
17035 #endif
17036 		/* Now read it back. */
17037 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17038 		if (ret) {
17039 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17040 				"err = %d\n", __func__, ret);
17041 			break;
17042 		}
17043 
17044 		/* Verify it. */
17045 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17046 			if (p[i] == i)
17047 				continue;
17048 
17049 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17050 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17051 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17052 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17053 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17054 				break;
17055 			} else {
17056 				dev_err(&tp->pdev->dev,
17057 					"%s: Buffer corrupted on read back! "
17058 					"(%d != %d)\n", __func__, p[i], i);
17059 				ret = -ENODEV;
17060 				goto out;
17061 			}
17062 		}
17063 
17064 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17065 			/* Success. */
17066 			ret = 0;
17067 			break;
17068 		}
17069 	}
17070 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17071 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17072 		/* DMA test passed without adjusting the DMA boundary;
17073 		 * now look for chipsets that are known to expose the
17074 		 * DMA bug without failing the test.
17075 		 */
17076 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17077 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17078 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17079 		} else {
17080 			/* Safe to use the calculated DMA boundary. */
17081 			tp->dma_rwctrl = saved_dma_rwctrl;
17082 		}
17083 
17084 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17085 	}
17086 
17087 out:
17088 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17089 out_nofree:
17090 	return ret;
17091 }
17092 
17093 static void tg3_init_bufmgr_config(struct tg3 *tp)
17094 {
17095 	if (tg3_flag(tp, 57765_PLUS)) {
17096 		tp->bufmgr_config.mbuf_read_dma_low_water =
17097 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17098 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17099 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17100 		tp->bufmgr_config.mbuf_high_water =
17101 			DEFAULT_MB_HIGH_WATER_57765;
17102 
17103 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17104 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17105 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17106 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17107 		tp->bufmgr_config.mbuf_high_water_jumbo =
17108 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17109 	} else if (tg3_flag(tp, 5705_PLUS)) {
17110 		tp->bufmgr_config.mbuf_read_dma_low_water =
17111 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17112 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17113 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17114 		tp->bufmgr_config.mbuf_high_water =
17115 			DEFAULT_MB_HIGH_WATER_5705;
17116 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17117 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17118 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17119 			tp->bufmgr_config.mbuf_high_water =
17120 				DEFAULT_MB_HIGH_WATER_5906;
17121 		}
17122 
17123 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17124 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17125 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17126 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17127 		tp->bufmgr_config.mbuf_high_water_jumbo =
17128 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17129 	} else {
17130 		tp->bufmgr_config.mbuf_read_dma_low_water =
17131 			DEFAULT_MB_RDMA_LOW_WATER;
17132 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17133 			DEFAULT_MB_MACRX_LOW_WATER;
17134 		tp->bufmgr_config.mbuf_high_water =
17135 			DEFAULT_MB_HIGH_WATER;
17136 
17137 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17138 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17139 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17140 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17141 		tp->bufmgr_config.mbuf_high_water_jumbo =
17142 			DEFAULT_MB_HIGH_WATER_JUMBO;
17143 	}
17144 
17145 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17146 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17147 }
17148 
17149 static char *tg3_phy_string(struct tg3 *tp)
17150 {
17151 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17152 	case TG3_PHY_ID_BCM5400:	return "5400";
17153 	case TG3_PHY_ID_BCM5401:	return "5401";
17154 	case TG3_PHY_ID_BCM5411:	return "5411";
17155 	case TG3_PHY_ID_BCM5701:	return "5701";
17156 	case TG3_PHY_ID_BCM5703:	return "5703";
17157 	case TG3_PHY_ID_BCM5704:	return "5704";
17158 	case TG3_PHY_ID_BCM5705:	return "5705";
17159 	case TG3_PHY_ID_BCM5750:	return "5750";
17160 	case TG3_PHY_ID_BCM5752:	return "5752";
17161 	case TG3_PHY_ID_BCM5714:	return "5714";
17162 	case TG3_PHY_ID_BCM5780:	return "5780";
17163 	case TG3_PHY_ID_BCM5755:	return "5755";
17164 	case TG3_PHY_ID_BCM5787:	return "5787";
17165 	case TG3_PHY_ID_BCM5784:	return "5784";
17166 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17167 	case TG3_PHY_ID_BCM5906:	return "5906";
17168 	case TG3_PHY_ID_BCM5761:	return "5761";
17169 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17170 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17171 	case TG3_PHY_ID_BCM57765:	return "57765";
17172 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17173 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17174 	case TG3_PHY_ID_BCM5762:	return "5762C";
17175 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17176 	case 0:			return "serdes";
17177 	default:		return "unknown";
17178 	}
17179 }
17180 
17181 static char *tg3_bus_string(struct tg3 *tp, char *str)
17182 {
17183 	if (tg3_flag(tp, PCI_EXPRESS)) {
17184 		strcpy(str, "PCI Express");
17185 		return str;
17186 	} else if (tg3_flag(tp, PCIX_MODE)) {
17187 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17188 
17189 		strcpy(str, "PCIX:");
17190 
17191 		if ((clock_ctrl == 7) ||
17192 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17193 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17194 			strcat(str, "133MHz");
17195 		else if (clock_ctrl == 0)
17196 			strcat(str, "33MHz");
17197 		else if (clock_ctrl == 2)
17198 			strcat(str, "50MHz");
17199 		else if (clock_ctrl == 4)
17200 			strcat(str, "66MHz");
17201 		else if (clock_ctrl == 6)
17202 			strcat(str, "100MHz");
17203 	} else {
17204 		strcpy(str, "PCI:");
17205 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17206 			strcat(str, "66MHz");
17207 		else
17208 			strcat(str, "33MHz");
17209 	}
17210 	if (tg3_flag(tp, PCI_32BIT))
17211 		strcat(str, ":32-bit");
17212 	else
17213 		strcat(str, ":64-bit");
17214 	return str;
17215 }
17216 
17217 static void tg3_init_coal(struct tg3 *tp)
17218 {
17219 	struct ethtool_coalesce *ec = &tp->coal;
17220 
17221 	memset(ec, 0, sizeof(*ec));
17222 	ec->cmd = ETHTOOL_GCOALESCE;
17223 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17224 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17225 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17226 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17227 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17228 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17229 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17230 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17231 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17232 
17233 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17234 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17235 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17236 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17237 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17238 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17239 	}
17240 
17241 	if (tg3_flag(tp, 5705_PLUS)) {
17242 		ec->rx_coalesce_usecs_irq = 0;
17243 		ec->tx_coalesce_usecs_irq = 0;
17244 		ec->stats_block_coalesce_usecs = 0;
17245 	}
17246 }
17247 
17248 static int tg3_init_one(struct pci_dev *pdev,
17249 			const struct pci_device_id *ent)
17250 {
17251 	struct net_device *dev;
17252 	struct tg3 *tp;
17253 	int i, err;
17254 	u32 sndmbx, rcvmbx, intmbx;
17255 	char str[40];
17256 	u64 dma_mask, persist_dma_mask;
17257 	netdev_features_t features = 0;
17258 
17259 	printk_once(KERN_INFO "%s\n", version);
17260 
17261 	err = pci_enable_device(pdev);
17262 	if (err) {
17263 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17264 		return err;
17265 	}
17266 
17267 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17268 	if (err) {
17269 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17270 		goto err_out_disable_pdev;
17271 	}
17272 
17273 	pci_set_master(pdev);
17274 
17275 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17276 	if (!dev) {
17277 		err = -ENOMEM;
17278 		goto err_out_free_res;
17279 	}
17280 
17281 	SET_NETDEV_DEV(dev, &pdev->dev);
17282 
17283 	tp = netdev_priv(dev);
17284 	tp->pdev = pdev;
17285 	tp->dev = dev;
17286 	tp->pm_cap = pdev->pm_cap;
17287 	tp->rx_mode = TG3_DEF_RX_MODE;
17288 	tp->tx_mode = TG3_DEF_TX_MODE;
17289 	tp->irq_sync = 1;
17290 
17291 	if (tg3_debug > 0)
17292 		tp->msg_enable = tg3_debug;
17293 	else
17294 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17295 
17296 	if (pdev_is_ssb_gige_core(pdev)) {
17297 		tg3_flag_set(tp, IS_SSB_CORE);
17298 		if (ssb_gige_must_flush_posted_writes(pdev))
17299 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17300 		if (ssb_gige_one_dma_at_once(pdev))
17301 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17302 		if (ssb_gige_have_roboswitch(pdev))
17303 			tg3_flag_set(tp, ROBOSWITCH);
17304 		if (ssb_gige_is_rgmii(pdev))
17305 			tg3_flag_set(tp, RGMII_MODE);
17306 	}
17307 
17308 	/* The word/byte swap controls here control register access byte
17309 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17310 	 * setting below.
17311 	 */
17312 	tp->misc_host_ctrl =
17313 		MISC_HOST_CTRL_MASK_PCI_INT |
17314 		MISC_HOST_CTRL_WORD_SWAP |
17315 		MISC_HOST_CTRL_INDIR_ACCESS |
17316 		MISC_HOST_CTRL_PCISTATE_RW;
17317 
17318 	/* The NONFRM (non-frame) byte/word swap controls take effect
17319 	 * on descriptor entries, anything which isn't packet data.
17320 	 *
17321 	 * The StrongARM chips on the board (one for tx, one for rx)
17322 	 * are running in big-endian mode.
17323 	 */
17324 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17325 			GRC_MODE_WSWAP_NONFRM_DATA);
17326 #ifdef __BIG_ENDIAN
17327 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17328 #endif
17329 	spin_lock_init(&tp->lock);
17330 	spin_lock_init(&tp->indirect_lock);
17331 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17332 
17333 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17334 	if (!tp->regs) {
17335 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17336 		err = -ENOMEM;
17337 		goto err_out_free_dev;
17338 	}
17339 
17340 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17341 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17342 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17343 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17344 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17345 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17346 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17347 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17348 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17349 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17350 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17351 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17352 		tg3_flag_set(tp, ENABLE_APE);
17353 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17354 		if (!tp->aperegs) {
17355 			dev_err(&pdev->dev,
17356 				"Cannot map APE registers, aborting\n");
17357 			err = -ENOMEM;
17358 			goto err_out_iounmap;
17359 		}
17360 	}
17361 
17362 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17363 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17364 
17365 	dev->ethtool_ops = &tg3_ethtool_ops;
17366 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17367 	dev->netdev_ops = &tg3_netdev_ops;
17368 	dev->irq = pdev->irq;
17369 
17370 	err = tg3_get_invariants(tp, ent);
17371 	if (err) {
17372 		dev_err(&pdev->dev,
17373 			"Problem fetching invariants of chip, aborting\n");
17374 		goto err_out_apeunmap;
17375 	}
17376 
17377 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17378 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17379 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17380 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17381 	 * do DMA address check in tg3_start_xmit().
17382 	 */
17383 	if (tg3_flag(tp, IS_5788))
17384 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17385 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17386 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17387 #ifdef CONFIG_HIGHMEM
17388 		dma_mask = DMA_BIT_MASK(64);
17389 #endif
17390 	} else
17391 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17392 
17393 	/* Configure DMA attributes. */
17394 	if (dma_mask > DMA_BIT_MASK(32)) {
17395 		err = pci_set_dma_mask(pdev, dma_mask);
17396 		if (!err) {
17397 			features |= NETIF_F_HIGHDMA;
17398 			err = pci_set_consistent_dma_mask(pdev,
17399 							  persist_dma_mask);
17400 			if (err < 0) {
17401 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17402 					"DMA for consistent allocations\n");
17403 				goto err_out_apeunmap;
17404 			}
17405 		}
17406 	}
17407 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17408 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17409 		if (err) {
17410 			dev_err(&pdev->dev,
17411 				"No usable DMA configuration, aborting\n");
17412 			goto err_out_apeunmap;
17413 		}
17414 	}
17415 
17416 	tg3_init_bufmgr_config(tp);
17417 
17418 	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17419 
17420 	/* 5700 B0 chips do not support checksumming correctly due
17421 	 * to hardware bugs.
17422 	 */
17423 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17424 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17425 
17426 		if (tg3_flag(tp, 5755_PLUS))
17427 			features |= NETIF_F_IPV6_CSUM;
17428 	}
17429 
17430 	/* TSO is on by default on chips that support hardware TSO.
17431 	 * Firmware TSO on older chips gives lower performance, so it
17432 	 * is off by default, but can be enabled using ethtool.
17433 	 */
17434 	if ((tg3_flag(tp, HW_TSO_1) ||
17435 	     tg3_flag(tp, HW_TSO_2) ||
17436 	     tg3_flag(tp, HW_TSO_3)) &&
17437 	    (features & NETIF_F_IP_CSUM))
17438 		features |= NETIF_F_TSO;
17439 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17440 		if (features & NETIF_F_IPV6_CSUM)
17441 			features |= NETIF_F_TSO6;
17442 		if (tg3_flag(tp, HW_TSO_3) ||
17443 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17444 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17445 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17446 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17447 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17448 			features |= NETIF_F_TSO_ECN;
17449 	}
17450 
17451 	dev->features |= features;
17452 	dev->vlan_features |= features;
17453 
17454 	/*
17455 	 * Add loopback capability only for a subset of devices that support
17456 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17457 	 * loopback for the remaining devices.
17458 	 */
17459 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17460 	    !tg3_flag(tp, CPMU_PRESENT))
17461 		/* Add the loopback capability */
17462 		features |= NETIF_F_LOOPBACK;
17463 
17464 	dev->hw_features |= features;
17465 
17466 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17467 	    !tg3_flag(tp, TSO_CAPABLE) &&
17468 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17469 		tg3_flag_set(tp, MAX_RXPEND_64);
17470 		tp->rx_pending = 63;
17471 	}
17472 
17473 	err = tg3_get_device_address(tp);
17474 	if (err) {
17475 		dev_err(&pdev->dev,
17476 			"Could not obtain valid ethernet address, aborting\n");
17477 		goto err_out_apeunmap;
17478 	}
17479 
17480 	/*
17481 	 * Reset chip in case a UNDI or EFI driver did not shut down
17482 	 * DMA.  The DMA self test will enable the WDMAC and we'll see
17483 	 * (spurious) pending DMA on the PCI bus at that point.
17484 	 */
17485 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17486 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17487 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17488 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17489 	}
17490 
17491 	err = tg3_test_dma(tp);
17492 	if (err) {
17493 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17494 		goto err_out_apeunmap;
17495 	}
17496 
17497 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17498 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17499 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17500 	for (i = 0; i < tp->irq_max; i++) {
17501 		struct tg3_napi *tnapi = &tp->napi[i];
17502 
17503 		tnapi->tp = tp;
17504 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17505 
17506 		tnapi->int_mbox = intmbx;
17507 		if (i <= 4)
17508 			intmbx += 0x8;
17509 		else
17510 			intmbx += 0x4;
17511 
17512 		tnapi->consmbox = rcvmbx;
17513 		tnapi->prodmbox = sndmbx;
17514 
17515 		if (i)
17516 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17517 		else
17518 			tnapi->coal_now = HOSTCC_MODE_NOW;
17519 
17520 		if (!tg3_flag(tp, SUPPORT_MSIX))
17521 			break;
17522 
17523 		/*
17524 		 * If we support MSIX, we'll be using RSS.  If we're using
17525 		 * RSS, the first vector only handles link interrupts and the
17526 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17527 		 * mailbox values for the next iteration.  The values we set up
17528 		 * above are still useful for the single vectored mode.
17529 		 */
17530 		if (!i)
17531 			continue;
17532 
17533 		rcvmbx += 0x8;
17534 
17535 		if (sndmbx & 0x4)
17536 			sndmbx -= 0x4;
17537 		else
17538 			sndmbx += 0xc;
17539 	}
17540 
17541 	tg3_init_coal(tp);
17542 
17543 	pci_set_drvdata(pdev, dev);
17544 
17545 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17546 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17547 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17548 		tg3_flag_set(tp, PTP_CAPABLE);
17549 
17550 	if (tg3_flag(tp, 5717_PLUS)) {
17551 		/* Resume a low-power mode */
17552 		tg3_frob_aux_power(tp, false);
17553 	}
17554 
17555 	tg3_timer_init(tp);
17556 
17557 	tg3_carrier_off(tp);
17558 
17559 	err = register_netdev(dev);
17560 	if (err) {
17561 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17562 		goto err_out_apeunmap;
17563 	}
17564 
17565 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17566 		    tp->board_part_number,
17567 		    tg3_chip_rev_id(tp),
17568 		    tg3_bus_string(tp, str),
17569 		    dev->dev_addr);
17570 
17571 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17572 		struct phy_device *phydev;
17573 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17574 		netdev_info(dev,
17575 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17576 			    phydev->drv->name, dev_name(&phydev->dev));
17577 	} else {
17578 		char *ethtype;
17579 
17580 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17581 			ethtype = "10/100Base-TX";
17582 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17583 			ethtype = "1000Base-SX";
17584 		else
17585 			ethtype = "10/100/1000Base-T";
17586 
17587 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17588 			    "(WireSpeed[%d], EEE[%d])\n",
17589 			    tg3_phy_string(tp), ethtype,
17590 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17591 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17592 	}
17593 
17594 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17595 		    (dev->features & NETIF_F_RXCSUM) != 0,
17596 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17597 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17598 		    tg3_flag(tp, ENABLE_ASF) != 0,
17599 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17600 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17601 		    tp->dma_rwctrl,
17602 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17603 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17604 
17605 	pci_save_state(pdev);
17606 
17607 	return 0;
17608 
17609 err_out_apeunmap:
17610 	if (tp->aperegs) {
17611 		iounmap(tp->aperegs);
17612 		tp->aperegs = NULL;
17613 	}
17614 
17615 err_out_iounmap:
17616 	if (tp->regs) {
17617 		iounmap(tp->regs);
17618 		tp->regs = NULL;
17619 	}
17620 
17621 err_out_free_dev:
17622 	free_netdev(dev);
17623 
17624 err_out_free_res:
17625 	pci_release_regions(pdev);
17626 
17627 err_out_disable_pdev:
17628 	pci_disable_device(pdev);
17629 	pci_set_drvdata(pdev, NULL);
17630 	return err;
17631 }
17632 
17633 static void tg3_remove_one(struct pci_dev *pdev)
17634 {
17635 	struct net_device *dev = pci_get_drvdata(pdev);
17636 
17637 	if (dev) {
17638 		struct tg3 *tp = netdev_priv(dev);
17639 
17640 		release_firmware(tp->fw);
17641 
17642 		tg3_reset_task_cancel(tp);
17643 
17644 		if (tg3_flag(tp, USE_PHYLIB)) {
17645 			tg3_phy_fini(tp);
17646 			tg3_mdio_fini(tp);
17647 		}
17648 
17649 		unregister_netdev(dev);
17650 		if (tp->aperegs) {
17651 			iounmap(tp->aperegs);
17652 			tp->aperegs = NULL;
17653 		}
17654 		if (tp->regs) {
17655 			iounmap(tp->regs);
17656 			tp->regs = NULL;
17657 		}
17658 		free_netdev(dev);
17659 		pci_release_regions(pdev);
17660 		pci_disable_device(pdev);
17661 		pci_set_drvdata(pdev, NULL);
17662 	}
17663 }
17664 
17665 #ifdef CONFIG_PM_SLEEP
17666 static int tg3_suspend(struct device *device)
17667 {
17668 	struct pci_dev *pdev = to_pci_dev(device);
17669 	struct net_device *dev = pci_get_drvdata(pdev);
17670 	struct tg3 *tp = netdev_priv(dev);
17671 	int err;
17672 
17673 	if (!netif_running(dev))
17674 		return 0;
17675 
17676 	tg3_reset_task_cancel(tp);
17677 	tg3_phy_stop(tp);
17678 	tg3_netif_stop(tp);
17679 
17680 	tg3_timer_stop(tp);
17681 
17682 	tg3_full_lock(tp, 1);
17683 	tg3_disable_ints(tp);
17684 	tg3_full_unlock(tp);
17685 
17686 	netif_device_detach(dev);
17687 
17688 	tg3_full_lock(tp, 0);
17689 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17690 	tg3_flag_clear(tp, INIT_COMPLETE);
17691 	tg3_full_unlock(tp);
17692 
17693 	err = tg3_power_down_prepare(tp);
17694 	if (err) {
17695 		int err2;
17696 
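		/* Power-down preparation failed; restart the hardware and
		 * reattach the device so it remains usable.
		 */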
17697 		tg3_full_lock(tp, 0);
17698 
17699 		tg3_flag_set(tp, INIT_COMPLETE);
17700 		err2 = tg3_restart_hw(tp, true);
17701 		if (err2)
17702 			goto out;
17703 
17704 		tg3_timer_start(tp);
17705 
17706 		netif_device_attach(dev);
17707 		tg3_netif_start(tp);
17708 
17709 out:
17710 		tg3_full_unlock(tp);
17711 
17712 		if (!err2)
17713 			tg3_phy_start(tp);
17714 	}
17715 
17716 	return err;
17717 }
17718 
17719 static int tg3_resume(struct device *device)
17720 {
17721 	struct pci_dev *pdev = to_pci_dev(device);
17722 	struct net_device *dev = pci_get_drvdata(pdev);
17723 	struct tg3 *tp = netdev_priv(dev);
17724 	int err;
17725 
17726 	if (!netif_running(dev))
17727 		return 0;
17728 
17729 	netif_device_attach(dev);
17730 
17731 	tg3_full_lock(tp, 0);
17732 
17733 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17734 
17735 	tg3_flag_set(tp, INIT_COMPLETE);
17736 	err = tg3_restart_hw(tp,
17737 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17738 	if (err)
17739 		goto out;
17740 
17741 	tg3_timer_start(tp);
17742 
17743 	tg3_netif_start(tp);
17744 
17745 out:
17746 	tg3_full_unlock(tp);
17747 
17748 	if (!err)
17749 		tg3_phy_start(tp);
17750 
17751 	return err;
17752 }
17753 #endif /* CONFIG_PM_SLEEP */
17754 
17755 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17756 
17757 /**
17758  * tg3_io_error_detected - called when PCI error is detected
17759  * @pdev: Pointer to PCI device
17760  * @state: The current pci connection state
17761  *
17762  * This function is called after a PCI bus error affecting
17763  * this device has been detected.
17764  */
17765 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17766 					      pci_channel_state_t state)
17767 {
17768 	struct net_device *netdev = pci_get_drvdata(pdev);
17769 	struct tg3 *tp = netdev_priv(netdev);
17770 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17771 
17772 	netdev_info(netdev, "PCI I/O error detected\n");
17773 
17774 	rtnl_lock();
17775 
17776 	if (!netif_running(netdev))
17777 		goto done;
17778 
17779 	tg3_phy_stop(tp);
17780 
17781 	tg3_netif_stop(tp);
17782 
17783 	tg3_timer_stop(tp);
17784 
17785 	/* Want to make sure that the reset task doesn't run */
17786 	tg3_reset_task_cancel(tp);
17787 
17788 	netif_device_detach(netdev);
17789 
17790 	/* Clean up software state, even if MMIO is blocked */
17791 	tg3_full_lock(tp, 0);
17792 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17793 	tg3_full_unlock(tp);
17794 
17795 done:
17796 	if (state == pci_channel_io_perm_failure) {
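		/* Re-enable NAPI (disabled above) so dev_close() can tear
		 * the interface down cleanly.
		 */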
17797 		tg3_napi_enable(tp);
17798 		dev_close(netdev);
17799 		err = PCI_ERS_RESULT_DISCONNECT;
17800 	} else {
17801 		pci_disable_device(pdev);
17802 	}
17803 
17804 	rtnl_unlock();
17805 
17806 	return err;
17807 }
17808 
17809 /**
17810  * tg3_io_slot_reset - called after the pci bus has been reset.
17811  * @pdev: Pointer to PCI device
17812  *
17813  * Restart the card from scratch, as if from a cold-boot.
17814  * At this point, the card has experienced a hard reset,
17815  * followed by fixups by BIOS, and has its config space
17816  * set up identically to what it was at cold boot.
17817  */
17818 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17819 {
17820 	struct net_device *netdev = pci_get_drvdata(pdev);
17821 	struct tg3 *tp = netdev_priv(netdev);
17822 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17823 	int err;
17824 
17825 	rtnl_lock();
17826 
17827 	if (pci_enable_device(pdev)) {
17828 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17829 		goto done;
17830 	}
17831 
17832 	pci_set_master(pdev);
17833 	pci_restore_state(pdev);
17834 	pci_save_state(pdev);
17835 
17836 	if (!netif_running(netdev)) {
17837 		rc = PCI_ERS_RESULT_RECOVERED;
17838 		goto done;
17839 	}
17840 
17841 	err = tg3_power_up(tp);
17842 	if (err)
17843 		goto done;
17844 
17845 	rc = PCI_ERS_RESULT_RECOVERED;
17846 
17847 done:
17848 	if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
17849 		tg3_napi_enable(tp);
17850 		dev_close(netdev);
17851 	}
17852 	rtnl_unlock();
17853 
17854 	return rc;
17855 }
17856 
17857 /**
17858  * tg3_io_resume - called when traffic can start flowing again.
17859  * @pdev: Pointer to PCI device
17860  *
17861  * This callback is called when the error recovery driver tells
17862  * us that it's OK to resume normal operation.
17863  */
17864 static void tg3_io_resume(struct pci_dev *pdev)
17865 {
17866 	struct net_device *netdev = pci_get_drvdata(pdev);
17867 	struct tg3 *tp = netdev_priv(netdev);
17868 	int err;
17869 
17870 	rtnl_lock();
17871 
17872 	if (!netif_running(netdev))
17873 		goto done;
17874 
17875 	tg3_full_lock(tp, 0);
17876 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17877 	tg3_flag_set(tp, INIT_COMPLETE);
17878 	err = tg3_restart_hw(tp, true);
17879 	if (err) {
17880 		tg3_full_unlock(tp);
17881 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17882 		goto done;
17883 	}
17884 
17885 	netif_device_attach(netdev);
17886 
17887 	tg3_timer_start(tp);
17888 
17889 	tg3_netif_start(tp);
17890 
17891 	tg3_full_unlock(tp);
17892 
17893 	tg3_phy_start(tp);
17894 
17895 done:
17896 	rtnl_unlock();
17897 }
17898 
17899 static const struct pci_error_handlers tg3_err_handler = {
17900 	.error_detected	= tg3_io_error_detected,
17901 	.slot_reset	= tg3_io_slot_reset,
17902 	.resume		= tg3_io_resume
17903 };
17904 
17905 static struct pci_driver tg3_driver = {
17906 	.name		= DRV_MODULE_NAME,
17907 	.id_table	= tg3_pci_tbl,
17908 	.probe		= tg3_init_one,
17909 	.remove		= tg3_remove_one,
17910 	.err_handler	= &tg3_err_handler,
17911 	.driver.pm	= &tg3_pm_ops,
17912 };
17913 
17914 module_pci_driver(tg3_driver);
17915