/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
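/* Example: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. a plain
 * test_bit() against the device's flag bitmap.
 */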

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			130
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"February 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
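/* Example: with TG3_TX_RING_SIZE == 512, NEXT_TX(511) == (512 & 511) == 0,
 * so the index wraps with a mask instead of a modulo operation.
 */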

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

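/* All register and mailbox accesses go through the tp->read32/tp->write32
 * style method pointers, so chips that require indirect config-space
 * cycles or posted-write flushes get the appropriate helper installed
 * at probe time.
 */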
#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

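/* NIC SRAM is reached through a register window: the MEM_WIN base address
 * register selects the SRAM offset, and the MEM_WIN data register moves
 * the selected 32-bit word.
 */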
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver isn't holding any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

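/* Acquire an APE lock: post the driver's request bit in the REQ register,
 * then poll the GRANT register until the APE firmware echoes that bit
 * back.  If no grant arrives within ~1 ms, the request is revoked by
 * writing the bit to the GRANT register.
 */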
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
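		/* fall through */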
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
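		/* fall through */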
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

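/* Wait for a previously posted APE event to be consumed: take the MEM
 * lock, test APE_EVENT_STATUS_EVENT_PENDING, and if the event is still
 * pending, drop the lock and retry in 10 usec steps until timeout_us
 * expires.  On success the MEM lock is left held for the caller.
 */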
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

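/* Clause 22 MDIO access: a frame is composed from the PHY address, the
 * register number and a read/write command, started via MAC_MI_COM, and
 * then MI_COM_BUSY is polled (up to PHY_BUSY_LOOPS iterations of 10 usec)
 * until the MAC has clocked the transaction out on the MI bus.
 */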
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
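	/* Convert the remaining usecs into iterations of the udelay(8)
	 * polling loop below.
	 */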
1633 	delay_cnt = (delay_cnt >> 3) + 1;
1634 
1635 	for (i = 0; i < delay_cnt; i++) {
1636 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1637 			break;
1638 		udelay(8);
1639 	}
1640 }
1641 
1642 /* tp->lock is held. */
1643 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1644 {
1645 	u32 reg, val;
1646 
1647 	val = 0;
1648 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1649 		val = reg << 16;
1650 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1651 		val |= (reg & 0xffff);
1652 	*data++ = val;
1653 
1654 	val = 0;
1655 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1656 		val = reg << 16;
1657 	if (!tg3_readphy(tp, MII_LPA, &reg))
1658 		val |= (reg & 0xffff);
1659 	*data++ = val;
1660 
1661 	val = 0;
1662 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1663 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1664 			val = reg << 16;
1665 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1666 			val |= (reg & 0xffff);
1667 	}
1668 	*data++ = val;
1669 
1670 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1671 		val = reg << 16;
1672 	else
1673 		val = 0;
1674 	*data++ = val;
1675 }
1676 
1677 /* tp->lock is held. */
1678 static void tg3_ump_link_report(struct tg3 *tp)
1679 {
1680 	u32 data[4];
1681 
1682 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1683 		return;
1684 
1685 	tg3_phy_gather_ump_data(tp, data);
1686 
1687 	tg3_wait_for_event_ack(tp);
1688 
1689 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1690 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1691 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1692 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1693 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1694 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1695 
1696 	tg3_generate_fw_event(tp);
1697 }
1698 
1699 /* tp->lock is held. */
1700 static void tg3_stop_fw(struct tg3 *tp)
1701 {
1702 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1703 		/* Wait for RX cpu to ACK the previous event. */
1704 		tg3_wait_for_event_ack(tp);
1705 
1706 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1707 
1708 		tg3_generate_fw_event(tp);
1709 
1710 		/* Wait for RX cpu to ACK this event. */
1711 		tg3_wait_for_event_ack(tp);
1712 	}
1713 }
1714 
1715 /* tp->lock is held. */
1716 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1717 {
1718 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1719 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1720 
1721 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1722 		switch (kind) {
1723 		case RESET_KIND_INIT:
1724 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725 				      DRV_STATE_START);
1726 			break;
1727 
1728 		case RESET_KIND_SHUTDOWN:
1729 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 				      DRV_STATE_UNLOAD);
1731 			break;
1732 
1733 		case RESET_KIND_SUSPEND:
1734 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1735 				      DRV_STATE_SUSPEND);
1736 			break;
1737 
1738 		default:
1739 			break;
1740 		}
1741 	}
1742 
1743 	if (kind == RESET_KIND_INIT ||
1744 	    kind == RESET_KIND_SUSPEND)
1745 		tg3_ape_driver_state_change(tp, kind);
1746 }
1747 
1748 /* tp->lock is held. */
1749 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1750 {
1751 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1752 		switch (kind) {
1753 		case RESET_KIND_INIT:
1754 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755 				      DRV_STATE_START_DONE);
1756 			break;
1757 
1758 		case RESET_KIND_SHUTDOWN:
1759 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1760 				      DRV_STATE_UNLOAD_DONE);
1761 			break;
1762 
1763 		default:
1764 			break;
1765 		}
1766 	}
1767 
1768 	if (kind == RESET_KIND_SHUTDOWN)
1769 		tg3_ape_driver_state_change(tp, kind);
1770 }
1771 
1772 /* tp->lock is held. */
1773 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1774 {
1775 	if (tg3_flag(tp, ENABLE_ASF)) {
1776 		switch (kind) {
1777 		case RESET_KIND_INIT:
1778 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1779 				      DRV_STATE_START);
1780 			break;
1781 
1782 		case RESET_KIND_SHUTDOWN:
1783 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1784 				      DRV_STATE_UNLOAD);
1785 			break;
1786 
1787 		case RESET_KIND_SUSPEND:
1788 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789 				      DRV_STATE_SUSPEND);
1790 			break;
1791 
1792 		default:
1793 			break;
1794 		}
1795 	}
1796 }
1797 
1798 static int tg3_poll_fw(struct tg3 *tp)
1799 {
1800 	int i;
1801 	u32 val;
1802 
1803 	if (tg3_flag(tp, IS_SSB_CORE)) {
1804 		/* We don't use firmware. */
1805 		return 0;
1806 	}
1807 
1808 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1809 		/* Wait up to 20ms for init done. */
1810 		for (i = 0; i < 200; i++) {
1811 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1812 				return 0;
1813 			udelay(100);
1814 		}
1815 		return -ENODEV;
1816 	}
1817 
1818 	/* Wait for firmware initialization to complete. */
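	/* 100000 polls at 10 us apiece bound this wait at ~1 second. */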
1819 	for (i = 0; i < 100000; i++) {
1820 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1821 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1822 			break;
1823 		udelay(10);
1824 	}
1825 
1826 	/* Chip might not be fitted with firmware.  Some Sun onboard
1827 	 * parts are configured like that.  So don't signal the timeout
1828 	 * of the above loop as an error, but do report the lack of
1829 	 * running firmware once.
1830 	 */
1831 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1832 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1833 
1834 		netdev_info(tp->dev, "No firmware running\n");
1835 	}
1836 
1837 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1838 		/* The 57765 A0 needs a little more
1839 		 * time to do some important work.
1840 		 */
1841 		mdelay(10);
1842 	}
1843 
1844 	return 0;
1845 }
1846 
1847 static void tg3_link_report(struct tg3 *tp)
1848 {
1849 	if (!netif_carrier_ok(tp->dev)) {
1850 		netif_info(tp, link, tp->dev, "Link is down\n");
1851 		tg3_ump_link_report(tp);
1852 	} else if (netif_msg_link(tp)) {
1853 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1854 			    (tp->link_config.active_speed == SPEED_1000 ?
1855 			     1000 :
1856 			     (tp->link_config.active_speed == SPEED_100 ?
1857 			      100 : 10)),
1858 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1859 			     "full" : "half"));
1860 
1861 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1862 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1863 			    "on" : "off",
1864 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1865 			    "on" : "off");
1866 
1867 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1868 			netdev_info(tp->dev, "EEE is %s\n",
1869 				    tp->setlpicnt ? "enabled" : "disabled");
1870 
1871 		tg3_ump_link_report(tp);
1872 	}
1873 
1874 	tp->link_up = netif_carrier_ok(tp->dev);
1875 }
1876 
1877 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1878 {
1879 	u16 miireg;
1880 
1881 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1882 		miireg = ADVERTISE_1000XPAUSE;
1883 	else if (flow_ctrl & FLOW_CTRL_TX)
1884 		miireg = ADVERTISE_1000XPSE_ASYM;
1885 	else if (flow_ctrl & FLOW_CTRL_RX)
1886 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1887 	else
1888 		miireg = 0;
1889 
1890 	return miireg;
1891 }
1892 
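/* Resolve the pause configuration from the local and remote 1000BASE-X
 * advertisements.  Symmetric PAUSE advertised by both sides enables
 * flow control in both directions; otherwise, on an asymmetric-pause
 * match, the side that also advertised PAUSE receives (local PAUSE ->
 * FLOW_CTRL_RX here, remote PAUSE -> FLOW_CTRL_TX).
 */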
1893 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1894 {
1895 	u8 cap = 0;
1896 
1897 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1898 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1899 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1900 		if (lcladv & ADVERTISE_1000XPAUSE)
1901 			cap = FLOW_CTRL_RX;
1902 		if (rmtadv & ADVERTISE_1000XPAUSE)
1903 			cap = FLOW_CTRL_TX;
1904 	}
1905 
1906 	return cap;
1907 }
1908 
1909 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1910 {
1911 	u8 autoneg;
1912 	u8 flowctrl = 0;
1913 	u32 old_rx_mode = tp->rx_mode;
1914 	u32 old_tx_mode = tp->tx_mode;
1915 
1916 	if (tg3_flag(tp, USE_PHYLIB))
1917 		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1918 	else
1919 		autoneg = tp->link_config.autoneg;
1920 
1921 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1922 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1923 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1924 		else
1925 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1926 	} else
1927 		flowctrl = tp->link_config.flowctrl;
1928 
1929 	tp->link_config.active_flowctrl = flowctrl;
1930 
1931 	if (flowctrl & FLOW_CTRL_RX)
1932 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1933 	else
1934 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1935 
1936 	if (old_rx_mode != tp->rx_mode)
1937 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1938 
1939 	if (flowctrl & FLOW_CTRL_TX)
1940 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1941 	else
1942 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1943 
1944 	if (old_tx_mode != tp->tx_mode)
1945 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1946 }
1947 
1948 static void tg3_adjust_link(struct net_device *dev)
1949 {
1950 	u8 oldflowctrl, linkmesg = 0;
1951 	u32 mac_mode, lcl_adv, rmt_adv;
1952 	struct tg3 *tp = netdev_priv(dev);
1953 	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1954 
1955 	spin_lock_bh(&tp->lock);
1956 
1957 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1958 				    MAC_MODE_HALF_DUPLEX);
1959 
1960 	oldflowctrl = tp->link_config.active_flowctrl;
1961 
1962 	if (phydev->link) {
1963 		lcl_adv = 0;
1964 		rmt_adv = 0;
1965 
1966 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1967 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1968 		else if (phydev->speed == SPEED_1000 ||
1969 			 tg3_asic_rev(tp) != ASIC_REV_5785)
1970 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1971 		else
1972 			mac_mode |= MAC_MODE_PORT_MODE_MII;
1973 
1974 		if (phydev->duplex == DUPLEX_HALF)
1975 			mac_mode |= MAC_MODE_HALF_DUPLEX;
1976 		else {
1977 			lcl_adv = mii_advertise_flowctrl(
1978 				  tp->link_config.flowctrl);
1979 
1980 			if (phydev->pause)
1981 				rmt_adv = LPA_PAUSE_CAP;
1982 			if (phydev->asym_pause)
1983 				rmt_adv |= LPA_PAUSE_ASYM;
1984 		}
1985 
1986 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1987 	} else
1988 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1989 
1990 	if (mac_mode != tp->mac_mode) {
1991 		tp->mac_mode = mac_mode;
1992 		tw32_f(MAC_MODE, tp->mac_mode);
1993 		udelay(40);
1994 	}
1995 
1996 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
1997 		if (phydev->speed == SPEED_10)
1998 			tw32(MAC_MI_STAT,
1999 			     MAC_MI_STAT_10MBPS_MODE |
2000 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2001 		else
2002 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2003 	}
2004 
2005 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2006 		tw32(MAC_TX_LENGTHS,
2007 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2008 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2009 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2010 	else
2011 		tw32(MAC_TX_LENGTHS,
2012 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2013 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2014 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2015 
2016 	if (phydev->link != tp->old_link ||
2017 	    phydev->speed != tp->link_config.active_speed ||
2018 	    phydev->duplex != tp->link_config.active_duplex ||
2019 	    oldflowctrl != tp->link_config.active_flowctrl)
2020 		linkmesg = 1;
2021 
2022 	tp->old_link = phydev->link;
2023 	tp->link_config.active_speed = phydev->speed;
2024 	tp->link_config.active_duplex = phydev->duplex;
2025 
2026 	spin_unlock_bh(&tp->lock);
2027 
2028 	if (linkmesg)
2029 		tg3_link_report(tp);
2030 }
2031 
2032 static int tg3_phy_init(struct tg3 *tp)
2033 {
2034 	struct phy_device *phydev;
2035 
2036 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2037 		return 0;
2038 
2039 	/* Bring the PHY back to a known state. */
2040 	tg3_bmcr_reset(tp);
2041 
2042 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2043 
2044 	/* Attach the MAC to the PHY. */
2045 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2046 			     tg3_adjust_link, phydev->interface);
2047 	if (IS_ERR(phydev)) {
2048 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2049 		return PTR_ERR(phydev);
2050 	}
2051 
2052 	/* Mask with MAC supported features. */
2053 	switch (phydev->interface) {
2054 	case PHY_INTERFACE_MODE_GMII:
2055 	case PHY_INTERFACE_MODE_RGMII:
2056 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2057 			phydev->supported &= (PHY_GBIT_FEATURES |
2058 					      SUPPORTED_Pause |
2059 					      SUPPORTED_Asym_Pause);
2060 			break;
2061 		}
2062 		/* fallthru */
2063 	case PHY_INTERFACE_MODE_MII:
2064 		phydev->supported &= (PHY_BASIC_FEATURES |
2065 				      SUPPORTED_Pause |
2066 				      SUPPORTED_Asym_Pause);
2067 		break;
2068 	default:
2069 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2070 		return -EINVAL;
2071 	}
2072 
2073 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2074 
2075 	phydev->advertising = phydev->supported;
2076 
2077 	return 0;
2078 }
2079 
2080 static void tg3_phy_start(struct tg3 *tp)
2081 {
2082 	struct phy_device *phydev;
2083 
2084 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2085 		return;
2086 
2087 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2088 
2089 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2090 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2091 		phydev->speed = tp->link_config.speed;
2092 		phydev->duplex = tp->link_config.duplex;
2093 		phydev->autoneg = tp->link_config.autoneg;
2094 		phydev->advertising = tp->link_config.advertising;
2095 	}
2096 
2097 	phy_start(phydev);
2098 
2099 	phy_start_aneg(phydev);
2100 }
2101 
2102 static void tg3_phy_stop(struct tg3 *tp)
2103 {
2104 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2105 		return;
2106 
2107 	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2108 }
2109 
2110 static void tg3_phy_fini(struct tg3 *tp)
2111 {
2112 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2113 		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2114 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2115 	}
2116 }
2117 
2118 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2119 {
2120 	int err;
2121 	u32 val;
2122 
2123 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2124 		return 0;
2125 
2126 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2127 		/* Cannot do read-modify-write on 5401 */
2128 		err = tg3_phy_auxctl_write(tp,
2129 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2130 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2131 					   0x4c20);
2132 		goto done;
2133 	}
2134 
2135 	err = tg3_phy_auxctl_read(tp,
2136 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2137 	if (err)
2138 		return err;
2139 
2140 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2141 	err = tg3_phy_auxctl_write(tp,
2142 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2143 
2144 done:
2145 	return err;
2146 }
2147 
2148 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2149 {
2150 	u32 phytest;
2151 
2152 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2153 		u32 phy;
2154 
2155 		tg3_writephy(tp, MII_TG3_FET_TEST,
2156 			     phytest | MII_TG3_FET_SHADOW_EN);
2157 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2158 			if (enable)
2159 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2160 			else
2161 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2162 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2163 		}
2164 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2165 	}
2166 }
2167 
2168 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2169 {
2170 	u32 reg;
2171 
2172 	if (!tg3_flag(tp, 5705_PLUS) ||
2173 	    (tg3_flag(tp, 5717_PLUS) &&
2174 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2175 		return;
2176 
2177 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2178 		tg3_phy_fet_toggle_apd(tp, enable);
2179 		return;
2180 	}
2181 
2182 	reg = MII_TG3_MISC_SHDW_WREN |
2183 	      MII_TG3_MISC_SHDW_SCR5_SEL |
2184 	      MII_TG3_MISC_SHDW_SCR5_LPED |
2185 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2186 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2187 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2188 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2189 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2190 
2191 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2192 
2194 	reg = MII_TG3_MISC_SHDW_WREN |
2195 	      MII_TG3_MISC_SHDW_APD_SEL |
2196 	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2197 	if (enable)
2198 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2199 
2200 	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2201 }
2202 
2203 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2204 {
2205 	u32 phy;
2206 
2207 	if (!tg3_flag(tp, 5705_PLUS) ||
2208 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2209 		return;
2210 
2211 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2212 		u32 ephy;
2213 
2214 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2215 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2216 
2217 			tg3_writephy(tp, MII_TG3_FET_TEST,
2218 				     ephy | MII_TG3_FET_SHADOW_EN);
2219 			if (!tg3_readphy(tp, reg, &phy)) {
2220 				if (enable)
2221 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2222 				else
2223 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2224 				tg3_writephy(tp, reg, phy);
2225 			}
2226 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2227 		}
2228 	} else {
2229 		int ret;
2230 
2231 		ret = tg3_phy_auxctl_read(tp,
2232 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2233 		if (!ret) {
2234 			if (enable)
2235 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2236 			else
2237 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2238 			tg3_phy_auxctl_write(tp,
2239 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2240 		}
2241 	}
2242 }
2243 
2244 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2245 {
2246 	int ret;
2247 	u32 val;
2248 
2249 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2250 		return;
2251 
2252 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2253 	if (!ret)
2254 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2255 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2256 }
2257 
2258 static void tg3_phy_apply_otp(struct tg3 *tp)
2259 {
2260 	u32 otp, phy;
2261 
2262 	if (!tp->phy_otp)
2263 		return;
2264 
2265 	otp = tp->phy_otp;
2266 
2267 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2268 		return;
2269 
2270 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2271 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2272 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2273 
2274 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2275 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2276 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2277 
2278 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2279 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2280 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2281 
2282 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2283 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2284 
2285 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2286 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2287 
2288 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2289 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2290 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2291 
2292 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2293 }
2294 
2295 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2296 {
2297 	u32 val;
2298 
2299 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2300 		return;
2301 
2302 	tp->setlpicnt = 0;
2303 
2304 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2305 	    current_link_up == 1 &&
2306 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2307 	    (tp->link_config.active_speed == SPEED_100 ||
2308 	     tp->link_config.active_speed == SPEED_1000)) {
2309 		u32 eeectl;
2310 
2311 		if (tp->link_config.active_speed == SPEED_1000)
2312 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2313 		else
2314 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2315 
2316 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2317 
2318 		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2319 				  TG3_CL45_D7_EEERES_STAT, &val);
2320 
2321 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2322 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2323 			tp->setlpicnt = 2;
2324 	}
2325 
2326 	if (!tp->setlpicnt) {
2327 		if (current_link_up == 1 &&
2328 		    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2329 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2330 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2331 		}
2332 
2333 		val = tr32(TG3_CPMU_EEE_MODE);
2334 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2335 	}
2336 }
2337 
2338 static void tg3_phy_eee_enable(struct tg3 *tp)
2339 {
2340 	u32 val;
2341 
2342 	if (tp->link_config.active_speed == SPEED_1000 &&
2343 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2344 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2345 	     tg3_flag(tp, 57765_CLASS)) &&
2346 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2347 		val = MII_TG3_DSP_TAP26_ALNOKO |
2348 		      MII_TG3_DSP_TAP26_RMRXSTO;
2349 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2350 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2351 	}
2352 
2353 	val = tr32(TG3_CPMU_EEE_MODE);
2354 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2355 }
2356 
2357 static int tg3_wait_macro_done(struct tg3 *tp)
2358 {
2359 	int limit = 100;
2360 
2361 	while (limit--) {
2362 		u32 tmp32;
2363 
2364 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2365 			if ((tmp32 & 0x1000) == 0)
2366 				break;
2367 		}
2368 	}
2369 	if (limit < 0)
2370 		return -EBUSY;
2371 
2372 	return 0;
2373 }
2374 
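/* Write a six-word test pattern into each of the four DSP channels and
 * read it back, comparing the low 15 bits of even words and the low 4
 * bits of odd words.  A macro timeout sets *resetp so the caller resets
 * the PHY before retrying; a plain data mismatch only returns -EBUSY.
 */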
2375 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2376 {
2377 	static const u32 test_pat[4][6] = {
2378 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2379 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2380 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2381 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2382 	};
2383 	int chan;
2384 
2385 	for (chan = 0; chan < 4; chan++) {
2386 		int i;
2387 
2388 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2389 			     (chan * 0x2000) | 0x0200);
2390 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2391 
2392 		for (i = 0; i < 6; i++)
2393 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2394 				     test_pat[chan][i]);
2395 
2396 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2397 		if (tg3_wait_macro_done(tp)) {
2398 			*resetp = 1;
2399 			return -EBUSY;
2400 		}
2401 
2402 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2403 			     (chan * 0x2000) | 0x0200);
2404 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2405 		if (tg3_wait_macro_done(tp)) {
2406 			*resetp = 1;
2407 			return -EBUSY;
2408 		}
2409 
2410 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2411 		if (tg3_wait_macro_done(tp)) {
2412 			*resetp = 1;
2413 			return -EBUSY;
2414 		}
2415 
2416 		for (i = 0; i < 6; i += 2) {
2417 			u32 low, high;
2418 
2419 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2420 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2421 			    tg3_wait_macro_done(tp)) {
2422 				*resetp = 1;
2423 				return -EBUSY;
2424 			}
2425 			low &= 0x7fff;
2426 			high &= 0x000f;
2427 			if (low != test_pat[chan][i] ||
2428 			    high != test_pat[chan][i+1]) {
2429 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2430 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2431 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2432 
2433 				return -EBUSY;
2434 			}
2435 		}
2436 	}
2437 
2438 	return 0;
2439 }
2440 
2441 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2442 {
2443 	int chan;
2444 
2445 	for (chan = 0; chan < 4; chan++) {
2446 		int i;
2447 
2448 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2449 			     (chan * 0x2000) | 0x0200);
2450 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2451 		for (i = 0; i < 6; i++)
2452 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2453 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2454 		if (tg3_wait_macro_done(tp))
2455 			return -EBUSY;
2456 	}
2457 
2458 	return 0;
2459 }
2460 
2461 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2462 {
2463 	u32 reg32, phy9_orig;
2464 	int retries, do_phy_reset, err;
2465 
2466 	retries = 10;
2467 	do_phy_reset = 1;
2468 	do {
2469 		if (do_phy_reset) {
2470 			err = tg3_bmcr_reset(tp);
2471 			if (err)
2472 				return err;
2473 			do_phy_reset = 0;
2474 		}
2475 
2476 		/* Disable transmitter and interrupt.  */
2477 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2478 			continue;
2479 
2480 		reg32 |= 0x3000;
2481 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2482 
2483 		/* Set full-duplex, 1000 mbps.  */
2484 		tg3_writephy(tp, MII_BMCR,
2485 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2486 
2487 		/* Set to master mode.  */
2488 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2489 			continue;
2490 
2491 		tg3_writephy(tp, MII_CTRL1000,
2492 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2493 
2494 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2495 		if (err)
2496 			return err;
2497 
2498 		/* Block the PHY control access.  */
2499 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2500 
2501 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2502 		if (!err)
2503 			break;
2504 	} while (--retries);
2505 
2506 	err = tg3_phy_reset_chanpat(tp);
2507 	if (err)
2508 		return err;
2509 
2510 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2511 
2512 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2513 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2514 
2515 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2516 
2517 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2518 
2519 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2520 		reg32 &= ~0x3000;
2521 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2522 	} else if (!err)
2523 		err = -EBUSY;
2524 
2525 	return err;
2526 }
2527 
2528 static void tg3_carrier_off(struct tg3 *tp)
2529 {
2530 	netif_carrier_off(tp->dev);
2531 	tp->link_up = false;
2532 }
2533 
2534 /* Reset the tigon3 PHY, reapplying the chip-specific workarounds.
2535  * Reports loss of link via tg3_link_report() if the device was up.
2536  */
2537 static int tg3_phy_reset(struct tg3 *tp)
2538 {
2539 	u32 val, cpmuctrl;
2540 	int err;
2541 
2542 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2543 		val = tr32(GRC_MISC_CFG);
2544 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2545 		udelay(40);
2546 	}
2547 	err  = tg3_readphy(tp, MII_BMSR, &val);
2548 	err |= tg3_readphy(tp, MII_BMSR, &val);
2549 	if (err != 0)
2550 		return -EBUSY;
2551 
2552 	if (netif_running(tp->dev) && tp->link_up) {
2553 		netif_carrier_off(tp->dev);
2554 		tg3_link_report(tp);
2555 	}
2556 
2557 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2558 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2559 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2560 		err = tg3_phy_reset_5703_4_5(tp);
2561 		if (err)
2562 			return err;
2563 		goto out;
2564 	}
2565 
2566 	cpmuctrl = 0;
2567 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2568 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2569 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2570 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2571 			tw32(TG3_CPMU_CTRL,
2572 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2573 	}
2574 
2575 	err = tg3_bmcr_reset(tp);
2576 	if (err)
2577 		return err;
2578 
2579 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2580 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2581 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2582 
2583 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2584 	}
2585 
2586 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2587 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2588 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2589 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2590 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2591 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2592 			udelay(40);
2593 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2594 		}
2595 	}
2596 
2597 	if (tg3_flag(tp, 5717_PLUS) &&
2598 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2599 		return 0;
2600 
2601 	tg3_phy_apply_otp(tp);
2602 
2603 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2604 		tg3_phy_toggle_apd(tp, true);
2605 	else
2606 		tg3_phy_toggle_apd(tp, false);
2607 
2608 out:
2609 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2610 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2611 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2612 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2613 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2614 	}
2615 
2616 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2617 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2618 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2619 	}
2620 
2621 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2622 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2623 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2624 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2625 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2626 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2627 		}
2628 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2629 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2630 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2631 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2632 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2633 				tg3_writephy(tp, MII_TG3_TEST1,
2634 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2635 			} else
2636 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2637 
2638 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2639 		}
2640 	}
2641 
2642 	/* Set the Extended packet length bit (bit 14) on all chips
2643 	 * that support jumbo frames.  */
2644 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2645 		/* Cannot do read-modify-write on 5401 */
2646 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2647 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2648 		/* Set bit 14 with read-modify-write to preserve other bits */
2649 		err = tg3_phy_auxctl_read(tp,
2650 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2651 		if (!err)
2652 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2653 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2654 	}
2655 
2656 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2657 	 * jumbo frames transmission.
2658 	 */
2659 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2660 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2661 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2662 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2663 	}
2664 
2665 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2666 		/* adjust output voltage */
2667 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2668 	}
2669 
2670 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2671 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2672 
2673 	tg3_phy_toggle_automdix(tp, 1);
2674 	tg3_phy_set_wirespeed(tp);
2675 	return 0;
2676 }
2677 
2678 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2679 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2680 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2681 					  TG3_GPIO_MSG_NEED_VAUX)
2682 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2683 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2684 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2685 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2686 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2687 
2688 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2689 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2690 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2691 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2692 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2693 
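/* Each PCI function owns one 4-bit nibble of the GPIO message word,
 * shifted left by 4 * function number; e.g. function 2's DRVR_PRES bit
 * sits at TG3_GPIO_MSG_DRVR_PRES << 8.  tg3_set_function_status()
 * updates only the calling function's nibble.
 */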
2694 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2695 {
2696 	u32 status, shift;
2697 
2698 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2699 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2700 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2701 	else
2702 		status = tr32(TG3_CPMU_DRV_STATUS);
2703 
2704 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2705 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2706 	status |= (newstat << shift);
2707 
2708 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2709 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2710 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2711 	else
2712 		tw32(TG3_CPMU_DRV_STATUS, status);
2713 
2714 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2715 }
2716 
2717 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2718 {
2719 	if (!tg3_flag(tp, IS_NIC))
2720 		return 0;
2721 
2722 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2723 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2724 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2725 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2726 			return -EIO;
2727 
2728 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2729 
2730 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2731 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2732 
2733 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2734 	} else {
2735 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2736 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2737 	}
2738 
2739 	return 0;
2740 }
2741 
2742 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2743 {
2744 	u32 grc_local_ctrl;
2745 
2746 	if (!tg3_flag(tp, IS_NIC) ||
2747 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2748 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2749 		return;
2750 
2751 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2752 
2753 	tw32_wait_f(GRC_LOCAL_CTRL,
2754 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2755 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2756 
2757 	tw32_wait_f(GRC_LOCAL_CTRL,
2758 		    grc_local_ctrl,
2759 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2760 
2761 	tw32_wait_f(GRC_LOCAL_CTRL,
2762 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2763 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2764 }
2765 
2766 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2767 {
2768 	if (!tg3_flag(tp, IS_NIC))
2769 		return;
2770 
2771 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2772 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2773 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2774 			    (GRC_LCLCTRL_GPIO_OE0 |
2775 			     GRC_LCLCTRL_GPIO_OE1 |
2776 			     GRC_LCLCTRL_GPIO_OE2 |
2777 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2778 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2779 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2780 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2781 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2782 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2783 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2784 				     GRC_LCLCTRL_GPIO_OE1 |
2785 				     GRC_LCLCTRL_GPIO_OE2 |
2786 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2787 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2788 				     tp->grc_local_ctrl;
2789 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2790 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2791 
2792 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2793 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2794 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2795 
2796 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2797 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2798 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2799 	} else {
2800 		u32 no_gpio2;
2801 		u32 grc_local_ctrl = 0;
2802 
2803 		/* Workaround to prevent overdrawing Amps. */
2804 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2805 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2806 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2807 				    grc_local_ctrl,
2808 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2809 		}
2810 
2811 		/* On 5753 and variants, GPIO2 cannot be used. */
2812 		no_gpio2 = tp->nic_sram_data_cfg &
2813 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2814 
2815 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2816 				  GRC_LCLCTRL_GPIO_OE1 |
2817 				  GRC_LCLCTRL_GPIO_OE2 |
2818 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2819 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2820 		if (no_gpio2) {
2821 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2822 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2823 		}
2824 		tw32_wait_f(GRC_LOCAL_CTRL,
2825 			    tp->grc_local_ctrl | grc_local_ctrl,
2826 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2827 
2828 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2829 
2830 		tw32_wait_f(GRC_LOCAL_CTRL,
2831 			    tp->grc_local_ctrl | grc_local_ctrl,
2832 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2833 
2834 		if (!no_gpio2) {
2835 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2836 			tw32_wait_f(GRC_LOCAL_CTRL,
2837 				    tp->grc_local_ctrl | grc_local_ctrl,
2838 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 		}
2840 	}
2841 }
2842 
2843 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2844 {
2845 	u32 msg = 0;
2846 
2847 	/* Serialize power state transitions */
2848 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2849 		return;
2850 
2851 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2852 		msg = TG3_GPIO_MSG_NEED_VAUX;
2853 
2854 	msg = tg3_set_function_status(tp, msg);
2855 
2856 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2857 		goto done;
2858 
2859 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2860 		tg3_pwrsrc_switch_to_vaux(tp);
2861 	else
2862 		tg3_pwrsrc_die_with_vmain(tp);
2863 
2864 done:
2865 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2866 }
2867 
2868 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2869 {
2870 	bool need_vaux = false;
2871 
2872 	/* The GPIOs do something completely different on 57765. */
2873 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2874 		return;
2875 
2876 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2877 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2878 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2879 		tg3_frob_aux_power_5717(tp, include_wol ?
2880 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2881 		return;
2882 	}
2883 
2884 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2885 		struct net_device *dev_peer;
2886 
2887 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2888 
2889 		/* remove_one() may have been run on the peer. */
2890 		if (dev_peer) {
2891 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2892 
2893 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2894 				return;
2895 
2896 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2897 			    tg3_flag(tp_peer, ENABLE_ASF))
2898 				need_vaux = true;
2899 		}
2900 	}
2901 
2902 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2903 	    tg3_flag(tp, ENABLE_ASF))
2904 		need_vaux = true;
2905 
2906 	if (need_vaux)
2907 		tg3_pwrsrc_switch_to_vaux(tp);
2908 	else
2909 		tg3_pwrsrc_die_with_vmain(tp);
2910 }
2911 
2912 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2913 {
2914 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2915 		return 1;
2916 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2917 		if (speed != SPEED_10)
2918 			return 1;
2919 	} else if (speed == SPEED_10)
2920 		return 1;
2921 
2922 	return 0;
2923 }
2924 
2925 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2926 {
2927 	u32 val;
2928 
2929 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2930 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
2931 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2932 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2933 
2934 			sg_dig_ctrl |=
2935 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2936 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2937 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2938 		}
2939 		return;
2940 	}
2941 
2942 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2943 		tg3_bmcr_reset(tp);
2944 		val = tr32(GRC_MISC_CFG);
2945 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2946 		udelay(40);
2947 		return;
2948 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2949 		u32 phytest;
2950 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2951 			u32 phy;
2952 
2953 			tg3_writephy(tp, MII_ADVERTISE, 0);
2954 			tg3_writephy(tp, MII_BMCR,
2955 				     BMCR_ANENABLE | BMCR_ANRESTART);
2956 
2957 			tg3_writephy(tp, MII_TG3_FET_TEST,
2958 				     phytest | MII_TG3_FET_SHADOW_EN);
2959 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2960 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2961 				tg3_writephy(tp,
2962 					     MII_TG3_FET_SHDW_AUXMODE4,
2963 					     phy);
2964 			}
2965 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2966 		}
2967 		return;
2968 	} else if (do_low_power) {
2969 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2970 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2971 
2972 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2973 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2974 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2975 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2976 	}
2977 
2978 	/* The PHY should not be powered down on some chips because
2979 	 * of bugs.
2980 	 */
2981 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2982 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2983 	    (tg3_asic_rev(tp) == ASIC_REV_5780 &&
2984 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2985 	    (tg3_asic_rev(tp) == ASIC_REV_5717 &&
2986 	     !tp->pci_fn))
2987 		return;
2988 
2989 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2990 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2991 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2992 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2993 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2994 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2995 	}
2996 
2997 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2998 }
2999 
3000 /* tp->lock is held. */
3001 static int tg3_nvram_lock(struct tg3 *tp)
3002 {
3003 	if (tg3_flag(tp, NVRAM)) {
3004 		int i;
3005 
3006 		if (tp->nvram_lock_cnt == 0) {
3007 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3008 			for (i = 0; i < 8000; i++) {
3009 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3010 					break;
3011 				udelay(20);
3012 			}
3013 			if (i == 8000) {
3014 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3015 				return -ENODEV;
3016 			}
3017 		}
3018 		tp->nvram_lock_cnt++;
3019 	}
3020 	return 0;
3021 }
3022 
3023 /* tp->lock is held. */
3024 static void tg3_nvram_unlock(struct tg3 *tp)
3025 {
3026 	if (tg3_flag(tp, NVRAM)) {
3027 		if (tp->nvram_lock_cnt > 0)
3028 			tp->nvram_lock_cnt--;
3029 		if (tp->nvram_lock_cnt == 0)
3030 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3031 	}
3032 }
3033 
3034 /* tp->lock is held. */
3035 static void tg3_enable_nvram_access(struct tg3 *tp)
3036 {
3037 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3038 		u32 nvaccess = tr32(NVRAM_ACCESS);
3039 
3040 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3041 	}
3042 }
3043 
3044 /* tp->lock is held. */
3045 static void tg3_disable_nvram_access(struct tg3 *tp)
3046 {
3047 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3048 		u32 nvaccess = tr32(NVRAM_ACCESS);
3049 
3050 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3051 	}
3052 }
3053 
3054 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3055 					u32 offset, u32 *val)
3056 {
3057 	u32 tmp;
3058 	int i;
3059 
3060 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3061 		return -EINVAL;
3062 
3063 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3064 					EEPROM_ADDR_DEVID_MASK |
3065 					EEPROM_ADDR_READ);
3066 	tw32(GRC_EEPROM_ADDR,
3067 	     tmp |
3068 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3069 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3070 	      EEPROM_ADDR_ADDR_MASK) |
3071 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3072 
3073 	for (i = 0; i < 1000; i++) {
3074 		tmp = tr32(GRC_EEPROM_ADDR);
3075 
3076 		if (tmp & EEPROM_ADDR_COMPLETE)
3077 			break;
3078 		msleep(1);
3079 	}
3080 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3081 		return -EBUSY;
3082 
3083 	tmp = tr32(GRC_EEPROM_DATA);
3084 
3085 	/*
3086 	 * The data will always be opposite the native endian
3087 	 * format.  Perform a blind byteswap to compensate.
3088 	 */
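	/* e.g. swab32(0x11223344) == 0x44332211 on any host. */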
3089 	*val = swab32(tmp);
3090 
3091 	return 0;
3092 }
3093 
3094 #define NVRAM_CMD_TIMEOUT 10000
3095 
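/* Worst case, tg3_nvram_exec_cmd() polls NVRAM_CMD_TIMEOUT times at
 * 10 us per iteration, i.e. roughly 100 ms per command.
 */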
3096 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3097 {
3098 	int i;
3099 
3100 	tw32(NVRAM_CMD, nvram_cmd);
3101 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3102 		udelay(10);
3103 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3104 			udelay(10);
3105 			break;
3106 		}
3107 	}
3108 
3109 	if (i == NVRAM_CMD_TIMEOUT)
3110 		return -EBUSY;
3111 
3112 	return 0;
3113 }
3114 
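/* Translate a linear NVRAM offset into the paged address the Atmel
 * AT45DB0X1B parts expect: the page index lands at
 * ATMEL_AT45DB0X1B_PAGE_POS and the byte offset within the page fills
 * the low bits.  Hypothetical example, assuming a 264-byte page and a
 * page position of 9: offset 600 -> page 2, byte 72, i.e.
 * (2 << 9) + 72 = 1096.  tg3_nvram_logical_addr() below is the
 * inverse mapping.
 */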
3115 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3116 {
3117 	if (tg3_flag(tp, NVRAM) &&
3118 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3119 	    tg3_flag(tp, FLASH) &&
3120 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3121 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3122 
3123 		addr = ((addr / tp->nvram_pagesize) <<
3124 			ATMEL_AT45DB0X1B_PAGE_POS) +
3125 		       (addr % tp->nvram_pagesize);
3126 
3127 	return addr;
3128 }
3129 
3130 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3131 {
3132 	if (tg3_flag(tp, NVRAM) &&
3133 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3134 	    tg3_flag(tp, FLASH) &&
3135 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3136 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3137 
3138 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3139 			tp->nvram_pagesize) +
3140 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3141 
3142 	return addr;
3143 }
3144 
3145 /* NOTE: Data read in from NVRAM is byteswapped according to
3146  * the byteswapping settings for all other register accesses.
3147  * tg3 devices are BE devices, so on a BE machine, the data
3148  * returned will be exactly as it is seen in NVRAM.  On a LE
3149  * machine, the 32-bit value will be byteswapped.
3150  */
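/* For example, if NVRAM holds the byte stream aa bb cc dd, the u32
 * read back is 0xaabbccdd; a BE host's in-memory bytes then already
 * match NVRAM order, while a LE host's are swapped.
 * tg3_nvram_read_be32() restores byte-stream order on either host via
 * cpu_to_be32().
 */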
3151 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3152 {
3153 	int ret;
3154 
3155 	if (!tg3_flag(tp, NVRAM))
3156 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3157 
3158 	offset = tg3_nvram_phys_addr(tp, offset);
3159 
3160 	if (offset > NVRAM_ADDR_MSK)
3161 		return -EINVAL;
3162 
3163 	ret = tg3_nvram_lock(tp);
3164 	if (ret)
3165 		return ret;
3166 
3167 	tg3_enable_nvram_access(tp);
3168 
3169 	tw32(NVRAM_ADDR, offset);
3170 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3171 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3172 
3173 	if (ret == 0)
3174 		*val = tr32(NVRAM_RDDATA);
3175 
3176 	tg3_disable_nvram_access(tp);
3177 
3178 	tg3_nvram_unlock(tp);
3179 
3180 	return ret;
3181 }
3182 
3183 /* Ensures NVRAM data is in bytestream format. */
3184 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3185 {
3186 	u32 v;
3187 	int res = tg3_nvram_read(tp, offset, &v);
3188 	if (!res)
3189 		*val = cpu_to_be32(v);
3190 	return res;
3191 }
3192 
3193 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3194 				    u32 offset, u32 len, u8 *buf)
3195 {
3196 	int i, j, rc = 0;
3197 	u32 val;
3198 
3199 	for (i = 0; i < len; i += 4) {
3200 		u32 addr;
3201 		__be32 data;
3202 
3203 		addr = offset + i;
3204 
3205 		memcpy(&data, buf + i, 4);
3206 
3207 		/*
3208 		 * The SEEPROM interface expects the data to always be opposite
3209 		 * the native endian format.  We accomplish this by reversing
3210 		 * all the operations that would have been performed on the
3211 		 * data from a call to tg3_nvram_read_be32().
3212 		 */
3213 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3214 
3215 		val = tr32(GRC_EEPROM_ADDR);
3216 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3217 
3218 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3219 			EEPROM_ADDR_READ);
3220 		tw32(GRC_EEPROM_ADDR, val |
3221 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3222 			(addr & EEPROM_ADDR_ADDR_MASK) |
3223 			EEPROM_ADDR_START |
3224 			EEPROM_ADDR_WRITE);
3225 
3226 		for (j = 0; j < 1000; j++) {
3227 			val = tr32(GRC_EEPROM_ADDR);
3228 
3229 			if (val & EEPROM_ADDR_COMPLETE)
3230 				break;
3231 			msleep(1);
3232 		}
3233 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3234 			rc = -EBUSY;
3235 			break;
3236 		}
3237 	}
3238 
3239 	return rc;
3240 }
3241 
3242 /* offset and length are dword aligned */
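/* Unbuffered flash parts are written a full page at a time: read back
 * the enclosing page, merge in the caller's data, erase the page, then
 * rewrite it word by word.
 */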
3243 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3244 		u8 *buf)
3245 {
3246 	int ret = 0;
3247 	u32 pagesize = tp->nvram_pagesize;
3248 	u32 pagemask = pagesize - 1;
3249 	u32 nvram_cmd;
3250 	u8 *tmp;
3251 
3252 	tmp = kmalloc(pagesize, GFP_KERNEL);
3253 	if (tmp == NULL)
3254 		return -ENOMEM;
3255 
3256 	while (len) {
3257 		int j;
3258 		u32 phy_addr, page_off, size;
3259 
3260 		phy_addr = offset & ~pagemask;
3261 
3262 		for (j = 0; j < pagesize; j += 4) {
3263 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3264 						  (__be32 *) (tmp + j));
3265 			if (ret)
3266 				break;
3267 		}
3268 		if (ret)
3269 			break;
3270 
3271 		page_off = offset & pagemask;
3272 		size = pagesize;
3273 		if (len < size)
3274 			size = len;
3275 
3276 		len -= size;
3277 
3278 		memcpy(tmp + page_off, buf, size);
3279 
3280 		offset = offset + (pagesize - page_off);
3281 
3282 		tg3_enable_nvram_access(tp);
3283 
3284 		/*
3285 		 * Before we can erase the flash page, we need
3286 		 * to issue a special "write enable" command.
3287 		 */
3288 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3289 
3290 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3291 			break;
3292 
3293 		/* Erase the target page */
3294 		tw32(NVRAM_ADDR, phy_addr);
3295 
3296 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3297 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3298 
3299 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3300 			break;
3301 
3302 		/* Issue another write enable to start the write. */
3303 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3304 
3305 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3306 			break;
3307 
3308 		for (j = 0; j < pagesize; j += 4) {
3309 			__be32 data;
3310 
3311 			data = *((__be32 *) (tmp + j));
3312 
3313 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3314 
3315 			tw32(NVRAM_ADDR, phy_addr + j);
3316 
3317 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3318 				NVRAM_CMD_WR;
3319 
3320 			if (j == 0)
3321 				nvram_cmd |= NVRAM_CMD_FIRST;
3322 			else if (j == (pagesize - 4))
3323 				nvram_cmd |= NVRAM_CMD_LAST;
3324 
3325 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3326 			if (ret)
3327 				break;
3328 		}
3329 		if (ret)
3330 			break;
3331 	}
3332 
3333 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3334 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3335 
3336 	kfree(tmp);
3337 
3338 	return ret;
3339 }
3340 
3341 /* offset and length are dword aligned */
3342 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3343 		u8 *buf)
3344 {
3345 	int i, ret = 0;
3346 
3347 	for (i = 0; i < len; i += 4, offset += 4) {
3348 		u32 page_off, phy_addr, nvram_cmd;
3349 		__be32 data;
3350 
3351 		memcpy(&data, buf + i, 4);
3352 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3353 
3354 		page_off = offset % tp->nvram_pagesize;
3355 
3356 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3357 
3358 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3359 
3360 		if (page_off == 0 || i == 0)
3361 			nvram_cmd |= NVRAM_CMD_FIRST;
3362 		if (page_off == (tp->nvram_pagesize - 4))
3363 			nvram_cmd |= NVRAM_CMD_LAST;
3364 
3365 		if (i == (len - 4))
3366 			nvram_cmd |= NVRAM_CMD_LAST;
3367 
3368 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3369 		    !tg3_flag(tp, FLASH) ||
3370 		    !tg3_flag(tp, 57765_PLUS))
3371 			tw32(NVRAM_ADDR, phy_addr);
3372 
3373 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3374 		    !tg3_flag(tp, 5755_PLUS) &&
3375 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3376 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3377 			u32 cmd;
3378 
3379 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3380 			ret = tg3_nvram_exec_cmd(tp, cmd);
3381 			if (ret)
3382 				break;
3383 		}
3384 		if (!tg3_flag(tp, FLASH)) {
3385 			/* We always do complete word writes to eeprom. */
3386 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3387 		}
3388 
3389 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3390 		if (ret)
3391 			break;
3392 	}
3393 	return ret;
3394 }
3395 
3396 /* offset and length are dword aligned */
3397 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3398 {
3399 	int ret;
3400 
3401 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3402 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3403 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3404 		udelay(40);
3405 	}
3406 
3407 	if (!tg3_flag(tp, NVRAM)) {
3408 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3409 	} else {
3410 		u32 grc_mode;
3411 
3412 		ret = tg3_nvram_lock(tp);
3413 		if (ret)
3414 			return ret;
3415 
3416 		tg3_enable_nvram_access(tp);
3417 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3418 			tw32(NVRAM_WRITE1, 0x406);
3419 
3420 		grc_mode = tr32(GRC_MODE);
3421 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3422 
3423 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3424 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3425 				buf);
3426 		} else {
3427 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3428 				buf);
3429 		}
3430 
3431 		grc_mode = tr32(GRC_MODE);
3432 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3433 
3434 		tg3_disable_nvram_access(tp);
3435 		tg3_nvram_unlock(tp);
3436 	}
3437 
3438 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3439 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3440 		udelay(40);
3441 	}
3442 
3443 	return ret;
3444 }
3445 
3446 #define RX_CPU_SCRATCH_BASE	0x30000
3447 #define RX_CPU_SCRATCH_SIZE	0x04000
3448 #define TX_CPU_SCRATCH_BASE	0x34000
3449 #define TX_CPU_SCRATCH_SIZE	0x04000
3450 
3451 /* tp->lock is held. */
3452 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3453 {
3454 	int i;
3455 	const int iters = 10000;
3456 
3457 	for (i = 0; i < iters; i++) {
3458 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3459 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3460 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3461 			break;
3462 	}
3463 
3464 	return (i == iters) ? -EBUSY : 0;
3465 }
3466 
3467 /* tp->lock is held. */
3468 static int tg3_rxcpu_pause(struct tg3 *tp)
3469 {
3470 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3471 
3472 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3473 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3474 	udelay(10);
3475 
3476 	return rc;
3477 }
3478 
3479 /* tp->lock is held. */
3480 static int tg3_txcpu_pause(struct tg3 *tp)
3481 {
3482 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3483 }
3484 
3485 /* tp->lock is held. */
3486 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3487 {
3488 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3489 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3490 }
3491 
3492 /* tp->lock is held. */
3493 static void tg3_rxcpu_resume(struct tg3 *tp)
3494 {
3495 	tg3_resume_cpu(tp, RX_CPU_BASE);
3496 }
3497 
3498 /* tp->lock is held. */
3499 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3500 {
3501 	int rc;
3502 
3503 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3504 
3505 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3506 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3507 
3508 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3509 		return 0;
3510 	}
3511 	if (cpu_base == RX_CPU_BASE) {
3512 		rc = tg3_rxcpu_pause(tp);
3513 	} else {
3514 		/*
3515 		 * There is only an Rx CPU for the 5750 derivative in the
3516 		 * BCM4785.
3517 		 */
3518 		if (tg3_flag(tp, IS_SSB_CORE))
3519 			return 0;
3520 
3521 		rc = tg3_txcpu_pause(tp);
3522 	}
3523 
3524 	if (rc) {
3525 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3526 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3527 		return -ENODEV;
3528 	}
3529 
3530 	/* Clear firmware's nvram arbitration. */
3531 	if (tg3_flag(tp, NVRAM))
3532 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3533 	return 0;
3534 }
3535 
3536 static int tg3_fw_data_len(struct tg3 *tp,
3537 			   const struct tg3_firmware_hdr *fw_hdr)
3538 {
3539 	int fw_len;
3540 
3541 	/* Non-fragmented firmware has one firmware header followed by a
3542 	 * contiguous chunk of data to be written.  The length field in that
3543 	 * header is not the length of the data to be written but the complete
3544 	 * length of the bss.  The data length is therefore determined from
3545 	 * tp->fw->size minus the header size.
3546 	 *
3547 	 * Fragmented firmware has a main header followed by multiple
3548 	 * fragments.  Each fragment is identical to non-fragmented firmware,
3549 	 * with a firmware header followed by a contiguous chunk of data.  In
3550 	 * the main header, the length field is unused and set to 0xffffffff.
3551 	 * In each fragment header, the length is the entire size of that
3552 	 * fragment, i.e. fragment data plus header length.  The data length
3553 	 * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3554 	 */
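	/* Hypothetical example, assuming TG3_FW_HDR_LEN covers the
	 * three-word version/base_addr/len header: a fragment header
	 * with len == 0x38 bytes carries (0x38 - TG3_FW_HDR_LEN) / 4 =
	 * 11 data words.
	 */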
3555 	if (tp->fw_len == 0xffffffff)
3556 		fw_len = be32_to_cpu(fw_hdr->len);
3557 	else
3558 		fw_len = tp->fw->size;
3559 
3560 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3561 }
3562 
3563 /* tp->lock is held. */
3564 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3565 				 u32 cpu_scratch_base, int cpu_scratch_size,
3566 				 const struct tg3_firmware_hdr *fw_hdr)
3567 {
3568 	int err, i;
3569 	void (*write_op)(struct tg3 *, u32, u32);
3570 	int total_len = tp->fw->size;
3571 
3572 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3573 		netdev_err(tp->dev,
3574 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3575 			   __func__);
3576 		return -EINVAL;
3577 	}
3578 
3579 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3580 		write_op = tg3_write_mem;
3581 	else
3582 		write_op = tg3_write_indirect_reg32;
3583 
3584 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3585 		/* It is possible that bootcode is still loading at this point.
3586 		 * Get the nvram lock first before halting the cpu.
3587 		 */
3588 		int lock_err = tg3_nvram_lock(tp);
3589 		err = tg3_halt_cpu(tp, cpu_base);
3590 		if (!lock_err)
3591 			tg3_nvram_unlock(tp);
3592 		if (err)
3593 			goto out;
3594 
3595 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3596 			write_op(tp, cpu_scratch_base + i, 0);
3597 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3598 		tw32(cpu_base + CPU_MODE,
3599 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3600 	} else {
3601 		/* Subtract additional main header for fragmented firmware and
3602 		 * advance to the first fragment
3603 		 */
3604 		total_len -= TG3_FW_HDR_LEN;
3605 		fw_hdr++;
3606 	}
3607 
3608 	do {
3609 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3610 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3611 			write_op(tp, cpu_scratch_base +
3612 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3613 				     (i * sizeof(u32)),
3614 				 be32_to_cpu(fw_data[i]));
3615 
3616 		total_len -= be32_to_cpu(fw_hdr->len);
3617 
3618 		/* Advance to next fragment */
3619 		fw_hdr = (struct tg3_firmware_hdr *)
3620 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3621 	} while (total_len > 0);
3622 
3623 	err = 0;
3624 
3625 out:
3626 	return err;
3627 }
3628 
3629 /* tp->lock is held. */
3630 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3631 {
3632 	int i;
3633 	const int iters = 5;
3634 
3635 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 	tw32_f(cpu_base + CPU_PC, pc);
3637 
3638 	for (i = 0; i < iters; i++) {
3639 		if (tr32(cpu_base + CPU_PC) == pc)
3640 			break;
3641 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3642 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3643 		tw32_f(cpu_base + CPU_PC, pc);
3644 		udelay(1000);
3645 	}
3646 
3647 	return (i == iters) ? -EBUSY : 0;
3648 }
3649 
3650 /* tp->lock is held. */
3651 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3652 {
3653 	const struct tg3_firmware_hdr *fw_hdr;
3654 	int err;
3655 
3656 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3657 
3658 	/* The firmware blob starts with the version numbers, followed by
3659 	 * the start address and length.  The length field holds the complete
3660 	 * length: end_address_of_bss - start_address_of_text.  The remainder
3661 	 * is the blob to be loaded contiguously from the start address.
3662 	 */
3663 
3664 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3665 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3666 				    fw_hdr);
3667 	if (err)
3668 		return err;
3669 
3670 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3671 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3672 				    fw_hdr);
3673 	if (err)
3674 		return err;
3675 
3676 	/* Now startup only the RX cpu. */
3677 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3678 				       be32_to_cpu(fw_hdr->base_addr));
3679 	if (err) {
3680 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3681 			   "should be %08x\n", __func__,
3682 			   tr32(RX_CPU_BASE + CPU_PC),
3683 			   be32_to_cpu(fw_hdr->base_addr));
3684 		return -ENODEV;
3685 	}
3686 
3687 	tg3_rxcpu_resume(tp);
3688 
3689 	return 0;
3690 }
3691 
3692 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3693 {
3694 	const int iters = 1000;
3695 	int i;
3696 	u32 val;
3697 
3698 	/* Wait for boot code to complete initialization and enter service
3699 	 * loop.  It is then safe to download service patches.
3700 	 */
3701 	for (i = 0; i < iters; i++) {
3702 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3703 			break;
3704 
3705 		udelay(10);
3706 	}
3707 
3708 	if (i == iters) {
3709 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3710 		return -EBUSY;
3711 	}
3712 
3713 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3714 	if (val & 0xff) {
3715 		netdev_warn(tp->dev,
3716 			    "Other patches exist. Not downloading EEE patch\n");
3717 		return -EEXIST;
3718 	}
3719 
3720 	return 0;
3721 }
3722 
3723 /* tp->lock is held. */
3724 static void tg3_load_57766_firmware(struct tg3 *tp)
3725 {
3726 	struct tg3_firmware_hdr *fw_hdr;
3727 
3728 	if (!tg3_flag(tp, NO_NVRAM))
3729 		return;
3730 
3731 	if (tg3_validate_rxcpu_state(tp))
3732 		return;
3733 
3734 	if (!tp->fw)
3735 		return;
3736 
3737 	/* This firmware blob has a different format than older firmware
3738 	 * releases, as described below. The main difference is that the
3739 	 * data is fragmented and written to non-contiguous locations.
3740 	 *
3741 	 * The blob begins with a firmware header identical to that of other
3742 	 * firmware, consisting of version, base address and length. The
3743 	 * length here is unused and set to 0xffffffff.
3744 	 *
3745 	 * This is followed by a series of firmware fragments, each of which
3746 	 * is individually identical to older firmware, i.e. a firmware
3747 	 * header followed by the data for that fragment. The version field
3748 	 * of each fragment header is unused.
3749 	 */
3750 
3751 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3753 		return;
3754 
3755 	if (tg3_rxcpu_pause(tp))
3756 		return;
3757 
3758 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3759 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3760 
3761 	tg3_rxcpu_resume(tp);
3762 }
3763 
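/* Assumed layout of the fragmented 57766 blob, per the format comment
 * in tg3_load_57766_firmware() above (fragment len spans header + data,
 * as implied by the fragment walk):
 *
 *   +---------------------------+
 *   | main header               |  len unused (0xffffffff)
 *   +---------------------------+
 *   | fragment 1 header         |  len = header + data size
 *   | fragment 1 data ...       |
 *   +---------------------------+
 *   | fragment 2 header         |
 *   | fragment 2 data ...       |
 *   +---------------------------+
 *   | ...                       |
 *   +---------------------------+
 */
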
3764 /* tp->lock is held. */
3765 static int tg3_load_tso_firmware(struct tg3 *tp)
3766 {
3767 	const struct tg3_firmware_hdr *fw_hdr;
3768 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3769 	int err;
3770 
3771 	if (!tg3_flag(tp, FW_TSO))
3772 		return 0;
3773 
3774 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3775 
3776 	/* The firmware blob starts with version numbers, followed by the
3777 	   start address and length. The length field is the complete image
3778 	   length: length = end_address_of_bss - start_address_of_text.
3779 	   The remainder is the blob to be loaded contiguously from the
3780 	   start address. */
3781 
3782 	cpu_scratch_size = tp->fw_len;
3783 
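	/* On the 5705 the TSO firmware runs on the RX CPU, with part of
	 * the MBUF pool SRAM used as its scratch space; all other chips
	 * load it onto the TX CPU and its dedicated scratch area.
	 */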
3784 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3785 		cpu_base = RX_CPU_BASE;
3786 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3787 	} else {
3788 		cpu_base = TX_CPU_BASE;
3789 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3790 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3791 	}
3792 
3793 	err = tg3_load_firmware_cpu(tp, cpu_base,
3794 				    cpu_scratch_base, cpu_scratch_size,
3795 				    fw_hdr);
3796 	if (err)
3797 		return err;
3798 
3799 	/* Now startup the cpu. */
3800 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3801 				       be32_to_cpu(fw_hdr->base_addr));
3802 	if (err) {
3803 		netdev_err(tp->dev,
3804 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3805 			   __func__, tr32(cpu_base + CPU_PC),
3806 			   be32_to_cpu(fw_hdr->base_addr));
3807 		return -ENODEV;
3808 	}
3809 
3810 	tg3_resume_cpu(tp, cpu_base);
3811 	return 0;
3812 }
3813 
3814 
3815 /* tp->lock is held. */
3816 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3817 {
3818 	u32 addr_high, addr_low;
3819 	int i;
3820 
3821 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3822 		     tp->dev->dev_addr[1]);
3823 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3824 		    (tp->dev->dev_addr[3] << 16) |
3825 		    (tp->dev->dev_addr[4] <<  8) |
3826 		    (tp->dev->dev_addr[5] <<  0));
3827 	for (i = 0; i < 4; i++) {
3828 		if (i == 1 && skip_mac_1)
3829 			continue;
3830 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3831 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3832 	}
3833 
3834 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3835 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3836 		for (i = 0; i < 12; i++) {
3837 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3838 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3839 		}
3840 	}
3841 
3842 	addr_high = (tp->dev->dev_addr[0] +
3843 		     tp->dev->dev_addr[1] +
3844 		     tp->dev->dev_addr[2] +
3845 		     tp->dev->dev_addr[3] +
3846 		     tp->dev->dev_addr[4] +
3847 		     tp->dev->dev_addr[5]) &
3848 		TX_BACKOFF_SEED_MASK;
3849 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3850 }
3851 
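/* Worked example of the packing above (illustrative): for the MAC
 * address 00:10:18:aa:bb:cc,
 *   addr_high = 0x00000010  (bytes 0-1)
 *   addr_low  = 0x18aabbcc  (bytes 2-5)
 * and the TX backoff seed is the plain byte sum, masked:
 *   (0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc) & TX_BACKOFF_SEED_MASK
 */
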
3852 static void tg3_enable_register_access(struct tg3 *tp)
3853 {
3854 	/*
3855 	 * Make sure register accesses (indirect or otherwise) will function
3856 	 * correctly.
3857 	 */
3858 	pci_write_config_dword(tp->pdev,
3859 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3860 }
3861 
3862 static int tg3_power_up(struct tg3 *tp)
3863 {
3864 	int err;
3865 
3866 	tg3_enable_register_access(tp);
3867 
3868 	err = pci_set_power_state(tp->pdev, PCI_D0);
3869 	if (!err) {
3870 		/* Switch out of Vaux if it is a NIC */
3871 		tg3_pwrsrc_switch_to_vmain(tp);
3872 	} else {
3873 		netdev_err(tp->dev, "Transition to D0 failed\n");
3874 	}
3875 
3876 	return err;
3877 }
3878 
3879 static int tg3_setup_phy(struct tg3 *, int);
3880 
3881 static int tg3_power_down_prepare(struct tg3 *tp)
3882 {
3883 	u32 misc_host_ctrl;
3884 	bool device_should_wake, do_low_power;
3885 
3886 	tg3_enable_register_access(tp);
3887 
3888 	/* Restore the CLKREQ setting. */
3889 	if (tg3_flag(tp, CLKREQ_BUG))
3890 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3891 					 PCI_EXP_LNKCTL_CLKREQ_EN);
3892 
3893 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3894 	tw32(TG3PCI_MISC_HOST_CTRL,
3895 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3896 
3897 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3898 			     tg3_flag(tp, WOL_ENABLE);
3899 
3900 	if (tg3_flag(tp, USE_PHYLIB)) {
3901 		do_low_power = false;
3902 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3903 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3904 			struct phy_device *phydev;
3905 			u32 phyid, advertising;
3906 
3907 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3908 
3909 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3910 
3911 			tp->link_config.speed = phydev->speed;
3912 			tp->link_config.duplex = phydev->duplex;
3913 			tp->link_config.autoneg = phydev->autoneg;
3914 			tp->link_config.advertising = phydev->advertising;
3915 
3916 			advertising = ADVERTISED_TP |
3917 				      ADVERTISED_Pause |
3918 				      ADVERTISED_Autoneg |
3919 				      ADVERTISED_10baseT_Half;
3920 
3921 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3922 				if (tg3_flag(tp, WOL_SPEED_100MB))
3923 					advertising |=
3924 						ADVERTISED_100baseT_Half |
3925 						ADVERTISED_100baseT_Full |
3926 						ADVERTISED_10baseT_Full;
3927 				else
3928 					advertising |= ADVERTISED_10baseT_Full;
3929 			}
3930 
3931 			phydev->advertising = advertising;
3932 
3933 			phy_start_aneg(phydev);
3934 
3935 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3936 			if (phyid != PHY_ID_BCMAC131) {
3937 				phyid &= PHY_BCM_OUI_MASK;
3938 				if (phyid == PHY_BCM_OUI_1 ||
3939 				    phyid == PHY_BCM_OUI_2 ||
3940 				    phyid == PHY_BCM_OUI_3)
3941 					do_low_power = true;
3942 			}
3943 		}
3944 	} else {
3945 		do_low_power = true;
3946 
3947 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3948 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3949 
3950 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3951 			tg3_setup_phy(tp, 0);
3952 	}
3953 
3954 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3955 		u32 val;
3956 
3957 		val = tr32(GRC_VCPU_EXT_CTRL);
3958 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3959 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
3960 		int i;
3961 		u32 val;
3962 
3963 		for (i = 0; i < 200; i++) {
3964 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3965 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3966 				break;
3967 			msleep(1);
3968 		}
3969 	}
3970 	if (tg3_flag(tp, WOL_CAP))
3971 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3972 						     WOL_DRV_STATE_SHUTDOWN |
3973 						     WOL_DRV_WOL |
3974 						     WOL_SET_MAGIC_PKT);
3975 
3976 	if (device_should_wake) {
3977 		u32 mac_mode;
3978 
3979 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3980 			if (do_low_power &&
3981 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3982 				tg3_phy_auxctl_write(tp,
3983 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3984 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3985 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3986 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3987 				udelay(40);
3988 			}
3989 
3990 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3991 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3992 			else
3993 				mac_mode = MAC_MODE_PORT_MODE_MII;
3994 
3995 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3996 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
3997 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3998 					     SPEED_100 : SPEED_10;
3999 				if (tg3_5700_link_polarity(tp, speed))
4000 					mac_mode |= MAC_MODE_LINK_POLARITY;
4001 				else
4002 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4003 			}
4004 		} else {
4005 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4006 		}
4007 
4008 		if (!tg3_flag(tp, 5750_PLUS))
4009 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4010 
4011 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4012 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4013 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4014 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4015 
4016 		if (tg3_flag(tp, ENABLE_APE))
4017 			mac_mode |= MAC_MODE_APE_TX_EN |
4018 				    MAC_MODE_APE_RX_EN |
4019 				    MAC_MODE_TDE_ENABLE;
4020 
4021 		tw32_f(MAC_MODE, mac_mode);
4022 		udelay(100);
4023 
4024 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4025 		udelay(10);
4026 	}
4027 
4028 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4029 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4030 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4031 		u32 base_val;
4032 
4033 		base_val = tp->pci_clock_ctrl;
4034 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4035 			     CLOCK_CTRL_TXCLK_DISABLE);
4036 
4037 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4038 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4039 	} else if (tg3_flag(tp, 5780_CLASS) ||
4040 		   tg3_flag(tp, CPMU_PRESENT) ||
4041 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4042 		/* do nothing */
4043 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4044 		u32 newbits1, newbits2;
4045 
4046 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4047 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4048 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4049 				    CLOCK_CTRL_TXCLK_DISABLE |
4050 				    CLOCK_CTRL_ALTCLK);
4051 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4052 		} else if (tg3_flag(tp, 5705_PLUS)) {
4053 			newbits1 = CLOCK_CTRL_625_CORE;
4054 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4055 		} else {
4056 			newbits1 = CLOCK_CTRL_ALTCLK;
4057 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4058 		}
4059 
4060 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4061 			    40);
4062 
4063 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4064 			    40);
4065 
4066 		if (!tg3_flag(tp, 5705_PLUS)) {
4067 			u32 newbits3;
4068 
4069 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4070 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4071 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4072 					    CLOCK_CTRL_TXCLK_DISABLE |
4073 					    CLOCK_CTRL_44MHZ_CORE);
4074 			} else {
4075 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4076 			}
4077 
4078 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4079 				    tp->pci_clock_ctrl | newbits3, 40);
4080 		}
4081 	}
4082 
4083 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4084 		tg3_power_down_phy(tp, do_low_power);
4085 
4086 	tg3_frob_aux_power(tp, true);
4087 
4088 	/* Workaround for unstable PLL clock */
4089 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4090 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4091 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4092 		u32 val = tr32(0x7d00);
4093 
4094 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4095 		tw32(0x7d00, val);
4096 		if (!tg3_flag(tp, ENABLE_ASF)) {
4097 			int err;
4098 
4099 			err = tg3_nvram_lock(tp);
4100 			tg3_halt_cpu(tp, RX_CPU_BASE);
4101 			if (!err)
4102 				tg3_nvram_unlock(tp);
4103 		}
4104 	}
4105 
4106 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4107 
4108 	return 0;
4109 }
4110 
4111 static void tg3_power_down(struct tg3 *tp)
4112 {
4113 	tg3_power_down_prepare(tp);
4114 
4115 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4116 	pci_set_power_state(tp->pdev, PCI_D3hot);
4117 }
4118 
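/* Suspend ordering above: quiesce the chip and arm the WOL state first
 * (tg3_power_down_prepare()), then let the PCI core enable PME and put
 * the device into D3hot.
 */
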
4119 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4120 {
4121 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4122 	case MII_TG3_AUX_STAT_10HALF:
4123 		*speed = SPEED_10;
4124 		*duplex = DUPLEX_HALF;
4125 		break;
4126 
4127 	case MII_TG3_AUX_STAT_10FULL:
4128 		*speed = SPEED_10;
4129 		*duplex = DUPLEX_FULL;
4130 		break;
4131 
4132 	case MII_TG3_AUX_STAT_100HALF:
4133 		*speed = SPEED_100;
4134 		*duplex = DUPLEX_HALF;
4135 		break;
4136 
4137 	case MII_TG3_AUX_STAT_100FULL:
4138 		*speed = SPEED_100;
4139 		*duplex = DUPLEX_FULL;
4140 		break;
4141 
4142 	case MII_TG3_AUX_STAT_1000HALF:
4143 		*speed = SPEED_1000;
4144 		*duplex = DUPLEX_HALF;
4145 		break;
4146 
4147 	case MII_TG3_AUX_STAT_1000FULL:
4148 		*speed = SPEED_1000;
4149 		*duplex = DUPLEX_FULL;
4150 		break;
4151 
4152 	default:
4153 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4154 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4155 				 SPEED_10;
4156 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4157 				  DUPLEX_HALF;
4158 			break;
4159 		}
4160 		*speed = SPEED_UNKNOWN;
4161 		*duplex = DUPLEX_UNKNOWN;
4162 		break;
4163 	}
4164 }
4165 
4166 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4167 {
4168 	int err = 0;
4169 	u32 val, new_adv;
4170 
4171 	new_adv = ADVERTISE_CSMA;
4172 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4173 	new_adv |= mii_advertise_flowctrl(flowctrl);
4174 
4175 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4176 	if (err)
4177 		goto done;
4178 
4179 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4180 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4181 
4182 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4183 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4184 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4185 
4186 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4187 		if (err)
4188 			goto done;
4189 	}
4190 
4191 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4192 		goto done;
4193 
4194 	tw32(TG3_CPMU_EEE_MODE,
4195 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4196 
4197 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4198 	if (!err) {
4199 		int err2;
4200 
4201 		val = 0;
4202 		/* Advertise 100-BaseTX EEE ability */
4203 		if (advertise & ADVERTISED_100baseT_Full)
4204 			val |= MDIO_AN_EEE_ADV_100TX;
4205 		/* Advertise 1000-BaseT EEE ability */
4206 		if (advertise & ADVERTISED_1000baseT_Full)
4207 			val |= MDIO_AN_EEE_ADV_1000T;
4208 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4209 		if (err)
4210 			val = 0;
4211 
4212 		switch (tg3_asic_rev(tp)) {
4213 		case ASIC_REV_5717:
4214 		case ASIC_REV_57765:
4215 		case ASIC_REV_57766:
4216 		case ASIC_REV_5719:
4217 			/* If we advertised any EEE abilities above... */
4218 			if (val)
4219 				val = MII_TG3_DSP_TAP26_ALNOKO |
4220 				      MII_TG3_DSP_TAP26_RMRXSTO |
4221 				      MII_TG3_DSP_TAP26_OPCSINPT;
4222 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4223 			/* Fall through */
4224 		case ASIC_REV_5720:
4225 		case ASIC_REV_5762:
4226 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4227 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4228 						 MII_TG3_DSP_CH34TP2_HIBW01);
4229 		}
4230 
4231 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4232 		if (!err)
4233 			err = err2;
4234 	}
4235 
4236 done:
4237 	return err;
4238 }
4239 
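/* Worked example of the MII_ADVERTISE value built above (illustrative):
 * advertise = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full with
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX yields
 *   ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
 *   ADVERTISE_PAUSE_CAP
 * since ethtool_adv_to_mii_adv_t() maps the speed/duplex bits and
 * mii_advertise_flowctrl() folds symmetric TX+RX pause into
 * ADVERTISE_PAUSE_CAP alone.
 */
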
4240 static void tg3_phy_copper_begin(struct tg3 *tp)
4241 {
4242 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4243 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4244 		u32 adv, fc;
4245 
4246 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4247 			adv = ADVERTISED_10baseT_Half |
4248 			      ADVERTISED_10baseT_Full;
4249 			if (tg3_flag(tp, WOL_SPEED_100MB))
4250 				adv |= ADVERTISED_100baseT_Half |
4251 				       ADVERTISED_100baseT_Full;
4252 
4253 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4254 		} else {
4255 			adv = tp->link_config.advertising;
4256 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4257 				adv &= ~(ADVERTISED_1000baseT_Half |
4258 					 ADVERTISED_1000baseT_Full);
4259 
4260 			fc = tp->link_config.flowctrl;
4261 		}
4262 
4263 		tg3_phy_autoneg_cfg(tp, adv, fc);
4264 
4265 		tg3_writephy(tp, MII_BMCR,
4266 			     BMCR_ANENABLE | BMCR_ANRESTART);
4267 	} else {
4268 		int i;
4269 		u32 bmcr, orig_bmcr;
4270 
4271 		tp->link_config.active_speed = tp->link_config.speed;
4272 		tp->link_config.active_duplex = tp->link_config.duplex;
4273 
4274 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4275 			/* With autoneg disabled, the 5715 only links up when the
4276 			 * advertisement register has the configured speed
4277 			 * enabled.
4278 			 */
4279 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4280 		}
4281 
4282 		bmcr = 0;
4283 		switch (tp->link_config.speed) {
4284 		default:
4285 		case SPEED_10:
4286 			break;
4287 
4288 		case SPEED_100:
4289 			bmcr |= BMCR_SPEED100;
4290 			break;
4291 
4292 		case SPEED_1000:
4293 			bmcr |= BMCR_SPEED1000;
4294 			break;
4295 		}
4296 
4297 		if (tp->link_config.duplex == DUPLEX_FULL)
4298 			bmcr |= BMCR_FULLDPLX;
4299 
4300 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4301 		    (bmcr != orig_bmcr)) {
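			/* Drop the link by briefly forcing the PHY into
			 * loopback, wait for link-down, then apply the
			 * new speed/duplex settings in BMCR.
			 */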
4302 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4303 			for (i = 0; i < 1500; i++) {
4304 				u32 tmp;
4305 
4306 				udelay(10);
4307 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4308 				    tg3_readphy(tp, MII_BMSR, &tmp))
4309 					continue;
4310 				if (!(tmp & BMSR_LSTATUS)) {
4311 					udelay(40);
4312 					break;
4313 				}
4314 			}
4315 			tg3_writephy(tp, MII_BMCR, bmcr);
4316 			udelay(40);
4317 		}
4318 	}
4319 }
4320 
4321 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4322 {
4323 	int err;
4324 
4325 	/* Turn off tap power management. */
4326 	/* Set Extended packet length bit */
4327 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4328 
4329 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4330 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4331 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4332 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4333 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4334 
4335 	udelay(40);
4336 
4337 	return err;
4338 }
4339 
4340 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4341 {
4342 	u32 advmsk, tgtadv, advertising;
4343 
4344 	advertising = tp->link_config.advertising;
4345 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4346 
4347 	advmsk = ADVERTISE_ALL;
4348 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4349 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4350 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4351 	}
4352 
4353 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4354 		return false;
4355 
4356 	if ((*lcladv & advmsk) != tgtadv)
4357 		return false;
4358 
4359 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4360 		u32 tg3_ctrl;
4361 
4362 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4363 
4364 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4365 			return false;
4366 
4367 		if (tgtadv &&
4368 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4369 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4370 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4371 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4372 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4373 		} else {
4374 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4375 		}
4376 
4377 		if (tg3_ctrl != tgtadv)
4378 			return false;
4379 	}
4380 
4381 	return true;
4382 }
4383 
4384 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4385 {
4386 	u32 lpeth = 0;
4387 
4388 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4389 		u32 val;
4390 
4391 		if (tg3_readphy(tp, MII_STAT1000, &val))
4392 			return false;
4393 
4394 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4395 	}
4396 
4397 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4398 		return false;
4399 
4400 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4401 	tp->link_config.rmt_adv = lpeth;
4402 
4403 	return true;
4404 }
4405 
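/* Illustrative decode: a partner advertising 1000FULL plus 100FULL and
 * pause contributes ADVERTISED_1000baseT_Full from MII_STAT1000 (via
 * mii_stat1000_to_ethtool_lpa_t()) and ADVERTISED_100baseT_Full |
 * ADVERTISED_Pause from MII_LPA (via mii_lpa_to_ethtool_lpa_t()), so
 * rmt_adv ends up holding the union of both registers.
 */
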
4406 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4407 {
4408 	if (curr_link_up != tp->link_up) {
4409 		if (curr_link_up) {
4410 			netif_carrier_on(tp->dev);
4411 		} else {
4412 			netif_carrier_off(tp->dev);
4413 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4414 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4415 		}
4416 
4417 		tg3_link_report(tp);
4418 		return true;
4419 	}
4420 
4421 	return false;
4422 }
4423 
4424 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4425 {
4426 	int current_link_up;
4427 	u32 bmsr, val;
4428 	u32 lcl_adv, rmt_adv;
4429 	u16 current_speed;
4430 	u8 current_duplex;
4431 	int i, err;
4432 
4433 	tw32(MAC_EVENT, 0);
4434 
4435 	tw32_f(MAC_STATUS,
4436 	     (MAC_STATUS_SYNC_CHANGED |
4437 	      MAC_STATUS_CFG_CHANGED |
4438 	      MAC_STATUS_MI_COMPLETION |
4439 	      MAC_STATUS_LNKSTATE_CHANGED));
4440 	udelay(40);
4441 
4442 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4443 		tw32_f(MAC_MI_MODE,
4444 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4445 		udelay(80);
4446 	}
4447 
4448 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4449 
4450 	/* Some third-party PHYs need to be reset on link going
4451 	 * down.
4452 	 */
4453 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4454 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4455 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4456 	    tp->link_up) {
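		/* BMSR latches link-down events; read it twice so the
		 * second read reflects the current link state.
		 */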
4457 		tg3_readphy(tp, MII_BMSR, &bmsr);
4458 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4459 		    !(bmsr & BMSR_LSTATUS))
4460 			force_reset = 1;
4461 	}
4462 	if (force_reset)
4463 		tg3_phy_reset(tp);
4464 
4465 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4466 		tg3_readphy(tp, MII_BMSR, &bmsr);
4467 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4468 		    !tg3_flag(tp, INIT_COMPLETE))
4469 			bmsr = 0;
4470 
4471 		if (!(bmsr & BMSR_LSTATUS)) {
4472 			err = tg3_init_5401phy_dsp(tp);
4473 			if (err)
4474 				return err;
4475 
4476 			tg3_readphy(tp, MII_BMSR, &bmsr);
4477 			for (i = 0; i < 1000; i++) {
4478 				udelay(10);
4479 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4480 				    (bmsr & BMSR_LSTATUS)) {
4481 					udelay(40);
4482 					break;
4483 				}
4484 			}
4485 
4486 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4487 			    TG3_PHY_REV_BCM5401_B0 &&
4488 			    !(bmsr & BMSR_LSTATUS) &&
4489 			    tp->link_config.active_speed == SPEED_1000) {
4490 				err = tg3_phy_reset(tp);
4491 				if (!err)
4492 					err = tg3_init_5401phy_dsp(tp);
4493 				if (err)
4494 					return err;
4495 			}
4496 		}
4497 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4498 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4499 		/* 5701 {A0,B0} CRC bug workaround */
4500 		tg3_writephy(tp, 0x15, 0x0a75);
4501 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4502 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4503 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4504 	}
4505 
4506 	/* Clear pending interrupts... */
4507 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4508 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4509 
4510 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4511 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4512 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4513 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4514 
4515 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4516 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4517 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4518 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4519 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4520 		else
4521 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4522 	}
4523 
4524 	current_link_up = 0;
4525 	current_speed = SPEED_UNKNOWN;
4526 	current_duplex = DUPLEX_UNKNOWN;
4527 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4528 	tp->link_config.rmt_adv = 0;
4529 
4530 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4531 		err = tg3_phy_auxctl_read(tp,
4532 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4533 					  &val);
4534 		if (!err && !(val & (1 << 10))) {
4535 			tg3_phy_auxctl_write(tp,
4536 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4537 					     val | (1 << 10));
4538 			goto relink;
4539 		}
4540 	}
4541 
4542 	bmsr = 0;
4543 	for (i = 0; i < 100; i++) {
4544 		tg3_readphy(tp, MII_BMSR, &bmsr);
4545 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4546 		    (bmsr & BMSR_LSTATUS))
4547 			break;
4548 		udelay(40);
4549 	}
4550 
4551 	if (bmsr & BMSR_LSTATUS) {
4552 		u32 aux_stat, bmcr;
4553 
4554 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4555 		for (i = 0; i < 2000; i++) {
4556 			udelay(10);
4557 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4558 			    aux_stat)
4559 				break;
4560 		}
4561 
4562 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4563 					     &current_speed,
4564 					     &current_duplex);
4565 
4566 		bmcr = 0;
4567 		for (i = 0; i < 200; i++) {
4568 			tg3_readphy(tp, MII_BMCR, &bmcr);
4569 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4570 				continue;
4571 			if (bmcr && bmcr != 0x7fff)
4572 				break;
4573 			udelay(10);
4574 		}
4575 
4576 		lcl_adv = 0;
4577 		rmt_adv = 0;
4578 
4579 		tp->link_config.active_speed = current_speed;
4580 		tp->link_config.active_duplex = current_duplex;
4581 
4582 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4583 			if ((bmcr & BMCR_ANENABLE) &&
4584 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4585 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4586 				current_link_up = 1;
4587 		} else {
4588 			if (!(bmcr & BMCR_ANENABLE) &&
4589 			    tp->link_config.speed == current_speed &&
4590 			    tp->link_config.duplex == current_duplex &&
4591 			    tp->link_config.flowctrl ==
4592 			    tp->link_config.active_flowctrl) {
4593 				current_link_up = 1;
4594 			}
4595 		}
4596 
4597 		if (current_link_up == 1 &&
4598 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4599 			u32 reg, bit;
4600 
4601 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4602 				reg = MII_TG3_FET_GEN_STAT;
4603 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4604 			} else {
4605 				reg = MII_TG3_EXT_STAT;
4606 				bit = MII_TG3_EXT_STAT_MDIX;
4607 			}
4608 
4609 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4610 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4611 
4612 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4613 		}
4614 	}
4615 
4616 relink:
4617 	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4618 		tg3_phy_copper_begin(tp);
4619 
4620 		if (tg3_flag(tp, ROBOSWITCH)) {
4621 			current_link_up = 1;
4622 			/* FIXME: use 100 Mbit/s when the BCM5325 switch is used */
4623 			current_speed = SPEED_1000;
4624 			current_duplex = DUPLEX_FULL;
4625 			tp->link_config.active_speed = current_speed;
4626 			tp->link_config.active_duplex = current_duplex;
4627 		}
4628 
4629 		tg3_readphy(tp, MII_BMSR, &bmsr);
4630 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4631 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4632 			current_link_up = 1;
4633 	}
4634 
4635 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4636 	if (current_link_up == 1) {
4637 		if (tp->link_config.active_speed == SPEED_100 ||
4638 		    tp->link_config.active_speed == SPEED_10)
4639 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4640 		else
4641 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4642 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4643 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4644 	else
4645 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4646 
4647 	/* In order for the 5750 core in BCM4785 chip to work properly
4648 	 * in RGMII mode, the Led Control Register must be set up.
4649 	 */
4650 	if (tg3_flag(tp, RGMII_MODE)) {
4651 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4652 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4653 
4654 		if (tp->link_config.active_speed == SPEED_10)
4655 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4656 		else if (tp->link_config.active_speed == SPEED_100)
4657 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4658 				     LED_CTRL_100MBPS_ON);
4659 		else if (tp->link_config.active_speed == SPEED_1000)
4660 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4661 				     LED_CTRL_1000MBPS_ON);
4662 
4663 		tw32(MAC_LED_CTRL, led_ctrl);
4664 		udelay(40);
4665 	}
4666 
4667 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4668 	if (tp->link_config.active_duplex == DUPLEX_HALF)
4669 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4670 
4671 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4672 		if (current_link_up == 1 &&
4673 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4674 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4675 		else
4676 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4677 	}
4678 
4679 	/* ??? Without this setting Netgear GA302T PHY does not
4680 	 * ??? send/receive packets...
4681 	 */
4682 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4683 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4684 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4685 		tw32_f(MAC_MI_MODE, tp->mi_mode);
4686 		udelay(80);
4687 	}
4688 
4689 	tw32_f(MAC_MODE, tp->mac_mode);
4690 	udelay(40);
4691 
4692 	tg3_phy_eee_adjust(tp, current_link_up);
4693 
4694 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
4695 		/* Polled via timer. */
4696 		tw32_f(MAC_EVENT, 0);
4697 	} else {
4698 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4699 	}
4700 	udelay(40);
4701 
4702 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4703 	    current_link_up == 1 &&
4704 	    tp->link_config.active_speed == SPEED_1000 &&
4705 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4706 		udelay(120);
4707 		tw32_f(MAC_STATUS,
4708 		     (MAC_STATUS_SYNC_CHANGED |
4709 		      MAC_STATUS_CFG_CHANGED));
4710 		udelay(40);
4711 		tg3_write_mem(tp,
4712 			      NIC_SRAM_FIRMWARE_MBOX,
4713 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4714 	}
4715 
4716 	/* Prevent send BD corruption. */
4717 	if (tg3_flag(tp, CLKREQ_BUG)) {
4718 		if (tp->link_config.active_speed == SPEED_100 ||
4719 		    tp->link_config.active_speed == SPEED_10)
4720 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4721 						   PCI_EXP_LNKCTL_CLKREQ_EN);
4722 		else
4723 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4724 						 PCI_EXP_LNKCTL_CLKREQ_EN);
4725 	}
4726 
4727 	tg3_test_and_report_link_chg(tp, current_link_up);
4728 
4729 	return 0;
4730 }
4731 
4732 struct tg3_fiber_aneginfo {
4733 	int state;
4734 #define ANEG_STATE_UNKNOWN		0
4735 #define ANEG_STATE_AN_ENABLE		1
4736 #define ANEG_STATE_RESTART_INIT		2
4737 #define ANEG_STATE_RESTART		3
4738 #define ANEG_STATE_DISABLE_LINK_OK	4
4739 #define ANEG_STATE_ABILITY_DETECT_INIT	5
4740 #define ANEG_STATE_ABILITY_DETECT	6
4741 #define ANEG_STATE_ACK_DETECT_INIT	7
4742 #define ANEG_STATE_ACK_DETECT		8
4743 #define ANEG_STATE_COMPLETE_ACK_INIT	9
4744 #define ANEG_STATE_COMPLETE_ACK		10
4745 #define ANEG_STATE_IDLE_DETECT_INIT	11
4746 #define ANEG_STATE_IDLE_DETECT		12
4747 #define ANEG_STATE_LINK_OK		13
4748 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
4749 #define ANEG_STATE_NEXT_PAGE_WAIT	15
4750 
4751 	u32 flags;
4752 #define MR_AN_ENABLE		0x00000001
4753 #define MR_RESTART_AN		0x00000002
4754 #define MR_AN_COMPLETE		0x00000004
4755 #define MR_PAGE_RX		0x00000008
4756 #define MR_NP_LOADED		0x00000010
4757 #define MR_TOGGLE_TX		0x00000020
4758 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
4759 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
4760 #define MR_LP_ADV_SYM_PAUSE	0x00000100
4761 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
4762 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
4763 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
4764 #define MR_LP_ADV_NEXT_PAGE	0x00001000
4765 #define MR_TOGGLE_RX		0x00002000
4766 #define MR_NP_RX		0x00004000
4767 
4768 #define MR_LINK_OK		0x80000000
4769 
4770 	unsigned long link_time, cur_time;
4771 
4772 	u32 ability_match_cfg;
4773 	int ability_match_count;
4774 
4775 	char ability_match, idle_match, ack_match;
4776 
4777 	u32 txconfig, rxconfig;
4778 #define ANEG_CFG_NP		0x00000080
4779 #define ANEG_CFG_ACK		0x00000040
4780 #define ANEG_CFG_RF2		0x00000020
4781 #define ANEG_CFG_RF1		0x00000010
4782 #define ANEG_CFG_PS2		0x00000001
4783 #define ANEG_CFG_PS1		0x00008000
4784 #define ANEG_CFG_HD		0x00004000
4785 #define ANEG_CFG_FD		0x00002000
4786 #define ANEG_CFG_INVAL		0x00001f06
4787 
4788 };
4789 #define ANEG_OK		0
4790 #define ANEG_DONE	1
4791 #define ANEG_TIMER_ENAB	2
4792 #define ANEG_FAILED	-1
4793 
4794 #define ANEG_STATE_SETTLE_TIME	10000
4795 
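/* The state machine below follows an 802.3z clause 37 style flow:
 * AN_ENABLE -> RESTART (send null configs) -> ABILITY_DETECT (send our
 * ability word) -> ACK_DETECT (wait for a matching ack) ->
 * COMPLETE_ACK (latch the partner's abilities into MR_LP_ADV_*) ->
 * IDLE_DETECT -> LINK_OK. fiber_autoneg() ticks it about once per
 * microsecond until it returns ANEG_DONE or ANEG_FAILED.
 */
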
4796 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4797 				   struct tg3_fiber_aneginfo *ap)
4798 {
4799 	u16 flowctrl;
4800 	unsigned long delta;
4801 	u32 rx_cfg_reg;
4802 	int ret;
4803 
4804 	if (ap->state == ANEG_STATE_UNKNOWN) {
4805 		ap->rxconfig = 0;
4806 		ap->link_time = 0;
4807 		ap->cur_time = 0;
4808 		ap->ability_match_cfg = 0;
4809 		ap->ability_match_count = 0;
4810 		ap->ability_match = 0;
4811 		ap->idle_match = 0;
4812 		ap->ack_match = 0;
4813 	}
4814 	ap->cur_time++;
4815 
4816 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4817 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4818 
4819 		if (rx_cfg_reg != ap->ability_match_cfg) {
4820 			ap->ability_match_cfg = rx_cfg_reg;
4821 			ap->ability_match = 0;
4822 			ap->ability_match_count = 0;
4823 		} else {
4824 			if (++ap->ability_match_count > 1) {
4825 				ap->ability_match = 1;
4826 				ap->ability_match_cfg = rx_cfg_reg;
4827 			}
4828 		}
4829 		if (rx_cfg_reg & ANEG_CFG_ACK)
4830 			ap->ack_match = 1;
4831 		else
4832 			ap->ack_match = 0;
4833 
4834 		ap->idle_match = 0;
4835 	} else {
4836 		ap->idle_match = 1;
4837 		ap->ability_match_cfg = 0;
4838 		ap->ability_match_count = 0;
4839 		ap->ability_match = 0;
4840 		ap->ack_match = 0;
4841 
4842 		rx_cfg_reg = 0;
4843 	}
4844 
4845 	ap->rxconfig = rx_cfg_reg;
4846 	ret = ANEG_OK;
4847 
4848 	switch (ap->state) {
4849 	case ANEG_STATE_UNKNOWN:
4850 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4851 			ap->state = ANEG_STATE_AN_ENABLE;
4852 
4853 		/* fallthru */
4854 	case ANEG_STATE_AN_ENABLE:
4855 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4856 		if (ap->flags & MR_AN_ENABLE) {
4857 			ap->link_time = 0;
4858 			ap->cur_time = 0;
4859 			ap->ability_match_cfg = 0;
4860 			ap->ability_match_count = 0;
4861 			ap->ability_match = 0;
4862 			ap->idle_match = 0;
4863 			ap->ack_match = 0;
4864 
4865 			ap->state = ANEG_STATE_RESTART_INIT;
4866 		} else {
4867 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
4868 		}
4869 		break;
4870 
4871 	case ANEG_STATE_RESTART_INIT:
4872 		ap->link_time = ap->cur_time;
4873 		ap->flags &= ~(MR_NP_LOADED);
4874 		ap->txconfig = 0;
4875 		tw32(MAC_TX_AUTO_NEG, 0);
4876 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4877 		tw32_f(MAC_MODE, tp->mac_mode);
4878 		udelay(40);
4879 
4880 		ret = ANEG_TIMER_ENAB;
4881 		ap->state = ANEG_STATE_RESTART;
4882 
4883 		/* fallthru */
4884 	case ANEG_STATE_RESTART:
4885 		delta = ap->cur_time - ap->link_time;
4886 		if (delta > ANEG_STATE_SETTLE_TIME)
4887 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4888 		else
4889 			ret = ANEG_TIMER_ENAB;
4890 		break;
4891 
4892 	case ANEG_STATE_DISABLE_LINK_OK:
4893 		ret = ANEG_DONE;
4894 		break;
4895 
4896 	case ANEG_STATE_ABILITY_DETECT_INIT:
4897 		ap->flags &= ~(MR_TOGGLE_TX);
4898 		ap->txconfig = ANEG_CFG_FD;
4899 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4900 		if (flowctrl & ADVERTISE_1000XPAUSE)
4901 			ap->txconfig |= ANEG_CFG_PS1;
4902 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4903 			ap->txconfig |= ANEG_CFG_PS2;
4904 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4905 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4906 		tw32_f(MAC_MODE, tp->mac_mode);
4907 		udelay(40);
4908 
4909 		ap->state = ANEG_STATE_ABILITY_DETECT;
4910 		break;
4911 
4912 	case ANEG_STATE_ABILITY_DETECT:
4913 		if (ap->ability_match != 0 && ap->rxconfig != 0)
4914 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
4915 		break;
4916 
4917 	case ANEG_STATE_ACK_DETECT_INIT:
4918 		ap->txconfig |= ANEG_CFG_ACK;
4919 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4920 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4921 		tw32_f(MAC_MODE, tp->mac_mode);
4922 		udelay(40);
4923 
4924 		ap->state = ANEG_STATE_ACK_DETECT;
4925 
4926 		/* fallthru */
4927 	case ANEG_STATE_ACK_DETECT:
4928 		if (ap->ack_match != 0) {
4929 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4930 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4931 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4932 			} else {
4933 				ap->state = ANEG_STATE_AN_ENABLE;
4934 			}
4935 		} else if (ap->ability_match != 0 &&
4936 			   ap->rxconfig == 0) {
4937 			ap->state = ANEG_STATE_AN_ENABLE;
4938 		}
4939 		break;
4940 
4941 	case ANEG_STATE_COMPLETE_ACK_INIT:
4942 		if (ap->rxconfig & ANEG_CFG_INVAL) {
4943 			ret = ANEG_FAILED;
4944 			break;
4945 		}
4946 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4947 			       MR_LP_ADV_HALF_DUPLEX |
4948 			       MR_LP_ADV_SYM_PAUSE |
4949 			       MR_LP_ADV_ASYM_PAUSE |
4950 			       MR_LP_ADV_REMOTE_FAULT1 |
4951 			       MR_LP_ADV_REMOTE_FAULT2 |
4952 			       MR_LP_ADV_NEXT_PAGE |
4953 			       MR_TOGGLE_RX |
4954 			       MR_NP_RX);
4955 		if (ap->rxconfig & ANEG_CFG_FD)
4956 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4957 		if (ap->rxconfig & ANEG_CFG_HD)
4958 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4959 		if (ap->rxconfig & ANEG_CFG_PS1)
4960 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
4961 		if (ap->rxconfig & ANEG_CFG_PS2)
4962 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4963 		if (ap->rxconfig & ANEG_CFG_RF1)
4964 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4965 		if (ap->rxconfig & ANEG_CFG_RF2)
4966 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4967 		if (ap->rxconfig & ANEG_CFG_NP)
4968 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
4969 
4970 		ap->link_time = ap->cur_time;
4971 
4972 		ap->flags ^= (MR_TOGGLE_TX);
4973 		if (ap->rxconfig & 0x0008)
4974 			ap->flags |= MR_TOGGLE_RX;
4975 		if (ap->rxconfig & ANEG_CFG_NP)
4976 			ap->flags |= MR_NP_RX;
4977 		ap->flags |= MR_PAGE_RX;
4978 
4979 		ap->state = ANEG_STATE_COMPLETE_ACK;
4980 		ret = ANEG_TIMER_ENAB;
4981 		break;
4982 
4983 	case ANEG_STATE_COMPLETE_ACK:
4984 		if (ap->ability_match != 0 &&
4985 		    ap->rxconfig == 0) {
4986 			ap->state = ANEG_STATE_AN_ENABLE;
4987 			break;
4988 		}
4989 		delta = ap->cur_time - ap->link_time;
4990 		if (delta > ANEG_STATE_SETTLE_TIME) {
4991 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4992 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4993 			} else {
4994 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4995 				    !(ap->flags & MR_NP_RX)) {
4996 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4997 				} else {
4998 					ret = ANEG_FAILED;
4999 				}
5000 			}
5001 		}
5002 		break;
5003 
5004 	case ANEG_STATE_IDLE_DETECT_INIT:
5005 		ap->link_time = ap->cur_time;
5006 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5007 		tw32_f(MAC_MODE, tp->mac_mode);
5008 		udelay(40);
5009 
5010 		ap->state = ANEG_STATE_IDLE_DETECT;
5011 		ret = ANEG_TIMER_ENAB;
5012 		break;
5013 
5014 	case ANEG_STATE_IDLE_DETECT:
5015 		if (ap->ability_match != 0 &&
5016 		    ap->rxconfig == 0) {
5017 			ap->state = ANEG_STATE_AN_ENABLE;
5018 			break;
5019 		}
5020 		delta = ap->cur_time - ap->link_time;
5021 		if (delta > ANEG_STATE_SETTLE_TIME) {
5022 			/* XXX another gem from the Broadcom driver :( */
5023 			ap->state = ANEG_STATE_LINK_OK;
5024 		}
5025 		break;
5026 
5027 	case ANEG_STATE_LINK_OK:
5028 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5029 		ret = ANEG_DONE;
5030 		break;
5031 
5032 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5033 		/* ??? unimplemented */
5034 		break;
5035 
5036 	case ANEG_STATE_NEXT_PAGE_WAIT:
5037 		/* ??? unimplemented */
5038 		break;
5039 
5040 	default:
5041 		ret = ANEG_FAILED;
5042 		break;
5043 	}
5044 
5045 	return ret;
5046 }
5047 
5048 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5049 {
5050 	int res = 0;
5051 	struct tg3_fiber_aneginfo aninfo;
5052 	int status = ANEG_FAILED;
5053 	unsigned int tick;
5054 	u32 tmp;
5055 
5056 	tw32_f(MAC_TX_AUTO_NEG, 0);
5057 
5058 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5059 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5060 	udelay(40);
5061 
5062 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5063 	udelay(40);
5064 
5065 	memset(&aninfo, 0, sizeof(aninfo));
5066 	aninfo.flags |= MR_AN_ENABLE;
5067 	aninfo.state = ANEG_STATE_UNKNOWN;
5068 	aninfo.cur_time = 0;
5069 	tick = 0;
5070 	while (++tick < 195000) {
5071 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5072 		if (status == ANEG_DONE || status == ANEG_FAILED)
5073 			break;
5074 
5075 		udelay(1);
5076 	}
5077 
5078 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5079 	tw32_f(MAC_MODE, tp->mac_mode);
5080 	udelay(40);
5081 
5082 	*txflags = aninfo.txconfig;
5083 	*rxflags = aninfo.flags;
5084 
5085 	if (status == ANEG_DONE &&
5086 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5087 			     MR_LP_ADV_FULL_DUPLEX)))
5088 		res = 1;
5089 
5090 	return res;
5091 }
5092 
5093 static void tg3_init_bcm8002(struct tg3 *tp)
5094 {
5095 	u32 mac_status = tr32(MAC_STATUS);
5096 	int i;
5097 
5098 	/* Reset when initializing for the first time or when we have a link. */
5099 	if (tg3_flag(tp, INIT_COMPLETE) &&
5100 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5101 		return;
5102 
5103 	/* Set PLL lock range. */
5104 	tg3_writephy(tp, 0x16, 0x8007);
5105 
5106 	/* SW reset */
5107 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5108 
5109 	/* Wait for reset to complete. */
5110 	/* XXX schedule_timeout() ... */
5111 	for (i = 0; i < 500; i++)
5112 		udelay(10);
5113 
5114 	/* Config mode; select PMA/Ch 1 regs. */
5115 	tg3_writephy(tp, 0x10, 0x8411);
5116 
5117 	/* Enable auto-lock and comdet, select txclk for tx. */
5118 	tg3_writephy(tp, 0x11, 0x0a10);
5119 
5120 	tg3_writephy(tp, 0x18, 0x00a0);
5121 	tg3_writephy(tp, 0x16, 0x41ff);
5122 
5123 	/* Assert and deassert POR. */
5124 	tg3_writephy(tp, 0x13, 0x0400);
5125 	udelay(40);
5126 	tg3_writephy(tp, 0x13, 0x0000);
5127 
5128 	tg3_writephy(tp, 0x11, 0x0a50);
5129 	udelay(40);
5130 	tg3_writephy(tp, 0x11, 0x0a10);
5131 
5132 	/* Wait for signal to stabilize */
5133 	/* XXX schedule_timeout() ... */
5134 	for (i = 0; i < 15000; i++)
5135 		udelay(10);
5136 
5137 	/* Deselect the channel register so we can read the PHYID
5138 	 * later.
5139 	 */
5140 	tg3_writephy(tp, 0x10, 0x8011);
5141 }
5142 
5143 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5144 {
5145 	u16 flowctrl;
5146 	u32 sg_dig_ctrl, sg_dig_status;
5147 	u32 serdes_cfg, expected_sg_dig_ctrl;
5148 	int workaround, port_a;
5149 	int current_link_up;
5150 
5151 	serdes_cfg = 0;
5152 	expected_sg_dig_ctrl = 0;
5153 	workaround = 0;
5154 	port_a = 1;
5155 	current_link_up = 0;
5156 
5157 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5158 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5159 		workaround = 1;
5160 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5161 			port_a = 0;
5162 
5163 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5164 		/* preserve bits 20-23 for voltage regulator */
5165 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5166 	}
5167 
5168 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5169 
5170 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5171 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5172 			if (workaround) {
5173 				u32 val = serdes_cfg;
5174 
5175 				if (port_a)
5176 					val |= 0xc010000;
5177 				else
5178 					val |= 0x4010000;
5179 				tw32_f(MAC_SERDES_CFG, val);
5180 			}
5181 
5182 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5183 		}
5184 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5185 			tg3_setup_flow_control(tp, 0, 0);
5186 			current_link_up = 1;
5187 		}
5188 		goto out;
5189 	}
5190 
5191 	/* Want auto-negotiation.  */
5192 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5193 
5194 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5195 	if (flowctrl & ADVERTISE_1000XPAUSE)
5196 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5197 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5198 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5199 
5200 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5201 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5202 		    tp->serdes_counter &&
5203 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5204 				    MAC_STATUS_RCVD_CFG)) ==
5205 		     MAC_STATUS_PCS_SYNCED)) {
5206 			tp->serdes_counter--;
5207 			current_link_up = 1;
5208 			goto out;
5209 		}
5210 restart_autoneg:
5211 		if (workaround)
5212 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5213 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5214 		udelay(5);
5215 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5216 
5217 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5218 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5219 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5220 				 MAC_STATUS_SIGNAL_DET)) {
5221 		sg_dig_status = tr32(SG_DIG_STATUS);
5222 		mac_status = tr32(MAC_STATUS);
5223 
5224 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5225 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5226 			u32 local_adv = 0, remote_adv = 0;
5227 
5228 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5229 				local_adv |= ADVERTISE_1000XPAUSE;
5230 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5231 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5232 
5233 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5234 				remote_adv |= LPA_1000XPAUSE;
5235 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5236 				remote_adv |= LPA_1000XPAUSE_ASYM;
5237 
5238 			tp->link_config.rmt_adv =
5239 					   mii_adv_to_ethtool_adv_x(remote_adv);
5240 
5241 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5242 			current_link_up = 1;
5243 			tp->serdes_counter = 0;
5244 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5245 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5246 			if (tp->serdes_counter)
5247 				tp->serdes_counter--;
5248 			else {
5249 				if (workaround) {
5250 					u32 val = serdes_cfg;
5251 
5252 					if (port_a)
5253 						val |= 0xc010000;
5254 					else
5255 						val |= 0x4010000;
5256 
5257 					tw32_f(MAC_SERDES_CFG, val);
5258 				}
5259 
5260 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5261 				udelay(40);
5262 
5263 				/* Link parallel detection - link is up only
5264 				 * if we have PCS_SYNC and are not receiving
5265 				 * config code words. */
5266 				mac_status = tr32(MAC_STATUS);
5267 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5268 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5269 					tg3_setup_flow_control(tp, 0, 0);
5270 					current_link_up = 1;
5271 					tp->phy_flags |=
5272 						TG3_PHYFLG_PARALLEL_DETECT;
5273 					tp->serdes_counter =
5274 						SERDES_PARALLEL_DET_TIMEOUT;
5275 				} else
5276 					goto restart_autoneg;
5277 			}
5278 		}
5279 	} else {
5280 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5281 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5282 	}
5283 
5284 out:
5285 	return current_link_up;
5286 }
5287 
5288 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5289 {
5290 	int current_link_up = 0;
5291 
5292 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5293 		goto out;
5294 
5295 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5296 		u32 txflags, rxflags;
5297 		int i;
5298 
5299 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5300 			u32 local_adv = 0, remote_adv = 0;
5301 
5302 			if (txflags & ANEG_CFG_PS1)
5303 				local_adv |= ADVERTISE_1000XPAUSE;
5304 			if (txflags & ANEG_CFG_PS2)
5305 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5306 
5307 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5308 				remote_adv |= LPA_1000XPAUSE;
5309 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5310 				remote_adv |= LPA_1000XPAUSE_ASYM;
5311 
5312 			tp->link_config.rmt_adv =
5313 					   mii_adv_to_ethtool_adv_x(remote_adv);
5314 
5315 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5316 
5317 			current_link_up = 1;
5318 		}
5319 		for (i = 0; i < 30; i++) {
5320 			udelay(20);
5321 			tw32_f(MAC_STATUS,
5322 			       (MAC_STATUS_SYNC_CHANGED |
5323 				MAC_STATUS_CFG_CHANGED));
5324 			udelay(40);
5325 			if ((tr32(MAC_STATUS) &
5326 			     (MAC_STATUS_SYNC_CHANGED |
5327 			      MAC_STATUS_CFG_CHANGED)) == 0)
5328 				break;
5329 		}
5330 
5331 		mac_status = tr32(MAC_STATUS);
5332 		if (current_link_up == 0 &&
5333 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5334 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5335 			current_link_up = 1;
5336 	} else {
5337 		tg3_setup_flow_control(tp, 0, 0);
5338 
5339 		/* Forcing 1000FD link up. */
5340 		current_link_up = 1;
5341 
5342 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5343 		udelay(40);
5344 
5345 		tw32_f(MAC_MODE, tp->mac_mode);
5346 		udelay(40);
5347 	}
5348 
5349 out:
5350 	return current_link_up;
5351 }
5352 
5353 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5354 {
5355 	u32 orig_pause_cfg;
5356 	u16 orig_active_speed;
5357 	u8 orig_active_duplex;
5358 	u32 mac_status;
5359 	int current_link_up;
5360 	int i;
5361 
5362 	orig_pause_cfg = tp->link_config.active_flowctrl;
5363 	orig_active_speed = tp->link_config.active_speed;
5364 	orig_active_duplex = tp->link_config.active_duplex;
5365 
5366 	if (!tg3_flag(tp, HW_AUTONEG) &&
5367 	    tp->link_up &&
5368 	    tg3_flag(tp, INIT_COMPLETE)) {
5369 		mac_status = tr32(MAC_STATUS);
5370 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5371 			       MAC_STATUS_SIGNAL_DET |
5372 			       MAC_STATUS_CFG_CHANGED |
5373 			       MAC_STATUS_RCVD_CFG);
5374 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5375 				   MAC_STATUS_SIGNAL_DET)) {
5376 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5377 					    MAC_STATUS_CFG_CHANGED));
5378 			return 0;
5379 		}
5380 	}
5381 
5382 	tw32_f(MAC_TX_AUTO_NEG, 0);
5383 
5384 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5385 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5386 	tw32_f(MAC_MODE, tp->mac_mode);
5387 	udelay(40);
5388 
5389 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5390 		tg3_init_bcm8002(tp);
5391 
5392 	/* Enable link change event even when serdes polling.  */
5393 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5394 	udelay(40);
5395 
5396 	current_link_up = 0;
5397 	tp->link_config.rmt_adv = 0;
5398 	mac_status = tr32(MAC_STATUS);
5399 
5400 	if (tg3_flag(tp, HW_AUTONEG))
5401 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5402 	else
5403 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5404 
5405 	tp->napi[0].hw_status->status =
5406 		(SD_STATUS_UPDATED |
5407 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5408 
5409 	for (i = 0; i < 100; i++) {
5410 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5411 				    MAC_STATUS_CFG_CHANGED));
5412 		udelay(5);
5413 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5414 					 MAC_STATUS_CFG_CHANGED |
5415 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5416 			break;
5417 	}
5418 
5419 	mac_status = tr32(MAC_STATUS);
5420 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5421 		current_link_up = 0;
5422 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5423 		    tp->serdes_counter == 0) {
5424 			tw32_f(MAC_MODE, (tp->mac_mode |
5425 					  MAC_MODE_SEND_CONFIGS));
5426 			udelay(1);
5427 			tw32_f(MAC_MODE, tp->mac_mode);
5428 		}
5429 	}
5430 
5431 	if (current_link_up == 1) {
5432 		tp->link_config.active_speed = SPEED_1000;
5433 		tp->link_config.active_duplex = DUPLEX_FULL;
5434 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5435 				    LED_CTRL_LNKLED_OVERRIDE |
5436 				    LED_CTRL_1000MBPS_ON));
5437 	} else {
5438 		tp->link_config.active_speed = SPEED_UNKNOWN;
5439 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5440 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5441 				    LED_CTRL_LNKLED_OVERRIDE |
5442 				    LED_CTRL_TRAFFIC_OVERRIDE));
5443 	}
5444 
5445 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5446 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5447 		if (orig_pause_cfg != now_pause_cfg ||
5448 		    orig_active_speed != tp->link_config.active_speed ||
5449 		    orig_active_duplex != tp->link_config.active_duplex)
5450 			tg3_link_report(tp);
5451 	}
5452 
5453 	return 0;
5454 }
5455 
5456 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5457 {
5458 	int current_link_up, err = 0;
5459 	u32 bmsr, bmcr;
5460 	u16 current_speed;
5461 	u8 current_duplex;
5462 	u32 local_adv, remote_adv;
5463 
5464 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5465 	tw32_f(MAC_MODE, tp->mac_mode);
5466 	udelay(40);
5467 
5468 	tw32(MAC_EVENT, 0);
5469 
5470 	tw32_f(MAC_STATUS,
5471 	     (MAC_STATUS_SYNC_CHANGED |
5472 	      MAC_STATUS_CFG_CHANGED |
5473 	      MAC_STATUS_MI_COMPLETION |
5474 	      MAC_STATUS_LNKSTATE_CHANGED));
5475 	udelay(40);
5476 
5477 	if (force_reset)
5478 		tg3_phy_reset(tp);
5479 
5480 	current_link_up = 0;
5481 	current_speed = SPEED_UNKNOWN;
5482 	current_duplex = DUPLEX_UNKNOWN;
5483 	tp->link_config.rmt_adv = 0;
5484 
5485 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5486 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5487 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5488 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5489 			bmsr |= BMSR_LSTATUS;
5490 		else
5491 			bmsr &= ~BMSR_LSTATUS;
5492 	}
5493 
5494 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5495 
5496 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5497 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5498 		/* do nothing, just check for link up at the end */
5499 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5500 		u32 adv, newadv;
5501 
5502 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5503 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5504 				 ADVERTISE_1000XPAUSE |
5505 				 ADVERTISE_1000XPSE_ASYM |
5506 				 ADVERTISE_SLCT);
5507 
5508 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5509 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5510 
5511 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5512 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5513 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5514 			tg3_writephy(tp, MII_BMCR, bmcr);
5515 
5516 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5517 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5518 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5519 
5520 			return err;
5521 		}
5522 	} else {
5523 		u32 new_bmcr;
5524 
5525 		bmcr &= ~BMCR_SPEED1000;
5526 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5527 
5528 		if (tp->link_config.duplex == DUPLEX_FULL)
5529 			new_bmcr |= BMCR_FULLDPLX;
5530 
5531 		if (new_bmcr != bmcr) {
5532 			/* BMCR_SPEED1000 is a reserved bit that needs
5533 			 * to be set on write.
5534 			 */
5535 			new_bmcr |= BMCR_SPEED1000;
5536 
5537 			/* Force a linkdown */
5538 			if (tp->link_up) {
5539 				u32 adv;
5540 
5541 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5542 				adv &= ~(ADVERTISE_1000XFULL |
5543 					 ADVERTISE_1000XHALF |
5544 					 ADVERTISE_SLCT);
5545 				tg3_writephy(tp, MII_ADVERTISE, adv);
5546 				tg3_writephy(tp, MII_BMCR, bmcr |
5547 							   BMCR_ANRESTART |
5548 							   BMCR_ANENABLE);
5549 				udelay(10);
5550 				tg3_carrier_off(tp);
5551 			}
5552 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5553 			bmcr = new_bmcr;
5554 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5555 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5556 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5557 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5558 					bmsr |= BMSR_LSTATUS;
5559 				else
5560 					bmsr &= ~BMSR_LSTATUS;
5561 			}
5562 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5563 		}
5564 	}
5565 
5566 	if (bmsr & BMSR_LSTATUS) {
5567 		current_speed = SPEED_1000;
5568 		current_link_up = 1;
5569 		if (bmcr & BMCR_FULLDPLX)
5570 			current_duplex = DUPLEX_FULL;
5571 		else
5572 			current_duplex = DUPLEX_HALF;
5573 
5574 		local_adv = 0;
5575 		remote_adv = 0;
5576 
5577 		if (bmcr & BMCR_ANENABLE) {
5578 			u32 common;
5579 
5580 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5581 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5582 			common = local_adv & remote_adv;
5583 			if (common & (ADVERTISE_1000XHALF |
5584 				      ADVERTISE_1000XFULL)) {
5585 				if (common & ADVERTISE_1000XFULL)
5586 					current_duplex = DUPLEX_FULL;
5587 				else
5588 					current_duplex = DUPLEX_HALF;
5589 
5590 				tp->link_config.rmt_adv =
5591 					   mii_adv_to_ethtool_adv_x(remote_adv);
5592 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5593 				/* Link is up via parallel detect */
5594 			} else {
5595 				current_link_up = 0;
5596 			}
5597 		}
5598 	}
5599 
5600 	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5601 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5602 
5603 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5604 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5605 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5606 
5607 	tw32_f(MAC_MODE, tp->mac_mode);
5608 	udelay(40);
5609 
5610 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5611 
5612 	tp->link_config.active_speed = current_speed;
5613 	tp->link_config.active_duplex = current_duplex;
5614 
5615 	tg3_test_and_report_link_chg(tp, current_link_up);
5616 	return err;
5617 }
5618 
5619 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5620 {
5621 	if (tp->serdes_counter) {
5622 		/* Give autoneg time to complete. */
5623 		tp->serdes_counter--;
5624 		return;
5625 	}
5626 
5627 	if (!tp->link_up &&
5628 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5629 		u32 bmcr;
5630 
5631 		tg3_readphy(tp, MII_BMCR, &bmcr);
5632 		if (bmcr & BMCR_ANENABLE) {
5633 			u32 phy1, phy2;
5634 
5635 			/* Select shadow register 0x1f */
5636 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5637 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5638 
5639 			/* Select expansion interrupt status register */
5640 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5641 					 MII_TG3_DSP_EXP1_INT_STAT);
5642 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5643 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5644 
5645 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5646 				/* We have signal detect and are not receiving
5647 				 * config code words, so the link is up by parallel
5648 				 * detection.
5649 				 */
5650 
5651 				bmcr &= ~BMCR_ANENABLE;
5652 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5653 				tg3_writephy(tp, MII_BMCR, bmcr);
5654 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5655 			}
5656 		}
5657 	} else if (tp->link_up &&
5658 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5659 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5660 		u32 phy2;
5661 
5662 		/* Select expansion interrupt status register */
5663 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5664 				 MII_TG3_DSP_EXP1_INT_STAT);
5665 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5666 		if (phy2 & 0x20) {
5667 			u32 bmcr;
5668 
5669 			/* Config code words received, turn on autoneg. */
5670 			tg3_readphy(tp, MII_BMCR, &bmcr);
5671 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5672 
5673 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5674 
5675 		}
5676 	}
5677 }
5678 
5679 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5680 {
5681 	u32 val;
5682 	int err;
5683 
5684 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5685 		err = tg3_setup_fiber_phy(tp, force_reset);
5686 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5687 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
5688 	else
5689 		err = tg3_setup_copper_phy(tp, force_reset);
5690 
5691 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5692 		u32 scale;
5693 
5694 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5695 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5696 			scale = 65;
5697 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5698 			scale = 6;
5699 		else
5700 			scale = 12;
5701 
5702 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5703 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5704 		tw32(GRC_MISC_CFG, val);
5705 	}
5706 
5707 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5708 	      (6 << TX_LENGTHS_IPG_SHIFT);
5709 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5710 	    tg3_asic_rev(tp) == ASIC_REV_5762)
5711 		val |= tr32(MAC_TX_LENGTHS) &
5712 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
5713 			TX_LENGTHS_CNT_DWN_VAL_MSK);
5714 
5715 	if (tp->link_config.active_speed == SPEED_1000 &&
5716 	    tp->link_config.active_duplex == DUPLEX_HALF)
5717 		tw32(MAC_TX_LENGTHS, val |
5718 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5719 	else
5720 		tw32(MAC_TX_LENGTHS, val |
5721 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5722 
5723 	if (!tg3_flag(tp, 5705_PLUS)) {
5724 		if (tp->link_up) {
5725 			tw32(HOSTCC_STAT_COAL_TICKS,
5726 			     tp->coal.stats_block_coalesce_usecs);
5727 		} else {
5728 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
5729 		}
5730 	}
5731 
5732 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
5733 		val = tr32(PCIE_PWR_MGMT_THRESH);
5734 		if (!tp->link_up)
5735 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5736 			      tp->pwrmgmt_thresh;
5737 		else
5738 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5739 		tw32(PCIE_PWR_MGMT_THRESH, val);
5740 	}
5741 
5742 	return err;
5743 }
5744 
5745 /* tp->lock must be held */
5746 static u64 tg3_refclk_read(struct tg3 *tp)
5747 {
5748 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5749 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5750 }
5751 
5752 /* tp->lock must be held */
5753 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5754 {
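	/* Stop the reference clock while the two 32-bit halves are
	 * written so that the 64-bit counter is loaded atomically,
	 * then let it run again.
	 */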
5755 	tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5756 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5757 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5758 	tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5759 }
5760 
5761 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5762 static inline void tg3_full_unlock(struct tg3 *tp);
5763 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5764 {
5765 	struct tg3 *tp = netdev_priv(dev);
5766 
5767 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5768 				SOF_TIMESTAMPING_RX_SOFTWARE |
5769 				SOF_TIMESTAMPING_SOFTWARE    |
5770 				SOF_TIMESTAMPING_TX_HARDWARE |
5771 				SOF_TIMESTAMPING_RX_HARDWARE |
5772 				SOF_TIMESTAMPING_RAW_HARDWARE;
5773 
5774 	if (tp->ptp_clock)
5775 		info->phc_index = ptp_clock_index(tp->ptp_clock);
5776 	else
5777 		info->phc_index = -1;
5778 
5779 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5780 
5781 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5782 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5783 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5784 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5785 	return 0;
5786 }
5787 
5788 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5789 {
5790 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5791 	bool neg_adj = false;
5792 	u32 correction = 0;
5793 
5794 	if (ppb < 0) {
5795 		neg_adj = true;
5796 		ppb = -ppb;
5797 	}
5798 
5799 	/* Frequency adjustment is performed using hardware with a 24 bit
5800 	 * accumulator and a programmable correction value. On each clk, the
5801 	 * correction value gets added to the accumulator and when it
5802 	 * overflows, the time counter is incremented/decremented.
5803 	 *
5804 	 * So conversion from ppb to correction value is
5805 	 *		ppb * (1 << 24) / 1000000000
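	 *
	 * For example, a 1 ppm adjustment (ppb = 1000) yields
	 *		1000 * (1 << 24) / 1000000000 = 16 (truncated).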
5806 	 */
5807 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5808 		     TG3_EAV_REF_CLK_CORRECT_MASK;
5809 
5810 	tg3_full_lock(tp, 0);
5811 
5812 	if (correction)
5813 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5814 		     TG3_EAV_REF_CLK_CORRECT_EN |
5815 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5816 	else
5817 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5818 
5819 	tg3_full_unlock(tp);
5820 
5821 	return 0;
5822 }
5823 
5824 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5825 {
5826 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5827 
5828 	tg3_full_lock(tp, 0);
5829 	tp->ptp_adjust += delta;
5830 	tg3_full_unlock(tp);
5831 
5832 	return 0;
5833 }
5834 
5835 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5836 {
5837 	u64 ns;
5838 	u32 remainder;
5839 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5840 
5841 	tg3_full_lock(tp, 0);
5842 	ns = tg3_refclk_read(tp);
5843 	ns += tp->ptp_adjust;
5844 	tg3_full_unlock(tp);
5845 
5846 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5847 	ts->tv_nsec = remainder;
5848 
5849 	return 0;
5850 }
5851 
5852 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5853 			   const struct timespec *ts)
5854 {
5855 	u64 ns;
5856 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5857 
5858 	ns = timespec_to_ns(ts);
5859 
5860 	tg3_full_lock(tp, 0);
5861 	tg3_refclk_write(tp, ns);
5862 	tp->ptp_adjust = 0;
5863 	tg3_full_unlock(tp);
5864 
5865 	return 0;
5866 }
5867 
5868 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5869 			  struct ptp_clock_request *rq, int on)
5870 {
5871 	return -EOPNOTSUPP;
5872 }
5873 
5874 static const struct ptp_clock_info tg3_ptp_caps = {
5875 	.owner		= THIS_MODULE,
5876 	.name		= "tg3 clock",
5877 	.max_adj	= 250000000,
5878 	.n_alarm	= 0,
5879 	.n_ext_ts	= 0,
5880 	.n_per_out	= 0,
5881 	.pps		= 0,
5882 	.adjfreq	= tg3_ptp_adjfreq,
5883 	.adjtime	= tg3_ptp_adjtime,
5884 	.gettime	= tg3_ptp_gettime,
5885 	.settime	= tg3_ptp_settime,
5886 	.enable		= tg3_ptp_enable,
5887 };
5888 
5889 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5890 				     struct skb_shared_hwtstamps *timestamp)
5891 {
5892 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5893 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5894 					   tp->ptp_adjust);
5895 }
5896 
5897 /* tp->lock must be held */
5898 static void tg3_ptp_init(struct tg3 *tp)
5899 {
5900 	if (!tg3_flag(tp, PTP_CAPABLE))
5901 		return;
5902 
5903 	/* Initialize the hardware clock to the system time. */
5904 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5905 	tp->ptp_adjust = 0;
5906 	tp->ptp_info = tg3_ptp_caps;
5907 }
5908 
5909 /* tp->lock must be held */
5910 static void tg3_ptp_resume(struct tg3 *tp)
5911 {
5912 	if (!tg3_flag(tp, PTP_CAPABLE))
5913 		return;
5914 
5915 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5916 	tp->ptp_adjust = 0;
5917 }
5918 
5919 static void tg3_ptp_fini(struct tg3 *tp)
5920 {
5921 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5922 		return;
5923 
5924 	ptp_clock_unregister(tp->ptp_clock);
5925 	tp->ptp_clock = NULL;
5926 	tp->ptp_adjust = 0;
5927 }
5928 
5929 static inline int tg3_irq_sync(struct tg3 *tp)
5930 {
5931 	return tp->irq_sync;
5932 }
5933 
5934 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5935 {
5936 	int i;
5937 
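	/* Bias the destination pointer by 'off' so that each register
	 * value lands at the same offset within the dump buffer that
	 * the register occupies in the device's register space.
	 */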
5938 	dst = (u32 *)((u8 *)dst + off);
5939 	for (i = 0; i < len; i += sizeof(u32))
5940 		*dst++ = tr32(off + i);
5941 }
5942 
5943 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5944 {
5945 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5946 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5947 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5948 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5949 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5950 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5951 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5952 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5953 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5954 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5955 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5956 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5957 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5958 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5959 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5960 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5961 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5962 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5963 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5964 
5965 	if (tg3_flag(tp, SUPPORT_MSIX))
5966 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5967 
5968 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5969 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5970 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5971 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5972 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5973 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5974 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5975 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5976 
5977 	if (!tg3_flag(tp, 5705_PLUS)) {
5978 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5979 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5980 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5981 	}
5982 
5983 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5984 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5985 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5986 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5987 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5988 
5989 	if (tg3_flag(tp, NVRAM))
5990 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5991 }
5992 
5993 static void tg3_dump_state(struct tg3 *tp)
5994 {
5995 	int i;
5996 	u32 *regs;
5997 
5998 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5999 	if (!regs)
6000 		return;
6001 
6002 	if (tg3_flag(tp, PCI_EXPRESS)) {
6003 		/* Read up to but not including private PCI registers */
6004 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6005 			regs[i / sizeof(u32)] = tr32(i);
6006 	} else
6007 		tg3_dump_legacy_regs(tp, regs);
6008 
6009 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6010 		if (!regs[i + 0] && !regs[i + 1] &&
6011 		    !regs[i + 2] && !regs[i + 3])
6012 			continue;
6013 
6014 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6015 			   i * 4,
6016 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6017 	}
6018 
6019 	kfree(regs);
6020 
6021 	for (i = 0; i < tp->irq_cnt; i++) {
6022 		struct tg3_napi *tnapi = &tp->napi[i];
6023 
6024 		/* SW status block */
6025 		netdev_err(tp->dev,
6026 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6027 			   i,
6028 			   tnapi->hw_status->status,
6029 			   tnapi->hw_status->status_tag,
6030 			   tnapi->hw_status->rx_jumbo_consumer,
6031 			   tnapi->hw_status->rx_consumer,
6032 			   tnapi->hw_status->rx_mini_consumer,
6033 			   tnapi->hw_status->idx[0].rx_producer,
6034 			   tnapi->hw_status->idx[0].tx_consumer);
6035 
6036 		netdev_err(tp->dev,
6037 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6038 			   i,
6039 			   tnapi->last_tag, tnapi->last_irq_tag,
6040 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6041 			   tnapi->rx_rcb_ptr,
6042 			   tnapi->prodring.rx_std_prod_idx,
6043 			   tnapi->prodring.rx_std_cons_idx,
6044 			   tnapi->prodring.rx_jmb_prod_idx,
6045 			   tnapi->prodring.rx_jmb_cons_idx);
6046 	}
6047 }
6048 
6049 /* This is called whenever we suspect that the system chipset is re-
6050  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6051  * is bogus tx completions. We try to recover by setting the
6052  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6053  * in the workqueue.
6054  */
6055 static void tg3_tx_recover(struct tg3 *tp)
6056 {
6057 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6058 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6059 
6060 	netdev_warn(tp->dev,
6061 		    "The system may be re-ordering memory-mapped I/O "
6062 		    "cycles to the network device, attempting to recover. "
6063 		    "Please report the problem to the driver maintainer "
6064 		    "and include system chipset information.\n");
6065 
6066 	spin_lock(&tp->lock);
6067 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6068 	spin_unlock(&tp->lock);
6069 }
6070 
6071 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6072 {
6073 	/* Tell compiler to fetch tx indices from memory. */
6074 	barrier();
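	/* The producer/consumer difference is taken modulo the ring
	 * size, so the in-flight descriptor count stays correct even
	 * after the free-running indices wrap.
	 */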
6075 	return tnapi->tx_pending -
6076 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6077 }
6078 
6079 /* Tigon3 never reports partial packet sends.  So we do not
6080  * need special logic to handle SKBs that have not had all
6081  * of their frags sent yet, like SunGEM does.
6082  */
6083 static void tg3_tx(struct tg3_napi *tnapi)
6084 {
6085 	struct tg3 *tp = tnapi->tp;
6086 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6087 	u32 sw_idx = tnapi->tx_cons;
6088 	struct netdev_queue *txq;
6089 	int index = tnapi - tp->napi;
6090 	unsigned int pkts_compl = 0, bytes_compl = 0;
6091 
6092 	if (tg3_flag(tp, ENABLE_TSS))
6093 		index--;
6094 
6095 	txq = netdev_get_tx_queue(tp->dev, index);
6096 
6097 	while (sw_idx != hw_idx) {
6098 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6099 		struct sk_buff *skb = ri->skb;
6100 		int i, tx_bug = 0;
6101 
6102 		if (unlikely(skb == NULL)) {
6103 			tg3_tx_recover(tp);
6104 			return;
6105 		}
6106 
6107 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6108 			struct skb_shared_hwtstamps timestamp;
6109 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6110 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6111 
6112 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6113 
6114 			skb_tstamp_tx(skb, &timestamp);
6115 		}
6116 
6117 		pci_unmap_single(tp->pdev,
6118 				 dma_unmap_addr(ri, mapping),
6119 				 skb_headlen(skb),
6120 				 PCI_DMA_TODEVICE);
6121 
6122 		ri->skb = NULL;
6123 
6124 		while (ri->fragmented) {
6125 			ri->fragmented = false;
6126 			sw_idx = NEXT_TX(sw_idx);
6127 			ri = &tnapi->tx_buffers[sw_idx];
6128 		}
6129 
6130 		sw_idx = NEXT_TX(sw_idx);
6131 
6132 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6133 			ri = &tnapi->tx_buffers[sw_idx];
6134 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6135 				tx_bug = 1;
6136 
6137 			pci_unmap_page(tp->pdev,
6138 				       dma_unmap_addr(ri, mapping),
6139 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6140 				       PCI_DMA_TODEVICE);
6141 
6142 			while (ri->fragmented) {
6143 				ri->fragmented = false;
6144 				sw_idx = NEXT_TX(sw_idx);
6145 				ri = &tnapi->tx_buffers[sw_idx];
6146 			}
6147 
6148 			sw_idx = NEXT_TX(sw_idx);
6149 		}
6150 
6151 		pkts_compl++;
6152 		bytes_compl += skb->len;
6153 
6154 		dev_kfree_skb(skb);
6155 
6156 		if (unlikely(tx_bug)) {
6157 			tg3_tx_recover(tp);
6158 			return;
6159 		}
6160 	}
6161 
6162 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6163 
6164 	tnapi->tx_cons = sw_idx;
6165 
6166 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6167 	 * before checking for netif_queue_stopped().  Without the
6168 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6169 	 * will miss it and cause the queue to be stopped forever.
6170 	 */
6171 	smp_mb();
6172 
6173 	if (unlikely(netif_tx_queue_stopped(txq) &&
6174 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6175 		__netif_tx_lock(txq, smp_processor_id());
6176 		if (netif_tx_queue_stopped(txq) &&
6177 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6178 			netif_tx_wake_queue(txq);
6179 		__netif_tx_unlock(txq);
6180 	}
6181 }
6182 
6183 static void tg3_frag_free(bool is_frag, void *data)
6184 {
6185 	if (is_frag)
6186 		put_page(virt_to_head_page(data));
6187 	else
6188 		kfree(data);
6189 }
6190 
6191 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6192 {
6193 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6194 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6195 
6196 	if (!ri->data)
6197 		return;
6198 
6199 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6200 			 map_sz, PCI_DMA_FROMDEVICE);
6201 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6202 	ri->data = NULL;
6203 }
6204 
6205 
6206 /* Returns the size of the rx data buffer allocated, or < 0 on error.
6207  *
6208  * We only need to fill in the address because the other members
6209  * of the RX descriptor are invariant, see tg3_init_rings.
6210  *
6211  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6212  * posting buffers we only dirty the first cache line of the RX
6213  * descriptor (containing the address).  Whereas for the RX status
6214  * buffers the cpu only reads the last cacheline of the RX descriptor
6215  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6216  */
6217 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6218 			     u32 opaque_key, u32 dest_idx_unmasked,
6219 			     unsigned int *frag_size)
6220 {
6221 	struct tg3_rx_buffer_desc *desc;
6222 	struct ring_info *map;
6223 	u8 *data;
6224 	dma_addr_t mapping;
6225 	int skb_size, data_size, dest_idx;
6226 
6227 	switch (opaque_key) {
6228 	case RXD_OPAQUE_RING_STD:
6229 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6230 		desc = &tpr->rx_std[dest_idx];
6231 		map = &tpr->rx_std_buffers[dest_idx];
6232 		data_size = tp->rx_pkt_map_sz;
6233 		break;
6234 
6235 	case RXD_OPAQUE_RING_JUMBO:
6236 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6237 		desc = &tpr->rx_jmb[dest_idx].std;
6238 		map = &tpr->rx_jmb_buffers[dest_idx];
6239 		data_size = TG3_RX_JMB_MAP_SZ;
6240 		break;
6241 
6242 	default:
6243 		return -EINVAL;
6244 	}
6245 
6246 	/* Do not overwrite any of the map or rp information
6247 	 * until we are sure we can commit to a new buffer.
6248 	 *
6249 	 * Callers depend upon this behavior and assume that
6250 	 * we leave everything unchanged if we fail.
6251 	 */
6252 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6253 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
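	/* When the buffer plus the shared info area fits within a page,
	 * take it from the cheaper page-fragment allocator; larger
	 * (e.g. jumbo) buffers fall back to kmalloc.
	 */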
6254 	if (skb_size <= PAGE_SIZE) {
6255 		data = netdev_alloc_frag(skb_size);
6256 		*frag_size = skb_size;
6257 	} else {
6258 		data = kmalloc(skb_size, GFP_ATOMIC);
6259 		*frag_size = 0;
6260 	}
6261 	if (!data)
6262 		return -ENOMEM;
6263 
6264 	mapping = pci_map_single(tp->pdev,
6265 				 data + TG3_RX_OFFSET(tp),
6266 				 data_size,
6267 				 PCI_DMA_FROMDEVICE);
6268 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6269 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6270 		return -EIO;
6271 	}
6272 
6273 	map->data = data;
6274 	dma_unmap_addr_set(map, mapping, mapping);
6275 
6276 	desc->addr_hi = ((u64)mapping >> 32);
6277 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6278 
6279 	return data_size;
6280 }
6281 
6282 /* We only need to move over in the address because the other
6283  * members of the RX descriptor are invariant.  See notes above
6284  * tg3_alloc_rx_data for full details.
6285  */
6286 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6287 			   struct tg3_rx_prodring_set *dpr,
6288 			   u32 opaque_key, int src_idx,
6289 			   u32 dest_idx_unmasked)
6290 {
6291 	struct tg3 *tp = tnapi->tp;
6292 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6293 	struct ring_info *src_map, *dest_map;
6294 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6295 	int dest_idx;
6296 
6297 	switch (opaque_key) {
6298 	case RXD_OPAQUE_RING_STD:
6299 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6300 		dest_desc = &dpr->rx_std[dest_idx];
6301 		dest_map = &dpr->rx_std_buffers[dest_idx];
6302 		src_desc = &spr->rx_std[src_idx];
6303 		src_map = &spr->rx_std_buffers[src_idx];
6304 		break;
6305 
6306 	case RXD_OPAQUE_RING_JUMBO:
6307 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6308 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6309 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6310 		src_desc = &spr->rx_jmb[src_idx].std;
6311 		src_map = &spr->rx_jmb_buffers[src_idx];
6312 		break;
6313 
6314 	default:
6315 		return;
6316 	}
6317 
6318 	dest_map->data = src_map->data;
6319 	dma_unmap_addr_set(dest_map, mapping,
6320 			   dma_unmap_addr(src_map, mapping));
6321 	dest_desc->addr_hi = src_desc->addr_hi;
6322 	dest_desc->addr_lo = src_desc->addr_lo;
6323 
6324 	/* Ensure that the update to the skb happens after the physical
6325 	 * addresses have been transferred to the new BD location.
6326 	 */
6327 	smp_wmb();
6328 
6329 	src_map->data = NULL;
6330 }
6331 
6332 /* The RX ring scheme is composed of multiple rings which post fresh
6333  * buffers to the chip, and one special ring the chip uses to report
6334  * status back to the host.
6335  *
6336  * The special ring reports the status of received packets to the
6337  * host.  The chip does not write into the original descriptor the
6338  * RX buffer was obtained from.  The chip simply takes the original
6339  * descriptor as provided by the host, updates the status and length
6340  * field, then writes this into the next status ring entry.
6341  *
6342  * Each ring the host uses to post buffers to the chip is described
6343  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6344  * it is first placed into the on-chip ram.  When the packet's length
6345  * is known, it walks down the TG3_BDINFO entries to select the ring.
6346  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6347  * which is within the range of the new packet's length is chosen.
6348  *
6349  * The "separate ring for rx status" scheme may sound queer, but it makes
6350  * sense from a cache coherency perspective.  If only the host writes
6351  * to the buffer post rings, and only the chip writes to the rx status
6352  * rings, then cache lines never move beyond shared-modified state.
6353  * If both the host and chip were to write into the same ring, cache line
6354  * eviction could occur since both entities want it in an exclusive state.
6355  */
6356 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6357 {
6358 	struct tg3 *tp = tnapi->tp;
6359 	u32 work_mask, rx_std_posted = 0;
6360 	u32 std_prod_idx, jmb_prod_idx;
6361 	u32 sw_idx = tnapi->rx_rcb_ptr;
6362 	u16 hw_idx;
6363 	int received;
6364 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6365 
6366 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6367 	/*
6368 	 * We need to order the read of hw_idx and the read of
6369 	 * the opaque cookie.
6370 	 */
6371 	rmb();
6372 	work_mask = 0;
6373 	received = 0;
6374 	std_prod_idx = tpr->rx_std_prod_idx;
6375 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6376 	while (sw_idx != hw_idx && budget > 0) {
6377 		struct ring_info *ri;
6378 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6379 		unsigned int len;
6380 		struct sk_buff *skb;
6381 		dma_addr_t dma_addr;
6382 		u32 opaque_key, desc_idx, *post_ptr;
6383 		u8 *data;
6384 		u64 tstamp = 0;
6385 
6386 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6387 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6388 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6389 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6390 			dma_addr = dma_unmap_addr(ri, mapping);
6391 			data = ri->data;
6392 			post_ptr = &std_prod_idx;
6393 			rx_std_posted++;
6394 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6395 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6396 			dma_addr = dma_unmap_addr(ri, mapping);
6397 			data = ri->data;
6398 			post_ptr = &jmb_prod_idx;
6399 		} else
6400 			goto next_pkt_nopost;
6401 
6402 		work_mask |= opaque_key;
6403 
6404 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6405 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6406 		drop_it:
6407 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6408 				       desc_idx, *post_ptr);
6409 		drop_it_no_recycle:
6410 			/* Other statistics are tracked by the card itself. */
6411 			tp->rx_dropped++;
6412 			goto next_pkt;
6413 		}
6414 
6415 		prefetch(data + TG3_RX_OFFSET(tp));
6416 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6417 		      ETH_FCS_LEN;
6418 
6419 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6420 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6421 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6422 		     RXD_FLAG_PTPSTAT_PTPV2) {
6423 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6424 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6425 		}
6426 
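		/* Large packets take the zero-copy path: the data buffer
		 * is handed off to a new skb and a fresh buffer is posted
		 * in its place.  Small packets are copied into a newly
		 * allocated skb so the original buffer can be recycled.
		 */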
6427 		if (len > TG3_RX_COPY_THRESH(tp)) {
6428 			int skb_size;
6429 			unsigned int frag_size;
6430 
6431 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6432 						    *post_ptr, &frag_size);
6433 			if (skb_size < 0)
6434 				goto drop_it;
6435 
6436 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6437 					 PCI_DMA_FROMDEVICE);
6438 
6439 			skb = build_skb(data, frag_size);
6440 			if (!skb) {
6441 				tg3_frag_free(frag_size != 0, data);
6442 				goto drop_it_no_recycle;
6443 			}
6444 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6445 			/* Ensure that the update to the data happens
6446 			 * after the usage of the old DMA mapping.
6447 			 */
6448 			smp_wmb();
6449 
6450 			ri->data = NULL;
6451 
6452 		} else {
6453 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6454 				       desc_idx, *post_ptr);
6455 
6456 			skb = netdev_alloc_skb(tp->dev,
6457 					       len + TG3_RAW_IP_ALIGN);
6458 			if (skb == NULL)
6459 				goto drop_it_no_recycle;
6460 
6461 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6462 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
						    PCI_DMA_FROMDEVICE);
6463 			memcpy(skb->data,
6464 			       data + TG3_RX_OFFSET(tp),
6465 			       len);
6466 			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       len, PCI_DMA_FROMDEVICE);
6467 		}
6468 
6469 		skb_put(skb, len);
6470 		if (tstamp)
6471 			tg3_hwclock_to_timestamp(tp, tstamp,
6472 						 skb_hwtstamps(skb));
6473 
6474 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6475 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6476 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6477 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6478 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6479 		else
6480 			skb_checksum_none_assert(skb);
6481 
6482 		skb->protocol = eth_type_trans(skb, tp->dev);
6483 
6484 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6485 		    skb->protocol != htons(ETH_P_8021Q)) {
6486 			dev_kfree_skb(skb);
6487 			goto drop_it_no_recycle;
6488 		}
6489 
6490 		if (desc->type_flags & RXD_FLAG_VLAN &&
6491 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6492 			__vlan_hwaccel_put_tag(skb,
6493 					       desc->err_vlan & RXD_VLAN_MASK);
6494 
6495 		napi_gro_receive(&tnapi->napi, skb);
6496 
6497 		received++;
6498 		budget--;
6499 
6500 next_pkt:
6501 		(*post_ptr)++;
6502 
6503 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6504 			tpr->rx_std_prod_idx = std_prod_idx &
6505 					       tp->rx_std_ring_mask;
6506 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6507 				     tpr->rx_std_prod_idx);
6508 			work_mask &= ~RXD_OPAQUE_RING_STD;
6509 			rx_std_posted = 0;
6510 		}
6511 next_pkt_nopost:
6512 		sw_idx++;
6513 		sw_idx &= tp->rx_ret_ring_mask;
6514 
6515 		/* Refresh hw_idx to see if there is new work */
6516 		if (sw_idx == hw_idx) {
6517 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6518 			rmb();
6519 		}
6520 	}
6521 
6522 	/* ACK the status ring. */
6523 	tnapi->rx_rcb_ptr = sw_idx;
6524 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6525 
6526 	/* Refill RX ring(s). */
6527 	if (!tg3_flag(tp, ENABLE_RSS)) {
6528 		/* Sync BD data before updating mailbox */
6529 		wmb();
6530 
6531 		if (work_mask & RXD_OPAQUE_RING_STD) {
6532 			tpr->rx_std_prod_idx = std_prod_idx &
6533 					       tp->rx_std_ring_mask;
6534 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6535 				     tpr->rx_std_prod_idx);
6536 		}
6537 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6538 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6539 					       tp->rx_jmb_ring_mask;
6540 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6541 				     tpr->rx_jmb_prod_idx);
6542 		}
6543 		mmiowb();
6544 	} else if (work_mask) {
6545 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6546 		 * updated before the producer indices can be updated.
6547 		 */
6548 		smp_wmb();
6549 
6550 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6551 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6552 
6553 		if (tnapi != &tp->napi[1]) {
6554 			tp->rx_refill = true;
6555 			napi_schedule(&tp->napi[1].napi);
6556 		}
6557 	}
6558 
6559 	return received;
6560 }
6561 
6562 static void tg3_poll_link(struct tg3 *tp)
6563 {
6564 	/* handle link change and other phy events */
6565 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6566 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6567 
6568 		if (sblk->status & SD_STATUS_LINK_CHG) {
6569 			sblk->status = SD_STATUS_UPDATED |
6570 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6571 			spin_lock(&tp->lock);
6572 			if (tg3_flag(tp, USE_PHYLIB)) {
6573 				tw32_f(MAC_STATUS,
6574 				     (MAC_STATUS_SYNC_CHANGED |
6575 				      MAC_STATUS_CFG_CHANGED |
6576 				      MAC_STATUS_MI_COMPLETION |
6577 				      MAC_STATUS_LNKSTATE_CHANGED));
6578 				udelay(40);
6579 			} else
6580 				tg3_setup_phy(tp, 0);
6581 			spin_unlock(&tp->lock);
6582 		}
6583 	}
6584 }
6585 
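/* Move recycled rx buffers accumulated in a per-vector producer ring
 * shadow (spr) over to the real producer ring (dpr) owned by napi[0],
 * which is the ring the hardware actually consumes from.
 */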
6586 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6587 				struct tg3_rx_prodring_set *dpr,
6588 				struct tg3_rx_prodring_set *spr)
6589 {
6590 	u32 si, di, cpycnt, src_prod_idx;
6591 	int i, err = 0;
6592 
6593 	while (1) {
6594 		src_prod_idx = spr->rx_std_prod_idx;
6595 
6596 		/* Make sure updates to the rx_std_buffers[] entries and the
6597 		 * standard producer index are seen in the correct order.
6598 		 */
6599 		smp_rmb();
6600 
6601 		if (spr->rx_std_cons_idx == src_prod_idx)
6602 			break;
6603 
6604 		if (spr->rx_std_cons_idx < src_prod_idx)
6605 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6606 		else
6607 			cpycnt = tp->rx_std_ring_mask + 1 -
6608 				 spr->rx_std_cons_idx;
6609 
6610 		cpycnt = min(cpycnt,
6611 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6612 
6613 		si = spr->rx_std_cons_idx;
6614 		di = dpr->rx_std_prod_idx;
6615 
6616 		for (i = di; i < di + cpycnt; i++) {
6617 			if (dpr->rx_std_buffers[i].data) {
6618 				cpycnt = i - di;
6619 				err = -ENOSPC;
6620 				break;
6621 			}
6622 		}
6623 
6624 		if (!cpycnt)
6625 			break;
6626 
6627 		/* Ensure that updates to the rx_std_buffers ring and the
6628 		 * shadowed hardware producer ring from tg3_recycle_rx() are
6629 		 * ordered correctly WRT the buffer check above.
6630 		 */
6631 		smp_rmb();
6632 
6633 		memcpy(&dpr->rx_std_buffers[di],
6634 		       &spr->rx_std_buffers[si],
6635 		       cpycnt * sizeof(struct ring_info));
6636 
6637 		for (i = 0; i < cpycnt; i++, di++, si++) {
6638 			struct tg3_rx_buffer_desc *sbd, *dbd;
6639 			sbd = &spr->rx_std[si];
6640 			dbd = &dpr->rx_std[di];
6641 			dbd->addr_hi = sbd->addr_hi;
6642 			dbd->addr_lo = sbd->addr_lo;
6643 		}
6644 
6645 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6646 				       tp->rx_std_ring_mask;
6647 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6648 				       tp->rx_std_ring_mask;
6649 	}
6650 
6651 	while (1) {
6652 		src_prod_idx = spr->rx_jmb_prod_idx;
6653 
6654 		/* Make sure updates to the rx_jmb_buffers[] entries and
6655 		 * the jumbo producer index are seen in the correct order.
6656 		 */
6657 		smp_rmb();
6658 
6659 		if (spr->rx_jmb_cons_idx == src_prod_idx)
6660 			break;
6661 
6662 		if (spr->rx_jmb_cons_idx < src_prod_idx)
6663 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6664 		else
6665 			cpycnt = tp->rx_jmb_ring_mask + 1 -
6666 				 spr->rx_jmb_cons_idx;
6667 
6668 		cpycnt = min(cpycnt,
6669 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6670 
6671 		si = spr->rx_jmb_cons_idx;
6672 		di = dpr->rx_jmb_prod_idx;
6673 
6674 		for (i = di; i < di + cpycnt; i++) {
6675 			if (dpr->rx_jmb_buffers[i].data) {
6676 				cpycnt = i - di;
6677 				err = -ENOSPC;
6678 				break;
6679 			}
6680 		}
6681 
6682 		if (!cpycnt)
6683 			break;
6684 
6685 		/* Ensure that updates to the rx_jmb_buffers ring and the
6686 		 * shadowed hardware producer ring from tg3_recycle_rx() are
6687 		 * ordered correctly WRT the buffer check above.
6688 		 */
6689 		smp_rmb();
6690 
6691 		memcpy(&dpr->rx_jmb_buffers[di],
6692 		       &spr->rx_jmb_buffers[si],
6693 		       cpycnt * sizeof(struct ring_info));
6694 
6695 		for (i = 0; i < cpycnt; i++, di++, si++) {
6696 			struct tg3_rx_buffer_desc *sbd, *dbd;
6697 			sbd = &spr->rx_jmb[si].std;
6698 			dbd = &dpr->rx_jmb[di].std;
6699 			dbd->addr_hi = sbd->addr_hi;
6700 			dbd->addr_lo = sbd->addr_lo;
6701 		}
6702 
6703 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6704 				       tp->rx_jmb_ring_mask;
6705 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6706 				       tp->rx_jmb_ring_mask;
6707 	}
6708 
6709 	return err;
6710 }
6711 
6712 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6713 {
6714 	struct tg3 *tp = tnapi->tp;
6715 
6716 	/* run TX completion thread */
6717 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6718 		tg3_tx(tnapi);
6719 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6720 			return work_done;
6721 	}
6722 
6723 	if (!tnapi->rx_rcb_prod_idx)
6724 		return work_done;
6725 
6726 	/* run RX thread, within the bounds set by NAPI.
6727 	 * All RX "locking" is done by ensuring outside
6728 	 * code synchronizes with tg3->napi.poll()
6729 	 */
6730 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6731 		work_done += tg3_rx(tnapi, budget - work_done);
6732 
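	/* With RSS, napi[1] acts as the designated refiller: it drains
	 * the buffers recycled into every vector's producer ring shadow
	 * back into napi[0]'s real ring and updates the mailboxes below
	 * if anything moved.
	 */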
6733 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6734 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6735 		int i, err = 0;
6736 		u32 std_prod_idx = dpr->rx_std_prod_idx;
6737 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6738 
6739 		tp->rx_refill = false;
6740 		for (i = 1; i <= tp->rxq_cnt; i++)
6741 			err |= tg3_rx_prodring_xfer(tp, dpr,
6742 						    &tp->napi[i].prodring);
6743 
6744 		wmb();
6745 
6746 		if (std_prod_idx != dpr->rx_std_prod_idx)
6747 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6748 				     dpr->rx_std_prod_idx);
6749 
6750 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6751 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6752 				     dpr->rx_jmb_prod_idx);
6753 
6754 		mmiowb();
6755 
6756 		if (err)
6757 			tw32_f(HOSTCC_MODE, tp->coal_now);
6758 	}
6759 
6760 	return work_done;
6761 }
6762 
6763 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6764 {
6765 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6766 		schedule_work(&tp->reset_task);
6767 }
6768 
6769 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6770 {
6771 	cancel_work_sync(&tp->reset_task);
6772 	tg3_flag_clear(tp, RESET_TASK_PENDING);
6773 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6774 }
6775 
6776 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6777 {
6778 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6779 	struct tg3 *tp = tnapi->tp;
6780 	int work_done = 0;
6781 	struct tg3_hw_status *sblk = tnapi->hw_status;
6782 
6783 	while (1) {
6784 		work_done = tg3_poll_work(tnapi, work_done, budget);
6785 
6786 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6787 			goto tx_recovery;
6788 
6789 		if (unlikely(work_done >= budget))
6790 			break;
6791 
6792 		/* tnapi->last_tag is written to the interrupt mailbox below
6793 		 * to tell the hw how much work has been processed,
6794 		 * so we must read it before checking for more work.
6795 		 */
6796 		tnapi->last_tag = sblk->status_tag;
6797 		tnapi->last_irq_tag = tnapi->last_tag;
6798 		rmb();
6799 
6800 		/* check for RX/TX work to do */
6801 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6802 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6803 
6804 			/* This test here is not race free, but will reduce
6805 			 * the number of interrupts by looping again.
6806 			 */
6807 			if (tnapi == &tp->napi[1] && tp->rx_refill)
6808 				continue;
6809 
6810 			napi_complete(napi);
6811 			/* Reenable interrupts. */
6812 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6813 
6814 			/* This test here is synchronized by napi_schedule()
6815 			 * and napi_complete() to close the race condition.
6816 			 */
6817 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6818 				tw32(HOSTCC_MODE, tp->coalesce_mode |
6819 						  HOSTCC_MODE_ENABLE |
6820 						  tnapi->coal_now);
6821 			}
6822 			mmiowb();
6823 			break;
6824 		}
6825 	}
6826 
6827 	return work_done;
6828 
6829 tx_recovery:
6830 	/* work_done is guaranteed to be less than budget. */
6831 	napi_complete(napi);
6832 	tg3_reset_task_schedule(tp);
6833 	return work_done;
6834 }
6835 
6836 static void tg3_process_error(struct tg3 *tp)
6837 {
6838 	u32 val;
6839 	bool real_error = false;
6840 
6841 	if (tg3_flag(tp, ERROR_PROCESSED))
6842 		return;
6843 
6844 	/* Check Flow Attention register */
6845 	val = tr32(HOSTCC_FLOW_ATTN);
6846 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6847 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6848 		real_error = true;
6849 	}
6850 
6851 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6852 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6853 		real_error = true;
6854 	}
6855 
6856 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6857 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6858 		real_error = true;
6859 	}
6860 
6861 	if (!real_error)
6862 		return;
6863 
6864 	tg3_dump_state(tp);
6865 
6866 	tg3_flag_set(tp, ERROR_PROCESSED);
6867 	tg3_reset_task_schedule(tp);
6868 }
6869 
6870 static int tg3_poll(struct napi_struct *napi, int budget)
6871 {
6872 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6873 	struct tg3 *tp = tnapi->tp;
6874 	int work_done = 0;
6875 	struct tg3_hw_status *sblk = tnapi->hw_status;
6876 
6877 	while (1) {
6878 		if (sblk->status & SD_STATUS_ERROR)
6879 			tg3_process_error(tp);
6880 
6881 		tg3_poll_link(tp);
6882 
6883 		work_done = tg3_poll_work(tnapi, work_done, budget);
6884 
6885 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6886 			goto tx_recovery;
6887 
6888 		if (unlikely(work_done >= budget))
6889 			break;
6890 
6891 		if (tg3_flag(tp, TAGGED_STATUS)) {
6892 			/* tnapi->last_tag is used in tg3_int_reenable() below
6893 			 * to tell the hw how much work has been processed,
6894 			 * so we must read it before checking for more work.
6895 			 */
6896 			tnapi->last_tag = sblk->status_tag;
6897 			tnapi->last_irq_tag = tnapi->last_tag;
6898 			rmb();
6899 		} else
6900 			sblk->status &= ~SD_STATUS_UPDATED;
6901 
6902 		if (likely(!tg3_has_work(tnapi))) {
6903 			napi_complete(napi);
6904 			tg3_int_reenable(tnapi);
6905 			break;
6906 		}
6907 	}
6908 
6909 	return work_done;
6910 
6911 tx_recovery:
6912 	/* work_done is guaranteed to be less than budget. */
6913 	napi_complete(napi);
6914 	tg3_reset_task_schedule(tp);
6915 	return work_done;
6916 }
6917 
6918 static void tg3_napi_disable(struct tg3 *tp)
6919 {
6920 	int i;
6921 
6922 	for (i = tp->irq_cnt - 1; i >= 0; i--)
6923 		napi_disable(&tp->napi[i].napi);
6924 }
6925 
6926 static void tg3_napi_enable(struct tg3 *tp)
6927 {
6928 	int i;
6929 
6930 	for (i = 0; i < tp->irq_cnt; i++)
6931 		napi_enable(&tp->napi[i].napi);
6932 }
6933 
6934 static void tg3_napi_init(struct tg3 *tp)
6935 {
6936 	int i;
6937 
6938 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6939 	for (i = 1; i < tp->irq_cnt; i++)
6940 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6941 }
6942 
6943 static void tg3_napi_fini(struct tg3 *tp)
6944 {
6945 	int i;
6946 
6947 	for (i = 0; i < tp->irq_cnt; i++)
6948 		netif_napi_del(&tp->napi[i].napi);
6949 }
6950 
6951 static inline void tg3_netif_stop(struct tg3 *tp)
6952 {
6953 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
6954 	tg3_napi_disable(tp);
6955 	netif_carrier_off(tp->dev);
6956 	netif_tx_disable(tp->dev);
6957 }
6958 
6959 /* tp->lock must be held */
6960 static inline void tg3_netif_start(struct tg3 *tp)
6961 {
6962 	tg3_ptp_resume(tp);
6963 
6964 	/* NOTE: unconditional netif_tx_wake_all_queues is only
6965 	 * appropriate so long as all callers are assured to
6966 	 * have free tx slots (such as after tg3_init_hw)
6967 	 */
6968 	netif_tx_wake_all_queues(tp->dev);
6969 
6970 	if (tp->link_up)
6971 		netif_carrier_on(tp->dev);
6972 
6973 	tg3_napi_enable(tp);
6974 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6975 	tg3_enable_ints(tp);
6976 }
6977 
6978 static void tg3_irq_quiesce(struct tg3 *tp)
6979 {
6980 	int i;
6981 
6982 	BUG_ON(tp->irq_sync);
6983 
6984 	tp->irq_sync = 1;
6985 	smp_mb();
6986 
6987 	for (i = 0; i < tp->irq_cnt; i++)
6988 		synchronize_irq(tp->napi[i].irq_vec);
6989 }
6990 
6991 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6992  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6993  * with as well.  Most of the time, this is not necessary except when
6994  * shutting down the device.
6995  */
6996 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6997 {
6998 	spin_lock_bh(&tp->lock);
6999 	if (irq_sync)
7000 		tg3_irq_quiesce(tp);
7001 }
7002 
7003 static inline void tg3_full_unlock(struct tg3 *tp)
7004 {
7005 	spin_unlock_bh(&tp->lock);
7006 }
7007 
7008 /* One-shot MSI handler - Chip automatically disables interrupt
7009  * after sending MSI so driver doesn't have to do it.
7010  */
7011 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7012 {
7013 	struct tg3_napi *tnapi = dev_id;
7014 	struct tg3 *tp = tnapi->tp;
7015 
7016 	prefetch(tnapi->hw_status);
7017 	if (tnapi->rx_rcb)
7018 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7019 
7020 	if (likely(!tg3_irq_sync(tp)))
7021 		napi_schedule(&tnapi->napi);
7022 
7023 	return IRQ_HANDLED;
7024 }
7025 
7026 /* MSI ISR - No need to check for interrupt sharing and no need to
7027  * flush status block and interrupt mailbox. PCI ordering rules
7028  * guarantee that MSI will arrive after the status block.
7029  */
7030 static irqreturn_t tg3_msi(int irq, void *dev_id)
7031 {
7032 	struct tg3_napi *tnapi = dev_id;
7033 	struct tg3 *tp = tnapi->tp;
7034 
7035 	prefetch(tnapi->hw_status);
7036 	if (tnapi->rx_rcb)
7037 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7038 	/*
7039 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7040 	 * chip-internal interrupt pending events.
7041 	 * Writing non-zero to intr-mbox-0 additionally tells the
7042 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7043 	 * event coalescing.
7044 	 */
7045 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7046 	if (likely(!tg3_irq_sync(tp)))
7047 		napi_schedule(&tnapi->napi);
7048 
7049 	return IRQ_RETVAL(1);
7050 }
7051 
7052 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7053 {
7054 	struct tg3_napi *tnapi = dev_id;
7055 	struct tg3 *tp = tnapi->tp;
7056 	struct tg3_hw_status *sblk = tnapi->hw_status;
7057 	unsigned int handled = 1;
7058 
7059 	/* In INTx mode, it is possible for the interrupt to arrive at
7060 	 * the CPU before the status block that was posted prior to it.
7061 	 * Reading the PCI State register will confirm whether the
7062 	 * interrupt is ours and will flush the status block.
7063 	 */
7064 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7065 		if (tg3_flag(tp, CHIP_RESETTING) ||
7066 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7067 			handled = 0;
7068 			goto out;
7069 		}
7070 	}
7071 
7072 	/*
7073 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7074 	 * chip-internal interrupt pending events.
7075 	 * Writing non-zero to intr-mbox-0 additionally tells the
7076 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7077 	 * event coalescing.
7078 	 *
7079 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7080 	 * spurious interrupts.  The flush impacts performance but
7081 	 * excessive spurious interrupts can be worse in some cases.
7082 	 */
7083 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7084 	if (tg3_irq_sync(tp))
7085 		goto out;
7086 	sblk->status &= ~SD_STATUS_UPDATED;
7087 	if (likely(tg3_has_work(tnapi))) {
7088 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7089 		napi_schedule(&tnapi->napi);
7090 	} else {
7091 		/* No work, shared interrupt perhaps?  re-enable
7092 		 * interrupts, and flush that PCI write
7093 		 */
7094 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7095 			       0x00000000);
7096 	}
7097 out:
7098 	return IRQ_RETVAL(handled);
7099 }
7100 
7101 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7102 {
7103 	struct tg3_napi *tnapi = dev_id;
7104 	struct tg3 *tp = tnapi->tp;
7105 	struct tg3_hw_status *sblk = tnapi->hw_status;
7106 	unsigned int handled = 1;
7107 
7108 	/* In INTx mode, it is possible for the interrupt to arrive at
7109 	 * the CPU before the status block that was posted prior to it.
7110 	 * Reading the PCI State register will confirm whether the
7111 	 * interrupt is ours and will flush the status block.
7112 	 */
7113 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7114 		if (tg3_flag(tp, CHIP_RESETTING) ||
7115 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7116 			handled = 0;
7117 			goto out;
7118 		}
7119 	}
7120 
7121 	/*
7122 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7123 	 * chip-internal interrupt pending events.
7124 	 * writing non-zero to intr-mbox-0 additionally tells the
7125 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7126 	 * event coalescing.
7127 	 *
7128 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7129 	 * spurious interrupts.  The flush impacts performance but
7130 	 * excessive spurious interrupts can be worse in some cases.
7131 	 */
7132 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7133 
7134 	/*
7135 	 * In a shared interrupt configuration, sometimes other devices'
7136 	 * interrupts will scream.  We record the current status tag here
7137 	 * so that the above check can report that the screaming interrupts
7138 	 * are unhandled.  Eventually they will be silenced.
7139 	 */
7140 	tnapi->last_irq_tag = sblk->status_tag;
7141 
7142 	if (tg3_irq_sync(tp))
7143 		goto out;
7144 
7145 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7146 
7147 	napi_schedule(&tnapi->napi);
7148 
7149 out:
7150 	return IRQ_RETVAL(handled);
7151 }
7152 
7153 /* ISR for interrupt test */
7154 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7155 {
7156 	struct tg3_napi *tnapi = dev_id;
7157 	struct tg3 *tp = tnapi->tp;
7158 	struct tg3_hw_status *sblk = tnapi->hw_status;
7159 
7160 	if ((sblk->status & SD_STATUS_UPDATED) ||
7161 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7162 		tg3_disable_ints(tp);
7163 		return IRQ_RETVAL(1);
7164 	}
7165 	return IRQ_RETVAL(0);
7166 }
7167 
7168 #ifdef CONFIG_NET_POLL_CONTROLLER
7169 static void tg3_poll_controller(struct net_device *dev)
7170 {
7171 	int i;
7172 	struct tg3 *tp = netdev_priv(dev);
7173 
7174 	if (tg3_irq_sync(tp))
7175 		return;
7176 
7177 	for (i = 0; i < tp->irq_cnt; i++)
7178 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7179 }
7180 #endif
7181 
7182 static void tg3_tx_timeout(struct net_device *dev)
7183 {
7184 	struct tg3 *tp = netdev_priv(dev);
7185 
7186 	if (netif_msg_tx_err(tp)) {
7187 		netdev_err(dev, "transmit timed out, resetting\n");
7188 		tg3_dump_state(tp);
7189 	}
7190 
7191 	tg3_reset_task_schedule(tp);
7192 }
7193 
7194 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7195 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7196 {
7197 	u32 base = (u32) mapping & 0xffffffff;
7198 
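	/* The second clause detects 32-bit wraparound, i.e. a buffer
	 * that straddles a 4GB boundary.  The first clause is a cheap
	 * pre-filter: only a buffer starting in the last few KB below
	 * a boundary can possibly wrap.
	 */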
7199 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7200 }
7201 
7202 /* Test for DMA addresses > 40-bit */
7203 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7204 					  int len)
7205 {
7206 #if defined(CONFIG_HIGHMEM) || (BITS_PER_LONG == 64)
7207 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7208 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7209 	return 0;
7210 #else
7211 	return 0;
7212 #endif
7213 }
7214 
7215 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7216 				 dma_addr_t mapping, u32 len, u32 flags,
7217 				 u32 mss, u32 vlan)
7218 {
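	/* The 64-bit DMA address is split across two 32-bit descriptor
	 * words; the length shares a word with the flags, and the mss
	 * shares one with the vlan tag.
	 */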
7219 	txbd->addr_hi = ((u64) mapping >> 32);
7220 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7221 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7222 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7223 }
7224 
7225 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7226 			    dma_addr_t map, u32 len, u32 flags,
7227 			    u32 mss, u32 vlan)
7228 {
7229 	struct tg3 *tp = tnapi->tp;
7230 	bool hwbug = false;
7231 
7232 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7233 		hwbug = true;
7234 
7235 	if (tg3_4g_overflow_test(map, len))
7236 		hwbug = true;
7237 
7238 	if (tg3_40bit_overflow_test(tp, map, len))
7239 		hwbug = true;
7240 
7241 	if (tp->dma_limit) {
7242 		u32 prvidx = *entry;
7243 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7244 		while (len > tp->dma_limit && *budget) {
7245 			u32 frag_len = tp->dma_limit;
7246 			len -= tp->dma_limit;
7247 
7248 			/* Avoid the 8-byte DMA problem */
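			/* If taking a full dma_limit chunk would leave a
			 * tail of 8 bytes or less, send only half a chunk
			 * now so the final piece stays larger than 8 bytes.
			 */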
7249 			if (len <= 8) {
7250 				len += tp->dma_limit / 2;
7251 				frag_len = tp->dma_limit / 2;
7252 			}
7253 
7254 			tnapi->tx_buffers[*entry].fragmented = true;
7255 
7256 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7257 				      frag_len, tmp_flag, mss, vlan);
7258 			*budget -= 1;
7259 			prvidx = *entry;
7260 			*entry = NEXT_TX(*entry);
7261 
7262 			map += frag_len;
7263 		}
7264 
7265 		if (len) {
7266 			if (*budget) {
7267 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7268 					      len, flags, mss, vlan);
7269 				*budget -= 1;
7270 				*entry = NEXT_TX(*entry);
7271 			} else {
7272 				hwbug = true;
7273 				tnapi->tx_buffers[prvidx].fragmented = false;
7274 			}
7275 		}
7276 	} else {
7277 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7278 			      len, flags, mss, vlan);
7279 		*entry = NEXT_TX(*entry);
7280 	}
7281 
7282 	return hwbug;
7283 }
7284 
7285 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7286 {
7287 	int i;
7288 	struct sk_buff *skb;
7289 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7290 
7291 	skb = txb->skb;
7292 	txb->skb = NULL;
7293 
7294 	pci_unmap_single(tnapi->tp->pdev,
7295 			 dma_unmap_addr(txb, mapping),
7296 			 skb_headlen(skb),
7297 			 PCI_DMA_TODEVICE);
7298 
7299 	while (txb->fragmented) {
7300 		txb->fragmented = false;
7301 		entry = NEXT_TX(entry);
7302 		txb = &tnapi->tx_buffers[entry];
7303 	}
7304 
7305 	for (i = 0; i <= last; i++) {
7306 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7307 
7308 		entry = NEXT_TX(entry);
7309 		txb = &tnapi->tx_buffers[entry];
7310 
7311 		pci_unmap_page(tnapi->tp->pdev,
7312 			       dma_unmap_addr(txb, mapping),
7313 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7314 
7315 		while (txb->fragmented) {
7316 			txb->fragmented = false;
7317 			entry = NEXT_TX(entry);
7318 			txb = &tnapi->tx_buffers[entry];
7319 		}
7320 	}
7321 }
7322 
7323 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7324 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7325 				       struct sk_buff **pskb,
7326 				       u32 *entry, u32 *budget,
7327 				       u32 base_flags, u32 mss, u32 vlan)
7328 {
7329 	struct tg3 *tp = tnapi->tp;
7330 	struct sk_buff *new_skb, *skb = *pskb;
7331 	dma_addr_t new_addr = 0;
7332 	int ret = 0;
7333 
7334 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7335 		new_skb = skb_copy(skb, GFP_ATOMIC);
7336 	else {
7337 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7338 
7339 		new_skb = skb_copy_expand(skb,
7340 					  skb_headroom(skb) + more_headroom,
7341 					  skb_tailroom(skb), GFP_ATOMIC);
7342 	}
7343 
7344 	if (!new_skb) {
7345 		ret = -1;
7346 	} else {
7347 		/* New SKB is guaranteed to be linear. */
7348 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7349 					  PCI_DMA_TODEVICE);
7350 		/* Make sure the mapping succeeded */
7351 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7352 			dev_kfree_skb(new_skb);
7353 			ret = -1;
7354 		} else {
7355 			u32 save_entry = *entry;
7356 
7357 			base_flags |= TXD_FLAG_END;
7358 
7359 			tnapi->tx_buffers[*entry].skb = new_skb;
7360 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7361 					   mapping, new_addr);
7362 
7363 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7364 					    new_skb->len, base_flags,
7365 					    mss, vlan)) {
7366 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7367 				dev_kfree_skb(new_skb);
7368 				ret = -1;
7369 			}
7370 		}
7371 	}
7372 
7373 	dev_kfree_skb(skb);
7374 	*pskb = new_skb;
7375 	return ret;
7376 }
7377 
7378 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7379 
7380 /* Use GSO to work around a rare TSO bug that may be triggered when the
7381  * TSO header is greater than 80 bytes.
7382  */
7383 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7384 {
7385 	struct sk_buff *segs, *nskb;
7386 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7387 
7388 	/* Estimate the number of fragments in the worst case */
7389 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7390 		netif_stop_queue(tp->dev);
7391 
7392 		/* netif_tx_stop_queue() must be done before checking
7393 		 * tx index in tg3_tx_avail() below, because in
7394 		 * tg3_tx(), we update tx index before checking for
7395 		 * netif_tx_queue_stopped().
7396 		 */
7397 		smp_mb();
7398 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7399 			return NETDEV_TX_BUSY;
7400 
7401 		netif_wake_queue(tp->dev);
7402 	}
7403 
7404 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7405 	if (IS_ERR(segs))
7406 		goto tg3_tso_bug_end;
7407 
7408 	do {
7409 		nskb = segs;
7410 		segs = segs->next;
7411 		nskb->next = NULL;
7412 		tg3_start_xmit(nskb, tp->dev);
7413 	} while (segs);
7414 
7415 tg3_tso_bug_end:
7416 	dev_kfree_skb(skb);
7417 
7418 	return NETDEV_TX_OK;
7419 }
7420 
7421 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7422  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7423  */
7424 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7425 {
7426 	struct tg3 *tp = netdev_priv(dev);
7427 	u32 len, entry, base_flags, mss, vlan = 0;
7428 	u32 budget;
7429 	int i = -1, would_hit_hwbug;
7430 	dma_addr_t mapping;
7431 	struct tg3_napi *tnapi;
7432 	struct netdev_queue *txq;
7433 	unsigned int last;
7434 
7435 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7436 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7437 	if (tg3_flag(tp, ENABLE_TSS))
7438 		tnapi++;
7439 
7440 	budget = tg3_tx_avail(tnapi);
7441 
7442 	/* We are running in BH disabled context with netif_tx_lock
7443 	 * and TX reclaim runs via tp->napi.poll inside of a software
7444 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7445 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7446 	 */
7447 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7448 		if (!netif_tx_queue_stopped(txq)) {
7449 			netif_tx_stop_queue(txq);
7450 
7451 			/* This is a hard error, log it. */
7452 			netdev_err(dev,
7453 				   "BUG! Tx Ring full when queue awake!\n");
7454 		}
7455 		return NETDEV_TX_BUSY;
7456 	}
7457 
7458 	entry = tnapi->tx_prod;
7459 	base_flags = 0;
7460 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7461 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7462 
7463 	mss = skb_shinfo(skb)->gso_size;
7464 	if (mss) {
7465 		struct iphdr *iph;
7466 		u32 tcp_opt_len, hdr_len;
7467 
7468 		if (skb_header_cloned(skb) &&
7469 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7470 			goto drop;
7471 
7472 		iph = ip_hdr(skb);
7473 		tcp_opt_len = tcp_optlen(skb);
7474 
7475 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7476 
7477 		if (!skb_is_gso_v6(skb)) {
7478 			iph->check = 0;
7479 			iph->tot_len = htons(mss + hdr_len);
7480 		}
7481 
7482 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7483 		    tg3_flag(tp, TSO_BUG))
7484 			return tg3_tso_bug(tp, skb);
7485 
7486 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7487 			       TXD_FLAG_CPU_POST_DMA);
7488 
7489 		if (tg3_flag(tp, HW_TSO_1) ||
7490 		    tg3_flag(tp, HW_TSO_2) ||
7491 		    tg3_flag(tp, HW_TSO_3)) {
7492 			tcp_hdr(skb)->check = 0;
7493 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7494 		} else
7495 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7496 								 iph->daddr, 0,
7497 								 IPPROTO_TCP,
7498 								 0);
7499 
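		/* The TSO header length does not fit in a single field;
		 * its bits are scattered across spare mss and base_flags
		 * bits, with a different layout per HW_TSO generation.
		 */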
7500 		if (tg3_flag(tp, HW_TSO_3)) {
7501 			mss |= (hdr_len & 0xc) << 12;
7502 			if (hdr_len & 0x10)
7503 				base_flags |= 0x00000010;
7504 			base_flags |= (hdr_len & 0x3e0) << 5;
7505 		} else if (tg3_flag(tp, HW_TSO_2))
7506 			mss |= hdr_len << 9;
7507 		else if (tg3_flag(tp, HW_TSO_1) ||
7508 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7509 			if (tcp_opt_len || iph->ihl > 5) {
7510 				int tsflags;
7511 
7512 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7513 				mss |= (tsflags << 11);
7514 			}
7515 		} else {
7516 			if (tcp_opt_len || iph->ihl > 5) {
7517 				int tsflags;
7518 
7519 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7520 				base_flags |= tsflags << 12;
7521 			}
7522 		}
7523 	}
7524 
7525 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7526 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7527 		base_flags |= TXD_FLAG_JMB_PKT;
7528 
7529 	if (vlan_tx_tag_present(skb)) {
7530 		base_flags |= TXD_FLAG_VLAN;
7531 		vlan = vlan_tx_tag_get(skb);
7532 	}
7533 
7534 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7535 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7536 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7537 		base_flags |= TXD_FLAG_HWTSTAMP;
7538 	}
7539 
7540 	len = skb_headlen(skb);
7541 
7542 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7543 	if (pci_dma_mapping_error(tp->pdev, mapping))
7544 		goto drop;
7545 
7547 	tnapi->tx_buffers[entry].skb = skb;
7548 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7549 
7550 	would_hit_hwbug = 0;
7551 
7552 	if (tg3_flag(tp, 5701_DMA_BUG))
7553 		would_hit_hwbug = 1;
7554 
7555 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7556 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7557 			    mss, vlan)) {
7558 		would_hit_hwbug = 1;
7559 	} else if (skb_shinfo(skb)->nr_frags > 0) {
7560 		u32 tmp_mss = mss;
7561 
7562 		if (!tg3_flag(tp, HW_TSO_1) &&
7563 		    !tg3_flag(tp, HW_TSO_2) &&
7564 		    !tg3_flag(tp, HW_TSO_3))
7565 			tmp_mss = 0;
7566 
7567 		/* Now loop through additional data
7568 		 * fragments, and queue them.
7569 		 */
7570 		last = skb_shinfo(skb)->nr_frags - 1;
7571 		for (i = 0; i <= last; i++) {
7572 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7573 
7574 			len = skb_frag_size(frag);
7575 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7576 						   len, DMA_TO_DEVICE);
7577 
7578 			tnapi->tx_buffers[entry].skb = NULL;
7579 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7580 					   mapping);
7581 			if (dma_mapping_error(&tp->pdev->dev, mapping))
7582 				goto dma_error;
7583 
7584 			if (!budget ||
7585 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7586 					    len, base_flags |
7587 					    ((i == last) ? TXD_FLAG_END : 0),
7588 					    tmp_mss, vlan)) {
7589 				would_hit_hwbug = 1;
7590 				break;
7591 			}
7592 		}
7593 	}
7594 
7595 	if (would_hit_hwbug) {
7596 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7597 
7598 		/* If the workaround fails due to memory/mapping
7599 		 * failure, silently drop this packet.
7600 		 */
7601 		entry = tnapi->tx_prod;
7602 		budget = tg3_tx_avail(tnapi);
7603 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7604 						base_flags, mss, vlan))
7605 			goto drop_nofree;
7606 	}
7607 
7608 	skb_tx_timestamp(skb);
7609 	netdev_tx_sent_queue(txq, skb->len);
7610 
7611 	/* Sync BD data before updating mailbox */
7612 	wmb();
7613 
7614 	/* Packets are ready, update Tx producer idx local and on card. */
7615 	tw32_tx_mbox(tnapi->prodmbox, entry);
7616 
7617 	tnapi->tx_prod = entry;
7618 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7619 		netif_tx_stop_queue(txq);
7620 
7621 		/* netif_tx_stop_queue() must be done before checking
7622 		 * tx index in tg3_tx_avail() below, because in
7623 		 * tg3_tx(), we update tx index before checking for
7624 		 * netif_tx_queue_stopped().
7625 		 */
7626 		smp_mb();
7627 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7628 			netif_tx_wake_queue(txq);
7629 	}
7630 
7631 	mmiowb();
7632 	return NETDEV_TX_OK;
7633 
7634 dma_error:
7635 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7636 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7637 drop:
7638 	dev_kfree_skb(skb);
7639 drop_nofree:
7640 	tp->tx_dropped++;
7641 	return NETDEV_TX_OK;
7642 }
7643 
7644 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7645 {
7646 	if (enable) {
7647 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7648 				  MAC_MODE_PORT_MODE_MASK);
7649 
7650 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7651 
7652 		if (!tg3_flag(tp, 5705_PLUS))
7653 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7654 
7655 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7656 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7657 		else
7658 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7659 	} else {
7660 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7661 
7662 		if (tg3_flag(tp, 5705_PLUS) ||
7663 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7664 		    tg3_asic_rev(tp) == ASIC_REV_5700)
7665 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7666 	}
7667 
7668 	tw32(MAC_MODE, tp->mac_mode);
7669 	udelay(40);
7670 }
7671 
7672 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7673 {
7674 	u32 val, bmcr, mac_mode, ptest = 0;
7675 
7676 	tg3_phy_toggle_apd(tp, false);
7677 	tg3_phy_toggle_automdix(tp, 0);
7678 
7679 	if (extlpbk && tg3_phy_set_extloopbk(tp))
7680 		return -EIO;
7681 
7682 	bmcr = BMCR_FULLDPLX;
7683 	switch (speed) {
7684 	case SPEED_10:
7685 		break;
7686 	case SPEED_100:
7687 		bmcr |= BMCR_SPEED100;
7688 		break;
7689 	case SPEED_1000:
7690 	default:
7691 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7692 			speed = SPEED_100;
7693 			bmcr |= BMCR_SPEED100;
7694 		} else {
7695 			speed = SPEED_1000;
7696 			bmcr |= BMCR_SPEED1000;
7697 		}
7698 	}
7699 
7700 	if (extlpbk) {
7701 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7702 			tg3_readphy(tp, MII_CTRL1000, &val);
7703 			val |= CTL1000_AS_MASTER |
7704 			       CTL1000_ENABLE_MASTER;
7705 			tg3_writephy(tp, MII_CTRL1000, val);
7706 		} else {
7707 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7708 				MII_TG3_FET_PTEST_TRIM_2;
7709 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7710 		}
7711 	} else
7712 		bmcr |= BMCR_LOOPBACK;
7713 
7714 	tg3_writephy(tp, MII_BMCR, bmcr);
7715 
7716 	/* The write needs to be flushed for the FETs */
7717 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7718 		tg3_readphy(tp, MII_BMCR, &bmcr);
7719 
7720 	udelay(40);
7721 
7722 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7723 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
7724 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7725 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
7726 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
7727 
7728 		/* The write needs to be flushed for the AC131 */
7729 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7730 	}
7731 
7732 	/* Reset to prevent losing 1st rx packet intermittently */
7733 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7734 	    tg3_flag(tp, 5780_CLASS)) {
7735 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7736 		udelay(10);
7737 		tw32_f(MAC_RX_MODE, tp->rx_mode);
7738 	}
7739 
7740 	mac_mode = tp->mac_mode &
7741 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7742 	if (speed == SPEED_1000)
7743 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
7744 	else
7745 		mac_mode |= MAC_MODE_PORT_MODE_MII;
7746 
7747 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
7748 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7749 
7750 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
7751 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
7752 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7753 			mac_mode |= MAC_MODE_LINK_POLARITY;
7754 
7755 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
7756 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7757 	}
7758 
7759 	tw32(MAC_MODE, mac_mode);
7760 	udelay(40);
7761 
7762 	return 0;
7763 }
7764 
7765 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7766 {
7767 	struct tg3 *tp = netdev_priv(dev);
7768 
7769 	if (features & NETIF_F_LOOPBACK) {
7770 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7771 			return;
7772 
7773 		spin_lock_bh(&tp->lock);
7774 		tg3_mac_loopback(tp, true);
7775 		netif_carrier_on(tp->dev);
7776 		spin_unlock_bh(&tp->lock);
7777 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7778 	} else {
7779 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7780 			return;
7781 
7782 		spin_lock_bh(&tp->lock);
7783 		tg3_mac_loopback(tp, false);
7784 		/* Force link status check */
7785 		tg3_setup_phy(tp, 1);
7786 		spin_unlock_bh(&tp->lock);
7787 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7788 	}
7789 }
7790 
7791 static netdev_features_t tg3_fix_features(struct net_device *dev,
7792 	netdev_features_t features)
7793 {
7794 	struct tg3 *tp = netdev_priv(dev);
7795 
7796 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7797 		features &= ~NETIF_F_ALL_TSO;
7798 
7799 	return features;
7800 }
7801 
7802 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7803 {
7804 	netdev_features_t changed = dev->features ^ features;
7805 
7806 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7807 		tg3_set_loopback(dev, features);
7808 
7809 	return 0;
7810 }
7811 
7812 static void tg3_rx_prodring_free(struct tg3 *tp,
7813 				 struct tg3_rx_prodring_set *tpr)
7814 {
7815 	int i;
7816 
7817 	if (tpr != &tp->napi[0].prodring) {
7818 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7819 		     i = (i + 1) & tp->rx_std_ring_mask)
7820 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7821 					tp->rx_pkt_map_sz);
7822 
7823 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
7824 			for (i = tpr->rx_jmb_cons_idx;
7825 			     i != tpr->rx_jmb_prod_idx;
7826 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
7827 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7828 						TG3_RX_JMB_MAP_SZ);
7829 			}
7830 		}
7831 
7832 		return;
7833 	}
7834 
7835 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
7836 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7837 				tp->rx_pkt_map_sz);
7838 
7839 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7840 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7841 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7842 					TG3_RX_JMB_MAP_SZ);
7843 	}
7844 }
7845 
7846 /* Initialize rx rings for packet processing.
7847  *
7848  * The chip has been shut down and the driver detached from
7849  * the network stack, so no interrupts or new tx packets will
7850  * end up in the driver.  tp->{tx,}lock are held and thus
7851  * we may not sleep.
7852  */
7853 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7854 				 struct tg3_rx_prodring_set *tpr)
7855 {
7856 	u32 i, rx_pkt_dma_sz;
7857 
7858 	tpr->rx_std_cons_idx = 0;
7859 	tpr->rx_std_prod_idx = 0;
7860 	tpr->rx_jmb_cons_idx = 0;
7861 	tpr->rx_jmb_prod_idx = 0;
7862 
7863 	if (tpr != &tp->napi[0].prodring) {
7864 		memset(&tpr->rx_std_buffers[0], 0,
7865 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
7866 		if (tpr->rx_jmb_buffers)
7867 			memset(&tpr->rx_jmb_buffers[0], 0,
7868 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
7869 		goto done;
7870 	}
7871 
7872 	/* Zero out all descriptors. */
7873 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7874 
7875 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7876 	if (tg3_flag(tp, 5780_CLASS) &&
7877 	    tp->dev->mtu > ETH_DATA_LEN)
7878 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7879 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7880 
7881 	/* Initialize invariants of the rings, we only set this
7882 	 * stuff once.  This works because the card does not
7883 	 * write into the rx buffer posting rings.
7884 	 */
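	/* The opaque cookie stores the ring type and the descriptor's own
	 * index, letting rx completions locate the matching buffer.
	 */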
7885 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7886 		struct tg3_rx_buffer_desc *rxd;
7887 
7888 		rxd = &tpr->rx_std[i];
7889 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7890 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7891 		rxd->opaque = (RXD_OPAQUE_RING_STD |
7892 			       (i << RXD_OPAQUE_INDEX_SHIFT));
7893 	}
7894 
7895 	/* Now allocate fresh SKBs for each rx ring. */
7896 	for (i = 0; i < tp->rx_pending; i++) {
7897 		unsigned int frag_size;
7898 
7899 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7900 				      &frag_size) < 0) {
7901 			netdev_warn(tp->dev,
7902 				    "Using a smaller RX standard ring. Only "
7903 				    "%d out of %d buffers were allocated "
7904 				    "successfully\n", i, tp->rx_pending);
7905 			if (i == 0)
7906 				goto initfail;
7907 			tp->rx_pending = i;
7908 			break;
7909 		}
7910 	}
7911 
7912 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7913 		goto done;
7914 
7915 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7916 
7917 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7918 		goto done;
7919 
7920 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7921 		struct tg3_rx_buffer_desc *rxd;
7922 
7923 		rxd = &tpr->rx_jmb[i].std;
7924 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7925 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7926 				  RXD_FLAG_JUMBO;
7927 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7928 		       (i << RXD_OPAQUE_INDEX_SHIFT));
7929 	}
7930 
7931 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
7932 		unsigned int frag_size;
7933 
7934 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7935 				      &frag_size) < 0) {
7936 			netdev_warn(tp->dev,
7937 				    "Using a smaller RX jumbo ring. Only %d "
7938 				    "out of %d buffers were allocated "
7939 				    "successfully\n", i, tp->rx_jumbo_pending);
7940 			if (i == 0)
7941 				goto initfail;
7942 			tp->rx_jumbo_pending = i;
7943 			break;
7944 		}
7945 	}
7946 
7947 done:
7948 	return 0;
7949 
7950 initfail:
7951 	tg3_rx_prodring_free(tp, tpr);
7952 	return -ENOMEM;
7953 }
7954 
7955 static void tg3_rx_prodring_fini(struct tg3 *tp,
7956 				 struct tg3_rx_prodring_set *tpr)
7957 {
7958 	kfree(tpr->rx_std_buffers);
7959 	tpr->rx_std_buffers = NULL;
7960 	kfree(tpr->rx_jmb_buffers);
7961 	tpr->rx_jmb_buffers = NULL;
7962 	if (tpr->rx_std) {
7963 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7964 				  tpr->rx_std, tpr->rx_std_mapping);
7965 		tpr->rx_std = NULL;
7966 	}
7967 	if (tpr->rx_jmb) {
7968 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7969 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
7970 		tpr->rx_jmb = NULL;
7971 	}
7972 }
7973 
7974 static int tg3_rx_prodring_init(struct tg3 *tp,
7975 				struct tg3_rx_prodring_set *tpr)
7976 {
7977 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7978 				      GFP_KERNEL);
7979 	if (!tpr->rx_std_buffers)
7980 		return -ENOMEM;
7981 
7982 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7983 					 TG3_RX_STD_RING_BYTES(tp),
7984 					 &tpr->rx_std_mapping,
7985 					 GFP_KERNEL);
7986 	if (!tpr->rx_std)
7987 		goto err_out;
7988 
7989 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7990 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7991 					      GFP_KERNEL);
7992 		if (!tpr->rx_jmb_buffers)
7993 			goto err_out;
7994 
7995 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7996 						 TG3_RX_JMB_RING_BYTES(tp),
7997 						 &tpr->rx_jmb_mapping,
7998 						 GFP_KERNEL);
7999 		if (!tpr->rx_jmb)
8000 			goto err_out;
8001 	}
8002 
8003 	return 0;
8004 
8005 err_out:
8006 	tg3_rx_prodring_fini(tp, tpr);
8007 	return -ENOMEM;
8008 }
8009 
8010 /* Free up pending packets in all rx/tx rings.
8011  *
8012  * The chip has been shut down and the driver detached from
8013  * the network stack, so no interrupts or new tx packets will
8014  * end up in the driver.  tp->{tx,}lock is not held and we are not
8015  * in an interrupt context and thus may sleep.
8016  */
8017 static void tg3_free_rings(struct tg3 *tp)
8018 {
8019 	int i, j;
8020 
8021 	for (j = 0; j < tp->irq_cnt; j++) {
8022 		struct tg3_napi *tnapi = &tp->napi[j];
8023 
8024 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8025 
8026 		if (!tnapi->tx_buffers)
8027 			continue;
8028 
8029 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8030 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8031 
8032 			if (!skb)
8033 				continue;
8034 
8035 			tg3_tx_skb_unmap(tnapi, i,
8036 					 skb_shinfo(skb)->nr_frags - 1);
8037 
8038 			dev_kfree_skb_any(skb);
8039 		}
8040 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8041 	}
8042 }
8043 
8044 /* Initialize tx/rx rings for packet processing.
8045  *
8046  * The chip has been shut down and the driver detached from
8047  * the network stack, so no interrupts or new tx packets will
8048  * end up in the driver.  tp->{tx,}lock are held and thus
8049  * we may not sleep.
8050  */
8051 static int tg3_init_rings(struct tg3 *tp)
8052 {
8053 	int i;
8054 
8055 	/* Free up all the SKBs. */
8056 	tg3_free_rings(tp);
8057 
8058 	for (i = 0; i < tp->irq_cnt; i++) {
8059 		struct tg3_napi *tnapi = &tp->napi[i];
8060 
8061 		tnapi->last_tag = 0;
8062 		tnapi->last_irq_tag = 0;
8063 		tnapi->hw_status->status = 0;
8064 		tnapi->hw_status->status_tag = 0;
8065 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8066 
8067 		tnapi->tx_prod = 0;
8068 		tnapi->tx_cons = 0;
8069 		if (tnapi->tx_ring)
8070 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8071 
8072 		tnapi->rx_rcb_ptr = 0;
8073 		if (tnapi->rx_rcb)
8074 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8075 
8076 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8077 			tg3_free_rings(tp);
8078 			return -ENOMEM;
8079 		}
8080 	}
8081 
8082 	return 0;
8083 }
8084 
8085 static void tg3_mem_tx_release(struct tg3 *tp)
8086 {
8087 	int i;
8088 
8089 	for (i = 0; i < tp->irq_max; i++) {
8090 		struct tg3_napi *tnapi = &tp->napi[i];
8091 
8092 		if (tnapi->tx_ring) {
8093 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8094 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8095 			tnapi->tx_ring = NULL;
8096 		}
8097 
8098 		kfree(tnapi->tx_buffers);
8099 		tnapi->tx_buffers = NULL;
8100 	}
8101 }
8102 
8103 static int tg3_mem_tx_acquire(struct tg3 *tp)
8104 {
8105 	int i;
8106 	struct tg3_napi *tnapi = &tp->napi[0];
8107 
8108 	/* If multivector TSS is enabled, vector 0 does not handle
8109 	 * tx interrupts.  Don't allocate any resources for it.
8110 	 */
8111 	if (tg3_flag(tp, ENABLE_TSS))
8112 		tnapi++;
8113 
8114 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8115 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8116 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8117 		if (!tnapi->tx_buffers)
8118 			goto err_out;
8119 
8120 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8121 						    TG3_TX_RING_BYTES,
8122 						    &tnapi->tx_desc_mapping,
8123 						    GFP_KERNEL);
8124 		if (!tnapi->tx_ring)
8125 			goto err_out;
8126 	}
8127 
8128 	return 0;
8129 
8130 err_out:
8131 	tg3_mem_tx_release(tp);
8132 	return -ENOMEM;
8133 }
8134 
8135 static void tg3_mem_rx_release(struct tg3 *tp)
8136 {
8137 	int i;
8138 
8139 	for (i = 0; i < tp->irq_max; i++) {
8140 		struct tg3_napi *tnapi = &tp->napi[i];
8141 
8142 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8143 
8144 		if (!tnapi->rx_rcb)
8145 			continue;
8146 
8147 		dma_free_coherent(&tp->pdev->dev,
8148 				  TG3_RX_RCB_RING_BYTES(tp),
8149 				  tnapi->rx_rcb,
8150 				  tnapi->rx_rcb_mapping);
8151 		tnapi->rx_rcb = NULL;
8152 	}
8153 }
8154 
8155 static int tg3_mem_rx_acquire(struct tg3 *tp)
8156 {
8157 	unsigned int i, limit;
8158 
8159 	limit = tp->rxq_cnt;
8160 
8161 	/* If RSS is enabled, we need a (dummy) producer ring
8162 	 * set on vector zero.  This is the true hw prodring.
8163 	 */
8164 	if (tg3_flag(tp, ENABLE_RSS))
8165 		limit++;
8166 
8167 	for (i = 0; i < limit; i++) {
8168 		struct tg3_napi *tnapi = &tp->napi[i];
8169 
8170 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8171 			goto err_out;
8172 
8173 		/* If multivector RSS is enabled, vector 0
8174 		 * does not handle rx or tx interrupts.
8175 		 * Don't allocate any resources for it.
8176 		 */
8177 		if (!i && tg3_flag(tp, ENABLE_RSS))
8178 			continue;
8179 
8180 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8181 						   TG3_RX_RCB_RING_BYTES(tp),
8182 						   &tnapi->rx_rcb_mapping,
8183 						   GFP_KERNEL | __GFP_ZERO);
8184 		if (!tnapi->rx_rcb)
8185 			goto err_out;
8186 	}
8187 
8188 	return 0;
8189 
8190 err_out:
8191 	tg3_mem_rx_release(tp);
8192 	return -ENOMEM;
8193 }
8194 
8195 /*
8196  * Must not be invoked with interrupt sources disabled and
8197  * the hardware shut down.
8198  */
8199 static void tg3_free_consistent(struct tg3 *tp)
8200 {
8201 	int i;
8202 
8203 	for (i = 0; i < tp->irq_cnt; i++) {
8204 		struct tg3_napi *tnapi = &tp->napi[i];
8205 
8206 		if (tnapi->hw_status) {
8207 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8208 					  tnapi->hw_status,
8209 					  tnapi->status_mapping);
8210 			tnapi->hw_status = NULL;
8211 		}
8212 	}
8213 
8214 	tg3_mem_rx_release(tp);
8215 	tg3_mem_tx_release(tp);
8216 
8217 	if (tp->hw_stats) {
8218 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8219 				  tp->hw_stats, tp->stats_mapping);
8220 		tp->hw_stats = NULL;
8221 	}
8222 }
8223 
8224 /*
8225  * Must not be invoked with interrupt sources disabled and
8226  * the hardware shut down.  Can sleep.
8227  */
8228 static int tg3_alloc_consistent(struct tg3 *tp)
8229 {
8230 	int i;
8231 
8232 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8233 					  sizeof(struct tg3_hw_stats),
8234 					  &tp->stats_mapping,
8235 					  GFP_KERNEL | __GFP_ZERO);
8236 	if (!tp->hw_stats)
8237 		goto err_out;
8238 
8239 	for (i = 0; i < tp->irq_cnt; i++) {
8240 		struct tg3_napi *tnapi = &tp->napi[i];
8241 		struct tg3_hw_status *sblk;
8242 
8243 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8244 						      TG3_HW_STATUS_SIZE,
8245 						      &tnapi->status_mapping,
8246 						      GFP_KERNEL | __GFP_ZERO);
8247 		if (!tnapi->hw_status)
8248 			goto err_out;
8249 
8250 		sblk = tnapi->hw_status;
8251 
8252 		if (tg3_flag(tp, ENABLE_RSS)) {
8253 			u16 *prodptr = NULL;
8254 
8255 			/*
8256 			 * When RSS is enabled, the status block format changes
8257 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8258 			 * and "rx_mini_consumer" members get mapped to the
8259 			 * other three rx return ring producer indexes.
8260 			 */
8261 			switch (i) {
8262 			case 1:
8263 				prodptr = &sblk->idx[0].rx_producer;
8264 				break;
8265 			case 2:
8266 				prodptr = &sblk->rx_jumbo_consumer;
8267 				break;
8268 			case 3:
8269 				prodptr = &sblk->reserved;
8270 				break;
8271 			case 4:
8272 				prodptr = &sblk->rx_mini_consumer;
8273 				break;
8274 			}
8275 			tnapi->rx_rcb_prod_idx = prodptr;
8276 		} else {
8277 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8278 		}
8279 	}
8280 
8281 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8282 		goto err_out;
8283 
8284 	return 0;
8285 
8286 err_out:
8287 	tg3_free_consistent(tp);
8288 	return -ENOMEM;
8289 }
8290 
8291 #define MAX_WAIT_CNT 1000
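/* 1000 polls of 100 us each: give a block roughly 100 ms to quiesce. */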
8292 
8293 /* To stop a block, clear the enable bit and poll till it
8294  * clears.  tp->lock is held.
8295  */
8296 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8297 {
8298 	unsigned int i;
8299 	u32 val;
8300 
8301 	if (tg3_flag(tp, 5705_PLUS)) {
8302 		switch (ofs) {
8303 		case RCVLSC_MODE:
8304 		case DMAC_MODE:
8305 		case MBFREE_MODE:
8306 		case BUFMGR_MODE:
8307 		case MEMARB_MODE:
8308 			/* We can't enable/disable these bits of the
8309 			 * 5705/5750, just say success.
8310 			 */
8311 			return 0;
8312 
8313 		default:
8314 			break;
8315 		}
8316 	}
8317 
8318 	val = tr32(ofs);
8319 	val &= ~enable_bit;
8320 	tw32_f(ofs, val);
8321 
8322 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8323 		udelay(100);
8324 		val = tr32(ofs);
8325 		if ((val & enable_bit) == 0)
8326 			break;
8327 	}
8328 
8329 	if (i == MAX_WAIT_CNT && !silent) {
8330 		dev_err(&tp->pdev->dev,
8331 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8332 			ofs, enable_bit);
8333 		return -ENODEV;
8334 	}
8335 
8336 	return 0;
8337 }
8338 
8339 /* tp->lock is held. */
8340 static int tg3_abort_hw(struct tg3 *tp, int silent)
8341 {
8342 	int i, err;
8343 
8344 	tg3_disable_ints(tp);
8345 
8346 	tp->rx_mode &= ~RX_MODE_ENABLE;
8347 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8348 	udelay(10);
8349 
8350 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8351 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8352 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8353 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8354 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8355 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8356 
8357 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8358 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8359 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8360 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8361 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8362 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8363 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8364 
8365 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8366 	tw32_f(MAC_MODE, tp->mac_mode);
8367 	udelay(40);
8368 
8369 	tp->tx_mode &= ~TX_MODE_ENABLE;
8370 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8371 
8372 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8373 		udelay(100);
8374 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8375 			break;
8376 	}
8377 	if (i >= MAX_WAIT_CNT) {
8378 		dev_err(&tp->pdev->dev,
8379 			"%s timed out, TX_MODE_ENABLE will not clear "
8380 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8381 		err |= -ENODEV;
8382 	}
8383 
8384 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8385 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8386 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8387 
8388 	tw32(FTQ_RESET, 0xffffffff);
8389 	tw32(FTQ_RESET, 0x00000000);
8390 
8391 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8392 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8393 
8394 	for (i = 0; i < tp->irq_cnt; i++) {
8395 		struct tg3_napi *tnapi = &tp->napi[i];
8396 		if (tnapi->hw_status)
8397 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8398 	}
8399 
8400 	return err;
8401 }
8402 
8403 /* Save PCI command register before chip reset */
8404 static void tg3_save_pci_state(struct tg3 *tp)
8405 {
8406 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8407 }
8408 
8409 /* Restore PCI state after chip reset */
8410 static void tg3_restore_pci_state(struct tg3 *tp)
8411 {
8412 	u32 val;
8413 
8414 	/* Re-enable indirect register accesses. */
8415 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8416 			       tp->misc_host_ctrl);
8417 
8418 	/* Set MAX PCI retry to zero. */
8419 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8420 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8421 	    tg3_flag(tp, PCIX_MODE))
8422 		val |= PCISTATE_RETRY_SAME_DMA;
8423 	/* Allow reads and writes to the APE register and memory space. */
8424 	if (tg3_flag(tp, ENABLE_APE))
8425 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8426 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8427 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8428 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8429 
8430 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8431 
8432 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8433 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8434 				      tp->pci_cacheline_sz);
8435 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8436 				      tp->pci_lat_timer);
8437 	}
8438 
8439 	/* Make sure PCI-X relaxed ordering bit is clear. */
8440 	if (tg3_flag(tp, PCIX_MODE)) {
8441 		u16 pcix_cmd;
8442 
8443 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8444 				     &pcix_cmd);
8445 		pcix_cmd &= ~PCI_X_CMD_ERO;
8446 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8447 				      pcix_cmd);
8448 	}
8449 
8450 	if (tg3_flag(tp, 5780_CLASS)) {
8451 
8452 		/* Chip reset on 5780 will reset MSI enable bit,
8453 		 * so need to restore it.
8454 		 */
8455 		if (tg3_flag(tp, USING_MSI)) {
8456 			u16 ctrl;
8457 
8458 			pci_read_config_word(tp->pdev,
8459 					     tp->msi_cap + PCI_MSI_FLAGS,
8460 					     &ctrl);
8461 			pci_write_config_word(tp->pdev,
8462 					      tp->msi_cap + PCI_MSI_FLAGS,
8463 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8464 			val = tr32(MSGINT_MODE);
8465 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8466 		}
8467 	}
8468 }
8469 
8470 /* tp->lock is held. */
8471 static int tg3_chip_reset(struct tg3 *tp)
8472 {
8473 	u32 val;
8474 	void (*write_op)(struct tg3 *, u32, u32);
8475 	int i, err;
8476 
8477 	tg3_nvram_lock(tp);
8478 
8479 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8480 
8481 	/* No matching tg3_nvram_unlock() after this because
8482 	 * chip reset below will undo the nvram lock.
8483 	 */
8484 	tp->nvram_lock_cnt = 0;
8485 
8486 	/* GRC_MISC_CFG core clock reset will clear the memory
8487 	 * enable bit in PCI register 4 and the MSI enable bit
8488 	 * on some chips, so we save relevant registers here.
8489 	 */
8490 	tg3_save_pci_state(tp);
8491 
8492 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8493 	    tg3_flag(tp, 5755_PLUS))
8494 		tw32(GRC_FASTBOOT_PC, 0);
8495 
8496 	/*
8497 	 * We must avoid the readl() that normally takes place.
8498 	 * It locks machines, causes machine checks, and other
8499 	 * fun things.  So, temporarily disable the 5701
8500 	 * hardware workaround, while we do the reset.
8501 	 */
8502 	write_op = tp->write32;
8503 	if (write_op == tg3_write_flush_reg32)
8504 		tp->write32 = tg3_write32;
8505 
8506 	/* Prevent the irq handler from reading or writing PCI registers
8507 	 * during chip reset when the memory enable bit in the PCI command
8508 	 * register may be cleared.  The chip does not generate interrupt
8509 	 * at this time, but the irq handler may still be called due to irq
8510 	 * sharing or irqpoll.
8511 	 */
8512 	tg3_flag_set(tp, CHIP_RESETTING);
8513 	for (i = 0; i < tp->irq_cnt; i++) {
8514 		struct tg3_napi *tnapi = &tp->napi[i];
8515 		if (tnapi->hw_status) {
8516 			tnapi->hw_status->status = 0;
8517 			tnapi->hw_status->status_tag = 0;
8518 		}
8519 		tnapi->last_tag = 0;
8520 		tnapi->last_irq_tag = 0;
8521 	}
8522 	smp_mb();
8523 
8524 	for (i = 0; i < tp->irq_cnt; i++)
8525 		synchronize_irq(tp->napi[i].irq_vec);
8526 
8527 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8528 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8529 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8530 	}
8531 
8532 	/* do the reset */
8533 	val = GRC_MISC_CFG_CORECLK_RESET;
8534 
8535 	if (tg3_flag(tp, PCI_EXPRESS)) {
8536 		/* Force PCIe 1.0a mode */
8537 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8538 		    !tg3_flag(tp, 57765_PLUS) &&
8539 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
8540 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8541 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8542 
8543 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8544 			tw32(GRC_MISC_CFG, (1 << 29));
8545 			val |= (1 << 29);
8546 		}
8547 	}
8548 
8549 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8550 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8551 		tw32(GRC_VCPU_EXT_CTRL,
8552 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8553 	}
8554 
8555 	/* Manage gphy power for all CPMU absent PCIe devices. */
8556 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8557 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8558 
8559 	tw32(GRC_MISC_CFG, val);
8560 
8561 	/* restore 5701 hardware bug workaround write method */
8562 	tp->write32 = write_op;
8563 
8564 	/* Unfortunately, we have to delay before the PCI read back.
8565 	 * Some 575X chips even will not respond to a PCI cfg access
8566 	 * when the reset command is given to the chip.
8567 	 *
8568 	 * How do these hardware designers expect things to work
8569 	 * properly if the PCI write is posted for a long period
8570 	 * of time?  There must always be some way to read a register
8571 	 * back in order to flush out the posted write that performs
8572 	 * the reset.
8573 	 *
8574 	 * For most tg3 variants the trick below was working.
8575 	 * Ho hum...
8576 	 */
8577 	udelay(120);
8578 
8579 	/* Flush PCI posted writes.  The normal MMIO registers
8580 	 * are inaccessible at this time so this is the only
8581 	 * way to do this reliably (actually, this is no longer
8582 	 * the case, see above).  I tried to use indirect
8583 	 * register read/write but this upset some 5701 variants.
8584 	 */
8585 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8586 
8587 	udelay(120);
8588 
8589 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8590 		u16 val16;
8591 
8592 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8593 			int j;
8594 			u32 cfg_val;
8595 
8596 			/* Wait for link training to complete.  */
8597 			for (j = 0; j < 5000; j++)
8598 				udelay(100);
8599 
8600 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8601 			pci_write_config_dword(tp->pdev, 0xc4,
8602 					       cfg_val | (1 << 15));
8603 		}
8604 
8605 		/* Clear the "no snoop" and "relaxed ordering" bits. */
8606 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8607 		/*
8608 		 * Older PCIe devices only support the 128 byte
8609 		 * MPS setting.  Enforce the restriction.
8610 		 */
8611 		if (!tg3_flag(tp, CPMU_PRESENT))
8612 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8613 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8614 
8615 		/* Clear error status */
8616 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8617 				      PCI_EXP_DEVSTA_CED |
8618 				      PCI_EXP_DEVSTA_NFED |
8619 				      PCI_EXP_DEVSTA_FED |
8620 				      PCI_EXP_DEVSTA_URD);
8621 	}
8622 
8623 	tg3_restore_pci_state(tp);
8624 
8625 	tg3_flag_clear(tp, CHIP_RESETTING);
8626 	tg3_flag_clear(tp, ERROR_PROCESSED);
8627 
8628 	val = 0;
8629 	if (tg3_flag(tp, 5780_CLASS))
8630 		val = tr32(MEMARB_MODE);
8631 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8632 
8633 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8634 		tg3_stop_fw(tp);
8635 		tw32(0x5000, 0x400);
8636 	}
8637 
8638 	if (tg3_flag(tp, IS_SSB_CORE)) {
8639 		/*
8640 		 * BCM4785: Stop the Rx RISC CPU, which is not required for
8641 		 * operation, to avoid repercussions from its potentially
8642 		 * defective internal ROM.
8643 		 */
8644 		tg3_stop_fw(tp);
8645 		tg3_halt_cpu(tp, RX_CPU_BASE);
8646 	}
8647 
8648 	tw32(GRC_MODE, tp->grc_mode);
8649 
8650 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8651 		val = tr32(0xc4);
8652 
8653 		tw32(0xc4, val | (1 << 15));
8654 	}
8655 
8656 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8657 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
8658 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8659 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
8660 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8661 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8662 	}
8663 
8664 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8665 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8666 		val = tp->mac_mode;
8667 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8668 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8669 		val = tp->mac_mode;
8670 	} else
8671 		val = 0;
8672 
8673 	tw32_f(MAC_MODE, val);
8674 	udelay(40);
8675 
8676 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8677 
8678 	err = tg3_poll_fw(tp);
8679 	if (err)
8680 		return err;
8681 
8682 	tg3_mdio_start(tp);
8683 
8684 	if (tg3_flag(tp, PCI_EXPRESS) &&
8685 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
8686 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
8687 	    !tg3_flag(tp, 57765_PLUS)) {
8688 		val = tr32(0x7c00);
8689 
8690 		tw32(0x7c00, val | (1 << 25));
8691 	}
8692 
8693 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
8694 		val = tr32(TG3_CPMU_CLCK_ORIDE);
8695 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8696 	}
8697 
8698 	/* Reprobe ASF enable state.  */
8699 	tg3_flag_clear(tp, ENABLE_ASF);
8700 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8701 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8702 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8703 		u32 nic_cfg;
8704 
8705 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8706 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8707 			tg3_flag_set(tp, ENABLE_ASF);
8708 			tp->last_event_jiffies = jiffies;
8709 			if (tg3_flag(tp, 5750_PLUS))
8710 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8711 		}
8712 	}
8713 
8714 	return 0;
8715 }
8716 
8717 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8718 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8719 
8720 /* tp->lock is held. */
8721 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8722 {
8723 	int err;
8724 
8725 	tg3_stop_fw(tp);
8726 
8727 	tg3_write_sig_pre_reset(tp, kind);
8728 
8729 	tg3_abort_hw(tp, silent);
8730 	err = tg3_chip_reset(tp);
8731 
8732 	__tg3_set_mac_addr(tp, 0);
8733 
8734 	tg3_write_sig_legacy(tp, kind);
8735 	tg3_write_sig_post_reset(tp, kind);
8736 
8737 	if (tp->hw_stats) {
8738 		/* Save the stats across chip resets... */
8739 		tg3_get_nstats(tp, &tp->net_stats_prev);
8740 		tg3_get_estats(tp, &tp->estats_prev);
8741 
8742 		/* And make sure the next sample is new data */
8743 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8744 	}
8745 
8746 	if (err)
8747 		return err;
8748 
8749 	return 0;
8750 }
8751 
8752 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8753 {
8754 	struct tg3 *tp = netdev_priv(dev);
8755 	struct sockaddr *addr = p;
8756 	int err = 0, skip_mac_1 = 0;
8757 
8758 	if (!is_valid_ether_addr(addr->sa_data))
8759 		return -EADDRNOTAVAIL;
8760 
8761 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8762 
8763 	if (!netif_running(dev))
8764 		return 0;
8765 
8766 	if (tg3_flag(tp, ENABLE_ASF)) {
8767 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
8768 
8769 		addr0_high = tr32(MAC_ADDR_0_HIGH);
8770 		addr0_low = tr32(MAC_ADDR_0_LOW);
8771 		addr1_high = tr32(MAC_ADDR_1_HIGH);
8772 		addr1_low = tr32(MAC_ADDR_1_LOW);
8773 
8774 		/* Skip MAC addr 1 if ASF is using it. */
8775 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8776 		    !(addr1_high == 0 && addr1_low == 0))
8777 			skip_mac_1 = 1;
8778 	}
8779 	spin_lock_bh(&tp->lock);
8780 	__tg3_set_mac_addr(tp, skip_mac_1);
8781 	spin_unlock_bh(&tp->lock);
8782 
8783 	return err;
8784 }
8785 
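/* A BDINFO block in NIC SRAM describes one ring: the 64-bit host DMA
 * address (high word first), a maxlen/flags word, and, on pre-5705
 * chips, the ring's address in NIC-local memory.
 */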
8786 /* tp->lock is held. */
8787 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8788 			   dma_addr_t mapping, u32 maxlen_flags,
8789 			   u32 nic_addr)
8790 {
8791 	tg3_write_mem(tp,
8792 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8793 		      ((u64) mapping >> 32));
8794 	tg3_write_mem(tp,
8795 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8796 		      ((u64) mapping & 0xffffffff));
8797 	tg3_write_mem(tp,
8798 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8799 		       maxlen_flags);
8800 
8801 	if (!tg3_flag(tp, 5705_PLUS))
8802 		tg3_write_mem(tp,
8803 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8804 			      nic_addr);
8805 }
8806 
8807 
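/* Per-vector coalescing registers are laid out in 0x18-byte strides
 * starting at the *_VEC1 addresses.
 */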
8808 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8809 {
8810 	int i = 0;
8811 
8812 	if (!tg3_flag(tp, ENABLE_TSS)) {
8813 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8814 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8815 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8816 	} else {
8817 		tw32(HOSTCC_TXCOL_TICKS, 0);
8818 		tw32(HOSTCC_TXMAX_FRAMES, 0);
8819 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8820 
8821 		for (; i < tp->txq_cnt; i++) {
8822 			u32 reg;
8823 
8824 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8825 			tw32(reg, ec->tx_coalesce_usecs);
8826 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8827 			tw32(reg, ec->tx_max_coalesced_frames);
8828 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8829 			tw32(reg, ec->tx_max_coalesced_frames_irq);
8830 		}
8831 	}
8832 
8833 	for (; i < tp->irq_max - 1; i++) {
8834 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8835 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8836 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8837 	}
8838 }
8839 
8840 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8841 {
8842 	int i = 0;
8843 	u32 limit = tp->rxq_cnt;
8844 
8845 	if (!tg3_flag(tp, ENABLE_RSS)) {
8846 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8847 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8848 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8849 		limit--;
8850 	} else {
8851 		tw32(HOSTCC_RXCOL_TICKS, 0);
8852 		tw32(HOSTCC_RXMAX_FRAMES, 0);
8853 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8854 	}
8855 
8856 	for (; i < limit; i++) {
8857 		u32 reg;
8858 
8859 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8860 		tw32(reg, ec->rx_coalesce_usecs);
8861 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8862 		tw32(reg, ec->rx_max_coalesced_frames);
8863 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8864 		tw32(reg, ec->rx_max_coalesced_frames_irq);
8865 	}
8866 
8867 	for (; i < tp->irq_max - 1; i++) {
8868 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8869 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8870 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8871 	}
8872 }
8873 
8874 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8875 {
8876 	tg3_coal_tx_init(tp, ec);
8877 	tg3_coal_rx_init(tp, ec);
8878 
8879 	if (!tg3_flag(tp, 5705_PLUS)) {
8880 		u32 val = ec->stats_block_coalesce_usecs;
8881 
8882 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8883 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8884 
8885 		if (!tp->link_up)
8886 			val = 0;
8887 
8888 		tw32(HOSTCC_STAT_COAL_TICKS, val);
8889 	}
8890 }
8891 
8892 /* tp->lock is held. */
8893 static void tg3_rings_reset(struct tg3 *tp)
8894 {
8895 	int i;
8896 	u32 stblk, txrcb, rxrcb, limit;
8897 	struct tg3_napi *tnapi = &tp->napi[0];
8898 
8899 	/* Disable all transmit rings but the first. */
8900 	if (!tg3_flag(tp, 5705_PLUS))
8901 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8902 	else if (tg3_flag(tp, 5717_PLUS))
8903 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8904 	else if (tg3_flag(tp, 57765_CLASS) ||
8905 		 tg3_asic_rev(tp) == ASIC_REV_5762)
8906 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8907 	else
8908 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8909 
8910 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8911 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8912 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8913 			      BDINFO_FLAGS_DISABLED);
8914 
8916 	/* Disable all receive return rings but the first. */
8917 	if (tg3_flag(tp, 5717_PLUS))
8918 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8919 	else if (!tg3_flag(tp, 5705_PLUS))
8920 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8921 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
8922 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
8923 		 tg3_flag(tp, 57765_CLASS))
8924 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8925 	else
8926 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8927 
8928 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8929 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8930 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8931 			      BDINFO_FLAGS_DISABLED);
8932 
8933 	/* Disable interrupts */
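	/* A nonzero low bit in an interrupt mailbox keeps that vector
	 * masked until the handler re-arms it with the last status tag.
	 */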
8934 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8935 	tp->napi[0].chk_msi_cnt = 0;
8936 	tp->napi[0].last_rx_cons = 0;
8937 	tp->napi[0].last_tx_cons = 0;
8938 
8939 	/* Zero mailbox registers. */
8940 	if (tg3_flag(tp, SUPPORT_MSIX)) {
8941 		for (i = 1; i < tp->irq_max; i++) {
8942 			tp->napi[i].tx_prod = 0;
8943 			tp->napi[i].tx_cons = 0;
8944 			if (tg3_flag(tp, ENABLE_TSS))
8945 				tw32_mailbox(tp->napi[i].prodmbox, 0);
8946 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
8947 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8948 			tp->napi[i].chk_msi_cnt = 0;
8949 			tp->napi[i].last_rx_cons = 0;
8950 			tp->napi[i].last_tx_cons = 0;
8951 		}
8952 		if (!tg3_flag(tp, ENABLE_TSS))
8953 			tw32_mailbox(tp->napi[0].prodmbox, 0);
8954 	} else {
8955 		tp->napi[0].tx_prod = 0;
8956 		tp->napi[0].tx_cons = 0;
8957 		tw32_mailbox(tp->napi[0].prodmbox, 0);
8958 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
8959 	}
8960 
8961 	/* Make sure the NIC-based send BD rings are disabled. */
8962 	if (!tg3_flag(tp, 5705_PLUS)) {
8963 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8964 		for (i = 0; i < 16; i++)
8965 			tw32_tx_mbox(mbox + i * 8, 0);
8966 	}
8967 
8968 	txrcb = NIC_SRAM_SEND_RCB;
8969 	rxrcb = NIC_SRAM_RCV_RET_RCB;
8970 
8971 	/* Clear status block in ram. */
8972 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8973 
8974 	/* Set status block DMA address */
8975 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8976 	     ((u64) tnapi->status_mapping >> 32));
8977 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8978 	     ((u64) tnapi->status_mapping & 0xffffffff));
8979 
8980 	if (tnapi->tx_ring) {
8981 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8982 			       (TG3_TX_RING_SIZE <<
8983 				BDINFO_FLAGS_MAXLEN_SHIFT),
8984 			       NIC_SRAM_TX_BUFFER_DESC);
8985 		txrcb += TG3_BDINFO_SIZE;
8986 	}
8987 
8988 	if (tnapi->rx_rcb) {
8989 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8990 			       (tp->rx_ret_ring_mask + 1) <<
8991 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8992 		rxrcb += TG3_BDINFO_SIZE;
8993 	}
8994 
8995 	stblk = HOSTCC_STATBLCK_RING1;
8996 
8997 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8998 		u64 mapping = (u64)tnapi->status_mapping;
8999 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9000 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9001 
9002 		/* Clear status block in ram. */
9003 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9004 
9005 		if (tnapi->tx_ring) {
9006 			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9007 				       (TG3_TX_RING_SIZE <<
9008 					BDINFO_FLAGS_MAXLEN_SHIFT),
9009 				       NIC_SRAM_TX_BUFFER_DESC);
9010 			txrcb += TG3_BDINFO_SIZE;
9011 		}
9012 
9013 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9014 			       ((tp->rx_ret_ring_mask + 1) <<
9015 				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9016 
9017 		stblk += 8;
9018 		rxrcb += TG3_BDINFO_SIZE;
9019 	}
9020 }
9021 
9022 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9023 {
9024 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9025 
9026 	if (!tg3_flag(tp, 5750_PLUS) ||
9027 	    tg3_flag(tp, 5780_CLASS) ||
9028 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9029 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9030 	    tg3_flag(tp, 57765_PLUS))
9031 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9032 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9033 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9034 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9035 	else
9036 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9037 
9038 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9039 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9040 
9041 	val = min(nic_rep_thresh, host_rep_thresh);
9042 	tw32(RCVBDI_STD_THRESH, val);
9043 
9044 	if (tg3_flag(tp, 57765_PLUS))
9045 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9046 
9047 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9048 		return;
9049 
9050 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9051 
9052 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9053 
9054 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9055 	tw32(RCVBDI_JUMBO_THRESH, val);
9056 
9057 	if (tg3_flag(tp, 57765_PLUS))
9058 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9059 }
9060 
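/* Bitwise CRC-32: LSB-first, reflected polynomial 0xedb88320, initial
 * value 0xffffffff, final complement.  Used to hash multicast addresses
 * for the MAC hash filter.
 */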
9061 static inline u32 calc_crc(unsigned char *buf, int len)
9062 {
9063 	u32 reg;
9064 	u32 tmp;
9065 	int j, k;
9066 
9067 	reg = 0xffffffff;
9068 
9069 	for (j = 0; j < len; j++) {
9070 		reg ^= buf[j];
9071 
9072 		for (k = 0; k < 8; k++) {
9073 			tmp = reg & 0x01;
9074 
9075 			reg >>= 1;
9076 
9077 			if (tmp)
9078 				reg ^= 0xedb88320;
9079 		}
9080 	}
9081 
9082 	return ~reg;
9083 }
9084 
9085 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9086 {
9087 	/* accept or reject all multicast frames */
9088 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9089 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9090 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9091 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9092 }
9093 
9094 static void __tg3_set_rx_mode(struct net_device *dev)
9095 {
9096 	struct tg3 *tp = netdev_priv(dev);
9097 	u32 rx_mode;
9098 
9099 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9100 				  RX_MODE_KEEP_VLAN_TAG);
9101 
9102 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9103 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9104 	 * flag clear.
9105 	 */
9106 	if (!tg3_flag(tp, ENABLE_ASF))
9107 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9108 #endif
9109 
9110 	if (dev->flags & IFF_PROMISC) {
9111 		/* Promiscuous mode. */
9112 		rx_mode |= RX_MODE_PROMISC;
9113 	} else if (dev->flags & IFF_ALLMULTI) {
9114 		/* Accept all multicast. */
9115 		tg3_set_multi(tp, 1);
9116 	} else if (netdev_mc_empty(dev)) {
9117 		/* Reject all multicast. */
9118 		tg3_set_multi(tp, 0);
9119 	} else {
9120 		/* Accept one or more multicast(s). */
9121 		struct netdev_hw_addr *ha;
9122 		u32 mc_filter[4] = { 0, };
9123 		u32 regidx;
9124 		u32 bit;
9125 		u32 crc;
9126 
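		/* The low 7 bits of the inverted CRC select one of the 128
		 * hash-filter bits: bits 6:5 pick the register, bits 4:0 the
		 * bit.  E.g. ~crc & 0x7f == 0x4a gives regidx 2, bit 10.
		 */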
9127 		netdev_for_each_mc_addr(ha, dev) {
9128 			crc = calc_crc(ha->addr, ETH_ALEN);
9129 			bit = ~crc & 0x7f;
9130 			regidx = (bit & 0x60) >> 5;
9131 			bit &= 0x1f;
9132 			mc_filter[regidx] |= (1 << bit);
9133 		}
9134 
9135 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9136 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9137 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9138 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9139 	}
9140 
9141 	if (rx_mode != tp->rx_mode) {
9142 		tp->rx_mode = rx_mode;
9143 		tw32_f(MAC_RX_MODE, rx_mode);
9144 		udelay(10);
9145 	}
9146 }
9147 
9148 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9149 {
9150 	int i;
9151 
9152 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9153 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9154 }
9155 
9156 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9157 {
9158 	int i;
9159 
9160 	if (!tg3_flag(tp, SUPPORT_MSIX))
9161 		return;
9162 
9163 	if (tp->rxq_cnt == 1) {
9164 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9165 		return;
9166 	}
9167 
9168 	/* Validate table against current IRQ count */
9169 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9170 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9171 			break;
9172 	}
9173 
9174 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9175 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9176 }
9177 
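/* Pack eight 4-bit table entries per 32-bit register, earliest entry in
 * the most significant nibble; 128 entries fill 16 consecutive registers.
 * E.g. entries { 1, 2, 3, 0, 1, 2, 3, 0 } pack to 0x12301230.
 */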
9178 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9179 {
9180 	int i = 0;
9181 	u32 reg = MAC_RSS_INDIR_TBL_0;
9182 
9183 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9184 		u32 val = tp->rss_ind_tbl[i];
9185 		i++;
9186 		for (; i % 8; i++) {
9187 			val <<= 4;
9188 			val |= tp->rss_ind_tbl[i];
9189 		}
9190 		tw32(reg, val);
9191 		reg += 4;
9192 	}
9193 }
9194 
9195 /* tp->lock is held. */
9196 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9197 {
9198 	u32 val, rdmac_mode;
9199 	int i, err, limit;
9200 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9201 
9202 	tg3_disable_ints(tp);
9203 
9204 	tg3_stop_fw(tp);
9205 
9206 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9207 
9208 	if (tg3_flag(tp, INIT_COMPLETE))
9209 		tg3_abort_hw(tp, 1);
9210 
9211 	/* Enable MAC control of LPI */
9212 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9213 		val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9214 		      TG3_CPMU_EEE_LNKIDL_UART_IDL;
9215 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9216 			val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9217 
9218 		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9219 
9220 		tw32_f(TG3_CPMU_EEE_CTRL,
9221 		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9222 
9223 		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9224 		      TG3_CPMU_EEEMD_LPI_IN_TX |
9225 		      TG3_CPMU_EEEMD_LPI_IN_RX |
9226 		      TG3_CPMU_EEEMD_EEE_ENABLE;
9227 
9228 		if (tg3_asic_rev(tp) != ASIC_REV_5717)
9229 			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9230 
9231 		if (tg3_flag(tp, ENABLE_APE))
9232 			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9233 
9234 		tw32_f(TG3_CPMU_EEE_MODE, val);
9235 
9236 		tw32_f(TG3_CPMU_EEE_DBTMR1,
9237 		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9238 		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9239 
9240 		tw32_f(TG3_CPMU_EEE_DBTMR2,
9241 		       TG3_CPMU_DBTMR2_APE_TX_2047US |
9242 		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9243 	}
9244 
9245 	if (reset_phy)
9246 		tg3_phy_reset(tp);
9247 
9248 	err = tg3_chip_reset(tp);
9249 	if (err)
9250 		return err;
9251 
9252 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9253 
9254 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9255 		val = tr32(TG3_CPMU_CTRL);
9256 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9257 		tw32(TG3_CPMU_CTRL, val);
9258 
9259 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9260 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9261 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9262 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9263 
9264 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9265 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9266 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9267 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9268 
9269 		val = tr32(TG3_CPMU_HST_ACC);
9270 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9271 		val |= CPMU_HST_ACC_MACCLK_6_25;
9272 		tw32(TG3_CPMU_HST_ACC, val);
9273 	}
9274 
9275 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9276 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9277 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9278 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9279 		tw32(PCIE_PWR_MGMT_THRESH, val);
9280 
9281 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9282 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9283 
9284 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9285 
9286 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9287 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9288 	}
9289 
9290 	if (tg3_flag(tp, L1PLLPD_EN)) {
9291 		u32 grc_mode = tr32(GRC_MODE);
9292 
9293 		/* Access the lower 1K of PL PCIE block registers. */
9294 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9295 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9296 
9297 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9298 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9299 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9300 
9301 		tw32(GRC_MODE, grc_mode);
9302 	}
9303 
9304 	if (tg3_flag(tp, 57765_CLASS)) {
9305 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9306 			u32 grc_mode = tr32(GRC_MODE);
9307 
9308 			/* Access the lower 1K of PL PCIE block registers. */
9309 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9310 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9311 
9312 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9313 				   TG3_PCIE_PL_LO_PHYCTL5);
9314 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9315 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9316 
9317 			tw32(GRC_MODE, grc_mode);
9318 		}
9319 
9320 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9321 			u32 grc_mode;
9322 
9323 			/* Fix transmit hangs */
9324 			val = tr32(TG3_CPMU_PADRNG_CTL);
9325 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9326 			tw32(TG3_CPMU_PADRNG_CTL, val);
9327 
9328 			grc_mode = tr32(GRC_MODE);
9329 
9330 			/* Access the lower 1K of DL PCIE block registers. */
9331 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9332 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9333 
9334 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9335 				   TG3_PCIE_DL_LO_FTSMAX);
9336 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9337 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9338 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9339 
9340 			tw32(GRC_MODE, grc_mode);
9341 		}
9342 
9343 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9344 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9345 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9346 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9347 	}
9348 
9349 	/* This works around an issue with Athlon chipsets on
9350 	 * B3 tigon3 silicon.  This bit has no effect on any
9351 	 * other revision.  But do not set this on PCI Express
9352 	 * chips and don't even touch the clocks if the CPMU is present.
9353 	 */
9354 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9355 		if (!tg3_flag(tp, PCI_EXPRESS))
9356 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9357 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9358 	}
9359 
9360 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9361 	    tg3_flag(tp, PCIX_MODE)) {
9362 		val = tr32(TG3PCI_PCISTATE);
9363 		val |= PCISTATE_RETRY_SAME_DMA;
9364 		tw32(TG3PCI_PCISTATE, val);
9365 	}
9366 
9367 	if (tg3_flag(tp, ENABLE_APE)) {
9368 		/* Allow reads and writes to the
9369 		 * APE register and memory space.
9370 		 */
9371 		val = tr32(TG3PCI_PCISTATE);
9372 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9373 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9374 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9375 		tw32(TG3PCI_PCISTATE, val);
9376 	}
9377 
9378 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9379 		/* Enable some hw fixes.  */
9380 		val = tr32(TG3PCI_MSI_DATA);
9381 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9382 		tw32(TG3PCI_MSI_DATA, val);
9383 	}
9384 
9385 	/* Descriptor ring init may make accesses to the
9386 	 * NIC SRAM area to set up the TX descriptors, so we
9387 	 * can only do this after the hardware has been
9388 	 * successfully reset.
9389 	 */
9390 	err = tg3_init_rings(tp);
9391 	if (err)
9392 		return err;
9393 
9394 	if (tg3_flag(tp, 57765_PLUS)) {
9395 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9396 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9397 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9398 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9399 		if (!tg3_flag(tp, 57765_CLASS) &&
9400 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9401 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9402 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9403 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9404 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9405 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9406 		/* This value is determined during the probe-time DMA
9407 		 * engine test, tg3_test_dma.
9408 		 */
9409 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9410 	}
9411 
9412 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9413 			  GRC_MODE_4X_NIC_SEND_RINGS |
9414 			  GRC_MODE_NO_TX_PHDR_CSUM |
9415 			  GRC_MODE_NO_RX_PHDR_CSUM);
9416 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9417 
9418 	/* Pseudo-header checksum is done by hardware logic and not
9419 	 * the offload processors, so make the chip do the pseudo-
9420 	 * header checksums on receive.  For transmit it is more
9421 	 * convenient to do the pseudo-header checksum in software
9422 	 * as Linux does that on transmit for us in all cases.
9423 	 */
9424 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9425 
9426 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9427 	if (tp->rxptpctl)
9428 		tw32(TG3_RX_PTP_CTL,
9429 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9430 
9431 	if (tg3_flag(tp, PTP_CAPABLE))
9432 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9433 
9434 	tw32(GRC_MODE, tp->grc_mode | val);
9435 
9436 	/* Set up the timer prescaler register.  Clock is always 66MHz. */
9437 	val = tr32(GRC_MISC_CFG);
9438 	val &= ~0xff;
9439 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9440 	tw32(GRC_MISC_CFG, val);
9441 
9442 	/* Initialize MBUF/DESC pool. */
9443 	if (tg3_flag(tp, 5750_PLUS)) {
9444 		/* Do nothing.  */
9445 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9446 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9447 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9448 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9449 		else
9450 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9451 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9452 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9453 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9454 		int fw_len;
9455 
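		/* Round the firmware length up to a 128-byte boundary;
		 * the mbuf pool is carved out of NIC SRAM immediately
		 * after the firmware image.
		 */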
9456 		fw_len = tp->fw_len;
9457 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9458 		tw32(BUFMGR_MB_POOL_ADDR,
9459 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9460 		tw32(BUFMGR_MB_POOL_SIZE,
9461 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9462 	}
9463 
9464 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9465 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9466 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9467 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9468 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9469 		tw32(BUFMGR_MB_HIGH_WATER,
9470 		     tp->bufmgr_config.mbuf_high_water);
9471 	} else {
9472 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9473 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9474 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9475 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9476 		tw32(BUFMGR_MB_HIGH_WATER,
9477 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9478 	}
9479 	tw32(BUFMGR_DMA_LOW_WATER,
9480 	     tp->bufmgr_config.dma_low_water);
9481 	tw32(BUFMGR_DMA_HIGH_WATER,
9482 	     tp->bufmgr_config.dma_high_water);
9483 
9484 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9485 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9486 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9487 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9488 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9489 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9490 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9491 	tw32(BUFMGR_MODE, val);
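	/* Wait up to 20ms (2000 * 10us) for the buffer manager to
	 * report itself enabled before giving up.
	 */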
9492 	for (i = 0; i < 2000; i++) {
9493 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9494 			break;
9495 		udelay(10);
9496 	}
9497 	if (i >= 2000) {
9498 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9499 		return -ENODEV;
9500 	}
9501 
9502 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9503 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9504 
9505 	tg3_setup_rxbd_thresholds(tp);
9506 
9507 	/* Initialize TG3_BDINFO's at:
9508 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9509 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9510 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9511 	 *
9512 	 * like so:
9513 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9514 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9515 	 *                              ring attribute flags
9516 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9517 	 *
9518 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9519 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9520 	 *
9521 	 * The size of each ring is fixed in the firmware, but the location is
9522 	 * configurable.
9523 	 */
9524 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9525 	     ((u64) tpr->rx_std_mapping >> 32));
9526 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9527 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
9528 	if (!tg3_flag(tp, 5717_PLUS))
9529 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9530 		     NIC_SRAM_RX_BUFFER_DESC);
9531 
9532 	/* Disable the mini ring */
9533 	if (!tg3_flag(tp, 5705_PLUS))
9534 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9535 		     BDINFO_FLAGS_DISABLED);
9536 
9537 	/* Program the jumbo buffer descriptor ring control
9538 	 * blocks on those devices that have them.
9539 	 */
9540 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9541 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9542 
9543 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9544 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9545 			     ((u64) tpr->rx_jmb_mapping >> 32));
9546 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9547 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9548 			val = TG3_RX_JMB_RING_SIZE(tp) <<
9549 			      BDINFO_FLAGS_MAXLEN_SHIFT;
9550 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9551 			     val | BDINFO_FLAGS_USE_EXT_RECV);
9552 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9553 			    tg3_flag(tp, 57765_CLASS) ||
9554 			    tg3_asic_rev(tp) == ASIC_REV_5762)
9555 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9556 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9557 		} else {
9558 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9559 			     BDINFO_FLAGS_DISABLED);
9560 		}
9561 
9562 		if (tg3_flag(tp, 57765_PLUS)) {
9563 			val = TG3_RX_STD_RING_SIZE(tp);
9564 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9565 			val |= (TG3_RX_STD_DMA_SZ << 2);
9566 		} else
9567 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9568 	} else
9569 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9570 
9571 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9572 
9573 	tpr->rx_std_prod_idx = tp->rx_pending;
9574 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9575 
9576 	tpr->rx_jmb_prod_idx =
9577 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9578 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9579 
9580 	tg3_rings_reset(tp);
9581 
9582 	/* Initialize MAC address and backoff seed. */
9583 	__tg3_set_mac_addr(tp, 0);
9584 
9585 	/* MTU + ethernet header + FCS + optional VLAN tag */
9586 	tw32(MAC_RX_MTU_SIZE,
9587 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9588 
9589 	/* The slot time is changed by tg3_setup_phy if we
9590 	 * run at gigabit with half duplex.
9591 	 */
9592 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9593 	      (6 << TX_LENGTHS_IPG_SHIFT) |
9594 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9595 
9596 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9597 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9598 		val |= tr32(MAC_TX_LENGTHS) &
9599 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
9600 			TX_LENGTHS_CNT_DWN_VAL_MSK);
9601 
9602 	tw32(MAC_TX_LENGTHS, val);
9603 
9604 	/* Receive rules. */
9605 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9606 	tw32(RCVLPC_CONFIG, 0x0181);
9607 
9608 	/* Calculate the RDMAC_MODE setting early; we need it to determine
9609 	 * the RCVLPC_STATE_ENABLE mask.
9610 	 */
9611 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9612 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9613 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9614 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9615 		      RDMAC_MODE_LNGREAD_ENAB);
9616 
9617 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
9618 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9619 
9620 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9621 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9622 	    tg3_asic_rev(tp) == ASIC_REV_57780)
9623 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9624 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9625 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9626 
9627 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9628 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9629 		if (tg3_flag(tp, TSO_CAPABLE) &&
9630 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
9631 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9632 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9633 			   !tg3_flag(tp, IS_5788)) {
9634 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9635 		}
9636 	}
9637 
9638 	if (tg3_flag(tp, PCI_EXPRESS))
9639 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9640 
9641 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9642 		tp->dma_limit = 0;
9643 		if (tp->dev->mtu <= ETH_DATA_LEN) {
9644 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
9645 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
9646 		}
9647 	}
9648 
9649 	if (tg3_flag(tp, HW_TSO_1) ||
9650 	    tg3_flag(tp, HW_TSO_2) ||
9651 	    tg3_flag(tp, HW_TSO_3))
9652 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9653 
9654 	if (tg3_flag(tp, 57765_PLUS) ||
9655 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9656 	    tg3_asic_rev(tp) == ASIC_REV_57780)
9657 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9658 
9659 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9660 	    tg3_asic_rev(tp) == ASIC_REV_5762)
9661 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9662 
9663 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
9664 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
9665 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
9666 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
9667 	    tg3_flag(tp, 57765_PLUS)) {
9668 		u32 tgtreg;
9669 
9670 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
9671 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
9672 		else
9673 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
9674 
9675 		val = tr32(tgtreg);
9676 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9677 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
9678 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9679 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9680 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9681 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9682 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9683 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9684 		}
9685 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9686 	}
9687 
9688 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
9689 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
9690 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
9691 		u32 tgtreg;
9692 
9693 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
9694 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
9695 		else
9696 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
9697 
9698 		val = tr32(tgtreg);
9699 		tw32(tgtreg, val |
9700 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9701 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9702 	}
9703 
9704 	/* Receive/send statistics. */
9705 	if (tg3_flag(tp, 5750_PLUS)) {
9706 		val = tr32(RCVLPC_STATS_ENABLE);
9707 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
9708 		tw32(RCVLPC_STATS_ENABLE, val);
9709 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9710 		   tg3_flag(tp, TSO_CAPABLE)) {
9711 		val = tr32(RCVLPC_STATS_ENABLE);
9712 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9713 		tw32(RCVLPC_STATS_ENABLE, val);
9714 	} else {
9715 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9716 	}
9717 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9718 	tw32(SNDDATAI_STATSENAB, 0xffffff);
9719 	tw32(SNDDATAI_STATSCTRL,
9720 	     (SNDDATAI_SCTRL_ENABLE |
9721 	      SNDDATAI_SCTRL_FASTUPD));
9722 
9723 	/* Set up the host coalescing engine. */
9724 	tw32(HOSTCC_MODE, 0);
9725 	for (i = 0; i < 2000; i++) {
9726 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9727 			break;
9728 		udelay(10);
9729 	}
9730 
9731 	__tg3_set_coalesce(tp, &tp->coal);
9732 
9733 	if (!tg3_flag(tp, 5705_PLUS)) {
9734 		/* Status/statistics block address.  See tg3_timer,
9735 		 * the tg3_periodic_fetch_stats call there, and
9736 		 * tg3_get_stats to see how this works for 5705/5750 chips.
9737 		 */
9738 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9739 		     ((u64) tp->stats_mapping >> 32));
9740 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9741 		     ((u64) tp->stats_mapping & 0xffffffff));
9742 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9743 
9744 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9745 
9746 		/* Clear statistics and status block memory areas */
9747 		for (i = NIC_SRAM_STATS_BLK;
9748 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9749 		     i += sizeof(u32)) {
9750 			tg3_write_mem(tp, i, 0);
9751 			udelay(40);
9752 		}
9753 	}
9754 
9755 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9756 
9757 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9758 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9759 	if (!tg3_flag(tp, 5705_PLUS))
9760 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9761 
9762 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9763 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9764 		/* reset to prevent losing 1st rx packet intermittently */
9765 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9766 		udelay(10);
9767 	}
9768 
9769 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9770 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9771 			MAC_MODE_FHDE_ENABLE;
9772 	if (tg3_flag(tp, ENABLE_APE))
9773 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9774 	if (!tg3_flag(tp, 5705_PLUS) &&
9775 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9776 	    tg3_asic_rev(tp) != ASIC_REV_5700)
9777 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9778 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9779 	udelay(40);
9780 
9781 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9782 	 * If TG3_FLAG_IS_NIC is zero, we should read the
9783 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
9784 	 * whether used as inputs or outputs, are set by boot code after
9785 	 * reset.
9786 	 */
9787 	if (!tg3_flag(tp, IS_NIC)) {
9788 		u32 gpio_mask;
9789 
9790 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9791 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9792 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9793 
9794 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
9795 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9796 				     GRC_LCLCTRL_GPIO_OUTPUT3;
9797 
9798 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
9799 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9800 
9801 		tp->grc_local_ctrl &= ~gpio_mask;
9802 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9803 
9804 		/* GPIO1 must be driven high for eeprom write protect */
9805 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
9806 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9807 					       GRC_LCLCTRL_GPIO_OUTPUT1);
9808 	}
9809 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9810 	udelay(100);
9811 
9812 	if (tg3_flag(tp, USING_MSIX)) {
9813 		val = tr32(MSGINT_MODE);
9814 		val |= MSGINT_MODE_ENABLE;
9815 		if (tp->irq_cnt > 1)
9816 			val |= MSGINT_MODE_MULTIVEC_EN;
9817 		if (!tg3_flag(tp, 1SHOT_MSI))
9818 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9819 		tw32(MSGINT_MODE, val);
9820 	}
9821 
9822 	if (!tg3_flag(tp, 5705_PLUS)) {
9823 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9824 		udelay(40);
9825 	}
9826 
9827 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9828 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9829 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9830 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9831 	       WDMAC_MODE_LNGREAD_ENAB);
9832 
9833 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9834 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9835 		if (tg3_flag(tp, TSO_CAPABLE) &&
9836 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
9837 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
9838 			/* nothing */
9839 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9840 			   !tg3_flag(tp, IS_5788)) {
9841 			val |= WDMAC_MODE_RX_ACCEL;
9842 		}
9843 	}
9844 
9845 	/* Enable host coalescing bug fix */
9846 	if (tg3_flag(tp, 5755_PLUS))
9847 		val |= WDMAC_MODE_STATUS_TAG_FIX;
9848 
9849 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
9850 		val |= WDMAC_MODE_BURST_ALL_DATA;
9851 
9852 	tw32_f(WDMAC_MODE, val);
9853 	udelay(40);
9854 
9855 	if (tg3_flag(tp, PCIX_MODE)) {
9856 		u16 pcix_cmd;
9857 
9858 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9859 				     &pcix_cmd);
9860 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
9861 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9862 			pcix_cmd |= PCI_X_CMD_READ_2K;
9863 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
9864 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9865 			pcix_cmd |= PCI_X_CMD_READ_2K;
9866 		}
9867 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9868 				      pcix_cmd);
9869 	}
9870 
9871 	tw32_f(RDMAC_MODE, rdmac_mode);
9872 	udelay(40);
9873 
9874 	if (tg3_asic_rev(tp) == ASIC_REV_5719) {
9875 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9876 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9877 				break;
9878 		}
9879 		if (i < TG3_NUM_RDMA_CHANNELS) {
9880 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9881 			val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9882 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9883 			tg3_flag_set(tp, 5719_RDMA_BUG);
9884 		}
9885 	}
9886 
9887 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9888 	if (!tg3_flag(tp, 5705_PLUS))
9889 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9890 
9891 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
9892 		tw32(SNDDATAC_MODE,
9893 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9894 	else
9895 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9896 
9897 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9898 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9899 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9900 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
9901 		val |= RCVDBDI_MODE_LRG_RING_SZ;
9902 	tw32(RCVDBDI_MODE, val);
9903 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9904 	if (tg3_flag(tp, HW_TSO_1) ||
9905 	    tg3_flag(tp, HW_TSO_2) ||
9906 	    tg3_flag(tp, HW_TSO_3))
9907 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9908 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9909 	if (tg3_flag(tp, ENABLE_TSS))
9910 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
9911 	tw32(SNDBDI_MODE, val);
9912 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9913 
9914 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
9915 		err = tg3_load_5701_a0_firmware_fix(tp);
9916 		if (err)
9917 			return err;
9918 	}
9919 
9920 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9921 		/* Ignore any errors for the firmware download. If download
9922 		 * fails, the device will operate with EEE disabled
9923 		 * fails, the device will operate with EEE disabled.
9924 		tg3_load_57766_firmware(tp);
9925 	}
9926 
9927 	if (tg3_flag(tp, TSO_CAPABLE)) {
9928 		err = tg3_load_tso_firmware(tp);
9929 		if (err)
9930 			return err;
9931 	}
9932 
9933 	tp->tx_mode = TX_MODE_ENABLE;
9934 
9935 	if (tg3_flag(tp, 5755_PLUS) ||
9936 	    tg3_asic_rev(tp) == ASIC_REV_5906)
9937 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9938 
9939 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9940 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
9941 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9942 		tp->tx_mode &= ~val;
9943 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9944 	}
9945 
9946 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9947 	udelay(100);
9948 
9949 	if (tg3_flag(tp, ENABLE_RSS)) {
9950 		tg3_rss_write_indir_tbl(tp);
9951 
9952 		/* Set up the "secret" hash key. */
9953 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9954 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9955 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9956 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9957 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9958 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9959 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9960 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9961 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9962 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9963 	}
9964 
9965 	tp->rx_mode = RX_MODE_ENABLE;
9966 	if (tg3_flag(tp, 5755_PLUS))
9967 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9968 
9969 	if (tg3_flag(tp, ENABLE_RSS))
9970 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
9971 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
9972 			       RX_MODE_RSS_IPV6_HASH_EN |
9973 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
9974 			       RX_MODE_RSS_IPV4_HASH_EN |
9975 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
9976 
9977 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9978 	udelay(10);
9979 
9980 	tw32(MAC_LED_CTRL, tp->led_ctrl);
9981 
9982 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9983 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9984 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9985 		udelay(10);
9986 	}
9987 	tw32_f(MAC_RX_MODE, tp->rx_mode);
9988 	udelay(10);
9989 
9990 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9991 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
9992 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9993 			/* Set drive transmission level to 1.2V  */
9994 			/* only if the signal pre-emphasis bit is not set  */
9995 			val = tr32(MAC_SERDES_CFG);
9996 			val &= 0xfffff000;
9997 			val |= 0x880;
9998 			tw32(MAC_SERDES_CFG, val);
9999 		}
10000 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10001 			tw32(MAC_SERDES_CFG, 0x616000);
10002 	}
10003 
10004 	/* Prevent chip from dropping frames when flow control
10005 	 * is enabled.
10006 	 */
10007 	if (tg3_flag(tp, 57765_CLASS))
10008 		val = 1;
10009 	else
10010 		val = 2;
10011 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10012 
10013 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10014 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10015 		/* Use hardware link auto-negotiation */
10016 		tg3_flag_set(tp, HW_AUTONEG);
10017 	}
10018 
10019 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10020 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10021 		u32 tmp;
10022 
10023 		tmp = tr32(SERDES_RX_CTRL);
10024 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10025 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10026 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10027 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10028 	}
10029 
10030 	if (!tg3_flag(tp, USE_PHYLIB)) {
10031 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10032 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10033 
10034 		err = tg3_setup_phy(tp, 0);
10035 		if (err)
10036 			return err;
10037 
10038 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10039 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10040 			u32 tmp;
10041 
10042 			/* Clear CRC stats. */
10043 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10044 				tg3_writephy(tp, MII_TG3_TEST1,
10045 					     tmp | MII_TG3_TEST1_CRC_EN);
10046 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10047 			}
10048 		}
10049 	}
10050 
10051 	__tg3_set_rx_mode(tp->dev);
10052 
10053 	/* Initialize receive rules. */
10054 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10055 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10056 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10057 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10058 
10059 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10060 		limit = 8;
10061 	else
10062 		limit = 16;
10063 	if (tg3_flag(tp, ENABLE_ASF))
10064 		limit -= 4;
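	/* Each case below falls through on purpose, zeroing rule/value
	 * pairs from the highest available slot downward; the lowest
	 * slots are programmed above or deliberately left untouched.
	 */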
10065 	switch (limit) {
10066 	case 16:
10067 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10068 	case 15:
10069 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10070 	case 14:
10071 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10072 	case 13:
10073 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10074 	case 12:
10075 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10076 	case 11:
10077 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10078 	case 10:
10079 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10080 	case 9:
10081 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10082 	case 8:
10083 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10084 	case 7:
10085 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10086 	case 6:
10087 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10088 	case 5:
10089 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10090 	case 4:
10091 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10092 	case 3:
10093 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10094 	case 2:
10095 	case 1:
10096 
10097 	default:
10098 		break;
10099 	}
10100 
10101 	if (tg3_flag(tp, ENABLE_APE))
10102 		/* Write our heartbeat update interval to APE. */
10103 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10104 				APE_HOST_HEARTBEAT_INT_DISABLE);
10105 
10106 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10107 
10108 	return 0;
10109 }
10110 
10111 /* Called at device open time to get the chip ready for
10112  * packet processing.  Invoked with tp->lock held.
10113  */
10114 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
10115 {
10116 	tg3_switch_clocks(tp);
10117 
10118 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10119 
10120 	return tg3_reset_hw(tp, reset_phy);
10121 }
10122 
10123 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10124 {
10125 	int i;
10126 
10127 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10128 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10129 
10130 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10131 		off += len;
10132 
10133 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10134 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10135 			memset(ocir, 0, TG3_OCIR_LEN);
10136 	}
10137 }
10138 
10139 /* sysfs attributes for hwmon */
10140 static ssize_t tg3_show_temp(struct device *dev,
10141 			     struct device_attribute *devattr, char *buf)
10142 {
10143 	struct pci_dev *pdev = to_pci_dev(dev);
10144 	struct net_device *netdev = pci_get_drvdata(pdev);
10145 	struct tg3 *tp = netdev_priv(netdev);
10146 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10147 	u32 temperature;
10148 
10149 	spin_lock_bh(&tp->lock);
10150 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10151 				sizeof(temperature));
10152 	spin_unlock_bh(&tp->lock);
10153 	return sprintf(buf, "%u\n", temperature);
10154 }
10155 
10156 
10157 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10158 			  TG3_TEMP_SENSOR_OFFSET);
10159 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10160 			  TG3_TEMP_CAUTION_OFFSET);
10161 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10162 			  TG3_TEMP_MAX_OFFSET);
10163 
10164 static struct attribute *tg3_attributes[] = {
10165 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10166 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10167 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10168 	NULL
10169 };
10170 
10171 static const struct attribute_group tg3_group = {
10172 	.attrs = tg3_attributes,
10173 };
10174 
10175 static void tg3_hwmon_close(struct tg3 *tp)
10176 {
10177 	if (tp->hwmon_dev) {
10178 		hwmon_device_unregister(tp->hwmon_dev);
10179 		tp->hwmon_dev = NULL;
10180 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10181 	}
10182 }
10183 
10184 static void tg3_hwmon_open(struct tg3 *tp)
10185 {
10186 	int i, err;
10187 	u32 size = 0;
10188 	struct pci_dev *pdev = tp->pdev;
10189 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10190 
10191 	tg3_sd_scan_scratchpad(tp, ocirs);
10192 
10193 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10194 		if (!ocirs[i].src_data_length)
10195 			continue;
10196 
10197 		size += ocirs[i].src_hdr_length;
10198 		size += ocirs[i].src_data_length;
10199 	}
10200 
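	/* No active OCIR records means no sensor data to expose, so
	 * skip hwmon registration entirely.
	 */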
10201 	if (!size)
10202 		return;
10203 
10204 	/* Register hwmon sysfs hooks */
10205 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10206 	if (err) {
10207 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10208 		return;
10209 	}
10210 
10211 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10212 	if (IS_ERR(tp->hwmon_dev)) {
10213 		tp->hwmon_dev = NULL;
10214 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10215 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10216 	}
10217 }
10218 
10219 
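/* Fold a 32-bit hardware counter into a 64-bit software total.  If the
 * 32-bit addition wraps, the new low word is smaller than the value just
 * added, so the carry is propagated into the high word by hand.
 */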
10220 #define TG3_STAT_ADD32(PSTAT, REG) \
10221 do {	u32 __val = tr32(REG); \
10222 	(PSTAT)->low += __val; \
10223 	if ((PSTAT)->low < __val) \
10224 		(PSTAT)->high += 1; \
10225 } while (0)
10226 
10227 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10228 {
10229 	struct tg3_hw_stats *sp = tp->hw_stats;
10230 
10231 	if (!tp->link_up)
10232 		return;
10233 
10234 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10235 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10236 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10237 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10238 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10239 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10240 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10241 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10242 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10243 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10244 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10245 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10246 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
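	/* Once the total TX packet count exceeds the number of RDMA
	 * channels, the 5719 TX-length workaround is no longer needed
	 * and is switched back off.
	 */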
10247 	if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
10248 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10249 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10250 		u32 val;
10251 
10252 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10253 		val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
10254 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10255 		tg3_flag_clear(tp, 5719_RDMA_BUG);
10256 	}
10257 
10258 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10259 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10260 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10261 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10262 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10263 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10264 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10265 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10266 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10267 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10268 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10269 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10270 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10271 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10272 
10273 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10274 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10275 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10276 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10277 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10278 	} else {
10279 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10280 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10281 		if (val) {
10282 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10283 			sp->rx_discards.low += val;
10284 			if (sp->rx_discards.low < val)
10285 				sp->rx_discards.high += 1;
10286 		}
10287 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10288 	}
10289 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10290 }
10291 
10292 static void tg3_chk_missed_msi(struct tg3 *tp)
10293 {
10294 	u32 i;
10295 
10296 	for (i = 0; i < tp->irq_cnt; i++) {
10297 		struct tg3_napi *tnapi = &tp->napi[i];
10298 
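		/* If work is pending but neither consumer index has moved
		 * since the previous tick, the MSI was probably missed;
		 * allow one grace tick, then invoke the handler directly.
		 */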
10299 		if (tg3_has_work(tnapi)) {
10300 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10301 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10302 				if (tnapi->chk_msi_cnt < 1) {
10303 					tnapi->chk_msi_cnt++;
10304 					return;
10305 				}
10306 				tg3_msi(0, tnapi);
10307 			}
10308 		}
10309 		tnapi->chk_msi_cnt = 0;
10310 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10311 		tnapi->last_tx_cons = tnapi->tx_cons;
10312 	}
10313 }
10314 
10315 static void tg3_timer(unsigned long __opaque)
10316 {
10317 	struct tg3 *tp = (struct tg3 *) __opaque;
10318 
10319 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10320 		goto restart_timer;
10321 
10322 	spin_lock(&tp->lock);
10323 
10324 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10325 	    tg3_flag(tp, 57765_CLASS))
10326 		tg3_chk_missed_msi(tp);
10327 
10328 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10329 		/* BCM4785: Flush posted writes from GbE to host memory. */
10330 		tr32(HOSTCC_MODE);
10331 	}
10332 
10333 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10334 		/* All of this garbage is because, when using non-tagged
10335 		 * IRQ status, the mailbox/status_block protocol the chip
10336 		 * uses with the CPU is race prone.
10337 		 */
10338 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10339 			tw32(GRC_LOCAL_CTRL,
10340 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10341 		} else {
10342 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10343 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10344 		}
10345 
10346 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10347 			spin_unlock(&tp->lock);
10348 			tg3_reset_task_schedule(tp);
10349 			goto restart_timer;
10350 		}
10351 	}
10352 
10353 	/* This part only runs once per second. */
10354 	if (!--tp->timer_counter) {
10355 		if (tg3_flag(tp, 5705_PLUS))
10356 			tg3_periodic_fetch_stats(tp);
10357 
10358 		if (tp->setlpicnt && !--tp->setlpicnt)
10359 			tg3_phy_eee_enable(tp);
10360 
10361 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10362 			u32 mac_stat;
10363 			int phy_event;
10364 
10365 			mac_stat = tr32(MAC_STATUS);
10366 
10367 			phy_event = 0;
10368 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10369 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10370 					phy_event = 1;
10371 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10372 				phy_event = 1;
10373 
10374 			if (phy_event)
10375 				tg3_setup_phy(tp, 0);
10376 		} else if (tg3_flag(tp, POLL_SERDES)) {
10377 			u32 mac_stat = tr32(MAC_STATUS);
10378 			int need_setup = 0;
10379 
10380 			if (tp->link_up &&
10381 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10382 				need_setup = 1;
10383 			}
10384 			if (!tp->link_up &&
10385 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10386 					 MAC_STATUS_SIGNAL_DET))) {
10387 				need_setup = 1;
10388 			}
10389 			if (need_setup) {
10390 				if (!tp->serdes_counter) {
10391 					tw32_f(MAC_MODE,
10392 					     (tp->mac_mode &
10393 					      ~MAC_MODE_PORT_MODE_MASK));
10394 					udelay(40);
10395 					tw32_f(MAC_MODE, tp->mac_mode);
10396 					udelay(40);
10397 				}
10398 				tg3_setup_phy(tp, 0);
10399 			}
10400 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10401 			   tg3_flag(tp, 5780_CLASS)) {
10402 			tg3_serdes_parallel_detect(tp);
10403 		}
10404 
10405 		tp->timer_counter = tp->timer_multiplier;
10406 	}
10407 
10408 	/* Heartbeat is only sent once every 2 seconds.
10409 	 *
10410 	 * The heartbeat is to tell the ASF firmware that the host
10411 	 * driver is still alive.  In the event that the OS crashes,
10412 	 * ASF needs to reset the hardware to free up the FIFO space
10413 	 * that may be filled with rx packets destined for the host.
10414 	 * If the FIFO is full, ASF will no longer function properly.
10415 	 *
10416 	 * Unintended resets have been reported on real time kernels
10417 	 * Unintended resets have been reported on real-time kernels
10418 	 * where the timer doesn't run on time.  Netpoll will also have
10419 	 * the same problem.
10420 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10421 	 * to check the ring condition when the heartbeat is expiring
10422 	 * before doing the reset.  This will prevent most unintended
10423 	 * resets.
10424 	 */
10425 	if (!--tp->asf_counter) {
10426 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10427 			tg3_wait_for_event_ack(tp);
10428 
10429 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10430 				      FWCMD_NICDRV_ALIVE3);
10431 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10432 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10433 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10434 
10435 			tg3_generate_fw_event(tp);
10436 		}
10437 		tp->asf_counter = tp->asf_multiplier;
10438 	}
10439 
10440 	spin_unlock(&tp->lock);
10441 
10442 restart_timer:
10443 	tp->timer.expires = jiffies + tp->timer_offset;
10444 	add_timer(&tp->timer);
10445 }
10446 
10447 static void tg3_timer_init(struct tg3 *tp)
10448 {
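	/* Chips with tagged status can get by with a 1 second tick;
	 * everything else (including the 5717/57765 class, which rely
	 * on the missed-MSI check in tg3_timer) is polled every 100ms.
	 */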
10449 	if (tg3_flag(tp, TAGGED_STATUS) &&
10450 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10451 	    !tg3_flag(tp, 57765_CLASS))
10452 		tp->timer_offset = HZ;
10453 	else
10454 		tp->timer_offset = HZ / 10;
10455 
10456 	BUG_ON(tp->timer_offset > HZ);
10457 
10458 	tp->timer_multiplier = (HZ / tp->timer_offset);
10459 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10460 			     TG3_FW_UPDATE_FREQ_SEC;
10461 
10462 	init_timer(&tp->timer);
10463 	tp->timer.data = (unsigned long) tp;
10464 	tp->timer.function = tg3_timer;
10465 }
10466 
10467 static void tg3_timer_start(struct tg3 *tp)
10468 {
10469 	tp->asf_counter   = tp->asf_multiplier;
10470 	tp->timer_counter = tp->timer_multiplier;
10471 
10472 	tp->timer.expires = jiffies + tp->timer_offset;
10473 	add_timer(&tp->timer);
10474 }
10475 
10476 static void tg3_timer_stop(struct tg3 *tp)
10477 {
10478 	del_timer_sync(&tp->timer);
10479 }
10480 
10481 /* Restart hardware after configuration changes, self-test, etc.
10482  * Invoked with tp->lock held.
10483  */
10484 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10485 	__releases(tp->lock)
10486 	__acquires(tp->lock)
10487 {
10488 	int err;
10489 
10490 	err = tg3_init_hw(tp, reset_phy);
10491 	if (err) {
10492 		netdev_err(tp->dev,
10493 			   "Failed to re-initialize device, aborting\n");
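		/* dev_close() winds up back in tg3_close(), which takes
		 * tp->lock itself, so the lock must be dropped around
		 * the teardown and retaken afterward, matching the
		 * __releases/__acquires annotations above.
		 */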
10494 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10495 		tg3_full_unlock(tp);
10496 		tg3_timer_stop(tp);
10497 		tp->irq_sync = 0;
10498 		tg3_napi_enable(tp);
10499 		dev_close(tp->dev);
10500 		tg3_full_lock(tp, 0);
10501 	}
10502 	return err;
10503 }
10504 
10505 static void tg3_reset_task(struct work_struct *work)
10506 {
10507 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10508 	int err;
10509 
10510 	tg3_full_lock(tp, 0);
10511 
10512 	if (!netif_running(tp->dev)) {
10513 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10514 		tg3_full_unlock(tp);
10515 		return;
10516 	}
10517 
10518 	tg3_full_unlock(tp);
10519 
10520 	tg3_phy_stop(tp);
10521 
10522 	tg3_netif_stop(tp);
10523 
10524 	tg3_full_lock(tp, 1);
10525 
10526 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10527 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
10528 		tp->write32_rx_mbox = tg3_write_flush_reg32;
10529 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
10530 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10531 	}
10532 
10533 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10534 	err = tg3_init_hw(tp, 1);
10535 	if (err)
10536 		goto out;
10537 
10538 	tg3_netif_start(tp);
10539 
10540 out:
10541 	tg3_full_unlock(tp);
10542 
10543 	if (!err)
10544 		tg3_phy_start(tp);
10545 
10546 	tg3_flag_clear(tp, RESET_TASK_PENDING);
10547 }
10548 
10549 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10550 {
10551 	irq_handler_t fn;
10552 	unsigned long flags;
10553 	char *name;
10554 	struct tg3_napi *tnapi = &tp->napi[irq_num];
10555 
10556 	if (tp->irq_cnt == 1)
10557 		name = tp->dev->name;
10558 	else {
10559 		name = &tnapi->irq_lbl[0];
10560 		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10561 		name[IFNAMSIZ-1] = 0;
10562 	}
10563 
10564 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10565 		fn = tg3_msi;
10566 		if (tg3_flag(tp, 1SHOT_MSI))
10567 			fn = tg3_msi_1shot;
10568 		flags = 0;
10569 	} else {
10570 		fn = tg3_interrupt;
10571 		if (tg3_flag(tp, TAGGED_STATUS))
10572 			fn = tg3_interrupt_tagged;
10573 		flags = IRQF_SHARED;
10574 	}
10575 
10576 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10577 }
10578 
10579 static int tg3_test_interrupt(struct tg3 *tp)
10580 {
10581 	struct tg3_napi *tnapi = &tp->napi[0];
10582 	struct net_device *dev = tp->dev;
10583 	int err, i, intr_ok = 0;
10584 	u32 val;
10585 
10586 	if (!netif_running(dev))
10587 		return -ENODEV;
10588 
10589 	tg3_disable_ints(tp);
10590 
10591 	free_irq(tnapi->irq_vec, tnapi);
10592 
10593 	/*
10594 	 * Turn off MSI one shot mode.  Otherwise this test has no
10595 	 * observable way to know whether the interrupt was delivered.
10596 	 */
10597 	if (tg3_flag(tp, 57765_PLUS)) {
10598 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10599 		tw32(MSGINT_MODE, val);
10600 	}
10601 
10602 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
10603 			  IRQF_SHARED, dev->name, tnapi);
10604 	if (err)
10605 		return err;
10606 
10607 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10608 	tg3_enable_ints(tp);
10609 
10610 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10611 	       tnapi->coal_now);
10612 
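	/* Poll for up to ~50ms for the test interrupt to arrive, seen
	 * either as a nonzero interrupt mailbox value or as the chip
	 * masking the legacy INTx line.
	 */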
10613 	for (i = 0; i < 5; i++) {
10614 		u32 int_mbox, misc_host_ctrl;
10615 
10616 		int_mbox = tr32_mailbox(tnapi->int_mbox);
10617 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10618 
10619 		if ((int_mbox != 0) ||
10620 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10621 			intr_ok = 1;
10622 			break;
10623 		}
10624 
10625 		if (tg3_flag(tp, 57765_PLUS) &&
10626 		    tnapi->hw_status->status_tag != tnapi->last_tag)
10627 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10628 
10629 		msleep(10);
10630 	}
10631 
10632 	tg3_disable_ints(tp);
10633 
10634 	free_irq(tnapi->irq_vec, tnapi);
10635 
10636 	err = tg3_request_irq(tp, 0);
10637 
10638 	if (err)
10639 		return err;
10640 
10641 	if (intr_ok) {
10642 		/* Reenable MSI one shot mode. */
10643 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10644 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10645 			tw32(MSGINT_MODE, val);
10646 		}
10647 		return 0;
10648 	}
10649 
10650 	return -EIO;
10651 }
10652 
10653 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
10654  * is successfully restored.
10655  */
10656 static int tg3_test_msi(struct tg3 *tp)
10657 {
10658 	int err;
10659 	u16 pci_cmd;
10660 
10661 	if (!tg3_flag(tp, USING_MSI))
10662 		return 0;
10663 
10664 	/* Turn off SERR reporting in case MSI terminates with Master
10665 	 * Abort.
10666 	 */
10667 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10668 	pci_write_config_word(tp->pdev, PCI_COMMAND,
10669 			      pci_cmd & ~PCI_COMMAND_SERR);
10670 
10671 	err = tg3_test_interrupt(tp);
10672 
10673 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10674 
10675 	if (!err)
10676 		return 0;
10677 
10678 	/* other failures */
10679 	if (err != -EIO)
10680 		return err;
10681 
10682 	/* MSI test failed, go back to INTx mode */
10683 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10684 		    "to INTx mode. Please report this failure to the PCI "
10685 		    "maintainer and include system chipset information\n");
10686 
10687 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10688 
10689 	pci_disable_msi(tp->pdev);
10690 
10691 	tg3_flag_clear(tp, USING_MSI);
10692 	tp->napi[0].irq_vec = tp->pdev->irq;
10693 
10694 	err = tg3_request_irq(tp, 0);
10695 	if (err)
10696 		return err;
10697 
10698 	/* Need to reset the chip because the MSI cycle may have terminated
10699 	 * with Master Abort.
10700 	 */
10701 	tg3_full_lock(tp, 1);
10702 
10703 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10704 	err = tg3_init_hw(tp, 1);
10705 
10706 	tg3_full_unlock(tp);
10707 
10708 	if (err)
10709 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10710 
10711 	return err;
10712 }
10713 
10714 static int tg3_request_firmware(struct tg3 *tp)
10715 {
10716 	const struct tg3_firmware_hdr *fw_hdr;
10717 
10718 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10719 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10720 			   tp->fw_needed);
10721 		return -ENOENT;
10722 	}
10723 
10724 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10725 
10726 	/* Firmware blob starts with version numbers, followed by
10727 	 * start address and _full_ length including BSS sections
10728 	 * (which must be longer than the actual data, of course
10729 	 * (which must be longer than the actual data, of course).
10730 
10731 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
10732 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10733 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10734 			   tp->fw_len, tp->fw_needed);
10735 		release_firmware(tp->fw);
10736 		tp->fw = NULL;
10737 		return -EINVAL;
10738 	}
10739 
10740 	/* We no longer need firmware; we have it. */
10741 	tp->fw_needed = NULL;
10742 	return 0;
10743 }
10744 
10745 static u32 tg3_irq_count(struct tg3 *tp)
10746 {
10747 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10748 
10749 	if (irq_cnt > 1) {
10750 		/* We want as many rx rings enabled as there are CPUs.
10751 		 * In multiqueue MSI-X mode, the first MSI-X vector
10752 		 * only deals with link interrupts, etc., so we add
10753 		 * one to the number of vectors we are requesting.
10754 		 */
10755 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10756 	}
10757 
10758 	return irq_cnt;
10759 }
10760 
10761 static bool tg3_enable_msix(struct tg3 *tp)
10762 {
10763 	int i, rc;
10764 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10765 
10766 	tp->txq_cnt = tp->txq_req;
10767 	tp->rxq_cnt = tp->rxq_req;
10768 	if (!tp->rxq_cnt)
10769 		tp->rxq_cnt = netif_get_num_default_rss_queues();
10770 	if (tp->rxq_cnt > tp->rxq_max)
10771 		tp->rxq_cnt = tp->rxq_max;
10772 
10773 	/* Disable multiple TX rings by default.  Simple round-robin hardware
10774 	 * scheduling of the TX rings can cause starvation of rings with
10775 	 * small packets when other rings have TSO or jumbo packets.
10776 	 */
10777 	if (!tp->txq_req)
10778 		tp->txq_cnt = 1;
10779 
10780 	tp->irq_cnt = tg3_irq_count(tp);
10781 
10782 	for (i = 0; i < tp->irq_max; i++) {
10783 		msix_ent[i].entry  = i;
10784 		msix_ent[i].vector = 0;
10785 	}
10786 
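	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or a positive count of the vectors actually
	 * available; in that last case, retry with the smaller count
	 * and scale the rx/tx queue counts down to match.
	 */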
10787 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10788 	if (rc < 0) {
10789 		return false;
10790 	} else if (rc != 0) {
10791 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
10792 			return false;
10793 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10794 			      tp->irq_cnt, rc);
10795 		tp->irq_cnt = rc;
10796 		tp->rxq_cnt = max(rc - 1, 1);
10797 		if (tp->txq_cnt)
10798 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10799 	}
10800 
10801 	for (i = 0; i < tp->irq_max; i++)
10802 		tp->napi[i].irq_vec = msix_ent[i].vector;
10803 
10804 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10805 		pci_disable_msix(tp->pdev);
10806 		return false;
10807 	}
10808 
10809 	if (tp->irq_cnt == 1)
10810 		return true;
10811 
10812 	tg3_flag_set(tp, ENABLE_RSS);
10813 
10814 	if (tp->txq_cnt > 1)
10815 		tg3_flag_set(tp, ENABLE_TSS);
10816 
10817 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10818 
10819 	return true;
10820 }
10821 
10822 static void tg3_ints_init(struct tg3 *tp)
10823 {
10824 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10825 	    !tg3_flag(tp, TAGGED_STATUS)) {
10826 		/* All MSI supporting chips should support tagged
10827 		 * status.  Assert that this is the case.
10828 		 */
10829 		netdev_warn(tp->dev,
10830 			    "MSI without TAGGED_STATUS? Not using MSI\n");
10831 		goto defcfg;
10832 	}
10833 
10834 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10835 		tg3_flag_set(tp, USING_MSIX);
10836 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10837 		tg3_flag_set(tp, USING_MSI);
10838 
10839 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10840 		u32 msi_mode = tr32(MSGINT_MODE);
10841 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10842 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10843 		if (!tg3_flag(tp, 1SHOT_MSI))
10844 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10845 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10846 	}
10847 defcfg:
10848 	if (!tg3_flag(tp, USING_MSIX)) {
10849 		tp->irq_cnt = 1;
10850 		tp->napi[0].irq_vec = tp->pdev->irq;
10851 	}
10852 
10853 	if (tp->irq_cnt == 1) {
10854 		tp->txq_cnt = 1;
10855 		tp->rxq_cnt = 1;
10856 		netif_set_real_num_tx_queues(tp->dev, 1);
10857 		netif_set_real_num_rx_queues(tp->dev, 1);
10858 	}
10859 }
10860 
10861 static void tg3_ints_fini(struct tg3 *tp)
10862 {
10863 	if (tg3_flag(tp, USING_MSIX))
10864 		pci_disable_msix(tp->pdev);
10865 	else if (tg3_flag(tp, USING_MSI))
10866 		pci_disable_msi(tp->pdev);
10867 	tg3_flag_clear(tp, USING_MSI);
10868 	tg3_flag_clear(tp, USING_MSIX);
10869 	tg3_flag_clear(tp, ENABLE_RSS);
10870 	tg3_flag_clear(tp, ENABLE_TSS);
10871 }
10872 
10873 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10874 		     bool init)
10875 {
10876 	struct net_device *dev = tp->dev;
10877 	int i, err;
10878 
10879 	/*
10880 	 * Set up interrupts first so we know how
10881 	 * many NAPI resources to allocate
10882 	 */
10883 	tg3_ints_init(tp);
10884 
10885 	tg3_rss_check_indir_tbl(tp);
10886 
10887 	/* The placement of this call is tied
10888 	 * to the setup and use of Host TX descriptors.
10889 	 */
10890 	err = tg3_alloc_consistent(tp);
10891 	if (err)
10892 		goto err_out1;
10893 
10894 	tg3_napi_init(tp);
10895 
10896 	tg3_napi_enable(tp);
10897 
10898 	for (i = 0; i < tp->irq_cnt; i++) {
10899 		struct tg3_napi *tnapi = &tp->napi[i];
10900 		err = tg3_request_irq(tp, i);
10901 		if (err) {
10902 			for (i--; i >= 0; i--) {
10903 				tnapi = &tp->napi[i];
10904 				free_irq(tnapi->irq_vec, tnapi);
10905 			}
10906 			goto err_out2;
10907 		}
10908 	}
10909 
10910 	tg3_full_lock(tp, 0);
10911 
10912 	err = tg3_init_hw(tp, reset_phy);
10913 	if (err) {
10914 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10915 		tg3_free_rings(tp);
10916 	}
10917 
10918 	tg3_full_unlock(tp);
10919 
10920 	if (err)
10921 		goto err_out3;
10922 
10923 	if (test_irq && tg3_flag(tp, USING_MSI)) {
10924 		err = tg3_test_msi(tp);
10925 
10926 		if (err) {
10927 			tg3_full_lock(tp, 0);
10928 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10929 			tg3_free_rings(tp);
10930 			tg3_full_unlock(tp);
10931 
10932 			goto err_out2;
10933 		}
10934 
10935 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10936 			u32 val = tr32(PCIE_TRANSACTION_CFG);
10937 
10938 			tw32(PCIE_TRANSACTION_CFG,
10939 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
10940 		}
10941 	}
10942 
10943 	tg3_phy_start(tp);
10944 
10945 	tg3_hwmon_open(tp);
10946 
10947 	tg3_full_lock(tp, 0);
10948 
10949 	tg3_timer_start(tp);
10950 	tg3_flag_set(tp, INIT_COMPLETE);
10951 	tg3_enable_ints(tp);
10952 
10953 	if (init)
10954 		tg3_ptp_init(tp);
10955 	else
10956 		tg3_ptp_resume(tp);
10957 
10958 
10959 	tg3_full_unlock(tp);
10960 
10961 	netif_tx_start_all_queues(dev);
10962 
10963 	/*
10964 	 * Reset the loopback feature if it was turned on while the device
10965 	 * was down, to make sure that it is installed properly now.
10966 	 */
10967 	if (dev->features & NETIF_F_LOOPBACK)
10968 		tg3_set_loopback(dev, dev->features);
10969 
10970 	return 0;
10971 
10972 err_out3:
10973 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
10974 		struct tg3_napi *tnapi = &tp->napi[i];
10975 		free_irq(tnapi->irq_vec, tnapi);
10976 	}
10977 
10978 err_out2:
10979 	tg3_napi_disable(tp);
10980 	tg3_napi_fini(tp);
10981 	tg3_free_consistent(tp);
10982 
10983 err_out1:
10984 	tg3_ints_fini(tp);
10985 
10986 	return err;
10987 }
10988 
10989 static void tg3_stop(struct tg3 *tp)
10990 {
10991 	int i;
10992 
10993 	tg3_reset_task_cancel(tp);
10994 	tg3_netif_stop(tp);
10995 
10996 	tg3_timer_stop(tp);
10997 
10998 	tg3_hwmon_close(tp);
10999 
11000 	tg3_phy_stop(tp);
11001 
11002 	tg3_full_lock(tp, 1);
11003 
11004 	tg3_disable_ints(tp);
11005 
11006 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11007 	tg3_free_rings(tp);
11008 	tg3_flag_clear(tp, INIT_COMPLETE);
11009 
11010 	tg3_full_unlock(tp);
11011 
11012 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11013 		struct tg3_napi *tnapi = &tp->napi[i];
11014 		free_irq(tnapi->irq_vec, tnapi);
11015 	}
11016 
11017 	tg3_ints_fini(tp);
11018 
11019 	tg3_napi_fini(tp);
11020 
11021 	tg3_free_consistent(tp);
11022 }
11023 
11024 static int tg3_open(struct net_device *dev)
11025 {
11026 	struct tg3 *tp = netdev_priv(dev);
11027 	int err;
11028 
11029 	if (tp->fw_needed) {
11030 		err = tg3_request_firmware(tp);
11031 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11032 			if (err) {
11033 				netdev_warn(tp->dev, "EEE capability disabled\n");
11034 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11035 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11036 				netdev_warn(tp->dev, "EEE capability restored\n");
11037 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11038 			}
11039 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11040 			if (err)
11041 				return err;
11042 		} else if (err) {
11043 			netdev_warn(tp->dev, "TSO capability disabled\n");
11044 			tg3_flag_clear(tp, TSO_CAPABLE);
11045 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11046 			netdev_notice(tp->dev, "TSO capability restored\n");
11047 			tg3_flag_set(tp, TSO_CAPABLE);
11048 		}
11049 	}
11050 
11051 	tg3_carrier_off(tp);
11052 
11053 	err = tg3_power_up(tp);
11054 	if (err)
11055 		return err;
11056 
11057 	tg3_full_lock(tp, 0);
11058 
11059 	tg3_disable_ints(tp);
11060 	tg3_flag_clear(tp, INIT_COMPLETE);
11061 
11062 	tg3_full_unlock(tp);
11063 
11064 	err = tg3_start(tp, true, true, true);
11065 	if (err) {
11066 		tg3_frob_aux_power(tp, false);
11067 		pci_set_power_state(tp->pdev, PCI_D3hot);
11068 	}
11069 
11070 	if (tg3_flag(tp, PTP_CAPABLE)) {
11071 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11072 						   &tp->pdev->dev);
11073 		if (IS_ERR(tp->ptp_clock))
11074 			tp->ptp_clock = NULL;
11075 	}
11076 
11077 	return err;
11078 }
11079 
11080 static int tg3_close(struct net_device *dev)
11081 {
11082 	struct tg3 *tp = netdev_priv(dev);
11083 
11084 	tg3_ptp_fini(tp);
11085 
11086 	tg3_stop(tp);
11087 
11088 	/* Clear stats across close / open calls */
11089 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11090 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11091 
11092 	tg3_power_down(tp);
11093 
11094 	tg3_carrier_off(tp);
11095 
11096 	return 0;
11097 }
11098 
11099 static inline u64 get_stat64(tg3_stat64_t *val)
11100 {
11101 	return ((u64)val->high << 32) | ((u64)val->low);
11102 }
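
/* Worked example (illustrative values only): the hardware keeps each
 * statistics counter as two 32-bit halves, so high = 0x00000001 and
 * low = 0x00000005 combine above into the u64 0x0000000100000005.
 */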
11103 
11104 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11105 {
11106 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11107 
11108 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11109 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11110 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11111 		u32 val;
11112 
11113 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11114 			tg3_writephy(tp, MII_TG3_TEST1,
11115 				     val | MII_TG3_TEST1_CRC_EN);
11116 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11117 		} else
11118 			val = 0;
11119 
11120 		tp->phy_crc_errors += val;
11121 
11122 		return tp->phy_crc_errors;
11123 	}
11124 
11125 	return get_stat64(&hw_stats->rx_fcs_errors);
11126 }
11127 
11128 #define ESTAT_ADD(member) \
11129 	estats->member =	old_estats->member + \
11130 				get_stat64(&hw_stats->member)
11131 
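/* For one member, ESTAT_ADD(rx_octets) expands to:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *				get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the snapshot saved before the last
 * counter reset plus the live hardware count.
 */
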
11132 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11133 {
11134 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11135 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11136 
11137 	ESTAT_ADD(rx_octets);
11138 	ESTAT_ADD(rx_fragments);
11139 	ESTAT_ADD(rx_ucast_packets);
11140 	ESTAT_ADD(rx_mcast_packets);
11141 	ESTAT_ADD(rx_bcast_packets);
11142 	ESTAT_ADD(rx_fcs_errors);
11143 	ESTAT_ADD(rx_align_errors);
11144 	ESTAT_ADD(rx_xon_pause_rcvd);
11145 	ESTAT_ADD(rx_xoff_pause_rcvd);
11146 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11147 	ESTAT_ADD(rx_xoff_entered);
11148 	ESTAT_ADD(rx_frame_too_long_errors);
11149 	ESTAT_ADD(rx_jabbers);
11150 	ESTAT_ADD(rx_undersize_packets);
11151 	ESTAT_ADD(rx_in_length_errors);
11152 	ESTAT_ADD(rx_out_length_errors);
11153 	ESTAT_ADD(rx_64_or_less_octet_packets);
11154 	ESTAT_ADD(rx_65_to_127_octet_packets);
11155 	ESTAT_ADD(rx_128_to_255_octet_packets);
11156 	ESTAT_ADD(rx_256_to_511_octet_packets);
11157 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11158 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11159 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11160 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11161 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11162 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11163 
11164 	ESTAT_ADD(tx_octets);
11165 	ESTAT_ADD(tx_collisions);
11166 	ESTAT_ADD(tx_xon_sent);
11167 	ESTAT_ADD(tx_xoff_sent);
11168 	ESTAT_ADD(tx_flow_control);
11169 	ESTAT_ADD(tx_mac_errors);
11170 	ESTAT_ADD(tx_single_collisions);
11171 	ESTAT_ADD(tx_mult_collisions);
11172 	ESTAT_ADD(tx_deferred);
11173 	ESTAT_ADD(tx_excessive_collisions);
11174 	ESTAT_ADD(tx_late_collisions);
11175 	ESTAT_ADD(tx_collide_2times);
11176 	ESTAT_ADD(tx_collide_3times);
11177 	ESTAT_ADD(tx_collide_4times);
11178 	ESTAT_ADD(tx_collide_5times);
11179 	ESTAT_ADD(tx_collide_6times);
11180 	ESTAT_ADD(tx_collide_7times);
11181 	ESTAT_ADD(tx_collide_8times);
11182 	ESTAT_ADD(tx_collide_9times);
11183 	ESTAT_ADD(tx_collide_10times);
11184 	ESTAT_ADD(tx_collide_11times);
11185 	ESTAT_ADD(tx_collide_12times);
11186 	ESTAT_ADD(tx_collide_13times);
11187 	ESTAT_ADD(tx_collide_14times);
11188 	ESTAT_ADD(tx_collide_15times);
11189 	ESTAT_ADD(tx_ucast_packets);
11190 	ESTAT_ADD(tx_mcast_packets);
11191 	ESTAT_ADD(tx_bcast_packets);
11192 	ESTAT_ADD(tx_carrier_sense_errors);
11193 	ESTAT_ADD(tx_discards);
11194 	ESTAT_ADD(tx_errors);
11195 
11196 	ESTAT_ADD(dma_writeq_full);
11197 	ESTAT_ADD(dma_write_prioq_full);
11198 	ESTAT_ADD(rxbds_empty);
11199 	ESTAT_ADD(rx_discards);
11200 	ESTAT_ADD(rx_errors);
11201 	ESTAT_ADD(rx_threshold_hit);
11202 
11203 	ESTAT_ADD(dma_readq_full);
11204 	ESTAT_ADD(dma_read_prioq_full);
11205 	ESTAT_ADD(tx_comp_queue_full);
11206 
11207 	ESTAT_ADD(ring_set_send_prod_index);
11208 	ESTAT_ADD(ring_status_update);
11209 	ESTAT_ADD(nic_irqs);
11210 	ESTAT_ADD(nic_avoided_irqs);
11211 	ESTAT_ADD(nic_tx_threshold_hit);
11212 
11213 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11214 }
11215 
11216 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11217 {
11218 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11219 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11220 
11221 	stats->rx_packets = old_stats->rx_packets +
11222 		get_stat64(&hw_stats->rx_ucast_packets) +
11223 		get_stat64(&hw_stats->rx_mcast_packets) +
11224 		get_stat64(&hw_stats->rx_bcast_packets);
11225 
11226 	stats->tx_packets = old_stats->tx_packets +
11227 		get_stat64(&hw_stats->tx_ucast_packets) +
11228 		get_stat64(&hw_stats->tx_mcast_packets) +
11229 		get_stat64(&hw_stats->tx_bcast_packets);
11230 
11231 	stats->rx_bytes = old_stats->rx_bytes +
11232 		get_stat64(&hw_stats->rx_octets);
11233 	stats->tx_bytes = old_stats->tx_bytes +
11234 		get_stat64(&hw_stats->tx_octets);
11235 
11236 	stats->rx_errors = old_stats->rx_errors +
11237 		get_stat64(&hw_stats->rx_errors);
11238 	stats->tx_errors = old_stats->tx_errors +
11239 		get_stat64(&hw_stats->tx_errors) +
11240 		get_stat64(&hw_stats->tx_mac_errors) +
11241 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11242 		get_stat64(&hw_stats->tx_discards);
11243 
11244 	stats->multicast = old_stats->multicast +
11245 		get_stat64(&hw_stats->rx_mcast_packets);
11246 	stats->collisions = old_stats->collisions +
11247 		get_stat64(&hw_stats->tx_collisions);
11248 
11249 	stats->rx_length_errors = old_stats->rx_length_errors +
11250 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11251 		get_stat64(&hw_stats->rx_undersize_packets);
11252 
11253 	stats->rx_over_errors = old_stats->rx_over_errors +
11254 		get_stat64(&hw_stats->rxbds_empty);
11255 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11256 		get_stat64(&hw_stats->rx_align_errors);
11257 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11258 		get_stat64(&hw_stats->tx_discards);
11259 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11260 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11261 
11262 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11263 		tg3_calc_crc_errors(tp);
11264 
11265 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11266 		get_stat64(&hw_stats->rx_discards);
11267 
11268 	stats->rx_dropped = tp->rx_dropped;
11269 	stats->tx_dropped = tp->tx_dropped;
11270 }
11271 
11272 static int tg3_get_regs_len(struct net_device *dev)
11273 {
11274 	return TG3_REG_BLK_SIZE;
11275 }
11276 
11277 static void tg3_get_regs(struct net_device *dev,
11278 		struct ethtool_regs *regs, void *_p)
11279 {
11280 	struct tg3 *tp = netdev_priv(dev);
11281 
11282 	regs->version = 0;
11283 
11284 	memset(_p, 0, TG3_REG_BLK_SIZE);
11285 
11286 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11287 		return;
11288 
11289 	tg3_full_lock(tp, 0);
11290 
11291 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11292 
11293 	tg3_full_unlock(tp);
11294 }
11295 
11296 static int tg3_get_eeprom_len(struct net_device *dev)
11297 {
11298 	struct tg3 *tp = netdev_priv(dev);
11299 
11300 	return tp->nvram_size;
11301 }
11302 
11303 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11304 {
11305 	struct tg3 *tp = netdev_priv(dev);
11306 	int ret;
11307 	u8  *pd;
11308 	u32 i, offset, len, b_offset, b_count;
11309 	__be32 val;
11310 
11311 	if (tg3_flag(tp, NO_NVRAM))
11312 		return -EINVAL;
11313 
11314 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11315 		return -EAGAIN;
11316 
11317 	offset = eeprom->offset;
11318 	len = eeprom->len;
11319 	eeprom->len = 0;
11320 
11321 	eeprom->magic = TG3_EEPROM_MAGIC;
11322 
11323 	if (offset & 3) {
11324 		/* adjustments to start on required 4 byte boundary */
11325 		b_offset = offset & 3;
11326 		b_count = 4 - b_offset;
11327 		if (b_count > len) {
11328 			/* i.e. offset=1 len=2 */
11329 			b_count = len;
11330 		}
11331 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11332 		if (ret)
11333 			return ret;
11334 		memcpy(data, ((char *)&val) + b_offset, b_count);
11335 		len -= b_count;
11336 		offset += b_count;
11337 		eeprom->len += b_count;
11338 	}
11339 
11340 	/* read bytes up to the last 4 byte boundary */
11341 	pd = &data[eeprom->len];
11342 	for (i = 0; i < (len - (len & 3)); i += 4) {
11343 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11344 		if (ret) {
11345 			eeprom->len += i;
11346 			return ret;
11347 		}
11348 		memcpy(pd + i, &val, 4);
11349 	}
11350 	eeprom->len += i;
11351 
11352 	if (len & 3) {
11353 		/* read last bytes not ending on 4 byte boundary */
11354 		pd = &data[eeprom->len];
11355 		b_count = len & 3;
11356 		b_offset = offset + len - b_count;
11357 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11358 		if (ret)
11359 			return ret;
11360 		memcpy(pd, &val, b_count);
11361 		eeprom->len += b_count;
11362 	}
11363 	return 0;
11364 }
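
/* Worked example (hypothetical values): a request with offset = 1 and
 * len = 10 is served in three phases above: the word at 0 is read and
 * bytes 1-3 copied, the aligned word at 4 is copied whole, and the word
 * at 8 is read with bytes 8-10 copied, advancing eeprom->len by
 * 3 + 4 + 3 = 10.
 */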
11365 
11366 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11367 {
11368 	struct tg3 *tp = netdev_priv(dev);
11369 	int ret;
11370 	u32 offset, len, b_offset, odd_len;
11371 	u8 *buf;
11372 	__be32 start, end;
11373 
11374 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11375 		return -EAGAIN;
11376 
11377 	if (tg3_flag(tp, NO_NVRAM) ||
11378 	    eeprom->magic != TG3_EEPROM_MAGIC)
11379 		return -EINVAL;
11380 
11381 	offset = eeprom->offset;
11382 	len = eeprom->len;
11383 
11384 	if ((b_offset = (offset & 3))) {
11385 		/* adjustments to start on required 4 byte boundary */
11386 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11387 		if (ret)
11388 			return ret;
11389 		len += b_offset;
11390 		offset &= ~3;
11391 		if (len < 4)
11392 			len = 4;
11393 	}
11394 
11395 	odd_len = 0;
11396 	if (len & 3) {
11397 		/* adjustments to end on required 4 byte boundary */
11398 		odd_len = 1;
11399 		len = (len + 3) & ~3;
11400 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11401 		if (ret)
11402 			return ret;
11403 	}
11404 
11405 	buf = data;
11406 	if (b_offset || odd_len) {
11407 		buf = kmalloc(len, GFP_KERNEL);
11408 		if (!buf)
11409 			return -ENOMEM;
11410 		if (b_offset)
11411 			memcpy(buf, &start, 4);
11412 		if (odd_len)
11413 			memcpy(buf+len-4, &end, 4);
11414 		memcpy(buf + b_offset, data, eeprom->len);
11415 	}
11416 
11417 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11418 
11419 	if (buf != data)
11420 		kfree(buf);
11421 
11422 	return ret;
11423 }
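
/* The write path mirrors the read path above: a request that is not
 * 4-byte aligned at either end is widened to whole words, the bordering
 * words are read back first so the untouched bytes are preserved, and
 * the padded buffer is then written with a single
 * tg3_nvram_write_block() call.
 */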
11424 
11425 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11426 {
11427 	struct tg3 *tp = netdev_priv(dev);
11428 
11429 	if (tg3_flag(tp, USE_PHYLIB)) {
11430 		struct phy_device *phydev;
11431 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11432 			return -EAGAIN;
11433 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11434 		return phy_ethtool_gset(phydev, cmd);
11435 	}
11436 
11437 	cmd->supported = (SUPPORTED_Autoneg);
11438 
11439 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11440 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11441 				   SUPPORTED_1000baseT_Full);
11442 
11443 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11444 		cmd->supported |= (SUPPORTED_100baseT_Half |
11445 				  SUPPORTED_100baseT_Full |
11446 				  SUPPORTED_10baseT_Half |
11447 				  SUPPORTED_10baseT_Full |
11448 				  SUPPORTED_TP);
11449 		cmd->port = PORT_TP;
11450 	} else {
11451 		cmd->supported |= SUPPORTED_FIBRE;
11452 		cmd->port = PORT_FIBRE;
11453 	}
11454 
11455 	cmd->advertising = tp->link_config.advertising;
11456 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11457 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11458 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11459 				cmd->advertising |= ADVERTISED_Pause;
11460 			} else {
11461 				cmd->advertising |= ADVERTISED_Pause |
11462 						    ADVERTISED_Asym_Pause;
11463 			}
11464 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11465 			cmd->advertising |= ADVERTISED_Asym_Pause;
11466 		}
11467 	}
11468 	if (netif_running(dev) && tp->link_up) {
11469 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11470 		cmd->duplex = tp->link_config.active_duplex;
11471 		cmd->lp_advertising = tp->link_config.rmt_adv;
11472 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11473 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11474 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11475 			else
11476 				cmd->eth_tp_mdix = ETH_TP_MDI;
11477 		}
11478 	} else {
11479 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11480 		cmd->duplex = DUPLEX_UNKNOWN;
11481 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11482 	}
11483 	cmd->phy_address = tp->phy_addr;
11484 	cmd->transceiver = XCVR_INTERNAL;
11485 	cmd->autoneg = tp->link_config.autoneg;
11486 	cmd->maxtxpkt = 0;
11487 	cmd->maxrxpkt = 0;
11488 	return 0;
11489 }
11490 
11491 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11492 {
11493 	struct tg3 *tp = netdev_priv(dev);
11494 	u32 speed = ethtool_cmd_speed(cmd);
11495 
11496 	if (tg3_flag(tp, USE_PHYLIB)) {
11497 		struct phy_device *phydev;
11498 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11499 			return -EAGAIN;
11500 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11501 		return phy_ethtool_sset(phydev, cmd);
11502 	}
11503 
11504 	if (cmd->autoneg != AUTONEG_ENABLE &&
11505 	    cmd->autoneg != AUTONEG_DISABLE)
11506 		return -EINVAL;
11507 
11508 	if (cmd->autoneg == AUTONEG_DISABLE &&
11509 	    cmd->duplex != DUPLEX_FULL &&
11510 	    cmd->duplex != DUPLEX_HALF)
11511 		return -EINVAL;
11512 
11513 	if (cmd->autoneg == AUTONEG_ENABLE) {
11514 		u32 mask = ADVERTISED_Autoneg |
11515 			   ADVERTISED_Pause |
11516 			   ADVERTISED_Asym_Pause;
11517 
11518 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11519 			mask |= ADVERTISED_1000baseT_Half |
11520 				ADVERTISED_1000baseT_Full;
11521 
11522 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11523 			mask |= ADVERTISED_100baseT_Half |
11524 				ADVERTISED_100baseT_Full |
11525 				ADVERTISED_10baseT_Half |
11526 				ADVERTISED_10baseT_Full |
11527 				ADVERTISED_TP;
11528 		else
11529 			mask |= ADVERTISED_FIBRE;
11530 
11531 		if (cmd->advertising & ~mask)
11532 			return -EINVAL;
11533 
11534 		mask &= (ADVERTISED_1000baseT_Half |
11535 			 ADVERTISED_1000baseT_Full |
11536 			 ADVERTISED_100baseT_Half |
11537 			 ADVERTISED_100baseT_Full |
11538 			 ADVERTISED_10baseT_Half |
11539 			 ADVERTISED_10baseT_Full);
11540 
11541 		cmd->advertising &= mask;
11542 	} else {
11543 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11544 			if (speed != SPEED_1000)
11545 				return -EINVAL;
11546 
11547 			if (cmd->duplex != DUPLEX_FULL)
11548 				return -EINVAL;
11549 		} else {
11550 			if (speed != SPEED_100 &&
11551 			    speed != SPEED_10)
11552 				return -EINVAL;
11553 		}
11554 	}
11555 
11556 	tg3_full_lock(tp, 0);
11557 
11558 	tp->link_config.autoneg = cmd->autoneg;
11559 	if (cmd->autoneg == AUTONEG_ENABLE) {
11560 		tp->link_config.advertising = (cmd->advertising |
11561 					      ADVERTISED_Autoneg);
11562 		tp->link_config.speed = SPEED_UNKNOWN;
11563 		tp->link_config.duplex = DUPLEX_UNKNOWN;
11564 	} else {
11565 		tp->link_config.advertising = 0;
11566 		tp->link_config.speed = speed;
11567 		tp->link_config.duplex = cmd->duplex;
11568 	}
11569 
11570 	if (netif_running(dev))
11571 		tg3_setup_phy(tp, 1);
11572 
11573 	tg3_full_unlock(tp);
11574 
11575 	return 0;
11576 }
11577 
11578 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11579 {
11580 	struct tg3 *tp = netdev_priv(dev);
11581 
11582 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11583 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11584 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11585 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11586 }
11587 
11588 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11589 {
11590 	struct tg3 *tp = netdev_priv(dev);
11591 
11592 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11593 		wol->supported = WAKE_MAGIC;
11594 	else
11595 		wol->supported = 0;
11596 	wol->wolopts = 0;
11597 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11598 		wol->wolopts = WAKE_MAGIC;
11599 	memset(&wol->sopass, 0, sizeof(wol->sopass));
11600 }
11601 
11602 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11603 {
11604 	struct tg3 *tp = netdev_priv(dev);
11605 	struct device *dp = &tp->pdev->dev;
11606 
11607 	if (wol->wolopts & ~WAKE_MAGIC)
11608 		return -EINVAL;
11609 	if ((wol->wolopts & WAKE_MAGIC) &&
11610 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11611 		return -EINVAL;
11612 
11613 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11614 
11615 	spin_lock_bh(&tp->lock);
11616 	if (device_may_wakeup(dp))
11617 		tg3_flag_set(tp, WOL_ENABLE);
11618 	else
11619 		tg3_flag_clear(tp, WOL_ENABLE);
11620 	spin_unlock_bh(&tp->lock);
11621 
11622 	return 0;
11623 }
11624 
11625 static u32 tg3_get_msglevel(struct net_device *dev)
11626 {
11627 	struct tg3 *tp = netdev_priv(dev);
11628 	return tp->msg_enable;
11629 }
11630 
11631 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11632 {
11633 	struct tg3 *tp = netdev_priv(dev);
11634 	tp->msg_enable = value;
11635 }
11636 
11637 static int tg3_nway_reset(struct net_device *dev)
11638 {
11639 	struct tg3 *tp = netdev_priv(dev);
11640 	int r;
11641 
11642 	if (!netif_running(dev))
11643 		return -EAGAIN;
11644 
11645 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11646 		return -EINVAL;
11647 
11648 	if (tg3_flag(tp, USE_PHYLIB)) {
11649 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11650 			return -EAGAIN;
11651 		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11652 	} else {
11653 		u32 bmcr;
11654 
11655 		spin_lock_bh(&tp->lock);
11656 		r = -EINVAL;
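		/* The back-to-back BMCR reads appear intentional: the first
		 * read's result is discarded and only the second, checked
		 * read is used.
		 */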
11657 		tg3_readphy(tp, MII_BMCR, &bmcr);
11658 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11659 		    ((bmcr & BMCR_ANENABLE) ||
11660 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11661 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11662 						   BMCR_ANENABLE);
11663 			r = 0;
11664 		}
11665 		spin_unlock_bh(&tp->lock);
11666 	}
11667 
11668 	return r;
11669 }
11670 
11671 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11672 {
11673 	struct tg3 *tp = netdev_priv(dev);
11674 
11675 	ering->rx_max_pending = tp->rx_std_ring_mask;
11676 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
11677 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11678 	else
11679 		ering->rx_jumbo_max_pending = 0;
11680 
11681 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11682 
11683 	ering->rx_pending = tp->rx_pending;
11684 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
11685 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11686 	else
11687 		ering->rx_jumbo_pending = 0;
11688 
11689 	ering->tx_pending = tp->napi[0].tx_pending;
11690 }
11691 
11692 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11693 {
11694 	struct tg3 *tp = netdev_priv(dev);
11695 	int i, irq_sync = 0, err = 0;
11696 
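	/* Sanity-check the requested sizes.  The tx ring must be strictly
	 * larger than MAX_SKB_FRAGS so a single maximally fragmented skb
	 * always fits; on TSO_BUG hardware, where the driver itself may
	 * have to segment oversized TSO packets, 3x headroom is demanded.
	 */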
11697 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11698 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11699 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11700 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
11701 	    (tg3_flag(tp, TSO_BUG) &&
11702 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11703 		return -EINVAL;
11704 
11705 	if (netif_running(dev)) {
11706 		tg3_phy_stop(tp);
11707 		tg3_netif_stop(tp);
11708 		irq_sync = 1;
11709 	}
11710 
11711 	tg3_full_lock(tp, irq_sync);
11712 
11713 	tp->rx_pending = ering->rx_pending;
11714 
11715 	if (tg3_flag(tp, MAX_RXPEND_64) &&
11716 	    tp->rx_pending > 63)
11717 		tp->rx_pending = 63;
11718 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11719 
11720 	for (i = 0; i < tp->irq_max; i++)
11721 		tp->napi[i].tx_pending = ering->tx_pending;
11722 
11723 	if (netif_running(dev)) {
11724 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11725 		err = tg3_restart_hw(tp, 1);
11726 		if (!err)
11727 			tg3_netif_start(tp);
11728 	}
11729 
11730 	tg3_full_unlock(tp);
11731 
11732 	if (irq_sync && !err)
11733 		tg3_phy_start(tp);
11734 
11735 	return err;
11736 }
11737 
11738 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11739 {
11740 	struct tg3 *tp = netdev_priv(dev);
11741 
11742 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11743 
11744 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11745 		epause->rx_pause = 1;
11746 	else
11747 		epause->rx_pause = 0;
11748 
11749 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11750 		epause->tx_pause = 1;
11751 	else
11752 		epause->tx_pause = 0;
11753 }
11754 
11755 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11756 {
11757 	struct tg3 *tp = netdev_priv(dev);
11758 	int err = 0;
11759 
11760 	if (tg3_flag(tp, USE_PHYLIB)) {
11761 		u32 newadv;
11762 		struct phy_device *phydev;
11763 
11764 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11765 
11766 		if (!(phydev->supported & SUPPORTED_Pause) ||
11767 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11768 		     (epause->rx_pause != epause->tx_pause)))
11769 			return -EINVAL;
11770 
11771 		tp->link_config.flowctrl = 0;
11772 		if (epause->rx_pause) {
11773 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
11774 
11775 			if (epause->tx_pause) {
11776 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
11777 				newadv = ADVERTISED_Pause;
11778 			} else
11779 				newadv = ADVERTISED_Pause |
11780 					 ADVERTISED_Asym_Pause;
11781 		} else if (epause->tx_pause) {
11782 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
11783 			newadv = ADVERTISED_Asym_Pause;
11784 		} else
11785 			newadv = 0;
11786 
11787 		if (epause->autoneg)
11788 			tg3_flag_set(tp, PAUSE_AUTONEG);
11789 		else
11790 			tg3_flag_clear(tp, PAUSE_AUTONEG);
11791 
11792 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11793 			u32 oldadv = phydev->advertising &
11794 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11795 			if (oldadv != newadv) {
11796 				phydev->advertising &=
11797 					~(ADVERTISED_Pause |
11798 					  ADVERTISED_Asym_Pause);
11799 				phydev->advertising |= newadv;
11800 				if (phydev->autoneg) {
11801 					/*
11802 					 * Always renegotiate the link to
11803 					 * inform our link partner of our
11804 					 * flow control settings, even if the
11805 					 * flow control is forced.  Let
11806 					 * tg3_adjust_link() do the final
11807 					 * flow control setup.
11808 					 */
11809 					return phy_start_aneg(phydev);
11810 				}
11811 			}
11812 
11813 			if (!epause->autoneg)
11814 				tg3_setup_flow_control(tp, 0, 0);
11815 		} else {
11816 			tp->link_config.advertising &=
11817 					~(ADVERTISED_Pause |
11818 					  ADVERTISED_Asym_Pause);
11819 			tp->link_config.advertising |= newadv;
11820 		}
11821 	} else {
11822 		int irq_sync = 0;
11823 
11824 		if (netif_running(dev)) {
11825 			tg3_netif_stop(tp);
11826 			irq_sync = 1;
11827 		}
11828 
11829 		tg3_full_lock(tp, irq_sync);
11830 
11831 		if (epause->autoneg)
11832 			tg3_flag_set(tp, PAUSE_AUTONEG);
11833 		else
11834 			tg3_flag_clear(tp, PAUSE_AUTONEG);
11835 		if (epause->rx_pause)
11836 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
11837 		else
11838 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11839 		if (epause->tx_pause)
11840 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
11841 		else
11842 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11843 
11844 		if (netif_running(dev)) {
11845 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11846 			err = tg3_restart_hw(tp, 1);
11847 			if (!err)
11848 				tg3_netif_start(tp);
11849 		}
11850 
11851 		tg3_full_unlock(tp);
11852 	}
11853 
11854 	return err;
11855 }
11856 
11857 static int tg3_get_sset_count(struct net_device *dev, int sset)
11858 {
11859 	switch (sset) {
11860 	case ETH_SS_TEST:
11861 		return TG3_NUM_TEST;
11862 	case ETH_SS_STATS:
11863 		return TG3_NUM_STATS;
11864 	default:
11865 		return -EOPNOTSUPP;
11866 	}
11867 }
11868 
11869 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11870 			 u32 *rules __always_unused)
11871 {
11872 	struct tg3 *tp = netdev_priv(dev);
11873 
11874 	if (!tg3_flag(tp, SUPPORT_MSIX))
11875 		return -EOPNOTSUPP;
11876 
11877 	switch (info->cmd) {
11878 	case ETHTOOL_GRXRINGS:
11879 		if (netif_running(tp->dev)) {
11880 			info->data = tp->rxq_cnt;
11881 		} else {
11882 			info->data = num_online_cpus();
11883 			if (info->data > TG3_RSS_MAX_NUM_QS)
11884 				info->data = TG3_RSS_MAX_NUM_QS;
11885 		}
11886 
11887 		/* The first interrupt vector handles only
11888 		 * link interrupts, so it is excluded here.
11889 		 */
11890 		info->data -= 1;
11891 		return 0;
11892 
11893 	default:
11894 		return -EOPNOTSUPP;
11895 	}
11896 }
11897 
11898 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11899 {
11900 	u32 size = 0;
11901 	struct tg3 *tp = netdev_priv(dev);
11902 
11903 	if (tg3_flag(tp, SUPPORT_MSIX))
11904 		size = TG3_RSS_INDIR_TBL_SIZE;
11905 
11906 	return size;
11907 }
11908 
11909 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11910 {
11911 	struct tg3 *tp = netdev_priv(dev);
11912 	int i;
11913 
11914 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11915 		indir[i] = tp->rss_ind_tbl[i];
11916 
11917 	return 0;
11918 }
11919 
11920 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11921 {
11922 	struct tg3 *tp = netdev_priv(dev);
11923 	size_t i;
11924 
11925 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11926 		tp->rss_ind_tbl[i] = indir[i];
11927 
11928 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11929 		return 0;
11930 
11931 	/* It is legal to write the indirection
11932 	 * table while the device is running.
11933 	 */
11934 	tg3_full_lock(tp, 0);
11935 	tg3_rss_write_indir_tbl(tp);
11936 	tg3_full_unlock(tp);
11937 
11938 	return 0;
11939 }
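
/* From userspace this path is reached via e.g. "ethtool -X eth0 equal 4"
 * (device name hypothetical), which rewrites the RSS indirection table;
 * as noted above, the rewrite is legal even while traffic is flowing.
 */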
11940 
11941 static void tg3_get_channels(struct net_device *dev,
11942 			     struct ethtool_channels *channel)
11943 {
11944 	struct tg3 *tp = netdev_priv(dev);
11945 	u32 deflt_qs = netif_get_num_default_rss_queues();
11946 
11947 	channel->max_rx = tp->rxq_max;
11948 	channel->max_tx = tp->txq_max;
11949 
11950 	if (netif_running(dev)) {
11951 		channel->rx_count = tp->rxq_cnt;
11952 		channel->tx_count = tp->txq_cnt;
11953 	} else {
11954 		if (tp->rxq_req)
11955 			channel->rx_count = tp->rxq_req;
11956 		else
11957 			channel->rx_count = min(deflt_qs, tp->rxq_max);
11958 
11959 		if (tp->txq_req)
11960 			channel->tx_count = tp->txq_req;
11961 		else
11962 			channel->tx_count = min(deflt_qs, tp->txq_max);
11963 	}
11964 }
11965 
11966 static int tg3_set_channels(struct net_device *dev,
11967 			    struct ethtool_channels *channel)
11968 {
11969 	struct tg3 *tp = netdev_priv(dev);
11970 
11971 	if (!tg3_flag(tp, SUPPORT_MSIX))
11972 		return -EOPNOTSUPP;
11973 
11974 	if (channel->rx_count > tp->rxq_max ||
11975 	    channel->tx_count > tp->txq_max)
11976 		return -EINVAL;
11977 
11978 	tp->rxq_req = channel->rx_count;
11979 	tp->txq_req = channel->tx_count;
11980 
11981 	if (!netif_running(dev))
11982 		return 0;
11983 
11984 	tg3_stop(tp);
11985 
11986 	tg3_carrier_off(tp);
11987 
11988 	tg3_start(tp, true, false, false);
11989 
11990 	return 0;
11991 }
11992 
11993 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11994 {
11995 	switch (stringset) {
11996 	case ETH_SS_STATS:
11997 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11998 		break;
11999 	case ETH_SS_TEST:
12000 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12001 		break;
12002 	default:
12003 		WARN_ON(1);	/* we need a WARN() */
12004 		break;
12005 	}
12006 }
12007 
12008 static int tg3_set_phys_id(struct net_device *dev,
12009 			    enum ethtool_phys_id_state state)
12010 {
12011 	struct tg3 *tp = netdev_priv(dev);
12012 
12013 	if (!netif_running(tp->dev))
12014 		return -EAGAIN;
12015 
12016 	switch (state) {
12017 	case ETHTOOL_ID_ACTIVE:
12018 		return 1;	/* cycle on/off once per second */
12019 
12020 	case ETHTOOL_ID_ON:
12021 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12022 		     LED_CTRL_1000MBPS_ON |
12023 		     LED_CTRL_100MBPS_ON |
12024 		     LED_CTRL_10MBPS_ON |
12025 		     LED_CTRL_TRAFFIC_OVERRIDE |
12026 		     LED_CTRL_TRAFFIC_BLINK |
12027 		     LED_CTRL_TRAFFIC_LED);
12028 		break;
12029 
12030 	case ETHTOOL_ID_OFF:
12031 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12032 		     LED_CTRL_TRAFFIC_OVERRIDE);
12033 		break;
12034 
12035 	case ETHTOOL_ID_INACTIVE:
12036 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12037 		break;
12038 	}
12039 
12040 	return 0;
12041 }
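
/* Triggered via e.g. "ethtool -p eth0" (device name hypothetical).
 * Returning 1 from the ETHTOOL_ID_ACTIVE case asks the ethtool core to
 * drive the blinking itself, calling back with ETHTOOL_ID_ON and
 * ETHTOOL_ID_OFF to cycle the LED once per second until the identify
 * operation is interrupted.
 */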
12042 
12043 static void tg3_get_ethtool_stats(struct net_device *dev,
12044 				   struct ethtool_stats *estats, u64 *tmp_stats)
12045 {
12046 	struct tg3 *tp = netdev_priv(dev);
12047 
12048 	if (tp->hw_stats)
12049 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12050 	else
12051 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12052 }
12053 
12054 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12055 {
12056 	int i;
12057 	__be32 *buf;
12058 	u32 offset = 0, len = 0;
12059 	u32 magic, val;
12060 
12061 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12062 		return NULL;
12063 
12064 	if (magic == TG3_EEPROM_MAGIC) {
12065 		for (offset = TG3_NVM_DIR_START;
12066 		     offset < TG3_NVM_DIR_END;
12067 		     offset += TG3_NVM_DIRENT_SIZE) {
12068 			if (tg3_nvram_read(tp, offset, &val))
12069 				return NULL;
12070 
12071 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12072 			    TG3_NVM_DIRTYPE_EXTVPD)
12073 				break;
12074 		}
12075 
12076 		if (offset != TG3_NVM_DIR_END) {
12077 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12078 			if (tg3_nvram_read(tp, offset + 4, &offset))
12079 				return NULL;
12080 
12081 			offset = tg3_nvram_logical_addr(tp, offset);
12082 		}
12083 	}
12084 
12085 	if (!offset || !len) {
12086 		offset = TG3_NVM_VPD_OFF;
12087 		len = TG3_NVM_VPD_LEN;
12088 	}
12089 
12090 	buf = kmalloc(len, GFP_KERNEL);
12091 	if (buf == NULL)
12092 		return NULL;
12093 
12094 	if (magic == TG3_EEPROM_MAGIC) {
12095 		for (i = 0; i < len; i += 4) {
12096 			/* The data is in little-endian format in NVRAM.
12097 			 * Use the big-endian read routines to preserve
12098 			 * the byte order as it exists in NVRAM.
12099 			 */
12100 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12101 				goto error;
12102 		}
12103 	} else {
12104 		u8 *ptr;
12105 		ssize_t cnt;
12106 		unsigned int pos = 0;
12107 
12108 		ptr = (u8 *)&buf[0];
12109 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12110 			cnt = pci_read_vpd(tp->pdev, pos,
12111 					   len - pos, ptr);
12112 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12113 				cnt = 0;
12114 			else if (cnt < 0)
12115 				goto error;
12116 		}
12117 		if (pos != len)
12118 			goto error;
12119 	}
12120 
12121 	*vpdlen = len;
12122 
12123 	return buf;
12124 
12125 error:
12126 	kfree(buf);
12127 	return NULL;
12128 }
12129 
12130 #define NVRAM_TEST_SIZE 0x100
12131 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12132 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12133 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12134 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12135 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12136 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12137 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12138 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12139 
12140 static int tg3_test_nvram(struct tg3 *tp)
12141 {
12142 	u32 csum, magic, len;
12143 	__be32 *buf;
12144 	int i, j, k, err = 0, size;
12145 
12146 	if (tg3_flag(tp, NO_NVRAM))
12147 		return 0;
12148 
12149 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12150 		return -EIO;
12151 
12152 	if (magic == TG3_EEPROM_MAGIC)
12153 		size = NVRAM_TEST_SIZE;
12154 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12155 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12156 		    TG3_EEPROM_SB_FORMAT_1) {
12157 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12158 			case TG3_EEPROM_SB_REVISION_0:
12159 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12160 				break;
12161 			case TG3_EEPROM_SB_REVISION_2:
12162 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12163 				break;
12164 			case TG3_EEPROM_SB_REVISION_3:
12165 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12166 				break;
12167 			case TG3_EEPROM_SB_REVISION_4:
12168 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12169 				break;
12170 			case TG3_EEPROM_SB_REVISION_5:
12171 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12172 				break;
12173 			case TG3_EEPROM_SB_REVISION_6:
12174 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12175 				break;
12176 			default:
12177 				return -EIO;
12178 			}
12179 		} else
12180 			return 0;
12181 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12182 		size = NVRAM_SELFBOOT_HW_SIZE;
12183 	else
12184 		return -EIO;
12185 
12186 	buf = kmalloc(size, GFP_KERNEL);
12187 	if (buf == NULL)
12188 		return -ENOMEM;
12189 
12190 	err = -EIO;
12191 	for (i = 0, j = 0; i < size; i += 4, j++) {
12192 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12193 		if (err)
12194 			break;
12195 	}
12196 	if (i < size)
12197 		goto out;
12198 
12199 	/* Selfboot format */
12200 	magic = be32_to_cpu(buf[0]);
12201 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12202 	    TG3_EEPROM_MAGIC_FW) {
12203 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12204 
12205 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12206 		    TG3_EEPROM_SB_REVISION_2) {
12207 			/* For rev 2, the csum doesn't include the MBA. */
12208 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12209 				csum8 += buf8[i];
12210 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12211 				csum8 += buf8[i];
12212 		} else {
12213 			for (i = 0; i < size; i++)
12214 				csum8 += buf8[i];
12215 		}
12216 
12217 		if (csum8 == 0) {
12218 			err = 0;
12219 			goto out;
12220 		}
12221 
12222 		err = -EIO;
12223 		goto out;
12224 	}
12225 
12226 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12227 	    TG3_EEPROM_MAGIC_HW) {
12228 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12229 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12230 		u8 *buf8 = (u8 *) buf;
12231 
12232 		/* Separate the parity bits and the data bytes.  */
12233 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12234 			if ((i == 0) || (i == 8)) {
12235 				int l;
12236 				u8 msk;
12237 
12238 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12239 					parity[k++] = buf8[i] & msk;
12240 				i++;
12241 			} else if (i == 16) {
12242 				int l;
12243 				u8 msk;
12244 
12245 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12246 					parity[k++] = buf8[i] & msk;
12247 				i++;
12248 
12249 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12250 					parity[k++] = buf8[i] & msk;
12251 				i++;
12252 			}
12253 			data[j++] = buf8[i];
12254 		}
12255 
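		/* Odd parity check: a data byte together with its stored
		 * parity bit must contain an odd number of set bits, so
		 * the parity bit must be set exactly when
		 * hweight8(data[i]) is even.
		 */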
12256 		err = -EIO;
12257 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12258 			u8 hw8 = hweight8(data[i]);
12259 
12260 			if ((hw8 & 0x1) && parity[i])
12261 				goto out;
12262 			else if (!(hw8 & 0x1) && !parity[i])
12263 				goto out;
12264 		}
12265 		err = 0;
12266 		goto out;
12267 	}
12268 
12269 	err = -EIO;
12270 
12271 	/* Bootstrap checksum at offset 0x10 */
12272 	csum = calc_crc((unsigned char *) buf, 0x10);
12273 	if (csum != le32_to_cpu(buf[0x10/4]))
12274 		goto out;
12275 
12276 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12277 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12278 	if (csum != le32_to_cpu(buf[0xfc/4]))
12279 		goto out;
12280 
12281 	kfree(buf);
12282 
12283 	buf = tg3_vpd_readblock(tp, &len);
12284 	if (!buf)
12285 		return -ENOMEM;
12286 
12287 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12288 	if (i > 0) {
12289 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12290 		if (j < 0)
12291 			goto out;
12292 
12293 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12294 			goto out;
12295 
12296 		i += PCI_VPD_LRDT_TAG_SIZE;
12297 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12298 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12299 		if (j > 0) {
12300 			u8 csum8 = 0;
12301 
12302 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12303 
12304 			for (i = 0; i <= j; i++)
12305 				csum8 += ((u8 *)buf)[i];
12306 
12307 			if (csum8)
12308 				goto out;
12309 		}
12310 	}
12311 
12312 	err = 0;
12313 
12314 out:
12315 	kfree(buf);
12316 	return err;
12317 }
12318 
12319 #define TG3_SERDES_TIMEOUT_SEC	2
12320 #define TG3_COPPER_TIMEOUT_SEC	6
12321 
12322 static int tg3_test_link(struct tg3 *tp)
12323 {
12324 	int i, max;
12325 
12326 	if (!netif_running(tp->dev))
12327 		return -ENODEV;
12328 
12329 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12330 		max = TG3_SERDES_TIMEOUT_SEC;
12331 	else
12332 		max = TG3_COPPER_TIMEOUT_SEC;
12333 
12334 	for (i = 0; i < max; i++) {
12335 		if (tp->link_up)
12336 			return 0;
12337 
12338 		if (msleep_interruptible(1000))
12339 			break;
12340 	}
12341 
12342 	return -EIO;
12343 }
12344 
12345 /* Only test the commonly used registers */
12346 static int tg3_test_registers(struct tg3 *tp)
12347 {
12348 	int i, is_5705, is_5750;
12349 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12350 	static struct {
12351 		u16 offset;
12352 		u16 flags;
12353 #define TG3_FL_5705	0x1
12354 #define TG3_FL_NOT_5705	0x2
12355 #define TG3_FL_NOT_5788	0x4
12356 #define TG3_FL_NOT_5750	0x8
12357 		u32 read_mask;
12358 		u32 write_mask;
12359 	} reg_tbl[] = {
12360 		/* MAC Control Registers */
12361 		{ MAC_MODE, TG3_FL_NOT_5705,
12362 			0x00000000, 0x00ef6f8c },
12363 		{ MAC_MODE, TG3_FL_5705,
12364 			0x00000000, 0x01ef6b8c },
12365 		{ MAC_STATUS, TG3_FL_NOT_5705,
12366 			0x03800107, 0x00000000 },
12367 		{ MAC_STATUS, TG3_FL_5705,
12368 			0x03800100, 0x00000000 },
12369 		{ MAC_ADDR_0_HIGH, 0x0000,
12370 			0x00000000, 0x0000ffff },
12371 		{ MAC_ADDR_0_LOW, 0x0000,
12372 			0x00000000, 0xffffffff },
12373 		{ MAC_RX_MTU_SIZE, 0x0000,
12374 			0x00000000, 0x0000ffff },
12375 		{ MAC_TX_MODE, 0x0000,
12376 			0x00000000, 0x00000070 },
12377 		{ MAC_TX_LENGTHS, 0x0000,
12378 			0x00000000, 0x00003fff },
12379 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12380 			0x00000000, 0x000007fc },
12381 		{ MAC_RX_MODE, TG3_FL_5705,
12382 			0x00000000, 0x000007dc },
12383 		{ MAC_HASH_REG_0, 0x0000,
12384 			0x00000000, 0xffffffff },
12385 		{ MAC_HASH_REG_1, 0x0000,
12386 			0x00000000, 0xffffffff },
12387 		{ MAC_HASH_REG_2, 0x0000,
12388 			0x00000000, 0xffffffff },
12389 		{ MAC_HASH_REG_3, 0x0000,
12390 			0x00000000, 0xffffffff },
12391 
12392 		/* Receive Data and Receive BD Initiator Control Registers. */
12393 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12394 			0x00000000, 0xffffffff },
12395 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12396 			0x00000000, 0xffffffff },
12397 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12398 			0x00000000, 0x00000003 },
12399 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12400 			0x00000000, 0xffffffff },
12401 		{ RCVDBDI_STD_BD+0, 0x0000,
12402 			0x00000000, 0xffffffff },
12403 		{ RCVDBDI_STD_BD+4, 0x0000,
12404 			0x00000000, 0xffffffff },
12405 		{ RCVDBDI_STD_BD+8, 0x0000,
12406 			0x00000000, 0xffff0002 },
12407 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12408 			0x00000000, 0xffffffff },
12409 
12410 		/* Receive BD Initiator Control Registers. */
12411 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12412 			0x00000000, 0xffffffff },
12413 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12414 			0x00000000, 0x000003ff },
12415 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12416 			0x00000000, 0xffffffff },
12417 
12418 		/* Host Coalescing Control Registers. */
12419 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12420 			0x00000000, 0x00000004 },
12421 		{ HOSTCC_MODE, TG3_FL_5705,
12422 			0x00000000, 0x000000f6 },
12423 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12424 			0x00000000, 0xffffffff },
12425 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12426 			0x00000000, 0x000003ff },
12427 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12428 			0x00000000, 0xffffffff },
12429 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12430 			0x00000000, 0x000003ff },
12431 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12432 			0x00000000, 0xffffffff },
12433 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12434 			0x00000000, 0x000000ff },
12435 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12436 			0x00000000, 0xffffffff },
12437 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12438 			0x00000000, 0x000000ff },
12439 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12440 			0x00000000, 0xffffffff },
12441 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12442 			0x00000000, 0xffffffff },
12443 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12444 			0x00000000, 0xffffffff },
12445 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12446 			0x00000000, 0x000000ff },
12447 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12448 			0x00000000, 0xffffffff },
12449 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12450 			0x00000000, 0x000000ff },
12451 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12452 			0x00000000, 0xffffffff },
12453 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12454 			0x00000000, 0xffffffff },
12455 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12456 			0x00000000, 0xffffffff },
12457 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12458 			0x00000000, 0xffffffff },
12459 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12460 			0x00000000, 0xffffffff },
12461 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12462 			0xffffffff, 0x00000000 },
12463 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12464 			0xffffffff, 0x00000000 },
12465 
12466 		/* Buffer Manager Control Registers. */
12467 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12468 			0x00000000, 0x007fff80 },
12469 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12470 			0x00000000, 0x007fffff },
12471 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12472 			0x00000000, 0x0000003f },
12473 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12474 			0x00000000, 0x000001ff },
12475 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12476 			0x00000000, 0x000001ff },
12477 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12478 			0xffffffff, 0x00000000 },
12479 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12480 			0xffffffff, 0x00000000 },
12481 
12482 		/* Mailbox Registers */
12483 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12484 			0x00000000, 0x000001ff },
12485 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12486 			0x00000000, 0x000001ff },
12487 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12488 			0x00000000, 0x000007ff },
12489 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12490 			0x00000000, 0x000001ff },
12491 
12492 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
12493 	};
12494 
12495 	is_5705 = is_5750 = 0;
12496 	if (tg3_flag(tp, 5705_PLUS)) {
12497 		is_5705 = 1;
12498 		if (tg3_flag(tp, 5750_PLUS))
12499 			is_5750 = 1;
12500 	}
12501 
12502 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12503 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12504 			continue;
12505 
12506 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12507 			continue;
12508 
12509 		if (tg3_flag(tp, IS_5788) &&
12510 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
12511 			continue;
12512 
12513 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12514 			continue;
12515 
12516 		offset = (u32) reg_tbl[i].offset;
12517 		read_mask = reg_tbl[i].read_mask;
12518 		write_mask = reg_tbl[i].write_mask;
12519 
12520 		/* Save the original register content */
12521 		save_val = tr32(offset);
12522 
12523 		/* Determine the read-only value. */
12524 		read_val = save_val & read_mask;
12525 
12526 		/* Write zero to the register, then make sure the read-only bits
12527 		 * are not changed and the read/write bits are all zeros.
12528 		 */
12529 		tw32(offset, 0);
12530 
12531 		val = tr32(offset);
12532 
12533 		/* Test the read-only and read/write bits. */
12534 		if (((val & read_mask) != read_val) || (val & write_mask))
12535 			goto out;
12536 
12537 		/* Write ones to all the bits defined by RdMask and WrMask, then
12538 		 * make sure the read-only bits are not changed and the
12539 		 * read/write bits are all ones.
12540 		 */
12541 		tw32(offset, read_mask | write_mask);
12542 
12543 		val = tr32(offset);
12544 
12545 		/* Test the read-only bits. */
12546 		if ((val & read_mask) != read_val)
12547 			goto out;
12548 
12549 		/* Test the read/write bits. */
12550 		if ((val & write_mask) != write_mask)
12551 			goto out;
12552 
12553 		tw32(offset, save_val);
12554 	}
12555 
12556 	return 0;
12557 
12558 out:
12559 	if (netif_msg_hw(tp))
12560 		netdev_err(tp->dev,
12561 			   "Register test failed at offset %x\n", offset);
12562 	tw32(offset, save_val);
12563 	return -EIO;
12564 }
12565 
12566 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12567 {
12568 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12569 	int i;
12570 	u32 j;
12571 
12572 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12573 		for (j = 0; j < len; j += 4) {
12574 			u32 val;
12575 
12576 			tg3_write_mem(tp, offset + j, test_pattern[i]);
12577 			tg3_read_mem(tp, offset + j, &val);
12578 			if (val != test_pattern[i])
12579 				return -EIO;
12580 		}
12581 	}
12582 	return 0;
12583 }
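
/* The three patterns are the classic stuck-at tests: all zeros, all
 * ones, and an alternating word (0xaa55a55a) that also catches bits
 * shorted to a neighbour.
 */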
12584 
12585 static int tg3_test_memory(struct tg3 *tp)
12586 {
12587 	static struct mem_entry {
12588 		u32 offset;
12589 		u32 len;
12590 	} mem_tbl_570x[] = {
12591 		{ 0x00000000, 0x00b50},
12592 		{ 0x00002000, 0x1c000},
12593 		{ 0xffffffff, 0x00000}
12594 	}, mem_tbl_5705[] = {
12595 		{ 0x00000100, 0x0000c},
12596 		{ 0x00000200, 0x00008},
12597 		{ 0x00004000, 0x00800},
12598 		{ 0x00006000, 0x01000},
12599 		{ 0x00008000, 0x02000},
12600 		{ 0x00010000, 0x0e000},
12601 		{ 0xffffffff, 0x00000}
12602 	}, mem_tbl_5755[] = {
12603 		{ 0x00000200, 0x00008},
12604 		{ 0x00004000, 0x00800},
12605 		{ 0x00006000, 0x00800},
12606 		{ 0x00008000, 0x02000},
12607 		{ 0x00010000, 0x0c000},
12608 		{ 0xffffffff, 0x00000}
12609 	}, mem_tbl_5906[] = {
12610 		{ 0x00000200, 0x00008},
12611 		{ 0x00004000, 0x00400},
12612 		{ 0x00006000, 0x00400},
12613 		{ 0x00008000, 0x01000},
12614 		{ 0x00010000, 0x01000},
12615 		{ 0xffffffff, 0x00000}
12616 	}, mem_tbl_5717[] = {
12617 		{ 0x00000200, 0x00008},
12618 		{ 0x00010000, 0x0a000},
12619 		{ 0x00020000, 0x13c00},
12620 		{ 0xffffffff, 0x00000}
12621 	}, mem_tbl_57765[] = {
12622 		{ 0x00000200, 0x00008},
12623 		{ 0x00004000, 0x00800},
12624 		{ 0x00006000, 0x09800},
12625 		{ 0x00010000, 0x0a000},
12626 		{ 0xffffffff, 0x00000}
12627 	};
12628 	struct mem_entry *mem_tbl;
12629 	int err = 0;
12630 	int i;
12631 
12632 	if (tg3_flag(tp, 5717_PLUS))
12633 		mem_tbl = mem_tbl_5717;
12634 	else if (tg3_flag(tp, 57765_CLASS) ||
12635 		 tg3_asic_rev(tp) == ASIC_REV_5762)
12636 		mem_tbl = mem_tbl_57765;
12637 	else if (tg3_flag(tp, 5755_PLUS))
12638 		mem_tbl = mem_tbl_5755;
12639 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
12640 		mem_tbl = mem_tbl_5906;
12641 	else if (tg3_flag(tp, 5705_PLUS))
12642 		mem_tbl = mem_tbl_5705;
12643 	else
12644 		mem_tbl = mem_tbl_570x;
12645 
12646 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12647 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12648 		if (err)
12649 			break;
12650 	}
12651 
12652 	return err;
12653 }
12654 
12655 #define TG3_TSO_MSS		500
12656 
12657 #define TG3_TSO_IP_HDR_LEN	20
12658 #define TG3_TSO_TCP_HDR_LEN	20
12659 #define TG3_TSO_TCP_OPT_LEN	12
12660 
12661 static const u8 tg3_tso_header[] = {
12662 0x08, 0x00,
12663 0x45, 0x00, 0x00, 0x00,
12664 0x00, 0x00, 0x40, 0x00,
12665 0x40, 0x06, 0x00, 0x00,
12666 0x0a, 0x00, 0x00, 0x01,
12667 0x0a, 0x00, 0x00, 0x02,
12668 0x0d, 0x00, 0xe0, 0x00,
12669 0x00, 0x00, 0x01, 0x00,
12670 0x00, 0x00, 0x02, 0x00,
12671 0x80, 0x10, 0x10, 0x00,
12672 0x14, 0x09, 0x00, 0x00,
12673 0x01, 0x01, 0x08, 0x0a,
12674 0x11, 0x11, 0x11, 0x11,
12675 0x11, 0x11, 0x11, 0x11,
12676 };
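
/* Decoded, the template above is: ethertype 0x0800 (IPv4); a 20-byte
 * IPv4 header (IHL 5, DF set, TTL 64, protocol 6/TCP, 10.0.0.1 ->
 * 10.0.0.2, tot_len filled in later); and a 32-byte TCP header (data
 * offset 8, ACK set, 12 bytes of options: two NOPs plus a 10-byte
 * timestamp option whose values are 0x11 filler).
 */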
12677 
12678 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12679 {
12680 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12681 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12682 	u32 budget;
12683 	struct sk_buff *skb;
12684 	u8 *tx_data, *rx_data;
12685 	dma_addr_t map;
12686 	int num_pkts, tx_len, rx_len, i, err;
12687 	struct tg3_rx_buffer_desc *desc;
12688 	struct tg3_napi *tnapi, *rnapi;
12689 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12690 
12691 	tnapi = &tp->napi[0];
12692 	rnapi = &tp->napi[0];
12693 	if (tp->irq_cnt > 1) {
12694 		if (tg3_flag(tp, ENABLE_RSS))
12695 			rnapi = &tp->napi[1];
12696 		if (tg3_flag(tp, ENABLE_TSS))
12697 			tnapi = &tp->napi[1];
12698 	}
12699 	coal_now = tnapi->coal_now | rnapi->coal_now;
12700 
12701 	err = -EIO;
12702 
12703 	tx_len = pktsz;
12704 	skb = netdev_alloc_skb(tp->dev, tx_len);
12705 	if (!skb)
12706 		return -ENOMEM;
12707 
12708 	tx_data = skb_put(skb, tx_len);
12709 	memcpy(tx_data, tp->dev->dev_addr, 6);
12710 	memset(tx_data + 6, 0x0, 8);
12711 
12712 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12713 
12714 	if (tso_loopback) {
12715 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12716 
12717 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12718 			      TG3_TSO_TCP_OPT_LEN;
12719 
12720 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12721 		       sizeof(tg3_tso_header));
12722 		mss = TG3_TSO_MSS;
12723 
12724 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12725 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12726 
12727 		/* Set the total length field in the IP header */
12728 		iph->tot_len = htons((u16)(mss + hdr_len));
12729 
12730 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
12731 			      TXD_FLAG_CPU_POST_DMA);
12732 
12733 		if (tg3_flag(tp, HW_TSO_1) ||
12734 		    tg3_flag(tp, HW_TSO_2) ||
12735 		    tg3_flag(tp, HW_TSO_3)) {
12736 			struct tcphdr *th;
12737 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12738 			th = (struct tcphdr *)&tx_data[val];
12739 			th->check = 0;
12740 		} else
12741 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
12742 
12743 		if (tg3_flag(tp, HW_TSO_3)) {
12744 			mss |= (hdr_len & 0xc) << 12;
12745 			if (hdr_len & 0x10)
12746 				base_flags |= 0x00000010;
12747 			base_flags |= (hdr_len & 0x3e0) << 5;
12748 		} else if (tg3_flag(tp, HW_TSO_2))
12749 			mss |= hdr_len << 9;
12750 		else if (tg3_flag(tp, HW_TSO_1) ||
12751 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
12752 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12753 		} else {
12754 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12755 		}
12756 
12757 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12758 	} else {
12759 		num_pkts = 1;
12760 		data_off = ETH_HLEN;
12761 
12762 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12763 		    tx_len > VLAN_ETH_FRAME_LEN)
12764 			base_flags |= TXD_FLAG_JMB_PKT;
12765 	}
12766 
12767 	for (i = data_off; i < tx_len; i++)
12768 		tx_data[i] = (u8) (i & 0xff);
12769 
12770 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12771 	if (pci_dma_mapping_error(tp->pdev, map)) {
12772 		dev_kfree_skb(skb);
12773 		return -EIO;
12774 	}
12775 
12776 	val = tnapi->tx_prod;
12777 	tnapi->tx_buffers[val].skb = skb;
12778 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12779 
12780 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12781 	       rnapi->coal_now);
12782 
12783 	udelay(10);
12784 
12785 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12786 
12787 	budget = tg3_tx_avail(tnapi);
12788 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12789 			    base_flags | TXD_FLAG_END, mss, 0)) {
12790 		tnapi->tx_buffers[val].skb = NULL;
12791 		dev_kfree_skb(skb);
12792 		return -EIO;
12793 	}
12794 
12795 	tnapi->tx_prod++;
12796 
12797 	/* Sync BD data before updating mailbox */
12798 	wmb();
12799 
12800 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12801 	tr32_mailbox(tnapi->prodmbox);
12802 
12803 	udelay(10);
12804 
12805 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12806 	for (i = 0; i < 35; i++) {
12807 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12808 		       coal_now);
12809 
12810 		udelay(10);
12811 
12812 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12813 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
12814 		if ((tx_idx == tnapi->tx_prod) &&
12815 		    (rx_idx == (rx_start_idx + num_pkts)))
12816 			break;
12817 	}
12818 
12819 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12820 	dev_kfree_skb(skb);
12821 
12822 	if (tx_idx != tnapi->tx_prod)
12823 		goto out;
12824 
12825 	if (rx_idx != rx_start_idx + num_pkts)
12826 		goto out;
12827 
12828 	val = data_off;
12829 	while (rx_idx != rx_start_idx) {
12830 		desc = &rnapi->rx_rcb[rx_start_idx++];
12831 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12832 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12833 
12834 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12835 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12836 			goto out;
12837 
12838 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12839 			 - ETH_FCS_LEN;
12840 
12841 		if (!tso_loopback) {
12842 			if (rx_len != tx_len)
12843 				goto out;
12844 
12845 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12846 				if (opaque_key != RXD_OPAQUE_RING_STD)
12847 					goto out;
12848 			} else {
12849 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12850 					goto out;
12851 			}
12852 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12853 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12854 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
12855 			goto out;
12856 		}
12857 
12858 		if (opaque_key == RXD_OPAQUE_RING_STD) {
12859 			rx_data = tpr->rx_std_buffers[desc_idx].data;
12860 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12861 					     mapping);
12862 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12863 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12864 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12865 					     mapping);
12866 		} else
12867 			goto out;
12868 
12869 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12870 					    PCI_DMA_FROMDEVICE);
12871 
12872 		rx_data += TG3_RX_OFFSET(tp);
12873 		for (i = data_off; i < rx_len; i++, val++) {
12874 			if (*(rx_data + i) != (u8) (val & 0xff))
12875 				goto out;
12876 		}
12877 	}
12878 
12879 	err = 0;
12880 
12881 	/* tg3_free_rings will unmap and free the rx_data */
12882 out:
12883 	return err;
12884 }
12885 
12886 #define TG3_STD_LOOPBACK_FAILED		1
12887 #define TG3_JMB_LOOPBACK_FAILED		2
12888 #define TG3_TSO_LOOPBACK_FAILED		4
12889 #define TG3_LOOPBACK_FAILED \
12890 	(TG3_STD_LOOPBACK_FAILED | \
12891 	 TG3_JMB_LOOPBACK_FAILED | \
12892 	 TG3_TSO_LOOPBACK_FAILED)
12893 
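/* Run the chip's loopback self tests.  Internal MAC loopback (skipped on
 * 5780 and CPMU-equipped chips), internal PHY loopback and, optionally,
 * external loopback are each exercised with standard-size, TSO and jumbo
 * frames as the hardware allows; per-mode failure bits accumulate in the
 * ethtool data[] array.  EEE capability is masked off for the duration so
 * low-power idle cannot interfere with the test frames.
 */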
12894 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12895 {
12896 	int err = -EIO;
12897 	u32 eee_cap;
12898 	u32 jmb_pkt_sz = 9000;
12899 
12900 	if (tp->dma_limit)
12901 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12902 
12903 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12904 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12905 
12906 	if (!netif_running(tp->dev)) {
12907 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12908 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12909 		if (do_extlpbk)
12910 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12911 		goto done;
12912 	}
12913 
12914 	err = tg3_reset_hw(tp, 1);
12915 	if (err) {
12916 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12917 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12918 		if (do_extlpbk)
12919 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12920 		goto done;
12921 	}
12922 
12923 	if (tg3_flag(tp, ENABLE_RSS)) {
12924 		int i;
12925 
12926 		/* Reroute all rx packets to the 1st queue */
12927 		for (i = MAC_RSS_INDIR_TBL_0;
12928 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12929 			tw32(i, 0x0);
12930 	}
12931 
12932 	/* HW errata - MAC loopback fails in some cases on 5780.
12933 	 * Normal traffic and PHY loopback are not affected by the
12934 	 * errata.  Also, the MAC loopback test is deprecated for
12935 	 * all newer ASIC revisions.
12936 	 */
12937 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
12938 	    !tg3_flag(tp, CPMU_PRESENT)) {
12939 		tg3_mac_loopback(tp, true);
12940 
12941 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12942 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12943 
12944 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12945 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12946 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12947 
12948 		tg3_mac_loopback(tp, false);
12949 	}
12950 
12951 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12952 	    !tg3_flag(tp, USE_PHYLIB)) {
12953 		int i;
12954 
12955 		tg3_phy_lpbk_set(tp, 0, false);
12956 
12957 		/* Wait for link */
12958 		for (i = 0; i < 100; i++) {
12959 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12960 				break;
12961 			mdelay(1);
12962 		}
12963 
12964 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12965 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12966 		if (tg3_flag(tp, TSO_CAPABLE) &&
12967 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12968 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12969 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12970 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12971 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12972 
12973 		if (do_extlpbk) {
12974 			tg3_phy_lpbk_set(tp, 0, true);
12975 
12976 			/* All link indications report up, but the hardware
12977 			 * isn't really ready for about 20 msec.  Double it
12978 			 * to be sure.
12979 			 */
12980 			mdelay(40);
12981 
12982 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12983 				data[TG3_EXT_LOOPB_TEST] |=
12984 							TG3_STD_LOOPBACK_FAILED;
12985 			if (tg3_flag(tp, TSO_CAPABLE) &&
12986 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12987 				data[TG3_EXT_LOOPB_TEST] |=
12988 							TG3_TSO_LOOPBACK_FAILED;
12989 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12990 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12991 				data[TG3_EXT_LOOPB_TEST] |=
12992 							TG3_JMB_LOOPBACK_FAILED;
12993 		}
12994 
12995 		/* Re-enable gphy autopowerdown. */
12996 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12997 			tg3_phy_toggle_apd(tp, true);
12998 	}
12999 
13000 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13001 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13002 
13003 done:
13004 	tp->phy_flags |= eee_cap;
13005 
13006 	return err;
13007 }
13008 
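/* ethtool .self_test handler.  The NVRAM and (unless external loopback
 * was requested) link checks run with the device live; offline testing
 * additionally halts the chip for the register, memory and loopback
 * tests, runs the interrupt test, then restarts the hardware.
 */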
13009 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13010 			  u64 *data)
13011 {
13012 	struct tg3 *tp = netdev_priv(dev);
13013 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13014 
13015 	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13016 	    tg3_power_up(tp)) {
13017 		etest->flags |= ETH_TEST_FL_FAILED;
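		/* Note: this memset() writes 0x01 into every byte, so each
		 * u64 slot reads back as a nonzero (failed) value rather
		 * than literal 1.
		 */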
13018 		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13019 		return;
13020 	}
13021 
13022 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13023 
13024 	if (tg3_test_nvram(tp) != 0) {
13025 		etest->flags |= ETH_TEST_FL_FAILED;
13026 		data[TG3_NVRAM_TEST] = 1;
13027 	}
13028 	if (!doextlpbk && tg3_test_link(tp)) {
13029 		etest->flags |= ETH_TEST_FL_FAILED;
13030 		data[TG3_LINK_TEST] = 1;
13031 	}
13032 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13033 		int err, err2 = 0, irq_sync = 0;
13034 
13035 		if (netif_running(dev)) {
13036 			tg3_phy_stop(tp);
13037 			tg3_netif_stop(tp);
13038 			irq_sync = 1;
13039 		}
13040 
13041 		tg3_full_lock(tp, irq_sync);
13042 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13043 		err = tg3_nvram_lock(tp);
13044 		tg3_halt_cpu(tp, RX_CPU_BASE);
13045 		if (!tg3_flag(tp, 5705_PLUS))
13046 			tg3_halt_cpu(tp, TX_CPU_BASE);
13047 		if (!err)
13048 			tg3_nvram_unlock(tp);
13049 
13050 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13051 			tg3_phy_reset(tp);
13052 
13053 		if (tg3_test_registers(tp) != 0) {
13054 			etest->flags |= ETH_TEST_FL_FAILED;
13055 			data[TG3_REGISTER_TEST] = 1;
13056 		}
13057 
13058 		if (tg3_test_memory(tp) != 0) {
13059 			etest->flags |= ETH_TEST_FL_FAILED;
13060 			data[TG3_MEMORY_TEST] = 1;
13061 		}
13062 
13063 		if (doextlpbk)
13064 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13065 
13066 		if (tg3_test_loopback(tp, data, doextlpbk))
13067 			etest->flags |= ETH_TEST_FL_FAILED;
13068 
13069 		tg3_full_unlock(tp);
13070 
13071 		if (tg3_test_interrupt(tp) != 0) {
13072 			etest->flags |= ETH_TEST_FL_FAILED;
13073 			data[TG3_INTERRUPT_TEST] = 1;
13074 		}
13075 
13076 		tg3_full_lock(tp, 0);
13077 
13078 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13079 		if (netif_running(dev)) {
13080 			tg3_flag_set(tp, INIT_COMPLETE);
13081 			err2 = tg3_restart_hw(tp, 1);
13082 			if (!err2)
13083 				tg3_netif_start(tp);
13084 		}
13085 
13086 		tg3_full_unlock(tp);
13087 
13088 		if (irq_sync && !err2)
13089 			tg3_phy_start(tp);
13090 	}
13091 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13092 		tg3_power_down(tp);
13093 
13094 }
13095 
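/* SIOCSHWTSTAMP handler: translate the user's hwtstamp_config into the
 * TX_TSTAMP_EN flag and the TG3_RX_PTP_CTL filter bits, rejecting the
 * request outright on devices without PTP support.
 */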
13096 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13097 			      struct ifreq *ifr, int cmd)
13098 {
13099 	struct tg3 *tp = netdev_priv(dev);
13100 	struct hwtstamp_config stmpconf;
13101 
13102 	if (!tg3_flag(tp, PTP_CAPABLE))
13103 		return -EINVAL;
13104 
13105 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13106 		return -EFAULT;
13107 
13108 	if (stmpconf.flags)
13109 		return -EINVAL;
13110 
13111 	switch (stmpconf.tx_type) {
13112 	case HWTSTAMP_TX_ON:
13113 		tg3_flag_set(tp, TX_TSTAMP_EN);
13114 		break;
13115 	case HWTSTAMP_TX_OFF:
13116 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13117 		break;
13118 	default:
13119 		return -ERANGE;
13120 	}
13121 
13122 	switch (stmpconf.rx_filter) {
13123 	case HWTSTAMP_FILTER_NONE:
13124 		tp->rxptpctl = 0;
13125 		break;
13126 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13127 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13128 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13129 		break;
13130 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13131 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13132 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13133 		break;
13134 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13135 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13136 			       TG3_RX_PTP_CTL_DELAY_REQ;
13137 		break;
13138 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13139 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13140 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13141 		break;
13142 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13143 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13144 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13145 		break;
13146 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13147 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13148 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13149 		break;
13150 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13151 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13152 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13153 		break;
13154 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13155 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13156 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13157 		break;
13158 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13159 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13160 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13161 		break;
13162 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13163 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13164 			       TG3_RX_PTP_CTL_DELAY_REQ;
13165 		break;
13166 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13167 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13168 			       TG3_RX_PTP_CTL_DELAY_REQ;
13169 		break;
13170 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13171 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13172 			       TG3_RX_PTP_CTL_DELAY_REQ;
13173 		break;
13174 	default:
13175 		return -ERANGE;
13176 	}
13177 
13178 	if (netif_running(dev) && tp->rxptpctl)
13179 		tw32(TG3_RX_PTP_CTL,
13180 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13181 
13182 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13183 		-EFAULT : 0;
13184 }
13185 
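/* Main ioctl entry point.  PHYLIB-managed devices hand MII requests to
 * phylib; otherwise MII register reads and writes are serviced directly
 * under tp->lock, and SIOCSHWTSTAMP is routed to tg3_hwtstamp_ioctl().
 */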
13186 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13187 {
13188 	struct mii_ioctl_data *data = if_mii(ifr);
13189 	struct tg3 *tp = netdev_priv(dev);
13190 	int err;
13191 
13192 	if (tg3_flag(tp, USE_PHYLIB)) {
13193 		struct phy_device *phydev;
13194 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13195 			return -EAGAIN;
13196 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13197 		return phy_mii_ioctl(phydev, ifr, cmd);
13198 	}
13199 
13200 	switch (cmd) {
13201 	case SIOCGMIIPHY:
13202 		data->phy_id = tp->phy_addr;
13203 
13204 		/* fallthru */
13205 	case SIOCGMIIREG: {
13206 		u32 mii_regval;
13207 
13208 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13209 			break;			/* We have no PHY */
13210 
13211 		if (!netif_running(dev))
13212 			return -EAGAIN;
13213 
13214 		spin_lock_bh(&tp->lock);
13215 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13216 				    data->reg_num & 0x1f, &mii_regval);
13217 		spin_unlock_bh(&tp->lock);
13218 
13219 		data->val_out = mii_regval;
13220 
13221 		return err;
13222 	}
13223 
13224 	case SIOCSMIIREG:
13225 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13226 			break;			/* We have no PHY */
13227 
13228 		if (!netif_running(dev))
13229 			return -EAGAIN;
13230 
13231 		spin_lock_bh(&tp->lock);
13232 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13233 				     data->reg_num & 0x1f, data->val_in);
13234 		spin_unlock_bh(&tp->lock);
13235 
13236 		return err;
13237 
13238 	case SIOCSHWTSTAMP:
13239 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13240 
13241 	default:
13242 		/* do nothing */
13243 		break;
13244 	}
13245 	return -EOPNOTSUPP;
13246 }
13247 
13248 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13249 {
13250 	struct tg3 *tp = netdev_priv(dev);
13251 
13252 	memcpy(ec, &tp->coal, sizeof(*ec));
13253 	return 0;
13254 }
13255 
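/* Validate and apply ethtool interrupt-coalescing parameters.  On chips
 * before the 5705 the IRQ-context tick and statistics-block limits are
 * nonzero; on 5705-plus hardware they stay at zero, so any nonzero
 * request for those fields is rejected with -EINVAL.
 */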
13256 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13257 {
13258 	struct tg3 *tp = netdev_priv(dev);
13259 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13260 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13261 
13262 	if (!tg3_flag(tp, 5705_PLUS)) {
13263 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13264 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13265 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13266 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13267 	}
13268 
13269 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13270 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13271 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13272 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13273 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13274 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13275 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13276 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13277 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13278 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13279 		return -EINVAL;
13280 
13281 	/* No rx interrupts will be generated if both are zero */
13282 	if ((ec->rx_coalesce_usecs == 0) &&
13283 	    (ec->rx_max_coalesced_frames == 0))
13284 		return -EINVAL;
13285 
13286 	/* No tx interrupts will be generated if both are zero */
13287 	if ((ec->tx_coalesce_usecs == 0) &&
13288 	    (ec->tx_max_coalesced_frames == 0))
13289 		return -EINVAL;
13290 
13291 	/* Only copy relevant parameters, ignore all others. */
13292 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13293 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13294 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13295 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13296 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13297 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13298 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13299 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13300 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13301 
13302 	if (netif_running(dev)) {
13303 		tg3_full_lock(tp, 0);
13304 		__tg3_set_coalesce(tp, &tp->coal);
13305 		tg3_full_unlock(tp);
13306 	}
13307 	return 0;
13308 }
13309 
13310 static const struct ethtool_ops tg3_ethtool_ops = {
13311 	.get_settings		= tg3_get_settings,
13312 	.set_settings		= tg3_set_settings,
13313 	.get_drvinfo		= tg3_get_drvinfo,
13314 	.get_regs_len		= tg3_get_regs_len,
13315 	.get_regs		= tg3_get_regs,
13316 	.get_wol		= tg3_get_wol,
13317 	.set_wol		= tg3_set_wol,
13318 	.get_msglevel		= tg3_get_msglevel,
13319 	.set_msglevel		= tg3_set_msglevel,
13320 	.nway_reset		= tg3_nway_reset,
13321 	.get_link		= ethtool_op_get_link,
13322 	.get_eeprom_len		= tg3_get_eeprom_len,
13323 	.get_eeprom		= tg3_get_eeprom,
13324 	.set_eeprom		= tg3_set_eeprom,
13325 	.get_ringparam		= tg3_get_ringparam,
13326 	.set_ringparam		= tg3_set_ringparam,
13327 	.get_pauseparam		= tg3_get_pauseparam,
13328 	.set_pauseparam		= tg3_set_pauseparam,
13329 	.self_test		= tg3_self_test,
13330 	.get_strings		= tg3_get_strings,
13331 	.set_phys_id		= tg3_set_phys_id,
13332 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13333 	.get_coalesce		= tg3_get_coalesce,
13334 	.set_coalesce		= tg3_set_coalesce,
13335 	.get_sset_count		= tg3_get_sset_count,
13336 	.get_rxnfc		= tg3_get_rxnfc,
13337 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13338 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13339 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13340 	.get_channels		= tg3_get_channels,
13341 	.set_channels		= tg3_set_channels,
13342 	.get_ts_info		= tg3_get_ts_info,
13343 };
13344 
13345 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13346 						struct rtnl_link_stats64 *stats)
13347 {
13348 	struct tg3 *tp = netdev_priv(dev);
13349 
13350 	spin_lock_bh(&tp->lock);
13351 	if (!tp->hw_stats) {
13352 		spin_unlock_bh(&tp->lock);
13353 		return &tp->net_stats_prev;
13354 	}
13355 
13356 	tg3_get_nstats(tp, stats);
13357 	spin_unlock_bh(&tp->lock);
13358 
13359 	return stats;
13360 }
13361 
13362 static void tg3_set_rx_mode(struct net_device *dev)
13363 {
13364 	struct tg3 *tp = netdev_priv(dev);
13365 
13366 	if (!netif_running(dev))
13367 		return;
13368 
13369 	tg3_full_lock(tp, 0);
13370 	__tg3_set_rx_mode(dev);
13371 	tg3_full_unlock(tp);
13372 }
13373 
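/* Record the new MTU and adjust jumbo-frame state.  On 5780-class chips
 * TSO and jumbo frames cannot be used together, so the driver trades
 * TSO_CAPABLE off against the larger MTU instead of enabling the jumbo
 * ring.
 */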
13374 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13375 			       int new_mtu)
13376 {
13377 	dev->mtu = new_mtu;
13378 
13379 	if (new_mtu > ETH_DATA_LEN) {
13380 		if (tg3_flag(tp, 5780_CLASS)) {
13381 			netdev_update_features(dev);
13382 			tg3_flag_clear(tp, TSO_CAPABLE);
13383 		} else {
13384 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13385 		}
13386 	} else {
13387 		if (tg3_flag(tp, 5780_CLASS)) {
13388 			tg3_flag_set(tp, TSO_CAPABLE);
13389 			netdev_update_features(dev);
13390 		}
13391 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13392 	}
13393 }
13394 
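/* ndo_change_mtu handler.  If the interface is down the new MTU is just
 * recorded for the next open; otherwise traffic is stopped, the chip is
 * halted, the MTU applied and the hardware restarted (with a PHY reset
 * on 57766, as explained below).
 */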
13395 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13396 {
13397 	struct tg3 *tp = netdev_priv(dev);
13398 	int err, reset_phy = 0;
13399 
13400 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13401 		return -EINVAL;
13402 
13403 	if (!netif_running(dev)) {
13404 		/* We'll just catch it later when the
13405 		 * device is brought up.
13406 		 */
13407 		tg3_set_mtu(dev, tp, new_mtu);
13408 		return 0;
13409 	}
13410 
13411 	tg3_phy_stop(tp);
13412 
13413 	tg3_netif_stop(tp);
13414 
13415 	tg3_full_lock(tp, 1);
13416 
13417 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13418 
13419 	tg3_set_mtu(dev, tp, new_mtu);
13420 
13421 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
13422 	 * breaks all requests to 256 bytes.
13423 	 */
13424 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13425 		reset_phy = 1;
13426 
13427 	err = tg3_restart_hw(tp, reset_phy);
13428 
13429 	if (!err)
13430 		tg3_netif_start(tp);
13431 
13432 	tg3_full_unlock(tp);
13433 
13434 	if (!err)
13435 		tg3_phy_start(tp);
13436 
13437 	return err;
13438 }
13439 
13440 static const struct net_device_ops tg3_netdev_ops = {
13441 	.ndo_open		= tg3_open,
13442 	.ndo_stop		= tg3_close,
13443 	.ndo_start_xmit		= tg3_start_xmit,
13444 	.ndo_get_stats64	= tg3_get_stats64,
13445 	.ndo_validate_addr	= eth_validate_addr,
13446 	.ndo_set_rx_mode	= tg3_set_rx_mode,
13447 	.ndo_set_mac_address	= tg3_set_mac_addr,
13448 	.ndo_do_ioctl		= tg3_ioctl,
13449 	.ndo_tx_timeout		= tg3_tx_timeout,
13450 	.ndo_change_mtu		= tg3_change_mtu,
13451 	.ndo_fix_features	= tg3_fix_features,
13452 	.ndo_set_features	= tg3_set_features,
13453 #ifdef CONFIG_NET_POLL_CONTROLLER
13454 	.ndo_poll_controller	= tg3_poll_controller,
13455 #endif
13456 };
13457 
13458 static void tg3_get_eeprom_size(struct tg3 *tp)
13459 {
13460 	u32 cursize, val, magic;
13461 
13462 	tp->nvram_size = EEPROM_CHIP_SIZE;
13463 
13464 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13465 		return;
13466 
13467 	if ((magic != TG3_EEPROM_MAGIC) &&
13468 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13469 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13470 		return;
13471 
13472 	/*
13473 	 * Size the chip by reading offsets at increasing powers of two.
13474 	 * When we encounter our validation signature, we know the addressing
13475 	 * has wrapped around, and thus have our chip size.
13476 	 */
13477 	cursize = 0x10;
13478 
13479 	while (cursize < tp->nvram_size) {
13480 		if (tg3_nvram_read(tp, cursize, &val) != 0)
13481 			return;
13482 
13483 		if (val == magic)
13484 			break;
13485 
13486 		cursize <<= 1;
13487 	}
13488 
13489 	tp->nvram_size = cursize;
13490 }
13491 
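/* Determine total NVRAM size.  Images without the standard magic value
 * (selfboot format) are sized via tg3_get_eeprom_size(); otherwise the
 * size comes from the little-endian 16-bit field at offset 0xf2, with
 * 512KB assumed when that field is zero or unreadable.
 */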
13492 static void tg3_get_nvram_size(struct tg3 *tp)
13493 {
13494 	u32 val;
13495 
13496 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13497 		return;
13498 
13499 	/* Selfboot format */
13500 	if (val != TG3_EEPROM_MAGIC) {
13501 		tg3_get_eeprom_size(tp);
13502 		return;
13503 	}
13504 
13505 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13506 		if (val != 0) {
13507 			/* This is confusing.  We want to operate on the
13508 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13509 			 * call will read from NVRAM and byteswap the data
13510 			 * according to the byteswapping settings for all
13511 			 * other register accesses.  This ensures the data we
13512 			 * want will always reside in the lower 16-bits.
13513 			 * However, the data in NVRAM is in LE format, which
13514 			 * means the data from the NVRAM read will always be
13515 			 * opposite the endianness of the CPU.  The 16-bit
13516 			 * byteswap then brings the data to CPU endianness.
13517 			 */
13518 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13519 			return;
13520 		}
13521 	}
13522 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13523 }
13524 
13525 static void tg3_get_nvram_info(struct tg3 *tp)
13526 {
13527 	u32 nvcfg1;
13528 
13529 	nvcfg1 = tr32(NVRAM_CFG1);
13530 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13531 		tg3_flag_set(tp, FLASH);
13532 	} else {
13533 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13534 		tw32(NVRAM_CFG1, nvcfg1);
13535 	}
13536 
13537 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13538 	    tg3_flag(tp, 5780_CLASS)) {
13539 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13540 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13541 			tp->nvram_jedecnum = JEDEC_ATMEL;
13542 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13543 			tg3_flag_set(tp, NVRAM_BUFFERED);
13544 			break;
13545 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13546 			tp->nvram_jedecnum = JEDEC_ATMEL;
13547 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13548 			break;
13549 		case FLASH_VENDOR_ATMEL_EEPROM:
13550 			tp->nvram_jedecnum = JEDEC_ATMEL;
13551 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13552 			tg3_flag_set(tp, NVRAM_BUFFERED);
13553 			break;
13554 		case FLASH_VENDOR_ST:
13555 			tp->nvram_jedecnum = JEDEC_ST;
13556 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13557 			tg3_flag_set(tp, NVRAM_BUFFERED);
13558 			break;
13559 		case FLASH_VENDOR_SAIFUN:
13560 			tp->nvram_jedecnum = JEDEC_SAIFUN;
13561 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13562 			break;
13563 		case FLASH_VENDOR_SST_SMALL:
13564 		case FLASH_VENDOR_SST_LARGE:
13565 			tp->nvram_jedecnum = JEDEC_SST;
13566 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13567 			break;
13568 		}
13569 	} else {
13570 		tp->nvram_jedecnum = JEDEC_ATMEL;
13571 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13572 		tg3_flag_set(tp, NVRAM_BUFFERED);
13573 	}
13574 }
13575 
13576 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13577 {
13578 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13579 	case FLASH_5752PAGE_SIZE_256:
13580 		tp->nvram_pagesize = 256;
13581 		break;
13582 	case FLASH_5752PAGE_SIZE_512:
13583 		tp->nvram_pagesize = 512;
13584 		break;
13585 	case FLASH_5752PAGE_SIZE_1K:
13586 		tp->nvram_pagesize = 1024;
13587 		break;
13588 	case FLASH_5752PAGE_SIZE_2K:
13589 		tp->nvram_pagesize = 2048;
13590 		break;
13591 	case FLASH_5752PAGE_SIZE_4K:
13592 		tp->nvram_pagesize = 4096;
13593 		break;
13594 	case FLASH_5752PAGE_SIZE_264:
13595 		tp->nvram_pagesize = 264;
13596 		break;
13597 	case FLASH_5752PAGE_SIZE_528:
13598 		tp->nvram_pagesize = 528;
13599 		break;
13600 	}
13601 }
13602 
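/* The tg3_get_*_nvram_info() helpers below decode the per-ASIC strap
 * bits in NVRAM_CFG1 into JEDEC vendor, buffering, page size and (where
 * encoded) total size.  On chips that support it, bit 27 marks the
 * NVRAM as TPM-protected.
 */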
13603 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13604 {
13605 	u32 nvcfg1;
13606 
13607 	nvcfg1 = tr32(NVRAM_CFG1);
13608 
13609 	/* NVRAM protection for TPM */
13610 	if (nvcfg1 & (1 << 27))
13611 		tg3_flag_set(tp, PROTECTED_NVRAM);
13612 
13613 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13614 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13615 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13616 		tp->nvram_jedecnum = JEDEC_ATMEL;
13617 		tg3_flag_set(tp, NVRAM_BUFFERED);
13618 		break;
13619 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13620 		tp->nvram_jedecnum = JEDEC_ATMEL;
13621 		tg3_flag_set(tp, NVRAM_BUFFERED);
13622 		tg3_flag_set(tp, FLASH);
13623 		break;
13624 	case FLASH_5752VENDOR_ST_M45PE10:
13625 	case FLASH_5752VENDOR_ST_M45PE20:
13626 	case FLASH_5752VENDOR_ST_M45PE40:
13627 		tp->nvram_jedecnum = JEDEC_ST;
13628 		tg3_flag_set(tp, NVRAM_BUFFERED);
13629 		tg3_flag_set(tp, FLASH);
13630 		break;
13631 	}
13632 
13633 	if (tg3_flag(tp, FLASH)) {
13634 		tg3_nvram_get_pagesize(tp, nvcfg1);
13635 	} else {
13636 		/* For eeprom, set pagesize to maximum eeprom size */
13637 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13638 
13639 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13640 		tw32(NVRAM_CFG1, nvcfg1);
13641 	}
13642 }
13643 
13644 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13645 {
13646 	u32 nvcfg1, protect = 0;
13647 
13648 	nvcfg1 = tr32(NVRAM_CFG1);
13649 
13650 	/* NVRAM protection for TPM */
13651 	if (nvcfg1 & (1 << 27)) {
13652 		tg3_flag_set(tp, PROTECTED_NVRAM);
13653 		protect = 1;
13654 	}
13655 
13656 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13657 	switch (nvcfg1) {
13658 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13659 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13660 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13661 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
13662 		tp->nvram_jedecnum = JEDEC_ATMEL;
13663 		tg3_flag_set(tp, NVRAM_BUFFERED);
13664 		tg3_flag_set(tp, FLASH);
13665 		tp->nvram_pagesize = 264;
13666 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13667 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13668 			tp->nvram_size = (protect ? 0x3e200 :
13669 					  TG3_NVRAM_SIZE_512KB);
13670 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13671 			tp->nvram_size = (protect ? 0x1f200 :
13672 					  TG3_NVRAM_SIZE_256KB);
13673 		else
13674 			tp->nvram_size = (protect ? 0x1f200 :
13675 					  TG3_NVRAM_SIZE_128KB);
13676 		break;
13677 	case FLASH_5752VENDOR_ST_M45PE10:
13678 	case FLASH_5752VENDOR_ST_M45PE20:
13679 	case FLASH_5752VENDOR_ST_M45PE40:
13680 		tp->nvram_jedecnum = JEDEC_ST;
13681 		tg3_flag_set(tp, NVRAM_BUFFERED);
13682 		tg3_flag_set(tp, FLASH);
13683 		tp->nvram_pagesize = 256;
13684 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13685 			tp->nvram_size = (protect ?
13686 					  TG3_NVRAM_SIZE_64KB :
13687 					  TG3_NVRAM_SIZE_128KB);
13688 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13689 			tp->nvram_size = (protect ?
13690 					  TG3_NVRAM_SIZE_64KB :
13691 					  TG3_NVRAM_SIZE_256KB);
13692 		else
13693 			tp->nvram_size = (protect ?
13694 					  TG3_NVRAM_SIZE_128KB :
13695 					  TG3_NVRAM_SIZE_512KB);
13696 		break;
13697 	}
13698 }
13699 
13700 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13701 {
13702 	u32 nvcfg1;
13703 
13704 	nvcfg1 = tr32(NVRAM_CFG1);
13705 
13706 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13707 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13708 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13709 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13710 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13711 		tp->nvram_jedecnum = JEDEC_ATMEL;
13712 		tg3_flag_set(tp, NVRAM_BUFFERED);
13713 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13714 
13715 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13716 		tw32(NVRAM_CFG1, nvcfg1);
13717 		break;
13718 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13719 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
13720 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
13721 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
13722 		tp->nvram_jedecnum = JEDEC_ATMEL;
13723 		tg3_flag_set(tp, NVRAM_BUFFERED);
13724 		tg3_flag_set(tp, FLASH);
13725 		tp->nvram_pagesize = 264;
13726 		break;
13727 	case FLASH_5752VENDOR_ST_M45PE10:
13728 	case FLASH_5752VENDOR_ST_M45PE20:
13729 	case FLASH_5752VENDOR_ST_M45PE40:
13730 		tp->nvram_jedecnum = JEDEC_ST;
13731 		tg3_flag_set(tp, NVRAM_BUFFERED);
13732 		tg3_flag_set(tp, FLASH);
13733 		tp->nvram_pagesize = 256;
13734 		break;
13735 	}
13736 }
13737 
13738 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13739 {
13740 	u32 nvcfg1, protect = 0;
13741 
13742 	nvcfg1 = tr32(NVRAM_CFG1);
13743 
13744 	/* NVRAM protection for TPM */
13745 	if (nvcfg1 & (1 << 27)) {
13746 		tg3_flag_set(tp, PROTECTED_NVRAM);
13747 		protect = 1;
13748 	}
13749 
13750 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13751 	switch (nvcfg1) {
13752 	case FLASH_5761VENDOR_ATMEL_ADB021D:
13753 	case FLASH_5761VENDOR_ATMEL_ADB041D:
13754 	case FLASH_5761VENDOR_ATMEL_ADB081D:
13755 	case FLASH_5761VENDOR_ATMEL_ADB161D:
13756 	case FLASH_5761VENDOR_ATMEL_MDB021D:
13757 	case FLASH_5761VENDOR_ATMEL_MDB041D:
13758 	case FLASH_5761VENDOR_ATMEL_MDB081D:
13759 	case FLASH_5761VENDOR_ATMEL_MDB161D:
13760 		tp->nvram_jedecnum = JEDEC_ATMEL;
13761 		tg3_flag_set(tp, NVRAM_BUFFERED);
13762 		tg3_flag_set(tp, FLASH);
13763 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13764 		tp->nvram_pagesize = 256;
13765 		break;
13766 	case FLASH_5761VENDOR_ST_A_M45PE20:
13767 	case FLASH_5761VENDOR_ST_A_M45PE40:
13768 	case FLASH_5761VENDOR_ST_A_M45PE80:
13769 	case FLASH_5761VENDOR_ST_A_M45PE16:
13770 	case FLASH_5761VENDOR_ST_M_M45PE20:
13771 	case FLASH_5761VENDOR_ST_M_M45PE40:
13772 	case FLASH_5761VENDOR_ST_M_M45PE80:
13773 	case FLASH_5761VENDOR_ST_M_M45PE16:
13774 		tp->nvram_jedecnum = JEDEC_ST;
13775 		tg3_flag_set(tp, NVRAM_BUFFERED);
13776 		tg3_flag_set(tp, FLASH);
13777 		tp->nvram_pagesize = 256;
13778 		break;
13779 	}
13780 
13781 	if (protect) {
13782 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13783 	} else {
13784 		switch (nvcfg1) {
13785 		case FLASH_5761VENDOR_ATMEL_ADB161D:
13786 		case FLASH_5761VENDOR_ATMEL_MDB161D:
13787 		case FLASH_5761VENDOR_ST_A_M45PE16:
13788 		case FLASH_5761VENDOR_ST_M_M45PE16:
13789 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13790 			break;
13791 		case FLASH_5761VENDOR_ATMEL_ADB081D:
13792 		case FLASH_5761VENDOR_ATMEL_MDB081D:
13793 		case FLASH_5761VENDOR_ST_A_M45PE80:
13794 		case FLASH_5761VENDOR_ST_M_M45PE80:
13795 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13796 			break;
13797 		case FLASH_5761VENDOR_ATMEL_ADB041D:
13798 		case FLASH_5761VENDOR_ATMEL_MDB041D:
13799 		case FLASH_5761VENDOR_ST_A_M45PE40:
13800 		case FLASH_5761VENDOR_ST_M_M45PE40:
13801 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13802 			break;
13803 		case FLASH_5761VENDOR_ATMEL_ADB021D:
13804 		case FLASH_5761VENDOR_ATMEL_MDB021D:
13805 		case FLASH_5761VENDOR_ST_A_M45PE20:
13806 		case FLASH_5761VENDOR_ST_M_M45PE20:
13807 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13808 			break;
13809 		}
13810 	}
13811 }
13812 
13813 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13814 {
13815 	tp->nvram_jedecnum = JEDEC_ATMEL;
13816 	tg3_flag_set(tp, NVRAM_BUFFERED);
13817 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13818 }
13819 
13820 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13821 {
13822 	u32 nvcfg1;
13823 
13824 	nvcfg1 = tr32(NVRAM_CFG1);
13825 
13826 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13827 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13828 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13829 		tp->nvram_jedecnum = JEDEC_ATMEL;
13830 		tg3_flag_set(tp, NVRAM_BUFFERED);
13831 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13832 
13833 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13834 		tw32(NVRAM_CFG1, nvcfg1);
13835 		return;
13836 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13837 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13838 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13839 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13840 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13841 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13842 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13843 		tp->nvram_jedecnum = JEDEC_ATMEL;
13844 		tg3_flag_set(tp, NVRAM_BUFFERED);
13845 		tg3_flag_set(tp, FLASH);
13846 
13847 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13848 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13849 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13850 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13851 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13852 			break;
13853 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13854 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13855 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13856 			break;
13857 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13858 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13859 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13860 			break;
13861 		}
13862 		break;
13863 	case FLASH_5752VENDOR_ST_M45PE10:
13864 	case FLASH_5752VENDOR_ST_M45PE20:
13865 	case FLASH_5752VENDOR_ST_M45PE40:
13866 		tp->nvram_jedecnum = JEDEC_ST;
13867 		tg3_flag_set(tp, NVRAM_BUFFERED);
13868 		tg3_flag_set(tp, FLASH);
13869 
13870 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13871 		case FLASH_5752VENDOR_ST_M45PE10:
13872 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13873 			break;
13874 		case FLASH_5752VENDOR_ST_M45PE20:
13875 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13876 			break;
13877 		case FLASH_5752VENDOR_ST_M45PE40:
13878 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13879 			break;
13880 		}
13881 		break;
13882 	default:
13883 		tg3_flag_set(tp, NO_NVRAM);
13884 		return;
13885 	}
13886 
13887 	tg3_nvram_get_pagesize(tp, nvcfg1);
13888 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13889 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13890 }
13891 
13892 
13893 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13894 {
13895 	u32 nvcfg1;
13896 
13897 	nvcfg1 = tr32(NVRAM_CFG1);
13898 
13899 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13900 	case FLASH_5717VENDOR_ATMEL_EEPROM:
13901 	case FLASH_5717VENDOR_MICRO_EEPROM:
13902 		tp->nvram_jedecnum = JEDEC_ATMEL;
13903 		tg3_flag_set(tp, NVRAM_BUFFERED);
13904 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13905 
13906 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13907 		tw32(NVRAM_CFG1, nvcfg1);
13908 		return;
13909 	case FLASH_5717VENDOR_ATMEL_MDB011D:
13910 	case FLASH_5717VENDOR_ATMEL_ADB011B:
13911 	case FLASH_5717VENDOR_ATMEL_ADB011D:
13912 	case FLASH_5717VENDOR_ATMEL_MDB021D:
13913 	case FLASH_5717VENDOR_ATMEL_ADB021B:
13914 	case FLASH_5717VENDOR_ATMEL_ADB021D:
13915 	case FLASH_5717VENDOR_ATMEL_45USPT:
13916 		tp->nvram_jedecnum = JEDEC_ATMEL;
13917 		tg3_flag_set(tp, NVRAM_BUFFERED);
13918 		tg3_flag_set(tp, FLASH);
13919 
13920 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13921 		case FLASH_5717VENDOR_ATMEL_MDB021D:
13922 			/* Detect size with tg3_get_nvram_size() */
13923 			break;
13924 		case FLASH_5717VENDOR_ATMEL_ADB021B:
13925 		case FLASH_5717VENDOR_ATMEL_ADB021D:
13926 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13927 			break;
13928 		default:
13929 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13930 			break;
13931 		}
13932 		break;
13933 	case FLASH_5717VENDOR_ST_M_M25PE10:
13934 	case FLASH_5717VENDOR_ST_A_M25PE10:
13935 	case FLASH_5717VENDOR_ST_M_M45PE10:
13936 	case FLASH_5717VENDOR_ST_A_M45PE10:
13937 	case FLASH_5717VENDOR_ST_M_M25PE20:
13938 	case FLASH_5717VENDOR_ST_A_M25PE20:
13939 	case FLASH_5717VENDOR_ST_M_M45PE20:
13940 	case FLASH_5717VENDOR_ST_A_M45PE20:
13941 	case FLASH_5717VENDOR_ST_25USPT:
13942 	case FLASH_5717VENDOR_ST_45USPT:
13943 		tp->nvram_jedecnum = JEDEC_ST;
13944 		tg3_flag_set(tp, NVRAM_BUFFERED);
13945 		tg3_flag_set(tp, FLASH);
13946 
13947 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13948 		case FLASH_5717VENDOR_ST_M_M25PE20:
13949 		case FLASH_5717VENDOR_ST_M_M45PE20:
13950 			/* Detect size with tg3_get_nvram_size() */
13951 			break;
13952 		case FLASH_5717VENDOR_ST_A_M25PE20:
13953 		case FLASH_5717VENDOR_ST_A_M45PE20:
13954 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13955 			break;
13956 		default:
13957 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13958 			break;
13959 		}
13960 		break;
13961 	default:
13962 		tg3_flag_set(tp, NO_NVRAM);
13963 		return;
13964 	}
13965 
13966 	tg3_nvram_get_pagesize(tp, nvcfg1);
13967 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13968 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13969 }
13970 
13971 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13972 {
13973 	u32 nvcfg1, nvmpinstrp;
13974 
13975 	nvcfg1 = tr32(NVRAM_CFG1);
13976 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13977 
13978 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
13979 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
13980 			tg3_flag_set(tp, NO_NVRAM);
13981 			return;
13982 		}
13983 
13984 		switch (nvmpinstrp) {
13985 		case FLASH_5762_EEPROM_HD:
13986 			nvmpinstrp = FLASH_5720_EEPROM_HD;
13987 			break;
13988 		case FLASH_5762_EEPROM_LD:
13989 			nvmpinstrp = FLASH_5720_EEPROM_LD;
13990 			break;
13991 		}
13992 	}
13993 
13994 	switch (nvmpinstrp) {
13995 	case FLASH_5720_EEPROM_HD:
13996 	case FLASH_5720_EEPROM_LD:
13997 		tp->nvram_jedecnum = JEDEC_ATMEL;
13998 		tg3_flag_set(tp, NVRAM_BUFFERED);
13999 
14000 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14001 		tw32(NVRAM_CFG1, nvcfg1);
14002 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14003 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14004 		else
14005 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14006 		return;
14007 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14008 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14009 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14010 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14011 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14012 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14013 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14014 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14015 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14016 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14017 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14018 	case FLASH_5720VENDOR_ATMEL_45USPT:
14019 		tp->nvram_jedecnum = JEDEC_ATMEL;
14020 		tg3_flag_set(tp, NVRAM_BUFFERED);
14021 		tg3_flag_set(tp, FLASH);
14022 
14023 		switch (nvmpinstrp) {
14024 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14025 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14026 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14027 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14028 			break;
14029 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14030 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14031 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14032 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14033 			break;
14034 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14035 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14036 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14037 			break;
14038 		default:
14039 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14040 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14041 			break;
14042 		}
14043 		break;
14044 	case FLASH_5720VENDOR_M_ST_M25PE10:
14045 	case FLASH_5720VENDOR_M_ST_M45PE10:
14046 	case FLASH_5720VENDOR_A_ST_M25PE10:
14047 	case FLASH_5720VENDOR_A_ST_M45PE10:
14048 	case FLASH_5720VENDOR_M_ST_M25PE20:
14049 	case FLASH_5720VENDOR_M_ST_M45PE20:
14050 	case FLASH_5720VENDOR_A_ST_M25PE20:
14051 	case FLASH_5720VENDOR_A_ST_M45PE20:
14052 	case FLASH_5720VENDOR_M_ST_M25PE40:
14053 	case FLASH_5720VENDOR_M_ST_M45PE40:
14054 	case FLASH_5720VENDOR_A_ST_M25PE40:
14055 	case FLASH_5720VENDOR_A_ST_M45PE40:
14056 	case FLASH_5720VENDOR_M_ST_M25PE80:
14057 	case FLASH_5720VENDOR_M_ST_M45PE80:
14058 	case FLASH_5720VENDOR_A_ST_M25PE80:
14059 	case FLASH_5720VENDOR_A_ST_M45PE80:
14060 	case FLASH_5720VENDOR_ST_25USPT:
14061 	case FLASH_5720VENDOR_ST_45USPT:
14062 		tp->nvram_jedecnum = JEDEC_ST;
14063 		tg3_flag_set(tp, NVRAM_BUFFERED);
14064 		tg3_flag_set(tp, FLASH);
14065 
14066 		switch (nvmpinstrp) {
14067 		case FLASH_5720VENDOR_M_ST_M25PE20:
14068 		case FLASH_5720VENDOR_M_ST_M45PE20:
14069 		case FLASH_5720VENDOR_A_ST_M25PE20:
14070 		case FLASH_5720VENDOR_A_ST_M45PE20:
14071 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14072 			break;
14073 		case FLASH_5720VENDOR_M_ST_M25PE40:
14074 		case FLASH_5720VENDOR_M_ST_M45PE40:
14075 		case FLASH_5720VENDOR_A_ST_M25PE40:
14076 		case FLASH_5720VENDOR_A_ST_M45PE40:
14077 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14078 			break;
14079 		case FLASH_5720VENDOR_M_ST_M25PE80:
14080 		case FLASH_5720VENDOR_M_ST_M45PE80:
14081 		case FLASH_5720VENDOR_A_ST_M25PE80:
14082 		case FLASH_5720VENDOR_A_ST_M45PE80:
14083 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14084 			break;
14085 		default:
14086 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14087 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14088 			break;
14089 		}
14090 		break;
14091 	default:
14092 		tg3_flag_set(tp, NO_NVRAM);
14093 		return;
14094 	}
14095 
14096 	tg3_nvram_get_pagesize(tp, nvcfg1);
14097 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14098 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14099 
14100 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14101 		u32 val;
14102 
14103 		if (tg3_nvram_read(tp, 0, &val))
14104 			return;
14105 
14106 		if (val != TG3_EEPROM_MAGIC &&
14107 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14108 			tg3_flag_set(tp, NO_NVRAM);
14109 	}
14110 }
14111 
14112 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14113 static void tg3_nvram_init(struct tg3 *tp)
14114 {
14115 	if (tg3_flag(tp, IS_SSB_CORE)) {
14116 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14117 		tg3_flag_clear(tp, NVRAM);
14118 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14119 		tg3_flag_set(tp, NO_NVRAM);
14120 		return;
14121 	}
14122 
14123 	tw32_f(GRC_EEPROM_ADDR,
14124 	     (EEPROM_ADDR_FSM_RESET |
14125 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14126 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14127 
14128 	msleep(1);
14129 
14130 	/* Enable seeprom accesses. */
14131 	tw32_f(GRC_LOCAL_CTRL,
14132 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14133 	udelay(100);
14134 
14135 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14136 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14137 		tg3_flag_set(tp, NVRAM);
14138 
14139 		if (tg3_nvram_lock(tp)) {
14140 			netdev_warn(tp->dev,
14141 				    "Cannot get nvram lock, %s failed\n",
14142 				    __func__);
14143 			return;
14144 		}
14145 		tg3_enable_nvram_access(tp);
14146 
14147 		tp->nvram_size = 0;
14148 
14149 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14150 			tg3_get_5752_nvram_info(tp);
14151 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14152 			tg3_get_5755_nvram_info(tp);
14153 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14154 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14155 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14156 			tg3_get_5787_nvram_info(tp);
14157 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14158 			tg3_get_5761_nvram_info(tp);
14159 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14160 			tg3_get_5906_nvram_info(tp);
14161 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14162 			 tg3_flag(tp, 57765_CLASS))
14163 			tg3_get_57780_nvram_info(tp);
14164 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14165 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14166 			tg3_get_5717_nvram_info(tp);
14167 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14168 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14169 			tg3_get_5720_nvram_info(tp);
14170 		else
14171 			tg3_get_nvram_info(tp);
14172 
14173 		if (tp->nvram_size == 0)
14174 			tg3_get_nvram_size(tp);
14175 
14176 		tg3_disable_nvram_access(tp);
14177 		tg3_nvram_unlock(tp);
14178 
14179 	} else {
14180 		tg3_flag_clear(tp, NVRAM);
14181 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14182 
14183 		tg3_get_eeprom_size(tp);
14184 	}
14185 }
14186 
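/* Fallback mapping from PCI subsystem IDs to PHY IDs, consulted by
 * tg3_phy_probe() when neither the PHY registers nor the eeprom yield a
 * usable PHY ID.
 */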
14187 struct subsys_tbl_ent {
14188 	u16 subsys_vendor, subsys_devid;
14189 	u32 phy_id;
14190 };
14191 
14192 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14193 	/* Broadcom boards. */
14194 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14195 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14196 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14197 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14198 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14199 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14200 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14201 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14202 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14203 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14204 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14205 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14206 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14207 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14208 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14209 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14210 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14211 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14212 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14213 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14214 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14215 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14216 
14217 	/* 3com boards. */
14218 	{ TG3PCI_SUBVENDOR_ID_3COM,
14219 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14220 	{ TG3PCI_SUBVENDOR_ID_3COM,
14221 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14222 	{ TG3PCI_SUBVENDOR_ID_3COM,
14223 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14224 	{ TG3PCI_SUBVENDOR_ID_3COM,
14225 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14226 	{ TG3PCI_SUBVENDOR_ID_3COM,
14227 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14228 
14229 	/* DELL boards. */
14230 	{ TG3PCI_SUBVENDOR_ID_DELL,
14231 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14232 	{ TG3PCI_SUBVENDOR_ID_DELL,
14233 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14234 	{ TG3PCI_SUBVENDOR_ID_DELL,
14235 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14236 	{ TG3PCI_SUBVENDOR_ID_DELL,
14237 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14238 
14239 	/* Compaq boards. */
14240 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14241 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14242 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14243 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14244 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14245 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14246 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14247 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14248 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14249 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14250 
14251 	/* IBM boards. */
14252 	{ TG3PCI_SUBVENDOR_ID_IBM,
14253 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14254 };
14255 
14256 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14257 {
14258 	int i;
14259 
14260 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14261 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14262 		     tp->pdev->subsystem_vendor) &&
14263 		    (subsys_id_to_phy_id[i].subsys_devid ==
14264 		     tp->pdev->subsystem_device))
14265 			return &subsys_id_to_phy_id[i];
14266 	}
14267 	return NULL;
14268 }
14269 
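/* Parse the NIC_SRAM_DATA_* configuration block that the bootcode
 * leaves in shared memory: PHY ID and type, LED mode, WOL, ASF/APE
 * enables and assorted workaround flags.  The 5906 instead keeps a
 * reduced shadow of this information in VCPU_CFGSHDW.
 */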
14270 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14271 {
14272 	u32 val;
14273 
14274 	tp->phy_id = TG3_PHY_ID_INVALID;
14275 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14276 
14277 	/* Assume an onboard device and WOL capable by default.  */
14278 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14279 	tg3_flag_set(tp, WOL_CAP);
14280 
14281 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14282 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14283 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14284 			tg3_flag_set(tp, IS_NIC);
14285 		}
14286 		val = tr32(VCPU_CFGSHDW);
14287 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14288 			tg3_flag_set(tp, ASPM_WORKAROUND);
14289 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14290 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14291 			tg3_flag_set(tp, WOL_ENABLE);
14292 			device_set_wakeup_enable(&tp->pdev->dev, true);
14293 		}
14294 		goto done;
14295 	}
14296 
14297 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14298 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14299 		u32 nic_cfg, led_cfg;
14300 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14301 		int eeprom_phy_serdes = 0;
14302 
14303 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14304 		tp->nic_sram_data_cfg = nic_cfg;
14305 
14306 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14307 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14308 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14309 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14310 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14311 		    (ver > 0) && (ver < 0x100))
14312 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14313 
14314 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14315 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14316 
14317 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14318 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14319 			eeprom_phy_serdes = 1;
14320 
14321 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14322 		if (nic_phy_id != 0) {
14323 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14324 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14325 
14326 			eeprom_phy_id  = (id1 >> 16) << 10;
14327 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14328 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14329 		} else
14330 			eeprom_phy_id = 0;
14331 
14332 		tp->phy_id = eeprom_phy_id;
14333 		if (eeprom_phy_serdes) {
14334 			if (!tg3_flag(tp, 5705_PLUS))
14335 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14336 			else
14337 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14338 		}
14339 
14340 		if (tg3_flag(tp, 5750_PLUS))
14341 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14342 				    SHASTA_EXT_LED_MODE_MASK);
14343 		else
14344 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14345 
14346 		switch (led_cfg) {
14347 		default:
14348 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14349 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14350 			break;
14351 
14352 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14353 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14354 			break;
14355 
14356 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14357 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14358 
14359 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14360 			 * read on some older 5700/5701 bootcode.
14361 			 */
14362 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14363 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14364 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14365 
14366 			break;
14367 
14368 		case SHASTA_EXT_LED_SHARED:
14369 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14370 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14371 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14372 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14373 						 LED_CTRL_MODE_PHY_2);
14374 			break;
14375 
14376 		case SHASTA_EXT_LED_MAC:
14377 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14378 			break;
14379 
14380 		case SHASTA_EXT_LED_COMBO:
14381 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14382 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14383 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14384 						 LED_CTRL_MODE_PHY_2);
14385 			break;
14386 
14387 		}
14388 
14389 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14390 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14391 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14392 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14393 
14394 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14395 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14396 
14397 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14398 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14399 			if ((tp->pdev->subsystem_vendor ==
14400 			     PCI_VENDOR_ID_ARIMA) &&
14401 			    (tp->pdev->subsystem_device == 0x205a ||
14402 			     tp->pdev->subsystem_device == 0x2063))
14403 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14404 		} else {
14405 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14406 			tg3_flag_set(tp, IS_NIC);
14407 		}
14408 
14409 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14410 			tg3_flag_set(tp, ENABLE_ASF);
14411 			if (tg3_flag(tp, 5750_PLUS))
14412 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14413 		}
14414 
14415 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14416 		    tg3_flag(tp, 5750_PLUS))
14417 			tg3_flag_set(tp, ENABLE_APE);
14418 
14419 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14420 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14421 			tg3_flag_clear(tp, WOL_CAP);
14422 
14423 		if (tg3_flag(tp, WOL_CAP) &&
14424 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14425 			tg3_flag_set(tp, WOL_ENABLE);
14426 			device_set_wakeup_enable(&tp->pdev->dev, true);
14427 		}
14428 
14429 		if (cfg2 & (1 << 17))
14430 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14431 
14432 		/* SerDes signal pre-emphasis in register 0x590 is set
14433 		 * by the bootcode if bit 18 is set. */
14434 		if (cfg2 & (1 << 18))
14435 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14436 
14437 		if ((tg3_flag(tp, 57765_PLUS) ||
14438 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14439 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14440 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14441 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14442 
14443 		if (tg3_flag(tp, PCI_EXPRESS) &&
14444 		    tg3_asic_rev(tp) != ASIC_REV_5785 &&
14445 		    !tg3_flag(tp, 57765_PLUS)) {
14446 			u32 cfg3;
14447 
14448 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14449 			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14450 				tg3_flag_set(tp, ASPM_WORKAROUND);
14451 		}
14452 
14453 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14454 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14455 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14456 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14457 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14458 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14459 	}
14460 done:
14461 	if (tg3_flag(tp, WOL_CAP))
14462 		device_set_wakeup_enable(&tp->pdev->dev,
14463 					 tg3_flag(tp, WOL_ENABLE));
14464 	else
14465 		device_set_wakeup_capable(&tp->pdev->dev, false);
14466 }
14467 
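/* Read one 32-bit word from the APE OTP region.  The NVRAM arbitration
 * lock is held across the access, and command completion is polled for
 * up to roughly 1 ms before giving up with -EBUSY.
 */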
14468 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14469 {
14470 	int i, err;
14471 	u32 val2, off = offset * 8;
14472 
14473 	err = tg3_nvram_lock(tp);
14474 	if (err)
14475 		return err;
14476 
14477 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14478 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14479 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14480 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14481 	udelay(10);
14482 
14483 	for (i = 0; i < 100; i++) {
14484 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14485 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
14486 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14487 			break;
14488 		}
14489 		udelay(10);
14490 	}
14491 
14492 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14493 
14494 	tg3_nvram_unlock(tp);
14495 	if (val2 & APE_OTP_STATUS_CMD_DONE)
14496 		return 0;
14497 
14498 	return -EBUSY;
14499 }
14500 
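/* Kick off an OTP controller command and poll briefly for completion. */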
14501 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14502 {
14503 	int i;
14504 	u32 val;
14505 
14506 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14507 	tw32(OTP_CTRL, cmd);
14508 
14509 	/* Wait for up to 1 ms for command to execute. */
14510 	for (i = 0; i < 100; i++) {
14511 		val = tr32(OTP_STATUS);
14512 		if (val & OTP_STATUS_CMD_DONE)
14513 			break;
14514 		udelay(10);
14515 	}
14516 
14517 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14518 }
14519 
14520 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14521  * configuration is a 32-bit value that straddles the alignment boundary.
14522  * We do two 32-bit reads and then shift and merge the results.
14523  */
14524 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14525 {
14526 	u32 bhalf_otp, thalf_otp;
14527 
14528 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14529 
14530 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14531 		return 0;
14532 
14533 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14534 
14535 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14536 		return 0;
14537 
14538 	thalf_otp = tr32(OTP_READ_DATA);
14539 
14540 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14541 
14542 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14543 		return 0;
14544 
14545 	bhalf_otp = tr32(OTP_READ_DATA);
14546 
14547 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14548 }
14549 
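/* Seed link_config with a full autoneg advertisement appropriate to the
 * PHY: gigabit modes unless the PHY is 10/100-only, and TP vs. FIBRE
 * media depending on whether this is a serdes device.
 */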
14550 static void tg3_phy_init_link_config(struct tg3 *tp)
14551 {
14552 	u32 adv = ADVERTISED_Autoneg;
14553 
14554 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14555 		adv |= ADVERTISED_1000baseT_Half |
14556 		       ADVERTISED_1000baseT_Full;
14557 
14558 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14559 		adv |= ADVERTISED_100baseT_Half |
14560 		       ADVERTISED_100baseT_Full |
14561 		       ADVERTISED_10baseT_Half |
14562 		       ADVERTISED_10baseT_Full |
14563 		       ADVERTISED_TP;
14564 	else
14565 		adv |= ADVERTISED_FIBRE;
14566 
14567 	tp->link_config.advertising = adv;
14568 	tp->link_config.speed = SPEED_UNKNOWN;
14569 	tp->link_config.duplex = DUPLEX_UNKNOWN;
14570 	tp->link_config.autoneg = AUTONEG_ENABLE;
14571 	tp->link_config.active_speed = SPEED_UNKNOWN;
14572 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14573 
14574 	tp->old_link = -1;
14575 }
14576 
14577 static int tg3_phy_probe(struct tg3 *tp)
14578 {
14579 	u32 hw_phy_id_1, hw_phy_id_2;
14580 	u32 hw_phy_id, hw_phy_id_masked;
14581 	int err;
14582 
14583 	/* flow control autonegotiation is default behavior */
14584 	tg3_flag_set(tp, PAUSE_AUTONEG);
14585 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14586 
14587 	if (tg3_flag(tp, ENABLE_APE)) {
14588 		switch (tp->pci_fn) {
14589 		case 0:
14590 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14591 			break;
14592 		case 1:
14593 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14594 			break;
14595 		case 2:
14596 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14597 			break;
14598 		case 3:
14599 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14600 			break;
14601 		}
14602 	}
14603 
14604 	if (tg3_flag(tp, USE_PHYLIB))
14605 		return tg3_phy_init(tp);
14606 
14607 	/* Reading the PHY ID register can conflict with ASF
14608 	 * firmware access to the PHY hardware.
14609 	 */
14610 	err = 0;
14611 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14612 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14613 	} else {
14614 		/* Now read the physical PHY_ID from the chip and verify
14615 		 * that it is sane.  If it doesn't look good, we fall back
14616 		 * to either the hard-coded table-based PHY_ID or, failing
14617 		 * that, the value found in the eeprom area.
14618 		 */
14619 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14620 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14621 
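		/* Pack the two MII ID words into the driver's internal
		 * PHY ID format, as used by the TG3_PHY_ID_* constants.
		 */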
14622 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14623 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14624 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14625 
14626 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14627 	}
14628 
14629 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14630 		tp->phy_id = hw_phy_id;
14631 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14632 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14633 		else
14634 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14635 	} else {
14636 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
14637 			/* Do nothing, phy ID already set up in
14638 			 * tg3_get_eeprom_hw_cfg().
14639 			 */
14640 		} else {
14641 			struct subsys_tbl_ent *p;
14642 
14643 			/* No eeprom signature?  Try the hardcoded
14644 			 * subsys device table.
14645 			 */
14646 			p = tg3_lookup_by_subsys(tp);
14647 			if (p) {
14648 				tp->phy_id = p->phy_id;
14649 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
14650 				/* So far we have seen the IDs 0xbc050cd0,
14651 				 * 0xbc050f80 and 0xbc050c30 on devices
14652 				 * connected to a BCM4785, and there are
14653 				 * probably more.  For now, just assume that
14654 				 * the phy is supported when it is connected
14655 				 * to an SSB core.
14656 				 */
14657 				return -ENODEV;
14658 			}
14659 
14660 			if (!tp->phy_id ||
14661 			    tp->phy_id == TG3_PHY_ID_BCM8002)
14662 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14663 		}
14664 	}
14665 
14666 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14667 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14668 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
14669 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
14670 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
14671 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14672 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
14673 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
14674 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
14675 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14676 
14677 	tg3_phy_init_link_config(tp);
14678 
14679 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14680 	    !tg3_flag(tp, ENABLE_APE) &&
14681 	    !tg3_flag(tp, ENABLE_ASF)) {
14682 		u32 bmsr, dummy;
14683 
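		/* BMSR latches link-down events; read it twice so the
		 * second read reflects the current link state.
		 */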
14684 		tg3_readphy(tp, MII_BMSR, &bmsr);
14685 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14686 		    (bmsr & BMSR_LSTATUS))
14687 			goto skip_phy_reset;
14688 
14689 		err = tg3_phy_reset(tp);
14690 		if (err)
14691 			return err;
14692 
14693 		tg3_phy_set_wirespeed(tp);
14694 
14695 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14696 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14697 					    tp->link_config.flowctrl);
14698 
14699 			tg3_writephy(tp, MII_BMCR,
14700 				     BMCR_ANENABLE | BMCR_ANRESTART);
14701 		}
14702 	}
14703 
14704 skip_phy_reset:
14705 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14706 		err = tg3_init_5401phy_dsp(tp);
14707 		if (err)
14708 			return err;
14709 
14710 		err = tg3_init_5401phy_dsp(tp);
14711 	}
14712 
14713 	return err;
14714 }
14715 
14716 static void tg3_read_vpd(struct tg3 *tp)
14717 {
14718 	u8 *vpd_data;
14719 	unsigned int block_end, rosize, len;
14720 	u32 vpdlen;
14721 	int j, i = 0;
14722 
14723 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14724 	if (!vpd_data)
14725 		goto out_no_vpd;
14726 
14727 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14728 	if (i < 0)
14729 		goto out_not_found;
14730 
14731 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14732 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14733 	i += PCI_VPD_LRDT_TAG_SIZE;
14734 
14735 	if (block_end > vpdlen)
14736 		goto out_not_found;
14737 
14738 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14739 				      PCI_VPD_RO_KEYWORD_MFR_ID);
14740 	if (j > 0) {
14741 		len = pci_vpd_info_field_size(&vpd_data[j]);
14742 
14743 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
14744 		if (j + len > block_end || len != 4 ||
14745 		    memcmp(&vpd_data[j], "1028", 4))
14746 			goto partno;
14747 
14748 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14749 					      PCI_VPD_RO_KEYWORD_VENDOR0);
14750 		if (j < 0)
14751 			goto partno;
14752 
14753 		len = pci_vpd_info_field_size(&vpd_data[j]);
14754 
14755 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
14756 		if (j + len > block_end)
14757 			goto partno;
14758 
14759 		memcpy(tp->fw_ver, &vpd_data[j], len);
14760 		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14761 	}
14762 
14763 partno:
14764 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14765 				      PCI_VPD_RO_KEYWORD_PARTNO);
14766 	if (i < 0)
14767 		goto out_not_found;
14768 
14769 	len = pci_vpd_info_field_size(&vpd_data[i]);
14770 
14771 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
14772 	if (len > TG3_BPN_SIZE ||
14773 	    (len + i) > vpdlen)
14774 		goto out_not_found;
14775 
14776 	memcpy(tp->board_part_number, &vpd_data[i], len);
14777 
14778 out_not_found:
14779 	kfree(vpd_data);
14780 	if (tp->board_part_number[0])
14781 		return;
14782 
14783 out_no_vpd:
14784 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
14785 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14786 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14787 			strcpy(tp->board_part_number, "BCM5717");
14788 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14789 			strcpy(tp->board_part_number, "BCM5718");
14790 		else
14791 			goto nomatch;
14792 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
14793 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14794 			strcpy(tp->board_part_number, "BCM57780");
14795 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14796 			strcpy(tp->board_part_number, "BCM57760");
14797 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14798 			strcpy(tp->board_part_number, "BCM57790");
14799 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14800 			strcpy(tp->board_part_number, "BCM57788");
14801 		else
14802 			goto nomatch;
14803 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
14804 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14805 			strcpy(tp->board_part_number, "BCM57761");
14806 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14807 			strcpy(tp->board_part_number, "BCM57765");
14808 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14809 			strcpy(tp->board_part_number, "BCM57781");
14810 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14811 			strcpy(tp->board_part_number, "BCM57785");
14812 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14813 			strcpy(tp->board_part_number, "BCM57791");
14814 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14815 			strcpy(tp->board_part_number, "BCM57795");
14816 		else
14817 			goto nomatch;
14818 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
14819 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14820 			strcpy(tp->board_part_number, "BCM57762");
14821 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14822 			strcpy(tp->board_part_number, "BCM57766");
14823 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14824 			strcpy(tp->board_part_number, "BCM57782");
14825 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14826 			strcpy(tp->board_part_number, "BCM57786");
14827 		else
14828 			goto nomatch;
14829 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14830 		strcpy(tp->board_part_number, "BCM95906");
14831 	} else {
14832 nomatch:
14833 		strcpy(tp->board_part_number, "none");
14834 	}
14835 }
14836 
14837 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14838 {
14839 	u32 val;
14840 
14841 	if (tg3_nvram_read(tp, offset, &val) ||
14842 	    (val & 0xfc000000) != 0x0c000000 ||
14843 	    tg3_nvram_read(tp, offset + 4, &val) ||
14844 	    val != 0)
14845 		return 0;
14846 
14847 	return 1;
14848 }
14849 
14850 static void tg3_read_bc_ver(struct tg3 *tp)
14851 {
14852 	u32 val, offset, start, ver_offset;
14853 	int i, dst_off;
14854 	bool newver = false;
14855 
14856 	if (tg3_nvram_read(tp, 0xc, &offset) ||
14857 	    tg3_nvram_read(tp, 0x4, &start))
14858 		return;
14859 
14860 	offset = tg3_nvram_logical_addr(tp, offset);
14861 
14862 	if (tg3_nvram_read(tp, offset, &val))
14863 		return;
14864 
14865 	if ((val & 0xfc000000) == 0x0c000000) {
14866 		if (tg3_nvram_read(tp, offset + 4, &val))
14867 			return;
14868 
14869 		if (val == 0)
14870 			newver = true;
14871 	}
14872 
14873 	dst_off = strlen(tp->fw_ver);
14874 
14875 	if (newver) {
14876 		if (TG3_VER_SIZE - dst_off < 16 ||
14877 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
14878 			return;
14879 
14880 		offset = offset + ver_offset - start;
14881 		for (i = 0; i < 16; i += 4) {
14882 			__be32 v;
14883 			if (tg3_nvram_read_be32(tp, offset + i, &v))
14884 				return;
14885 
14886 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14887 		}
14888 	} else {
14889 		u32 major, minor;
14890 
14891 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14892 			return;
14893 
14894 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14895 			TG3_NVM_BCVER_MAJSFT;
14896 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14897 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14898 			 "v%d.%02d", major, minor);
14899 	}
14900 }
14901 
14902 static void tg3_read_hwsb_ver(struct tg3 *tp)
14903 {
14904 	u32 val, major, minor;
14905 
14906 	/* Use native endian representation */
14907 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14908 		return;
14909 
14910 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14911 		TG3_NVM_HWSB_CFG1_MAJSFT;
14912 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14913 		TG3_NVM_HWSB_CFG1_MINSFT;
14914 
14915 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14916 }
14917 
14918 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14919 {
14920 	u32 offset, major, minor, build;
14921 
14922 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14923 
14924 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14925 		return;
14926 
14927 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14928 	case TG3_EEPROM_SB_REVISION_0:
14929 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14930 		break;
14931 	case TG3_EEPROM_SB_REVISION_2:
14932 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14933 		break;
14934 	case TG3_EEPROM_SB_REVISION_3:
14935 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14936 		break;
14937 	case TG3_EEPROM_SB_REVISION_4:
14938 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14939 		break;
14940 	case TG3_EEPROM_SB_REVISION_5:
14941 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14942 		break;
14943 	case TG3_EEPROM_SB_REVISION_6:
14944 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14945 		break;
14946 	default:
14947 		return;
14948 	}
14949 
14950 	if (tg3_nvram_read(tp, offset, &val))
14951 		return;
14952 
14953 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14954 		TG3_EEPROM_SB_EDH_BLD_SHFT;
14955 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14956 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
14957 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14958 
14959 	if (minor > 99 || build > 26)
14960 		return;
14961 
14962 	offset = strlen(tp->fw_ver);
14963 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14964 		 " v%d.%02d", major, minor);
14965 
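	/* Builds 1 through 26 are encoded as a single suffix
	 * letter, 'a' through 'z'.
	 */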
14966 	if (build > 0) {
14967 		offset = strlen(tp->fw_ver);
14968 		if (offset < TG3_VER_SIZE - 1)
14969 			tp->fw_ver[offset] = 'a' + build - 1;
14970 	}
14971 }
14972 
14973 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14974 {
14975 	u32 val, offset, start;
14976 	int i, vlen;
14977 
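	/* Scan the NVRAM directory for an ASF initialization entry,
	 * which points at the management firmware image.
	 */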
14978 	for (offset = TG3_NVM_DIR_START;
14979 	     offset < TG3_NVM_DIR_END;
14980 	     offset += TG3_NVM_DIRENT_SIZE) {
14981 		if (tg3_nvram_read(tp, offset, &val))
14982 			return;
14983 
14984 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14985 			break;
14986 	}
14987 
14988 	if (offset == TG3_NVM_DIR_END)
14989 		return;
14990 
14991 	if (!tg3_flag(tp, 5705_PLUS))
14992 		start = 0x08000000;
14993 	else if (tg3_nvram_read(tp, offset - 4, &start))
14994 		return;
14995 
14996 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
14997 	    !tg3_fw_img_is_valid(tp, offset) ||
14998 	    tg3_nvram_read(tp, offset + 8, &val))
14999 		return;
15000 
15001 	offset += val - start;
15002 
15003 	vlen = strlen(tp->fw_ver);
15004 
15005 	tp->fw_ver[vlen++] = ',';
15006 	tp->fw_ver[vlen++] = ' ';
15007 
15008 	for (i = 0; i < 4; i++) {
15009 		__be32 v;
15010 		if (tg3_nvram_read_be32(tp, offset, &v))
15011 			return;
15012 
15013 		offset += sizeof(v);
15014 
15015 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15016 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15017 			break;
15018 		}
15019 
15020 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15021 		vlen += sizeof(v);
15022 	}
15023 }
15024 
15025 static void tg3_probe_ncsi(struct tg3 *tp)
15026 {
15027 	u32 apedata;
15028 
15029 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15030 	if (apedata != APE_SEG_SIG_MAGIC)
15031 		return;
15032 
15033 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15034 	if (!(apedata & APE_FW_STATUS_READY))
15035 		return;
15036 
15037 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15038 		tg3_flag_set(tp, APE_HAS_NCSI);
15039 }
15040 
15041 static void tg3_read_dash_ver(struct tg3 *tp)
15042 {
15043 	int vlen;
15044 	u32 apedata;
15045 	char *fwtype;
15046 
15047 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15048 
15049 	if (tg3_flag(tp, APE_HAS_NCSI))
15050 		fwtype = "NCSI";
15051 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15052 		fwtype = "SMASH";
15053 	else
15054 		fwtype = "DASH";
15055 
15056 	vlen = strlen(tp->fw_ver);
15057 
15058 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15059 		 fwtype,
15060 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15061 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15062 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15063 		 (apedata & APE_FW_VERSION_BLDMSK));
15064 }
15065 
15066 static void tg3_read_otp_ver(struct tg3 *tp)
15067 {
15068 	u32 val, val2;
15069 
15070 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15071 		return;
15072 
15073 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15074 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15075 	    TG3_OTP_MAGIC0_VALID(val)) {
15076 		u64 val64 = (u64) val << 32 | val2;
15077 		u32 ver = 0;
15078 		int i, vlen;
15079 
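		/* Walk the low-order bytes of the 64-bit OTP word; the
		 * last non-zero byte before the terminator is taken as
		 * the version number.
		 */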
15080 		for (i = 0; i < 7; i++) {
15081 			if ((val64 & 0xff) == 0)
15082 				break;
15083 			ver = val64 & 0xff;
15084 			val64 >>= 8;
15085 		}
15086 		vlen = strlen(tp->fw_ver);
15087 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15088 	}
15089 }
15090 
15091 static void tg3_read_fw_ver(struct tg3 *tp)
15092 {
15093 	u32 val;
15094 	bool vpd_vers = false;
15095 
15096 	if (tp->fw_ver[0] != 0)
15097 		vpd_vers = true;
15098 
15099 	if (tg3_flag(tp, NO_NVRAM)) {
15100 		strcat(tp->fw_ver, "sb");
15101 		tg3_read_otp_ver(tp);
15102 		return;
15103 	}
15104 
15105 	if (tg3_nvram_read(tp, 0, &val))
15106 		return;
15107 
15108 	if (val == TG3_EEPROM_MAGIC)
15109 		tg3_read_bc_ver(tp);
15110 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15111 		tg3_read_sb_ver(tp, val);
15112 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15113 		tg3_read_hwsb_ver(tp);
15114 
15115 	if (tg3_flag(tp, ENABLE_ASF)) {
15116 		if (tg3_flag(tp, ENABLE_APE)) {
15117 			tg3_probe_ncsi(tp);
15118 			if (!vpd_vers)
15119 				tg3_read_dash_ver(tp);
15120 		} else if (!vpd_vers) {
15121 			tg3_read_mgmtfw_ver(tp);
15122 		}
15123 	}
15124 
15125 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15126 }
15127 
15128 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15129 {
15130 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15131 		return TG3_RX_RET_MAX_SIZE_5717;
15132 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15133 		return TG3_RX_RET_MAX_SIZE_5700;
15134 	else
15135 		return TG3_RX_RET_MAX_SIZE_5705;
15136 }
15137 
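/* Host bridges known to reorder posted writes; see the
 * MBOX_WRITE_REORDER handling in tg3_get_invariants().
 */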
15138 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15139 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15140 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15141 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15142 	{ },
15143 };
15144 
15145 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15146 {
15147 	struct pci_dev *peer;
15148 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15149 
15150 	for (func = 0; func < 8; func++) {
15151 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15152 		if (peer && peer != tp->pdev)
15153 			break;
15154 		pci_dev_put(peer);
15155 	}
15156 	/* 5704 can be configured in single-port mode; set peer to
15157 	 * tp->pdev in that case.
15158 	 */
15159 	if (!peer)
15160 		return tp->pdev;
15163 
15164 	/*
15165 	 * We don't need to keep the refcount elevated; there's no way
15166 	 * to remove one half of this device without removing the other.
15167 	 */
15168 	pci_dev_put(peer);
15169 
15170 	return peer;
15171 }
15172 
15173 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15174 {
15175 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15176 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15177 		u32 reg;
15178 
15179 		/* All devices that use the alternate
15180 		 * ASIC REV location have a CPMU.
15181 		 */
15182 		tg3_flag_set(tp, CPMU_PRESENT);
15183 
15184 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15185 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15186 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15187 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15188 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15189 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15190 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15191 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15192 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15193 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15194 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15195 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15196 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15197 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15198 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15199 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15200 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15201 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15202 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15203 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15204 		else
15205 			reg = TG3PCI_PRODID_ASICREV;
15206 
15207 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15208 	}
15209 
15210 	/* Wrong chip ID in 5752 A0. This code can be removed later
15211 	 * as A0 is not in production.
15212 	 */
15213 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15214 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15215 
15216 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15217 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15218 
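	/* Establish the inclusive chip-family flags.  Note the
	 * containment: 5717_PLUS implies 57765_PLUS, which implies
	 * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS.
	 */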
15219 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15220 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15221 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15222 		tg3_flag_set(tp, 5717_PLUS);
15223 
15224 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15225 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15226 		tg3_flag_set(tp, 57765_CLASS);
15227 
15228 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15229 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15230 		tg3_flag_set(tp, 57765_PLUS);
15231 
15232 	/* Intentionally exclude ASIC_REV_5906 */
15233 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15234 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15235 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15236 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15237 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15238 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15239 	    tg3_flag(tp, 57765_PLUS))
15240 		tg3_flag_set(tp, 5755_PLUS);
15241 
15242 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15243 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15244 		tg3_flag_set(tp, 5780_CLASS);
15245 
15246 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15247 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15248 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15249 	    tg3_flag(tp, 5755_PLUS) ||
15250 	    tg3_flag(tp, 5780_CLASS))
15251 		tg3_flag_set(tp, 5750_PLUS);
15252 
15253 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15254 	    tg3_flag(tp, 5750_PLUS))
15255 		tg3_flag_set(tp, 5705_PLUS);
15256 }
15257 
15258 static bool tg3_10_100_only_device(struct tg3 *tp,
15259 				   const struct pci_device_id *ent)
15260 {
15261 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15262 
15263 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15264 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15265 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15266 		return true;
15267 
15268 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15269 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15270 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15271 				return true;
15272 		} else {
15273 			return true;
15274 		}
15275 	}
15276 
15277 	return false;
15278 }
15279 
15280 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15281 {
15282 	u32 misc_ctrl_reg;
15283 	u32 pci_state_reg, grc_misc_cfg;
15284 	u32 val;
15285 	u16 pci_cmd;
15286 	int err;
15287 
15288 	/* Force memory write invalidate off.  If we leave it on,
15289 	 * then on 5700_BX chips we have to enable a workaround.
15290 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15291 	 * to match the cacheline size.  The Broadcom driver has this
15292 	 * workaround but turns MWI off at all times, so it never uses
15293 	 * it.  This seems to suggest that the workaround is insufficient.
15294 	 */
15295 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15296 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15297 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15298 
15299 	/* Important! -- Make sure register accesses are byteswapped
15300 	 * correctly.  Also, for those chips that require it, make
15301 	 * sure that indirect register accesses are enabled before
15302 	 * the first operation.
15303 	 */
15304 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15305 			      &misc_ctrl_reg);
15306 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15307 			       MISC_HOST_CTRL_CHIPREV);
15308 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15309 			       tp->misc_host_ctrl);
15310 
15311 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15312 
15313 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15314 	 * we need to disable memory and use configuration cycles
15315 	 * only to access all registers. The 5702/03 chips
15316 	 * can mistakenly decode the special cycles from the
15317 	 * ICH chipsets as memory write cycles, causing corruption
15318 	 * of register and memory space. Only certain ICH bridges
15319 	 * will drive special cycles with non-zero data during the
15320 	 * address phase which can fall within the 5703's address
15321 	 * range. This is not an ICH bug as the PCI spec allows
15322 	 * non-zero address during special cycles. However, only
15323 	 * these ICH bridges are known to drive non-zero addresses
15324 	 * during special cycles.
15325 	 *
15326 	 * Since special cycles do not cross PCI bridges, we only
15327 	 * enable this workaround if the 5703 is on the secondary
15328 	 * bus of these ICH bridges.
15329 	 */
15330 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15331 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15332 		static struct tg3_dev_id {
15333 			u32	vendor;
15334 			u32	device;
15335 			u32	rev;
15336 		} ich_chipsets[] = {
15337 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15338 			  PCI_ANY_ID },
15339 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15340 			  PCI_ANY_ID },
15341 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15342 			  0xa },
15343 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15344 			  PCI_ANY_ID },
15345 			{ },
15346 		};
15347 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15348 		struct pci_dev *bridge = NULL;
15349 
15350 		while (pci_id->vendor != 0) {
15351 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15352 						bridge);
15353 			if (!bridge) {
15354 				pci_id++;
15355 				continue;
15356 			}
15357 			if (pci_id->rev != PCI_ANY_ID) {
15358 				if (bridge->revision > pci_id->rev)
15359 					continue;
15360 			}
15361 			if (bridge->subordinate &&
15362 			    (bridge->subordinate->number ==
15363 			     tp->pdev->bus->number)) {
15364 				tg3_flag_set(tp, ICH_WORKAROUND);
15365 				pci_dev_put(bridge);
15366 				break;
15367 			}
15368 		}
15369 	}
15370 
15371 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15372 		static struct tg3_dev_id {
15373 			u32	vendor;
15374 			u32	device;
15375 		} bridge_chipsets[] = {
15376 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15377 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15378 			{ },
15379 		};
15380 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15381 		struct pci_dev *bridge = NULL;
15382 
15383 		while (pci_id->vendor != 0) {
15384 			bridge = pci_get_device(pci_id->vendor,
15385 						pci_id->device,
15386 						bridge);
15387 			if (!bridge) {
15388 				pci_id++;
15389 				continue;
15390 			}
15391 			if (bridge->subordinate &&
15392 			    (bridge->subordinate->number <=
15393 			     tp->pdev->bus->number) &&
15394 			    (bridge->subordinate->busn_res.end >=
15395 			     tp->pdev->bus->number)) {
15396 				tg3_flag_set(tp, 5701_DMA_BUG);
15397 				pci_dev_put(bridge);
15398 				break;
15399 			}
15400 		}
15401 	}
15402 
15403 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
15404 	 * DMA addresses > 40-bit. This bridge may have other additional
15405 	 * 57xx devices behind it in some 4-port NIC designs for example.
15406 	 * Any tg3 device found behind the bridge will also need the 40-bit
15407 	 * DMA workaround.
15408 	 */
15409 	if (tg3_flag(tp, 5780_CLASS)) {
15410 		tg3_flag_set(tp, 40BIT_DMA_BUG);
15411 		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15412 	} else {
15413 		struct pci_dev *bridge = NULL;
15414 
15415 		do {
15416 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15417 						PCI_DEVICE_ID_SERVERWORKS_EPB,
15418 						bridge);
15419 			if (bridge && bridge->subordinate &&
15420 			    (bridge->subordinate->number <=
15421 			     tp->pdev->bus->number) &&
15422 			    (bridge->subordinate->busn_res.end >=
15423 			     tp->pdev->bus->number)) {
15424 				tg3_flag_set(tp, 40BIT_DMA_BUG);
15425 				pci_dev_put(bridge);
15426 				break;
15427 			}
15428 		} while (bridge);
15429 	}
15430 
15431 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15432 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15433 		tp->pdev_peer = tg3_find_peer(tp);
15434 
15435 	/* Determine TSO capabilities */
15436 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15437 		; /* Do nothing. HW bug. */
15438 	else if (tg3_flag(tp, 57765_PLUS))
15439 		tg3_flag_set(tp, HW_TSO_3);
15440 	else if (tg3_flag(tp, 5755_PLUS) ||
15441 		 tg3_asic_rev(tp) == ASIC_REV_5906)
15442 		tg3_flag_set(tp, HW_TSO_2);
15443 	else if (tg3_flag(tp, 5750_PLUS)) {
15444 		tg3_flag_set(tp, HW_TSO_1);
15445 		tg3_flag_set(tp, TSO_BUG);
15446 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15447 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15448 			tg3_flag_clear(tp, TSO_BUG);
15449 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15450 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
15451 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15452 		tg3_flag_set(tp, FW_TSO);
15453 		tg3_flag_set(tp, TSO_BUG);
15454 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
15455 			tp->fw_needed = FIRMWARE_TG3TSO5;
15456 		else
15457 			tp->fw_needed = FIRMWARE_TG3TSO;
15458 	}
15459 
15460 	/* Selectively allow TSO based on operating conditions */
15461 	if (tg3_flag(tp, HW_TSO_1) ||
15462 	    tg3_flag(tp, HW_TSO_2) ||
15463 	    tg3_flag(tp, HW_TSO_3) ||
15464 	    tg3_flag(tp, FW_TSO)) {
15465 		/* For firmware TSO, assume ASF is disabled.
15466 		 * We'll disable TSO later if we discover ASF
15467 		 * is enabled in tg3_get_eeprom_hw_cfg().
15468 		 */
15469 		tg3_flag_set(tp, TSO_CAPABLE);
15470 	} else {
15471 		tg3_flag_clear(tp, TSO_CAPABLE);
15472 		tg3_flag_clear(tp, TSO_BUG);
15473 		tp->fw_needed = NULL;
15474 	}
15475 
15476 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15477 		tp->fw_needed = FIRMWARE_TG3;
15478 
15479 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
15480 		tp->fw_needed = FIRMWARE_TG357766;
15481 
15482 	tp->irq_max = 1;
15483 
15484 	if (tg3_flag(tp, 5750_PLUS)) {
15485 		tg3_flag_set(tp, SUPPORT_MSI);
15486 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15487 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15488 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15489 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15490 		     tp->pdev_peer == tp->pdev))
15491 			tg3_flag_clear(tp, SUPPORT_MSI);
15492 
15493 		if (tg3_flag(tp, 5755_PLUS) ||
15494 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
15495 			tg3_flag_set(tp, 1SHOT_MSI);
15496 		}
15497 
15498 		if (tg3_flag(tp, 57765_PLUS)) {
15499 			tg3_flag_set(tp, SUPPORT_MSIX);
15500 			tp->irq_max = TG3_IRQ_MAX_VECS;
15501 		}
15502 	}
15503 
15504 	tp->txq_max = 1;
15505 	tp->rxq_max = 1;
15506 	if (tp->irq_max > 1) {
15507 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15508 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15509 
15510 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15511 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15512 			tp->txq_max = tp->irq_max - 1;
15513 	}
15514 
15515 	if (tg3_flag(tp, 5755_PLUS) ||
15516 	    tg3_asic_rev(tp) == ASIC_REV_5906)
15517 		tg3_flag_set(tp, SHORT_DMA_BUG);
15518 
15519 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
15520 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15521 
15522 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15523 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15524 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
15525 	    tg3_asic_rev(tp) == ASIC_REV_5762)
15526 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
15527 
15528 	if (tg3_flag(tp, 57765_PLUS) &&
15529 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15530 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15531 
15532 	if (!tg3_flag(tp, 5705_PLUS) ||
15533 	    tg3_flag(tp, 5780_CLASS) ||
15534 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
15535 		tg3_flag_set(tp, JUMBO_CAPABLE);
15536 
15537 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15538 			      &pci_state_reg);
15539 
15540 	if (pci_is_pcie(tp->pdev)) {
15541 		u16 lnkctl;
15542 
15543 		tg3_flag_set(tp, PCI_EXPRESS);
15544 
15545 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15546 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15547 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15548 				tg3_flag_clear(tp, HW_TSO_2);
15549 				tg3_flag_clear(tp, TSO_CAPABLE);
15550 			}
15551 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15552 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15553 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15554 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15555 				tg3_flag_set(tp, CLKREQ_BUG);
15556 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15557 			tg3_flag_set(tp, L1PLLPD_EN);
15558 		}
15559 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15560 		/* BCM5785 devices are effectively PCIe devices, and should
15561 		 * follow PCIe codepaths, but do not have a PCIe capabilities
15562 		 * section.
15563 		 */
15564 		tg3_flag_set(tp, PCI_EXPRESS);
15565 	} else if (!tg3_flag(tp, 5705_PLUS) ||
15566 		   tg3_flag(tp, 5780_CLASS)) {
15567 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15568 		if (!tp->pcix_cap) {
15569 			dev_err(&tp->pdev->dev,
15570 				"Cannot find PCI-X capability, aborting\n");
15571 			return -EIO;
15572 		}
15573 
15574 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15575 			tg3_flag_set(tp, PCIX_MODE);
15576 	}
15577 
15578 	/* If we have an AMD 762 or VIA K8T800 chipset, write
15579 	 * reordering of mailbox register writes by the host
15580 	 * controller can cause major trouble.  We read back from
15581 	 * every mailbox register write to force the writes to be
15582 	 * posted to the chip in order.
15583 	 */
15584 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
15585 	    !tg3_flag(tp, PCI_EXPRESS))
15586 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
15587 
15588 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15589 			     &tp->pci_cacheline_sz);
15590 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15591 			     &tp->pci_lat_timer);
15592 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15593 	    tp->pci_lat_timer < 64) {
15594 		tp->pci_lat_timer = 64;
15595 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15596 				      tp->pci_lat_timer);
15597 	}
15598 
15599 	/* Important! -- It is critical that the PCI-X hw workaround
15600 	 * situation is decided before the first MMIO register access.
15601 	 */
15602 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
15603 		/* 5700 BX chips need to have their TX producer index
15604 		 * mailboxes written twice to work around a bug.
15605 		 */
15606 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
15607 
15608 		/* If we are in PCI-X mode, enable register write workaround.
15609 		 *
15610 		 * The workaround is to use indirect register accesses
15611 		 * for all chip writes not to mailbox registers.
15612 		 */
15613 		if (tg3_flag(tp, PCIX_MODE)) {
15614 			u32 pm_reg;
15615 
15616 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15617 
15618 			/* The chip can have its power management PCI config
15619 			 * space registers clobbered due to this bug.
15620 			 * So explicitly force the chip into D0 here.
15621 			 */
15622 			pci_read_config_dword(tp->pdev,
15623 					      tp->pm_cap + PCI_PM_CTRL,
15624 					      &pm_reg);
15625 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15626 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15627 			pci_write_config_dword(tp->pdev,
15628 					       tp->pm_cap + PCI_PM_CTRL,
15629 					       pm_reg);
15630 
15631 			/* Also, force SERR#/PERR# in PCI command. */
15632 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15633 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15634 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15635 		}
15636 	}
15637 
15638 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15639 		tg3_flag_set(tp, PCI_HIGH_SPEED);
15640 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15641 		tg3_flag_set(tp, PCI_32BIT);
15642 
15643 	/* Chip-specific fixup from Broadcom driver */
15644 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
15645 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15646 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15647 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15648 	}
15649 
15650 	/* Default fast path register access methods */
15651 	tp->read32 = tg3_read32;
15652 	tp->write32 = tg3_write32;
15653 	tp->read32_mbox = tg3_read32;
15654 	tp->write32_mbox = tg3_write32;
15655 	tp->write32_tx_mbox = tg3_write32;
15656 	tp->write32_rx_mbox = tg3_write32;
15657 
15658 	/* Various workaround register access methods */
15659 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15660 		tp->write32 = tg3_write_indirect_reg32;
15661 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
15662 		 (tg3_flag(tp, PCI_EXPRESS) &&
15663 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
15664 		/*
15665 		 * Back-to-back register writes can cause problems on these
15666 		 * chips; the workaround is to read back all reg writes
15667 		 * except those to mailbox regs.
15668 		 *
15669 		 * See tg3_write_indirect_reg32().
15670 		 */
15671 		tp->write32 = tg3_write_flush_reg32;
15672 	}
15673 
15674 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15675 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
15676 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
15677 			tp->write32_rx_mbox = tg3_write_flush_reg32;
15678 	}
15679 
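	/* With the ICH workaround active, all register and mailbox
	 * accesses must go through PCI config space.  Unmap the MMIO
	 * registers and disable memory decoding entirely.
	 */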
15680 	if (tg3_flag(tp, ICH_WORKAROUND)) {
15681 		tp->read32 = tg3_read_indirect_reg32;
15682 		tp->write32 = tg3_write_indirect_reg32;
15683 		tp->read32_mbox = tg3_read_indirect_mbox;
15684 		tp->write32_mbox = tg3_write_indirect_mbox;
15685 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
15686 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
15687 
15688 		iounmap(tp->regs);
15689 		tp->regs = NULL;
15690 
15691 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15692 		pci_cmd &= ~PCI_COMMAND_MEMORY;
15693 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15694 	}
15695 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15696 		tp->read32_mbox = tg3_read32_mbox_5906;
15697 		tp->write32_mbox = tg3_write32_mbox_5906;
15698 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
15699 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
15700 	}
15701 
15702 	if (tp->write32 == tg3_write_indirect_reg32 ||
15703 	    (tg3_flag(tp, PCIX_MODE) &&
15704 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15705 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
15706 		tg3_flag_set(tp, SRAM_USE_CONFIG);
15707 
15708 	/* The memory arbiter has to be enabled in order for SRAM accesses
15709 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
15710 	 * sure it is enabled, but other entities such as system netboot
15711 	 * code might disable it.
15712 	 */
15713 	val = tr32(MEMARB_MODE);
15714 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15715 
15716 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15717 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15718 	    tg3_flag(tp, 5780_CLASS)) {
15719 		if (tg3_flag(tp, PCIX_MODE)) {
15720 			pci_read_config_dword(tp->pdev,
15721 					      tp->pcix_cap + PCI_X_STATUS,
15722 					      &val);
15723 			tp->pci_fn = val & 0x7;
15724 		}
15725 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15726 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
15727 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
15728 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15729 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
15730 			val = tr32(TG3_CPMU_STATUS);
15731 
15732 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
15733 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
15734 		else
15735 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15736 				     TG3_CPMU_STATUS_FSHFT_5719;
15737 	}
15738 
15739 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
15740 		tp->write32_tx_mbox = tg3_write_flush_reg32;
15741 		tp->write32_rx_mbox = tg3_write_flush_reg32;
15742 	}
15743 
15744 	/* Get eeprom hw config before calling tg3_set_power_state().
15745 	 * In particular, the TG3_FLAG_IS_NIC flag must be
15746 	 * determined before calling tg3_set_power_state() so that
15747 	 * we know whether or not to switch out of Vaux power.
15748 	 * When the flag is set, it means that GPIO1 is used for eeprom
15749 	 * write protect and also implies that it is a LOM where GPIOs
15750 	 * are not used to switch power.
15751 	 */
15752 	tg3_get_eeprom_hw_cfg(tp);
15753 
15754 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15755 		tg3_flag_clear(tp, TSO_CAPABLE);
15756 		tg3_flag_clear(tp, TSO_BUG);
15757 		tp->fw_needed = NULL;
15758 	}
15759 
15760 	if (tg3_flag(tp, ENABLE_APE)) {
15761 		/* Allow reads and writes to the
15762 		 * APE register and memory space.
15763 		 */
15764 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15765 				 PCISTATE_ALLOW_APE_SHMEM_WR |
15766 				 PCISTATE_ALLOW_APE_PSPACE_WR;
15767 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15768 				       pci_state_reg);
15769 
15770 		tg3_ape_lock_init(tp);
15771 	}
15772 
15773 	/* Set up tp->grc_local_ctrl before calling
15774 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15775 	 * will bring 5700's external PHY out of reset.
15776 	 * It is also used as eeprom write protect on LOMs.
15777 	 */
15778 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15779 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15780 	    tg3_flag(tp, EEPROM_WRITE_PROT))
15781 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15782 				       GRC_LCLCTRL_GPIO_OUTPUT1);
15783 	/* Unused GPIO3 must be driven as output on 5752 because there
15784 	 * are no pull-up resistors on unused GPIO pins.
15785 	 */
15786 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
15787 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15788 
15789 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15790 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15791 	    tg3_flag(tp, 57765_CLASS))
15792 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15793 
15794 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15795 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15796 		/* Turn off the debug UART. */
15797 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15798 		if (tg3_flag(tp, IS_NIC))
15799 			/* Keep VMain power. */
15800 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15801 					      GRC_LCLCTRL_GPIO_OUTPUT0;
15802 	}
15803 
15804 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
15805 		tp->grc_local_ctrl |=
15806 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
15807 
15808 	/* Switch out of Vaux if it is a NIC */
15809 	tg3_pwrsrc_switch_to_vmain(tp);
15810 
15811 	/* Derive initial jumbo mode from MTU assigned in
15812 	 * ether_setup() via the alloc_etherdev() call
15813 	 */
15814 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15815 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
15816 
15817 	/* Determine WakeOnLan speed to use. */
15818 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15819 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15820 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15821 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
15822 		tg3_flag_clear(tp, WOL_SPEED_100MB);
15823 	} else {
15824 		tg3_flag_set(tp, WOL_SPEED_100MB);
15825 	}
15826 
15827 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
15828 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
15829 
15830 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
15831 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15832 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15833 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
15834 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
15835 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15836 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15837 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15838 
15839 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
15840 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
15841 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15842 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
15843 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15844 
15845 	if (tg3_flag(tp, 5705_PLUS) &&
15846 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15847 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
15848 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
15849 	    !tg3_flag(tp, 57765_PLUS)) {
15850 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15851 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15852 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15853 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
15854 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15855 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15856 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15857 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15858 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15859 		} else
15860 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15861 	}
15862 
15863 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15864 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
15865 		tp->phy_otp = tg3_read_otp_phycfg(tp);
15866 		if (tp->phy_otp == 0)
15867 			tp->phy_otp = TG3_OTP_DEFAULT;
15868 	}
15869 
15870 	if (tg3_flag(tp, CPMU_PRESENT))
15871 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15872 	else
15873 		tp->mi_mode = MAC_MI_MODE_BASE;
15874 
15875 	tp->coalesce_mode = 0;
15876 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
15877 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
15878 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15879 
15880 	/* Set these bits to enable statistics workaround. */
15881 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15882 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
15883 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
15884 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15885 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15886 	}
15887 
15888 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
15889 	    tg3_asic_rev(tp) == ASIC_REV_57780)
15890 		tg3_flag_set(tp, USE_PHYLIB);
15891 
15892 	err = tg3_mdio_init(tp);
15893 	if (err)
15894 		return err;
15895 
15896 	/* Initialize data/descriptor byte/word swapping. */
15897 	val = tr32(GRC_MODE);
15898 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15899 	    tg3_asic_rev(tp) == ASIC_REV_5762)
15900 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15901 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
15902 			GRC_MODE_B2HRX_ENABLE |
15903 			GRC_MODE_HTX2B_ENABLE |
15904 			GRC_MODE_HOST_STACKUP);
15905 	else
15906 		val &= GRC_MODE_HOST_STACKUP;
15907 
15908 	tw32(GRC_MODE, val | tp->grc_mode);
15909 
15910 	tg3_switch_clocks(tp);
15911 
15912 	/* Clear this out for sanity. */
15913 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15914 
15915 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15916 			      &pci_state_reg);
15917 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15918 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15919 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
15920 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
15921 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
15922 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
15923 			void __iomem *sram_base;
15924 
15925 			/* Write some dummy words into the SRAM status block
15926 			 * area and see if they read back correctly.  If the
15927 			 * readback is bad, force-enable the PCIX workaround.
15928 			 */
15929 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15930 
15931 			writel(0x00000000, sram_base);
15932 			writel(0x00000000, sram_base + 4);
15933 			writel(0xffffffff, sram_base + 4);
15934 			if (readl(sram_base) != 0x00000000)
15935 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15936 		}
15937 	}
15938 
15939 	udelay(50);
15940 	tg3_nvram_init(tp);
15941 
15942 	/* If the device has an NVRAM, no need to load patch firmware */
15943 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15944 	    !tg3_flag(tp, NO_NVRAM))
15945 		tp->fw_needed = NULL;
15946 
15947 	grc_misc_cfg = tr32(GRC_MISC_CFG);
15948 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15949 
15950 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
15951 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15952 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15953 		tg3_flag_set(tp, IS_5788);
15954 
15955 	if (!tg3_flag(tp, IS_5788) &&
15956 	    tg3_asic_rev(tp) != ASIC_REV_5700)
15957 		tg3_flag_set(tp, TAGGED_STATUS);
15958 	if (tg3_flag(tp, TAGGED_STATUS)) {
15959 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15960 				      HOSTCC_MODE_CLRTICK_TXBD);
15961 
15962 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15963 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15964 				       tp->misc_host_ctrl);
15965 	}
15966 
15967 	/* Preserve the APE MAC_MODE bits */
15968 	if (tg3_flag(tp, ENABLE_APE))
15969 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15970 	else
15971 		tp->mac_mode = 0;
15972 
15973 	if (tg3_10_100_only_device(tp, ent))
15974 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15975 
15976 	err = tg3_phy_probe(tp);
15977 	if (err) {
15978 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15979 		/* ... but do not return immediately ... */
15980 		tg3_mdio_fini(tp);
15981 	}
15982 
15983 	tg3_read_vpd(tp);
15984 	tg3_read_fw_ver(tp);
15985 
15986 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15987 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15988 	} else {
15989 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
15990 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15991 		else
15992 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15993 	}
15994 
15995 	/* 5700 {AX,BX} chips have a broken status block link
15996 	 * change bit implementation, so we must use the
15997 	 * status register in those cases.
15998 	 */
15999 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16000 		tg3_flag_set(tp, USE_LINKCHG_REG);
16001 	else
16002 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16003 
16004 	/* The led_ctrl is set during tg3_phy_probe; here we might
16005 	 * have to force the link status polling mechanism based
16006 	 * upon subsystem IDs.
16007 	 */
16008 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16009 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16010 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16011 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16012 		tg3_flag_set(tp, USE_LINKCHG_REG);
16013 	}
16014 
16015 	/* For all SERDES we poll the MAC status register. */
16016 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16017 		tg3_flag_set(tp, POLL_SERDES);
16018 	else
16019 		tg3_flag_clear(tp, POLL_SERDES);
16020 
16021 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16022 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16023 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16024 	    tg3_flag(tp, PCIX_MODE)) {
16025 		tp->rx_offset = NET_SKB_PAD;
16026 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16027 		tp->rx_copy_thresh = ~(u16)0;
16028 #endif
16029 	}
16030 
16031 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16032 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16033 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16034 
16035 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16036 
16037 	/* Increment the rx prod index on the rx std ring by at most
16038 	 * 8 for these chips to work around hw errata.
16039 	 */
16040 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16041 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16042 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16043 		tp->rx_std_max_post = 8;
16044 
16045 	if (tg3_flag(tp, ASPM_WORKAROUND))
16046 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16047 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16048 
16049 	return err;
16050 }
16051 
16052 #ifdef CONFIG_SPARC
16053 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16054 {
16055 	struct net_device *dev = tp->dev;
16056 	struct pci_dev *pdev = tp->pdev;
16057 	struct device_node *dp = pci_device_to_OF_node(pdev);
16058 	const unsigned char *addr;
16059 	int len;
16060 
16061 	addr = of_get_property(dp, "local-mac-address", &len);
16062 	if (addr && len == 6) {
16063 		memcpy(dev->dev_addr, addr, 6);
16064 		return 0;
16065 	}
16066 	return -ENODEV;
16067 }
16068 
16069 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16070 {
16071 	struct net_device *dev = tp->dev;
16072 
16073 	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16074 	return 0;
16075 }
16076 #endif
16077 
16078 static int tg3_get_device_address(struct tg3 *tp)
16079 {
16080 	struct net_device *dev = tp->dev;
16081 	u32 hi, lo, mac_offset;
16082 	int addr_ok = 0;
16083 	int err;
16084 
16085 #ifdef CONFIG_SPARC
16086 	if (!tg3_get_macaddr_sparc(tp))
16087 		return 0;
16088 #endif
16089 
16090 	if (tg3_flag(tp, IS_SSB_CORE)) {
16091 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16092 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16093 			return 0;
16094 	}
16095 
16096 	mac_offset = 0x7c;
16097 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16098 	    tg3_flag(tp, 5780_CLASS)) {
16099 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16100 			mac_offset = 0xcc;
16101 		if (tg3_nvram_lock(tp))
16102 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16103 		else
16104 			tg3_nvram_unlock(tp);
16105 	} else if (tg3_flag(tp, 5717_PLUS)) {
16106 		if (tp->pci_fn & 1)
16107 			mac_offset = 0xcc;
16108 		if (tp->pci_fn > 1)
16109 			mac_offset += 0x18c;
16110 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16111 		mac_offset = 0x10;
16112 
16113 	/* First try to get it from MAC address mailbox. */
16114 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
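	/* The bootcode stores 0x484b (ASCII "HK") in the upper half
	 * as a marker that a MAC address has been placed in the mailbox.
	 */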
16115 	if ((hi >> 16) == 0x484b) {
16116 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16117 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16118 
16119 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16120 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16121 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16122 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16123 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16124 
16125 		/* Some old bootcode may report a 0 MAC address in SRAM */
16126 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16127 	}
16128 	if (!addr_ok) {
16129 		/* Next, try NVRAM. */
16130 		if (!tg3_flag(tp, NO_NVRAM) &&
16131 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16132 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16133 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16134 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16135 		}
16136 		/* Finally just fetch it out of the MAC control regs. */
16137 		else {
16138 			hi = tr32(MAC_ADDR_0_HIGH);
16139 			lo = tr32(MAC_ADDR_0_LOW);
16140 
16141 			dev->dev_addr[5] = lo & 0xff;
16142 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16143 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16144 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16145 			dev->dev_addr[1] = hi & 0xff;
16146 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16147 		}
16148 	}
16149 
16150 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16151 #ifdef CONFIG_SPARC
16152 		if (!tg3_get_default_macaddr_sparc(tp))
16153 			return 0;
16154 #endif
16155 		return -EINVAL;
16156 	}
16157 	return 0;
16158 }
16159 
16160 #define BOUNDARY_SINGLE_CACHELINE	1
16161 #define BOUNDARY_MULTI_CACHELINE	2
16162 
16163 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16164 {
16165 	int cacheline_size;
16166 	u8 byte;
16167 	int goal;
16168 
16169 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
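	/* PCI_CACHE_LINE_SIZE is in units of 4-byte words; treat an
	 * unprogrammed (zero) value as the maximum 1024-byte boundary.
	 */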
16170 	if (byte == 0)
16171 		cacheline_size = 1024;
16172 	else
16173 		cacheline_size = (int) byte * 4;
16174 
16175 	/* On 5703 and later chips, the boundary bits have no
16176 	 * effect.
16177 	 */
16178 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16179 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16180 	    !tg3_flag(tp, PCI_EXPRESS))
16181 		goto out;
16182 
16183 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16184 	goal = BOUNDARY_MULTI_CACHELINE;
16185 #else
16186 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16187 	goal = BOUNDARY_SINGLE_CACHELINE;
16188 #else
16189 	goal = 0;
16190 #endif
16191 #endif
16192 
16193 	if (tg3_flag(tp, 57765_PLUS)) {
16194 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16195 		goto out;
16196 	}
16197 
16198 	if (!goal)
16199 		goto out;
16200 
16201 	/* PCI controllers on most RISC systems tend to disconnect
16202 	 * when a device tries to burst across a cache-line boundary.
16203 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16204 	 *
16205 	 * Unfortunately, for PCI-E there are only limited
16206 	 * write-side controls for this, and thus for reads
16207 	 * we will still get the disconnects.  We'll also waste
16208 	 * these PCI cycles for both read and write for chips
16209 	 * other than 5700 and 5701 which do not implement the
16210 	 * boundary bits.
16211 	 */
16212 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16213 		switch (cacheline_size) {
16214 		case 16:
16215 		case 32:
16216 		case 64:
16217 		case 128:
16218 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16219 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16220 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16221 			} else {
16222 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16223 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16224 			}
16225 			break;
16226 
16227 		case 256:
16228 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16229 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16230 			break;
16231 
16232 		default:
16233 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16234 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16235 			break;
16236 		}
16237 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16238 		switch (cacheline_size) {
16239 		case 16:
16240 		case 32:
16241 		case 64:
16242 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16243 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16244 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16245 				break;
16246 			}
16247 			/* fallthrough */
16248 		case 128:
16249 		default:
16250 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16251 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16252 			break;
16253 		}
16254 	} else {
16255 		switch (cacheline_size) {
16256 		case 16:
16257 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16258 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16259 					DMA_RWCTRL_WRITE_BNDRY_16);
16260 				break;
16261 			}
16262 			/* fallthrough */
16263 		case 32:
16264 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16265 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16266 					DMA_RWCTRL_WRITE_BNDRY_32);
16267 				break;
16268 			}
16269 			/* fallthrough */
16270 		case 64:
16271 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16272 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16273 					DMA_RWCTRL_WRITE_BNDRY_64);
16274 				break;
16275 			}
16276 			/* fallthrough */
16277 		case 128:
16278 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16279 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16280 					DMA_RWCTRL_WRITE_BNDRY_128);
16281 				break;
16282 			}
16283 			/* fallthrough */
16284 		case 256:
16285 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16286 				DMA_RWCTRL_WRITE_BNDRY_256);
16287 			break;
16288 		case 512:
16289 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16290 				DMA_RWCTRL_WRITE_BNDRY_512);
16291 			break;
16292 		case 1024:
16293 		default:
16294 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16295 				DMA_RWCTRL_WRITE_BNDRY_1024);
16296 			break;
16297 		}
16298 	}
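	/* Worked example: with 64-byte cache lines on a PCI-X bus and a
	 * BOUNDARY_SINGLE_CACHELINE goal, the switch above still selects
	 * 128-byte read/write boundaries, since 128 bytes is the smallest
	 * boundary the PCI-X encodings here can express.
	 */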
16299 
16300 out:
16301 	return val;
16302 }
16303 
16304 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16305 			   int size, int to_device)
16306 {
16307 	struct tg3_internal_buffer_desc test_desc;
16308 	u32 sram_dma_descs;
16309 	int i, ret;
16310 
16311 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16312 
16313 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16314 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16315 	tw32(RDMAC_STATUS, 0);
16316 	tw32(WDMAC_STATUS, 0);
16317 
16318 	tw32(BUFMGR_MODE, 0);
16319 	tw32(FTQ_RESET, 0);
16320 
16321 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16322 	test_desc.addr_lo = buf_dma & 0xffffffff;
16323 	test_desc.nic_mbuf = 0x00002100;
16324 	test_desc.len = size;
16325 
16326 	/*
16327 	 * HP ZX1 systems saw test failures on 5701 cards running at
16328 	 * 33MHz the *second* time the tg3 driver was loaded after an
16329 	 * initial scan.
16330 	 *
16331 	 * Broadcom tells me:
16332 	 *   ...the DMA engine is connected to the GRC block and a DMA
16333 	 *   reset may affect the GRC block in some unpredictable way...
16334 	 *   The behavior of resets to individual blocks has not been tested.
16335 	 *
16336 	 * Broadcom noted the GRC reset will also reset all sub-components.
16337 	 */
16338 	if (to_device) {
16339 		test_desc.cqid_sqid = (13 << 8) | 2;
16340 
16341 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16342 		udelay(40);
16343 	} else {
16344 		test_desc.cqid_sqid = (16 << 8) | 7;
16345 
16346 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16347 		udelay(40);
16348 	}
16349 	test_desc.flags = 0x00000005;
16350 
16351 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16352 		u32 val;
16353 
16354 		val = *(((u32 *)&test_desc) + i);
16355 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16356 				       sram_dma_descs + (i * sizeof(u32)));
16357 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16358 	}
16359 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16360 
16361 	if (to_device)
16362 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16363 	else
16364 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16365 
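	/* Poll for the descriptor address to come back through the
	 * completion FIFO: 40 tries, 100 usec apart, i.e. ~4 ms total.
	 */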
16366 	ret = -ENODEV;
16367 	for (i = 0; i < 40; i++) {
16368 		u32 val;
16369 
16370 		if (to_device)
16371 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16372 		else
16373 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16374 		if ((val & 0xffff) == sram_dma_descs) {
16375 			ret = 0;
16376 			break;
16377 		}
16378 
16379 		udelay(100);
16380 	}
16381 
16382 	return ret;
16383 }
16384 
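/*
 * Illustrative sketch only (compiled out): the indirect SRAM access
 * pattern used by the descriptor-upload loop in tg3_do_test_dma()
 * above.  The driver's real helpers for this, tg3_read_mem() and
 * tg3_write_mem(), additionally take tp->indirect_lock; the function
 * name here is invented purely for illustration.
 */
#if 0
static void example_sram_write_one(struct tg3 *tp, u32 off, u32 val)
{
	/* Point the config-space memory window at the SRAM offset... */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
	/* ...store the word through the window... */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	/* ...and park the window at zero so later users start clean. */
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
}
#endif
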
16385 #define TEST_BUFFER_SIZE	0x2000
16386 
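/* Hosts whose bridges are known to expose the 5700/5701 write-DMA bug
 * even when the DMA test below passes; see the end of tg3_test_dma().
 * Currently only the Apple UniNorth 1.5 host bridge is listed.
 */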
16387 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16388 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16389 	{ },
16390 };
16391 
16392 static int tg3_test_dma(struct tg3 *tp)
16393 {
16394 	dma_addr_t buf_dma;
16395 	u32 *buf, saved_dma_rwctrl;
16396 	int ret = 0;
16397 
16398 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16399 				 &buf_dma, GFP_KERNEL);
16400 	if (!buf) {
16401 		ret = -ENOMEM;
16402 		goto out_nofree;
16403 	}
16404 
16405 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16406 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
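	/* 0x7 and 0x6 appear to be the PCI bus command encodings for
	 * plain Memory Write and Memory Read respectively.
	 */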
16407 
16408 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16409 
16410 	if (tg3_flag(tp, 57765_PLUS))
16411 		goto out;
16412 
16413 	if (tg3_flag(tp, PCI_EXPRESS)) {
16414 		/* DMA read watermark not used on PCIE */
16415 		tp->dma_rwctrl |= 0x00180000;
16416 	} else if (!tg3_flag(tp, PCIX_MODE)) {
16417 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16418 		    tg3_asic_rev(tp) == ASIC_REV_5750)
16419 			tp->dma_rwctrl |= 0x003f0000;
16420 		else
16421 			tp->dma_rwctrl |= 0x003f000f;
16422 	} else {
16423 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16424 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
16425 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16426 			u32 read_water = 0x7;
16427 
16428 			/* If the 5704 is behind the EPB bridge, we can
16429 			 * do the less restrictive ONE_DMA workaround for
16430 			 * better performance.
16431 			 */
16432 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16433 			    tg3_asic_rev(tp) == ASIC_REV_5704)
16434 				tp->dma_rwctrl |= 0x8000;
16435 			else if (ccval == 0x6 || ccval == 0x7)
16436 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16437 
16438 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
16439 				read_water = 4;
16440 			/* Set bit 23 to enable PCIX hw bug fix */
16441 			tp->dma_rwctrl |=
16442 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16443 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16444 				(1 << 23);
16445 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16446 			/* 5780 always in PCIX mode */
16447 			tp->dma_rwctrl |= 0x00144000;
16448 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16449 			/* 5714 always in PCIX mode */
16450 			tp->dma_rwctrl |= 0x00148000;
16451 		} else {
16452 			tp->dma_rwctrl |= 0x001b000f;
16453 		}
16454 	}
16455 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16456 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16457 
16458 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16459 	    tg3_asic_rev(tp) == ASIC_REV_5704)
16460 		tp->dma_rwctrl &= 0xfffffff0;
16461 
16462 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16463 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
16464 		/* Remove this if it causes problems for some boards. */
16465 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16466 
16467 		/* On 5700/5701 chips, we need to set this bit.
16468 		 * Otherwise the chip will issue cacheline transactions
16469 		 * to streamable DMA memory without all of the byte
16470 		 * enables turned on.  This is an error on several
16471 		 * RISC PCI controllers, in particular sparc64.
16472 		 *
16473 		 * On 5703/5704 chips, this bit has been reassigned
16474 		 * a different meaning.  In particular, it is used
16475 		 * on those chips to enable a PCI-X workaround.
16476 		 */
16477 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16478 	}
16479 
16480 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16481 
16482 #if 0
16483 	/* Unneeded, already done by tg3_get_invariants.  */
16484 	tg3_switch_clocks(tp);
16485 #endif
16486 
16487 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16488 	    tg3_asic_rev(tp) != ASIC_REV_5701)
16489 		goto out;
16490 
16491 	/* It is best to perform DMA test with maximum write burst size
16492 	 * to expose the 5700/5701 write DMA bug.
16493 	 */
16494 	saved_dma_rwctrl = tp->dma_rwctrl;
16495 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16496 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16497 
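	/* Test strategy: fill the buffer with a counting pattern, DMA it
	 * to the chip and back, then verify.  On the first corruption the
	 * write boundary is clamped to 16 bytes and the whole cycle is
	 * retried; corruption with the 16-byte boundary already in place
	 * is fatal (-ENODEV).
	 */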
16498 	while (1) {
16499 		u32 *p = buf, i;
16500 
16501 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16502 			p[i] = i;
16503 
16504 		/* Send the buffer to the chip. */
16505 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16506 		if (ret) {
16507 			dev_err(&tp->pdev->dev,
16508 				"%s: Buffer write failed. err = %d\n",
16509 				__func__, ret);
16510 			break;
16511 		}
16512 
16513 #if 0
16514 		/* validate data reached card RAM correctly. */
16515 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16516 			u32 val;
16517 			tg3_read_mem(tp, 0x2100 + (i*4), &val);
16518 			if (le32_to_cpu(val) != p[i]) {
16519 				dev_err(&tp->pdev->dev,
16520 					"%s: Buffer corrupted on device! "
16521 					"(%d != %d)\n", __func__, val, i);
16522 				/* ret = -ENODEV here? */
16523 			}
16524 			p[i] = 0;
16525 		}
16526 #endif
16527 		/* Now read it back. */
16528 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16529 		if (ret) {
16530 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16531 				"err = %d\n", __func__, ret);
16532 			break;
16533 		}
16534 
16535 		/* Verify it. */
16536 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16537 			if (p[i] == i)
16538 				continue;
16539 
16540 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16541 			    DMA_RWCTRL_WRITE_BNDRY_16) {
16542 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16543 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16544 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16545 				break;
16546 			} else {
16547 				dev_err(&tp->pdev->dev,
16548 					"%s: Buffer corrupted on read back! "
16549 					"(%d != %d)\n", __func__, p[i], i);
16550 				ret = -ENODEV;
16551 				goto out;
16552 			}
16553 		}
16554 
16555 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16556 			/* Success. */
16557 			ret = 0;
16558 			break;
16559 		}
16560 	}
16561 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16562 	    DMA_RWCTRL_WRITE_BNDRY_16) {
16563 		/* DMA test passed without adjusting DMA boundary,
16564 		 * now look for chipsets that are known to expose the
16565 		 * DMA bug without failing the test.
16566 		 */
16567 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16568 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16569 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16570 		} else {
16571 			/* Safe to use the calculated DMA boundary. */
16572 			tp->dma_rwctrl = saved_dma_rwctrl;
16573 		}
16574 
16575 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16576 	}
16577 
16578 out:
16579 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16580 out_nofree:
16581 	return ret;
16582 }
16583 
16584 static void tg3_init_bufmgr_config(struct tg3 *tp)
16585 {
16586 	if (tg3_flag(tp, 57765_PLUS)) {
16587 		tp->bufmgr_config.mbuf_read_dma_low_water =
16588 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16589 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16590 			DEFAULT_MB_MACRX_LOW_WATER_57765;
16591 		tp->bufmgr_config.mbuf_high_water =
16592 			DEFAULT_MB_HIGH_WATER_57765;
16593 
16594 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16595 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16596 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16597 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16598 		tp->bufmgr_config.mbuf_high_water_jumbo =
16599 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16600 	} else if (tg3_flag(tp, 5705_PLUS)) {
16601 		tp->bufmgr_config.mbuf_read_dma_low_water =
16602 			DEFAULT_MB_RDMA_LOW_WATER_5705;
16603 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16604 			DEFAULT_MB_MACRX_LOW_WATER_5705;
16605 		tp->bufmgr_config.mbuf_high_water =
16606 			DEFAULT_MB_HIGH_WATER_5705;
16607 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16608 			tp->bufmgr_config.mbuf_mac_rx_low_water =
16609 				DEFAULT_MB_MACRX_LOW_WATER_5906;
16610 			tp->bufmgr_config.mbuf_high_water =
16611 				DEFAULT_MB_HIGH_WATER_5906;
16612 		}
16613 
16614 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16615 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16616 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16617 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16618 		tp->bufmgr_config.mbuf_high_water_jumbo =
16619 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16620 	} else {
16621 		tp->bufmgr_config.mbuf_read_dma_low_water =
16622 			DEFAULT_MB_RDMA_LOW_WATER;
16623 		tp->bufmgr_config.mbuf_mac_rx_low_water =
16624 			DEFAULT_MB_MACRX_LOW_WATER;
16625 		tp->bufmgr_config.mbuf_high_water =
16626 			DEFAULT_MB_HIGH_WATER;
16627 
16628 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16629 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16630 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16631 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16632 		tp->bufmgr_config.mbuf_high_water_jumbo =
16633 			DEFAULT_MB_HIGH_WATER_JUMBO;
16634 	}
16635 
16636 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16637 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16638 }
16639 
16640 static char *tg3_phy_string(struct tg3 *tp)
16641 {
16642 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
16643 	case TG3_PHY_ID_BCM5400:	return "5400";
16644 	case TG3_PHY_ID_BCM5401:	return "5401";
16645 	case TG3_PHY_ID_BCM5411:	return "5411";
16646 	case TG3_PHY_ID_BCM5701:	return "5701";
16647 	case TG3_PHY_ID_BCM5703:	return "5703";
16648 	case TG3_PHY_ID_BCM5704:	return "5704";
16649 	case TG3_PHY_ID_BCM5705:	return "5705";
16650 	case TG3_PHY_ID_BCM5750:	return "5750";
16651 	case TG3_PHY_ID_BCM5752:	return "5752";
16652 	case TG3_PHY_ID_BCM5714:	return "5714";
16653 	case TG3_PHY_ID_BCM5780:	return "5780";
16654 	case TG3_PHY_ID_BCM5755:	return "5755";
16655 	case TG3_PHY_ID_BCM5787:	return "5787";
16656 	case TG3_PHY_ID_BCM5784:	return "5784";
16657 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
16658 	case TG3_PHY_ID_BCM5906:	return "5906";
16659 	case TG3_PHY_ID_BCM5761:	return "5761";
16660 	case TG3_PHY_ID_BCM5718C:	return "5718C";
16661 	case TG3_PHY_ID_BCM5718S:	return "5718S";
16662 	case TG3_PHY_ID_BCM57765:	return "57765";
16663 	case TG3_PHY_ID_BCM5719C:	return "5719C";
16664 	case TG3_PHY_ID_BCM5720C:	return "5720C";
16665 	case TG3_PHY_ID_BCM5762:	return "5762C";
16666 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
16667 	case 0:			return "serdes";
16668 	default:		return "unknown";
16669 	}
16670 }
16671 
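/* Render the bus type, clock grade and width into the caller-supplied
 * buffer; the caller in tg3_init_one() passes a 40-byte stack buffer.
 * The PCI-X clock grade is decoded from the low bits of
 * TG3PCI_CLOCK_CTRL.
 */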
16672 static char *tg3_bus_string(struct tg3 *tp, char *str)
16673 {
16674 	if (tg3_flag(tp, PCI_EXPRESS)) {
16675 		strcpy(str, "PCI Express");
16676 		return str;
16677 	} else if (tg3_flag(tp, PCIX_MODE)) {
16678 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16679 
16680 		strcpy(str, "PCIX:");
16681 
16682 		if ((clock_ctrl == 7) ||
16683 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16684 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16685 			strcat(str, "133MHz");
16686 		else if (clock_ctrl == 0)
16687 			strcat(str, "33MHz");
16688 		else if (clock_ctrl == 2)
16689 			strcat(str, "50MHz");
16690 		else if (clock_ctrl == 4)
16691 			strcat(str, "66MHz");
16692 		else if (clock_ctrl == 6)
16693 			strcat(str, "100MHz");
16694 	} else {
16695 		strcpy(str, "PCI:");
16696 		if (tg3_flag(tp, PCI_HIGH_SPEED))
16697 			strcat(str, "66MHz");
16698 		else
16699 			strcat(str, "33MHz");
16700 	}
16701 	if (tg3_flag(tp, PCI_32BIT))
16702 		strcat(str, ":32-bit");
16703 	else
16704 		strcat(str, ":64-bit");
16705 	return str;
16706 }
16707 
16708 static void tg3_init_coal(struct tg3 *tp)
16709 {
16710 	struct ethtool_coalesce *ec = &tp->coal;
16711 
16712 	memset(ec, 0, sizeof(*ec));
16713 	ec->cmd = ETHTOOL_GCOALESCE;
16714 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16715 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16716 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16717 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16718 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16719 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16720 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16721 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16722 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16723 
16724 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16725 				 HOSTCC_MODE_CLRTICK_TXBD)) {
16726 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16727 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16728 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16729 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16730 	}
16731 
16732 	if (tg3_flag(tp, 5705_PLUS)) {
16733 		ec->rx_coalesce_usecs_irq = 0;
16734 		ec->tx_coalesce_usecs_irq = 0;
16735 		ec->stats_block_coalesce_usecs = 0;
16736 	}
16737 }
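
/* tp->coal initialized here is what the driver's ethtool get_coalesce
 * hook copies back to userspace, so these are the defaults that
 * `ethtool -c' reports until someone tunes them.
 */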
16738 
16739 static int tg3_init_one(struct pci_dev *pdev,
16740 			const struct pci_device_id *ent)
16741 {
16742 	struct net_device *dev;
16743 	struct tg3 *tp;
16744 	int i, err, pm_cap;
16745 	u32 sndmbx, rcvmbx, intmbx;
16746 	char str[40];
16747 	u64 dma_mask, persist_dma_mask;
16748 	netdev_features_t features = 0;
16749 
16750 	printk_once(KERN_INFO "%s\n", version);
16751 
16752 	err = pci_enable_device(pdev);
16753 	if (err) {
16754 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16755 		return err;
16756 	}
16757 
16758 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
16759 	if (err) {
16760 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16761 		goto err_out_disable_pdev;
16762 	}
16763 
16764 	pci_set_master(pdev);
16765 
16766 	/* Find power-management capability. */
16767 	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16768 	if (pm_cap == 0) {
16769 		dev_err(&pdev->dev,
16770 			"Cannot find Power Management capability, aborting\n");
16771 		err = -EIO;
16772 		goto err_out_free_res;
16773 	}
16774 
16775 	err = pci_set_power_state(pdev, PCI_D0);
16776 	if (err) {
16777 		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16778 		goto err_out_free_res;
16779 	}
16780 
16781 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16782 	if (!dev) {
16783 		err = -ENOMEM;
16784 		goto err_out_power_down;
16785 	}
16786 
16787 	SET_NETDEV_DEV(dev, &pdev->dev);
16788 
16789 	tp = netdev_priv(dev);
16790 	tp->pdev = pdev;
16791 	tp->dev = dev;
16792 	tp->pm_cap = pm_cap;
16793 	tp->rx_mode = TG3_DEF_RX_MODE;
16794 	tp->tx_mode = TG3_DEF_TX_MODE;
16795 	tp->irq_sync = 1;
16796 
16797 	if (tg3_debug > 0)
16798 		tp->msg_enable = tg3_debug;
16799 	else
16800 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
16801 
16802 	if (pdev_is_ssb_gige_core(pdev)) {
16803 		tg3_flag_set(tp, IS_SSB_CORE);
16804 		if (ssb_gige_must_flush_posted_writes(pdev))
16805 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
16806 		if (ssb_gige_one_dma_at_once(pdev))
16807 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
16808 		if (ssb_gige_have_roboswitch(pdev))
16809 			tg3_flag_set(tp, ROBOSWITCH);
16810 		if (ssb_gige_is_rgmii(pdev))
16811 			tg3_flag_set(tp, RGMII_MODE);
16812 	}
16813 
16814 	/* The word/byte swap controls here control register access byte
16815 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16816 	 * setting below.
16817 	 */
16818 	tp->misc_host_ctrl =
16819 		MISC_HOST_CTRL_MASK_PCI_INT |
16820 		MISC_HOST_CTRL_WORD_SWAP |
16821 		MISC_HOST_CTRL_INDIR_ACCESS |
16822 		MISC_HOST_CTRL_PCISTATE_RW;
16823 
16824 	/* The NONFRM (non-frame) byte/word swap controls take effect
16825 	 * on descriptor entries, anything which isn't packet data.
16826 	 *
16827 	 * The StrongARM chips on the board (one for tx, one for rx)
16828 	 * are running in big-endian mode.
16829 	 */
16830 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16831 			GRC_MODE_WSWAP_NONFRM_DATA);
16832 #ifdef __BIG_ENDIAN
16833 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16834 #endif
16835 	spin_lock_init(&tp->lock);
16836 	spin_lock_init(&tp->indirect_lock);
16837 	INIT_WORK(&tp->reset_task, tg3_reset_task);
16838 
16839 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
16840 	if (!tp->regs) {
16841 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16842 		err = -ENOMEM;
16843 		goto err_out_free_dev;
16844 	}
16845 
16846 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16847 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16848 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16849 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16850 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16851 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16852 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16853 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16854 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16855 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16856 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16857 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
16858 		tg3_flag_set(tp, ENABLE_APE);
16859 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16860 		if (!tp->aperegs) {
16861 			dev_err(&pdev->dev,
16862 				"Cannot map APE registers, aborting\n");
16863 			err = -ENOMEM;
16864 			goto err_out_iounmap;
16865 		}
16866 	}
16867 
16868 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16869 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16870 
16871 	dev->ethtool_ops = &tg3_ethtool_ops;
16872 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
16873 	dev->netdev_ops = &tg3_netdev_ops;
16874 	dev->irq = pdev->irq;
16875 
16876 	err = tg3_get_invariants(tp, ent);
16877 	if (err) {
16878 		dev_err(&pdev->dev,
16879 			"Problem fetching invariants of chip, aborting\n");
16880 		goto err_out_apeunmap;
16881 	}
16882 
16883 	/* The EPB bridge inside 5714, 5715, and 5780 and any
16884 	 * device behind the EPB cannot support DMA addresses > 40-bit.
16885 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16886 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16887 	 * do DMA address check in tg3_start_xmit().
16888 	 */
16889 	if (tg3_flag(tp, IS_5788))
16890 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16891 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16892 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16893 #ifdef CONFIG_HIGHMEM
16894 		dma_mask = DMA_BIT_MASK(64);
16895 #endif
16896 	} else
16897 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16898 
16899 	/* Configure DMA attributes. */
16900 	if (dma_mask > DMA_BIT_MASK(32)) {
16901 		err = pci_set_dma_mask(pdev, dma_mask);
16902 		if (!err) {
16903 			features |= NETIF_F_HIGHDMA;
16904 			err = pci_set_consistent_dma_mask(pdev,
16905 							  persist_dma_mask);
16906 			if (err < 0) {
16907 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
16908 					"DMA for consistent allocations\n");
16909 				goto err_out_apeunmap;
16910 			}
16911 		}
16912 	}
16913 	if (err || dma_mask == DMA_BIT_MASK(32)) {
16914 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16915 		if (err) {
16916 			dev_err(&pdev->dev,
16917 				"No usable DMA configuration, aborting\n");
16918 			goto err_out_apeunmap;
16919 		}
16920 	}
16921 
16922 	tg3_init_bufmgr_config(tp);
16923 
16924 	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16925 
16926 	/* 5700 B0 chips do not support checksumming correctly due
16927 	 * to hardware bugs.
16928 	 */
16929 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
16930 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16931 
16932 		if (tg3_flag(tp, 5755_PLUS))
16933 			features |= NETIF_F_IPV6_CSUM;
16934 	}
16935 
16936 	/* TSO is on by default on chips that support hardware TSO.
16937 	 * Firmware TSO on older chips gives lower performance, so it
16938 	 * is off by default, but can be enabled using ethtool.
16939 	 */
16940 	if ((tg3_flag(tp, HW_TSO_1) ||
16941 	     tg3_flag(tp, HW_TSO_2) ||
16942 	     tg3_flag(tp, HW_TSO_3)) &&
16943 	    (features & NETIF_F_IP_CSUM))
16944 		features |= NETIF_F_TSO;
16945 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16946 		if (features & NETIF_F_IPV6_CSUM)
16947 			features |= NETIF_F_TSO6;
16948 		if (tg3_flag(tp, HW_TSO_3) ||
16949 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16950 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16951 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
16952 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16953 		    tg3_asic_rev(tp) == ASIC_REV_57780)
16954 			features |= NETIF_F_TSO_ECN;
16955 	}
16956 
16957 	dev->features |= features;
16958 	dev->vlan_features |= features;
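	/* Mirroring the set into vlan_features lets 802.1Q sub-devices
	 * keep checksum offload and TSO.
	 */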
16959 
16960 	/*
16961 	 * Add loopback capability only for a subset of devices that support
16962 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16963 	 * loopback for the remaining devices.
16964 	 */
16965 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
16966 	    !tg3_flag(tp, CPMU_PRESENT))
16967 		/* Add the loopback capability */
16968 		features |= NETIF_F_LOOPBACK;
16969 
16970 	dev->hw_features |= features;
16971 
16972 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
16973 	    !tg3_flag(tp, TSO_CAPABLE) &&
16974 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16975 		tg3_flag_set(tp, MAX_RXPEND_64);
16976 		tp->rx_pending = 63;
16977 	}
16978 
16979 	err = tg3_get_device_address(tp);
16980 	if (err) {
16981 		dev_err(&pdev->dev,
16982 			"Could not obtain valid ethernet address, aborting\n");
16983 		goto err_out_apeunmap;
16984 	}
16985 
16986 	/*
16987 	 * Reset the chip in case a UNDI or EFI driver did not shut it
16988 	 * down; the DMA self test will enable WDMAC and we'd then see
16989 	 * (spurious) pending DMA on the PCI bus at that point.
16990 	 */
16991 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16992 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16993 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16994 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16995 	}
16996 
16997 	err = tg3_test_dma(tp);
16998 	if (err) {
16999 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17000 		goto err_out_apeunmap;
17001 	}
17002 
17003 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17004 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17005 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17006 	for (i = 0; i < tp->irq_max; i++) {
17007 		struct tg3_napi *tnapi = &tp->napi[i];
17008 
17009 		tnapi->tp = tp;
17010 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17011 
17012 		tnapi->int_mbox = intmbx;
17013 		if (i <= 4)
17014 			intmbx += 0x8;
17015 		else
17016 			intmbx += 0x4;
17017 
17018 		tnapi->consmbox = rcvmbx;
17019 		tnapi->prodmbox = sndmbx;
17020 
17021 		if (i)
17022 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17023 		else
17024 			tnapi->coal_now = HOSTCC_MODE_NOW;
17025 
17026 		if (!tg3_flag(tp, SUPPORT_MSIX))
17027 			break;
17028 
17029 		/*
17030 		 * If we support MSIX, we'll be using RSS.  If we're using
17031 		 * RSS, the first vector only handles link interrupts and the
17032 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17033 		 * mailbox values for the next iteration.  The values we set up
17034 		 * above are still used in single-vector mode.
17035 		 */
17036 		if (!i)
17037 			continue;
17038 
17039 		rcvmbx += 0x8;
17040 
17041 		if (sndmbx & 0x4)
17042 			sndmbx -= 0x4;
17043 		else
17044 			sndmbx += 0xc;
17045 	}
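	/* Net effect of the sndmbx adjustment: each pair of vectors
	 * advances the producer mailbox by eight bytes (one 64-bit
	 * register) while alternating which 32-bit half is addressed.
	 */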
17046 
17047 	tg3_init_coal(tp);
17048 
17049 	pci_set_drvdata(pdev, dev);
17050 
17051 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17052 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17053 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17054 		tg3_flag_set(tp, PTP_CAPABLE);
17055 
17056 	if (tg3_flag(tp, 5717_PLUS)) {
17057 		/* Resume a low-power mode */
17058 		tg3_frob_aux_power(tp, false);
17059 	}
17060 
17061 	tg3_timer_init(tp);
17062 
17063 	tg3_carrier_off(tp);
17064 
17065 	err = register_netdev(dev);
17066 	if (err) {
17067 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17068 		goto err_out_apeunmap;
17069 	}
17070 
17071 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17072 		    tp->board_part_number,
17073 		    tg3_chip_rev_id(tp),
17074 		    tg3_bus_string(tp, str),
17075 		    dev->dev_addr);
17076 
17077 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17078 		struct phy_device *phydev;
17079 		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17080 		netdev_info(dev,
17081 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17082 			    phydev->drv->name, dev_name(&phydev->dev));
17083 	} else {
17084 		char *ethtype;
17085 
17086 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17087 			ethtype = "10/100Base-TX";
17088 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17089 			ethtype = "1000Base-SX";
17090 		else
17091 			ethtype = "10/100/1000Base-T";
17092 
17093 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17094 			    "(WireSpeed[%d], EEE[%d])\n",
17095 			    tg3_phy_string(tp), ethtype,
17096 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17097 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17098 	}
17099 
17100 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17101 		    (dev->features & NETIF_F_RXCSUM) != 0,
17102 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17103 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17104 		    tg3_flag(tp, ENABLE_ASF) != 0,
17105 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17106 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17107 		    tp->dma_rwctrl,
17108 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17109 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17110 
17111 	pci_save_state(pdev);
17112 
17113 	return 0;
17114 
17115 err_out_apeunmap:
17116 	if (tp->aperegs) {
17117 		iounmap(tp->aperegs);
17118 		tp->aperegs = NULL;
17119 	}
17120 
17121 err_out_iounmap:
17122 	if (tp->regs) {
17123 		iounmap(tp->regs);
17124 		tp->regs = NULL;
17125 	}
17126 
17127 err_out_free_dev:
17128 	free_netdev(dev);
17129 
17130 err_out_power_down:
17131 	pci_set_power_state(pdev, PCI_D3hot);
17132 
17133 err_out_free_res:
17134 	pci_release_regions(pdev);
17135 
17136 err_out_disable_pdev:
17137 	pci_disable_device(pdev);
17138 	pci_set_drvdata(pdev, NULL);
17139 	return err;
17140 }
17141 
17142 static void tg3_remove_one(struct pci_dev *pdev)
17143 {
17144 	struct net_device *dev = pci_get_drvdata(pdev);
17145 
17146 	if (dev) {
17147 		struct tg3 *tp = netdev_priv(dev);
17148 
17149 		release_firmware(tp->fw);
17150 
17151 		tg3_reset_task_cancel(tp);
17152 
17153 		if (tg3_flag(tp, USE_PHYLIB)) {
17154 			tg3_phy_fini(tp);
17155 			tg3_mdio_fini(tp);
17156 		}
17157 
17158 		unregister_netdev(dev);
17159 		if (tp->aperegs) {
17160 			iounmap(tp->aperegs);
17161 			tp->aperegs = NULL;
17162 		}
17163 		if (tp->regs) {
17164 			iounmap(tp->regs);
17165 			tp->regs = NULL;
17166 		}
17167 		free_netdev(dev);
17168 		pci_release_regions(pdev);
17169 		pci_disable_device(pdev);
17170 		pci_set_drvdata(pdev, NULL);
17171 	}
17172 }
17173 
17174 #ifdef CONFIG_PM_SLEEP
17175 static int tg3_suspend(struct device *device)
17176 {
17177 	struct pci_dev *pdev = to_pci_dev(device);
17178 	struct net_device *dev = pci_get_drvdata(pdev);
17179 	struct tg3 *tp = netdev_priv(dev);
17180 	int err;
17181 
17182 	if (!netif_running(dev))
17183 		return 0;
17184 
17185 	tg3_reset_task_cancel(tp);
17186 	tg3_phy_stop(tp);
17187 	tg3_netif_stop(tp);
17188 
17189 	tg3_timer_stop(tp);
17190 
17191 	tg3_full_lock(tp, 1);
17192 	tg3_disable_ints(tp);
17193 	tg3_full_unlock(tp);
17194 
17195 	netif_device_detach(dev);
17196 
17197 	tg3_full_lock(tp, 0);
17198 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17199 	tg3_flag_clear(tp, INIT_COMPLETE);
17200 	tg3_full_unlock(tp);
17201 
17202 	err = tg3_power_down_prepare(tp);
17203 	if (err) {
17204 		int err2;
17205 
17206 		tg3_full_lock(tp, 0);
17207 
17208 		tg3_flag_set(tp, INIT_COMPLETE);
17209 		err2 = tg3_restart_hw(tp, 1);
17210 		if (err2)
17211 			goto out;
17212 
17213 		tg3_timer_start(tp);
17214 
17215 		netif_device_attach(dev);
17216 		tg3_netif_start(tp);
17217 
17218 out:
17219 		tg3_full_unlock(tp);
17220 
17221 		if (!err2)
17222 			tg3_phy_start(tp);
17223 	}
17224 
17225 	return err;
17226 }
17227 
17228 static int tg3_resume(struct device *device)
17229 {
17230 	struct pci_dev *pdev = to_pci_dev(device);
17231 	struct net_device *dev = pci_get_drvdata(pdev);
17232 	struct tg3 *tp = netdev_priv(dev);
17233 	int err;
17234 
17235 	if (!netif_running(dev))
17236 		return 0;
17237 
17238 	netif_device_attach(dev);
17239 
17240 	tg3_full_lock(tp, 0);
17241 
17242 	tg3_flag_set(tp, INIT_COMPLETE);
17243 	err = tg3_restart_hw(tp, 1);
17244 	if (err)
17245 		goto out;
17246 
17247 	tg3_timer_start(tp);
17248 
17249 	tg3_netif_start(tp);
17250 
17251 out:
17252 	tg3_full_unlock(tp);
17253 
17254 	if (!err)
17255 		tg3_phy_start(tp);
17256 
17257 	return err;
17258 }
17259 
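/* SIMPLE_DEV_PM_OPS wires the same suspend/resume pair into the
 * freeze/thaw/poweroff/restore slots as well, so hibernation follows
 * the suspend path above.
 */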
17260 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17261 #define TG3_PM_OPS (&tg3_pm_ops)
17262 
17263 #else
17264 
17265 #define TG3_PM_OPS NULL
17266 
17267 #endif /* CONFIG_PM_SLEEP */
17268 
17269 /**
17270  * tg3_io_error_detected - called when PCI error is detected
17271  * @pdev: Pointer to PCI device
17272  * @state: The current pci connection state
17273  *
17274  * This function is called after a PCI bus error affecting
17275  * this device has been detected.
17276  */
17277 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17278 					      pci_channel_state_t state)
17279 {
17280 	struct net_device *netdev = pci_get_drvdata(pdev);
17281 	struct tg3 *tp = netdev_priv(netdev);
17282 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17283 
17284 	netdev_info(netdev, "PCI I/O error detected\n");
17285 
17286 	rtnl_lock();
17287 
17288 	if (!netif_running(netdev))
17289 		goto done;
17290 
17291 	tg3_phy_stop(tp);
17292 
17293 	tg3_netif_stop(tp);
17294 
17295 	tg3_timer_stop(tp);
17296 
17297 	/* Want to make sure that the reset task doesn't run */
17298 	tg3_reset_task_cancel(tp);
17299 
17300 	netif_device_detach(netdev);
17301 
17302 	/* Clean up software state, even if MMIO is blocked */
17303 	tg3_full_lock(tp, 0);
17304 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17305 	tg3_full_unlock(tp);
17306 
17307 done:
17308 	if (state == pci_channel_io_perm_failure)
17309 		err = PCI_ERS_RESULT_DISCONNECT;
17310 	else
17311 		pci_disable_device(pdev);
17312 
17313 	rtnl_unlock();
17314 
17315 	return err;
17316 }
17317 
17318 /**
17319  * tg3_io_slot_reset - called after the pci bus has been reset.
17320  * @pdev: Pointer to PCI device
17321  *
17322  * Restart the card from scratch, as if from a cold-boot.
17323  * At this point, the card has experienced a hard reset,
17324  * followed by fixups by BIOS, and has its config space
17325  * set up identically to what it was at cold boot.
17326  */
17327 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17328 {
17329 	struct net_device *netdev = pci_get_drvdata(pdev);
17330 	struct tg3 *tp = netdev_priv(netdev);
17331 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17332 	int err;
17333 
17334 	rtnl_lock();
17335 
17336 	if (pci_enable_device(pdev)) {
17337 		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17338 		goto done;
17339 	}
17340 
17341 	pci_set_master(pdev);
17342 	pci_restore_state(pdev);
17343 	pci_save_state(pdev);
17344 
17345 	if (!netif_running(netdev)) {
17346 		rc = PCI_ERS_RESULT_RECOVERED;
17347 		goto done;
17348 	}
17349 
17350 	err = tg3_power_up(tp);
17351 	if (err)
17352 		goto done;
17353 
17354 	rc = PCI_ERS_RESULT_RECOVERED;
17355 
17356 done:
17357 	rtnl_unlock();
17358 
17359 	return rc;
17360 }
17361 
17362 /**
17363  * tg3_io_resume - called when traffic can start flowing again.
17364  * @pdev: Pointer to PCI device
17365  *
17366  * This callback is called when the error recovery driver tells
17367  * us that it's OK to resume normal operation.
17368  */
17369 static void tg3_io_resume(struct pci_dev *pdev)
17370 {
17371 	struct net_device *netdev = pci_get_drvdata(pdev);
17372 	struct tg3 *tp = netdev_priv(netdev);
17373 	int err;
17374 
17375 	rtnl_lock();
17376 
17377 	if (!netif_running(netdev))
17378 		goto done;
17379 
17380 	tg3_full_lock(tp, 0);
17381 	tg3_flag_set(tp, INIT_COMPLETE);
17382 	err = tg3_restart_hw(tp, 1);
17383 	if (err) {
17384 		tg3_full_unlock(tp);
17385 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17386 		goto done;
17387 	}
17388 
17389 	netif_device_attach(netdev);
17390 
17391 	tg3_timer_start(tp);
17392 
17393 	tg3_netif_start(tp);
17394 
17395 	tg3_full_unlock(tp);
17396 
17397 	tg3_phy_start(tp);
17398 
17399 done:
17400 	rtnl_unlock();
17401 }
17402 
17403 static const struct pci_error_handlers tg3_err_handler = {
17404 	.error_detected	= tg3_io_error_detected,
17405 	.slot_reset	= tg3_io_slot_reset,
17406 	.resume		= tg3_io_resume
17407 };
17408 
17409 static struct pci_driver tg3_driver = {
17410 	.name		= DRV_MODULE_NAME,
17411 	.id_table	= tg3_pci_tbl,
17412 	.probe		= tg3_init_one,
17413 	.remove		= tg3_remove_one,
17414 	.err_handler	= &tg3_err_handler,
17415 	.driver.pm	= TG3_PM_OPS,
17416 };
17417 
17418 static int __init tg3_init(void)
17419 {
17420 	return pci_register_driver(&tg3_driver);
17421 }
17422 
17423 static void __exit tg3_cleanup(void)
17424 {
17425 	pci_unregister_driver(&tg3_driver);
17426 }
17427 
17428 module_init(tg3_init);
17429 module_exit(tg3_cleanup);
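
/*
 * The registration boilerplate above is the expanded form of what the
 * module_pci_driver() convenience macro generates; a minimal sketch of
 * the equivalent one-liner:
 */
#if 0
module_pci_driver(tg3_driver);
#endif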
17430