xref: /linux/drivers/net/ethernet/broadcom/tg3.c (revision b9b77222d4ff6b5bb8f5d87fca20de0910618bb9)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 
58 #include <net/checksum.h>
59 #include <net/ip.h>
60 
61 #include <linux/io.h>
62 #include <asm/byteorder.h>
63 #include <linux/uaccess.h>
64 
65 #include <uapi/linux/net_tstamp.h>
66 #include <linux/ptp_clock_kernel.h>
67 
68 #ifdef CONFIG_SPARC
69 #include <asm/idprom.h>
70 #include <asm/prom.h>
71 #endif
72 
73 #define BAR_0	0
74 #define BAR_2	2
75 
76 #include "tg3.h"
77 
78 /* Functions & macros to verify TG3_FLAGS types */
79 
80 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
81 {
82 	return test_bit(flag, bits);
83 }
84 
85 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
86 {
87 	set_bit(flag, bits);
88 }
89 
90 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
91 {
92 	clear_bit(flag, bits);
93 }
94 
95 #define tg3_flag(tp, flag)				\
96 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
97 #define tg3_flag_set(tp, flag)				\
98 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
99 #define tg3_flag_clear(tp, flag)			\
100 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
101 
102 #define DRV_MODULE_NAME		"tg3"
103 #define TG3_MAJ_NUM			3
104 #define TG3_MIN_NUM			137
105 #define DRV_MODULE_VERSION	\
106 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
107 #define DRV_MODULE_RELDATE	"May 11, 2014"
108 
109 #define RESET_KIND_SHUTDOWN	0
110 #define RESET_KIND_INIT		1
111 #define RESET_KIND_SUSPEND	2
112 
113 #define TG3_DEF_RX_MODE		0
114 #define TG3_DEF_TX_MODE		0
115 #define TG3_DEF_MSG_ENABLE	  \
116 	(NETIF_MSG_DRV		| \
117 	 NETIF_MSG_PROBE	| \
118 	 NETIF_MSG_LINK		| \
119 	 NETIF_MSG_TIMER	| \
120 	 NETIF_MSG_IFDOWN	| \
121 	 NETIF_MSG_IFUP		| \
122 	 NETIF_MSG_RX_ERR	| \
123 	 NETIF_MSG_TX_ERR)
124 
125 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
126 
127 /* length of time before we decide the hardware is borked,
128  * and dev->tx_timeout() should be called to fix the problem
129  */
130 
131 #define TG3_TX_TIMEOUT			(5 * HZ)
132 
133 /* hardware minimum and maximum for a single frame's data payload */
134 #define TG3_MIN_MTU			ETH_ZLEN
135 #define TG3_MAX_MTU(tp)	\
136 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
137 
138 /* These numbers seem to be hard coded in the NIC firmware somehow.
139  * You can't change the ring sizes, but you can change where you place
140  * them in the NIC onboard memory.
141  */
142 #define TG3_RX_STD_RING_SIZE(tp) \
143 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
145 #define TG3_DEF_RX_RING_PENDING		200
146 #define TG3_RX_JMB_RING_SIZE(tp) \
147 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
148 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
149 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
150 
151 /* Do not place this n-ring entries value into the tp struct itself,
152  * we really want to expose these constants to GCC so that modulo et
153  * al.  operations are done with shifts and masks instead of with
154  * hw multiply/modulo instructions.  Another solution would be to
155  * replace things like '% foo' with '& (foo - 1)'.
156  */
157 
158 #define TG3_TX_RING_SIZE		512
159 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
160 
161 #define TG3_RX_STD_RING_BYTES(tp) \
162 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
163 #define TG3_RX_JMB_RING_BYTES(tp) \
164 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
165 #define TG3_RX_RCB_RING_BYTES(tp) \
166 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
167 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
168 				 TG3_TX_RING_SIZE)
169 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
170 
171 #define TG3_DMA_BYTE_ENAB		64
172 
173 #define TG3_RX_STD_DMA_SZ		1536
174 #define TG3_RX_JMB_DMA_SZ		9046
175 
176 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
177 
178 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
179 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
180 
181 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
182 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
183 
184 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
185 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
186 
187 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
188  * that are at least dword aligned when used in PCIX mode.  The driver
189  * works around this bug by double copying the packet.  This workaround
190  * is built into the normal double copy length check for efficiency.
191  *
192  * However, the double copy is only necessary on those architectures
193  * where unaligned memory accesses are inefficient.  For those architectures
194  * where unaligned memory accesses incur little penalty, we can reintegrate
195  * the 5701 in the normal rx path.  Doing so saves a device structure
196  * dereference by hardcoding the double copy threshold in place.
197  */
198 #define TG3_RX_COPY_THRESHOLD		256
199 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
200 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
201 #else
202 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
203 #endif
204 
205 #if (NET_IP_ALIGN != 0)
206 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
207 #else
208 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
209 #endif
210 
211 /* minimum number of free TX descriptors required to wake up TX process */
212 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
213 #define TG3_TX_BD_DMA_MAX_2K		2048
214 #define TG3_TX_BD_DMA_MAX_4K		4096
215 
216 #define TG3_RAW_IP_ALIGN 2
217 
218 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
219 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
220 
221 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
222 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
223 
224 #define FIRMWARE_TG3		"tigon/tg3.bin"
225 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
226 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
227 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
228 
229 static char version[] =
230 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
231 
232 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
233 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
234 MODULE_LICENSE("GPL");
235 MODULE_VERSION(DRV_MODULE_VERSION);
236 MODULE_FIRMWARE(FIRMWARE_TG3);
237 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
238 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
239 
240 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
241 module_param(tg3_debug, int, 0);
242 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
243 
244 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
245 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
246 
247 static const struct pci_device_id tg3_pci_tbl[] = {
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
263 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
267 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 			TG3_DRV_DATA_FLAG_5705_10_100},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
270 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
271 			TG3_DRV_DATA_FLAG_5705_10_100},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
274 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
275 			TG3_DRV_DATA_FLAG_5705_10_100},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
282 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
288 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
289 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
296 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
297 			PCI_VENDOR_ID_LENOVO,
298 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
299 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
302 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
314 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
315 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
316 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
317 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
318 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
321 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
322 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
323 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
325 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
326 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
330 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
340 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
342 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
355 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
356 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
357 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
358 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
359 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
360 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
361 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
362 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
363 	{}
364 };
365 
366 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
367 
368 static const struct {
369 	const char string[ETH_GSTRING_LEN];
370 } ethtool_stats_keys[] = {
371 	{ "rx_octets" },
372 	{ "rx_fragments" },
373 	{ "rx_ucast_packets" },
374 	{ "rx_mcast_packets" },
375 	{ "rx_bcast_packets" },
376 	{ "rx_fcs_errors" },
377 	{ "rx_align_errors" },
378 	{ "rx_xon_pause_rcvd" },
379 	{ "rx_xoff_pause_rcvd" },
380 	{ "rx_mac_ctrl_rcvd" },
381 	{ "rx_xoff_entered" },
382 	{ "rx_frame_too_long_errors" },
383 	{ "rx_jabbers" },
384 	{ "rx_undersize_packets" },
385 	{ "rx_in_length_errors" },
386 	{ "rx_out_length_errors" },
387 	{ "rx_64_or_less_octet_packets" },
388 	{ "rx_65_to_127_octet_packets" },
389 	{ "rx_128_to_255_octet_packets" },
390 	{ "rx_256_to_511_octet_packets" },
391 	{ "rx_512_to_1023_octet_packets" },
392 	{ "rx_1024_to_1522_octet_packets" },
393 	{ "rx_1523_to_2047_octet_packets" },
394 	{ "rx_2048_to_4095_octet_packets" },
395 	{ "rx_4096_to_8191_octet_packets" },
396 	{ "rx_8192_to_9022_octet_packets" },
397 
398 	{ "tx_octets" },
399 	{ "tx_collisions" },
400 
401 	{ "tx_xon_sent" },
402 	{ "tx_xoff_sent" },
403 	{ "tx_flow_control" },
404 	{ "tx_mac_errors" },
405 	{ "tx_single_collisions" },
406 	{ "tx_mult_collisions" },
407 	{ "tx_deferred" },
408 	{ "tx_excessive_collisions" },
409 	{ "tx_late_collisions" },
410 	{ "tx_collide_2times" },
411 	{ "tx_collide_3times" },
412 	{ "tx_collide_4times" },
413 	{ "tx_collide_5times" },
414 	{ "tx_collide_6times" },
415 	{ "tx_collide_7times" },
416 	{ "tx_collide_8times" },
417 	{ "tx_collide_9times" },
418 	{ "tx_collide_10times" },
419 	{ "tx_collide_11times" },
420 	{ "tx_collide_12times" },
421 	{ "tx_collide_13times" },
422 	{ "tx_collide_14times" },
423 	{ "tx_collide_15times" },
424 	{ "tx_ucast_packets" },
425 	{ "tx_mcast_packets" },
426 	{ "tx_bcast_packets" },
427 	{ "tx_carrier_sense_errors" },
428 	{ "tx_discards" },
429 	{ "tx_errors" },
430 
431 	{ "dma_writeq_full" },
432 	{ "dma_write_prioq_full" },
433 	{ "rxbds_empty" },
434 	{ "rx_discards" },
435 	{ "rx_errors" },
436 	{ "rx_threshold_hit" },
437 
438 	{ "dma_readq_full" },
439 	{ "dma_read_prioq_full" },
440 	{ "tx_comp_queue_full" },
441 
442 	{ "ring_set_send_prod_index" },
443 	{ "ring_status_update" },
444 	{ "nic_irqs" },
445 	{ "nic_avoided_irqs" },
446 	{ "nic_tx_threshold_hit" },
447 
448 	{ "mbuf_lwm_thresh_hit" },
449 };
450 
451 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
452 #define TG3_NVRAM_TEST		0
453 #define TG3_LINK_TEST		1
454 #define TG3_REGISTER_TEST	2
455 #define TG3_MEMORY_TEST		3
456 #define TG3_MAC_LOOPB_TEST	4
457 #define TG3_PHY_LOOPB_TEST	5
458 #define TG3_EXT_LOOPB_TEST	6
459 #define TG3_INTERRUPT_TEST	7
460 
461 
462 static const struct {
463 	const char string[ETH_GSTRING_LEN];
464 } ethtool_test_keys[] = {
465 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
466 	[TG3_LINK_TEST]		= { "link test         (online) " },
467 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
468 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
469 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
470 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
471 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
472 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
473 };
474 
475 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
476 
477 
478 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
479 {
480 	writel(val, tp->regs + off);
481 }
482 
483 static u32 tg3_read32(struct tg3 *tp, u32 off)
484 {
485 	return readl(tp->regs + off);
486 }
487 
488 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
489 {
490 	writel(val, tp->aperegs + off);
491 }
492 
493 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
494 {
495 	return readl(tp->aperegs + off);
496 }
497 
498 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
499 {
500 	unsigned long flags;
501 
502 	spin_lock_irqsave(&tp->indirect_lock, flags);
503 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
504 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
505 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 }
507 
508 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
509 {
510 	writel(val, tp->regs + off);
511 	readl(tp->regs + off);
512 }
513 
514 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
515 {
516 	unsigned long flags;
517 	u32 val;
518 
519 	spin_lock_irqsave(&tp->indirect_lock, flags);
520 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
521 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
522 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
523 	return val;
524 }
525 
526 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
527 {
528 	unsigned long flags;
529 
530 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
531 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
532 				       TG3_64BIT_REG_LOW, val);
533 		return;
534 	}
535 	if (off == TG3_RX_STD_PROD_IDX_REG) {
536 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
537 				       TG3_64BIT_REG_LOW, val);
538 		return;
539 	}
540 
541 	spin_lock_irqsave(&tp->indirect_lock, flags);
542 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
543 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
544 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
545 
546 	/* In indirect mode when disabling interrupts, we also need
547 	 * to clear the interrupt bit in the GRC local ctrl register.
548 	 */
549 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
550 	    (val == 0x1)) {
551 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
552 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
553 	}
554 }
555 
556 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
557 {
558 	unsigned long flags;
559 	u32 val;
560 
561 	spin_lock_irqsave(&tp->indirect_lock, flags);
562 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
563 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
564 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
565 	return val;
566 }
567 
568 /* usec_wait specifies the wait time in usec when writing to certain registers
569  * where it is unsafe to read back the register without some delay.
570  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
571  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
572  */
573 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
574 {
575 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
576 		/* Non-posted methods */
577 		tp->write32(tp, off, val);
578 	else {
579 		/* Posted method */
580 		tg3_write32(tp, off, val);
581 		if (usec_wait)
582 			udelay(usec_wait);
583 		tp->read32(tp, off);
584 	}
585 	/* Wait again after the read for the posted method to guarantee that
586 	 * the wait time is met.
587 	 */
588 	if (usec_wait)
589 		udelay(usec_wait);
590 }
591 
592 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
593 {
594 	tp->write32_mbox(tp, off, val);
595 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
596 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
597 	     !tg3_flag(tp, ICH_WORKAROUND)))
598 		tp->read32_mbox(tp, off);
599 }
600 
601 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
602 {
603 	void __iomem *mbox = tp->regs + off;
604 	writel(val, mbox);
605 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
606 		writel(val, mbox);
607 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
608 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
609 		readl(mbox);
610 }
611 
612 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
613 {
614 	return readl(tp->regs + off + GRCMBOX_BASE);
615 }
616 
617 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
618 {
619 	writel(val, tp->regs + off + GRCMBOX_BASE);
620 }
621 
622 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
623 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
624 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
625 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
626 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
627 
628 #define tw32(reg, val)			tp->write32(tp, reg, val)
629 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
630 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
631 #define tr32(reg)			tp->read32(tp, reg)
632 
633 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
634 {
635 	unsigned long flags;
636 
637 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
638 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
639 		return;
640 
641 	spin_lock_irqsave(&tp->indirect_lock, flags);
642 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
643 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
644 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
645 
646 		/* Always leave this as zero. */
647 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
648 	} else {
649 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
650 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
651 
652 		/* Always leave this as zero. */
653 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
654 	}
655 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
656 }
657 
658 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
659 {
660 	unsigned long flags;
661 
662 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
663 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
664 		*val = 0;
665 		return;
666 	}
667 
668 	spin_lock_irqsave(&tp->indirect_lock, flags);
669 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
670 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
671 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
672 
673 		/* Always leave this as zero. */
674 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
675 	} else {
676 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
677 		*val = tr32(TG3PCI_MEM_WIN_DATA);
678 
679 		/* Always leave this as zero. */
680 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
681 	}
682 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
683 }
684 
685 static void tg3_ape_lock_init(struct tg3 *tp)
686 {
687 	int i;
688 	u32 regbase, bit;
689 
690 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
691 		regbase = TG3_APE_LOCK_GRANT;
692 	else
693 		regbase = TG3_APE_PER_LOCK_GRANT;
694 
695 	/* Make sure the driver hasn't any stale locks. */
696 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
697 		switch (i) {
698 		case TG3_APE_LOCK_PHY0:
699 		case TG3_APE_LOCK_PHY1:
700 		case TG3_APE_LOCK_PHY2:
701 		case TG3_APE_LOCK_PHY3:
702 			bit = APE_LOCK_GRANT_DRIVER;
703 			break;
704 		default:
705 			if (!tp->pci_fn)
706 				bit = APE_LOCK_GRANT_DRIVER;
707 			else
708 				bit = 1 << tp->pci_fn;
709 		}
710 		tg3_ape_write32(tp, regbase + 4 * i, bit);
711 	}
712 
713 }
714 
715 static int tg3_ape_lock(struct tg3 *tp, int locknum)
716 {
717 	int i, off;
718 	int ret = 0;
719 	u32 status, req, gnt, bit;
720 
721 	if (!tg3_flag(tp, ENABLE_APE))
722 		return 0;
723 
724 	switch (locknum) {
725 	case TG3_APE_LOCK_GPIO:
726 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
727 			return 0;
728 	case TG3_APE_LOCK_GRC:
729 	case TG3_APE_LOCK_MEM:
730 		if (!tp->pci_fn)
731 			bit = APE_LOCK_REQ_DRIVER;
732 		else
733 			bit = 1 << tp->pci_fn;
734 		break;
735 	case TG3_APE_LOCK_PHY0:
736 	case TG3_APE_LOCK_PHY1:
737 	case TG3_APE_LOCK_PHY2:
738 	case TG3_APE_LOCK_PHY3:
739 		bit = APE_LOCK_REQ_DRIVER;
740 		break;
741 	default:
742 		return -EINVAL;
743 	}
744 
745 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
746 		req = TG3_APE_LOCK_REQ;
747 		gnt = TG3_APE_LOCK_GRANT;
748 	} else {
749 		req = TG3_APE_PER_LOCK_REQ;
750 		gnt = TG3_APE_PER_LOCK_GRANT;
751 	}
752 
753 	off = 4 * locknum;
754 
755 	tg3_ape_write32(tp, req + off, bit);
756 
757 	/* Wait for up to 1 millisecond to acquire lock. */
758 	for (i = 0; i < 100; i++) {
759 		status = tg3_ape_read32(tp, gnt + off);
760 		if (status == bit)
761 			break;
762 		if (pci_channel_offline(tp->pdev))
763 			break;
764 
765 		udelay(10);
766 	}
767 
768 	if (status != bit) {
769 		/* Revoke the lock request. */
770 		tg3_ape_write32(tp, gnt + off, bit);
771 		ret = -EBUSY;
772 	}
773 
774 	return ret;
775 }
776 
777 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
778 {
779 	u32 gnt, bit;
780 
781 	if (!tg3_flag(tp, ENABLE_APE))
782 		return;
783 
784 	switch (locknum) {
785 	case TG3_APE_LOCK_GPIO:
786 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
787 			return;
788 	case TG3_APE_LOCK_GRC:
789 	case TG3_APE_LOCK_MEM:
790 		if (!tp->pci_fn)
791 			bit = APE_LOCK_GRANT_DRIVER;
792 		else
793 			bit = 1 << tp->pci_fn;
794 		break;
795 	case TG3_APE_LOCK_PHY0:
796 	case TG3_APE_LOCK_PHY1:
797 	case TG3_APE_LOCK_PHY2:
798 	case TG3_APE_LOCK_PHY3:
799 		bit = APE_LOCK_GRANT_DRIVER;
800 		break;
801 	default:
802 		return;
803 	}
804 
805 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
806 		gnt = TG3_APE_LOCK_GRANT;
807 	else
808 		gnt = TG3_APE_PER_LOCK_GRANT;
809 
810 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
811 }
812 
813 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
814 {
815 	u32 apedata;
816 
817 	while (timeout_us) {
818 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
819 			return -EBUSY;
820 
821 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
822 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
823 			break;
824 
825 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
826 
827 		udelay(10);
828 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
829 	}
830 
831 	return timeout_us ? 0 : -EBUSY;
832 }
833 
834 #ifdef CONFIG_TIGON3_HWMON
835 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
836 {
837 	u32 i, apedata;
838 
839 	for (i = 0; i < timeout_us / 10; i++) {
840 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
841 
842 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
843 			break;
844 
845 		udelay(10);
846 	}
847 
848 	return i == timeout_us / 10;
849 }
850 
851 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
852 				   u32 len)
853 {
854 	int err;
855 	u32 i, bufoff, msgoff, maxlen, apedata;
856 
857 	if (!tg3_flag(tp, APE_HAS_NCSI))
858 		return 0;
859 
860 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
861 	if (apedata != APE_SEG_SIG_MAGIC)
862 		return -ENODEV;
863 
864 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 	if (!(apedata & APE_FW_STATUS_READY))
866 		return -EAGAIN;
867 
868 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
869 		 TG3_APE_SHMEM_BASE;
870 	msgoff = bufoff + 2 * sizeof(u32);
871 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
872 
873 	while (len) {
874 		u32 length;
875 
876 		/* Cap xfer sizes to scratchpad limits. */
877 		length = (len > maxlen) ? maxlen : len;
878 		len -= length;
879 
880 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
881 		if (!(apedata & APE_FW_STATUS_READY))
882 			return -EAGAIN;
883 
884 		/* Wait for up to 1 msec for APE to service previous event. */
885 		err = tg3_ape_event_lock(tp, 1000);
886 		if (err)
887 			return err;
888 
889 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
890 			  APE_EVENT_STATUS_SCRTCHPD_READ |
891 			  APE_EVENT_STATUS_EVENT_PENDING;
892 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
893 
894 		tg3_ape_write32(tp, bufoff, base_off);
895 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
896 
897 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
898 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
899 
900 		base_off += length;
901 
902 		if (tg3_ape_wait_for_event(tp, 30000))
903 			return -EAGAIN;
904 
905 		for (i = 0; length; i += 4, length -= 4) {
906 			u32 val = tg3_ape_read32(tp, msgoff + i);
907 			memcpy(data, &val, sizeof(u32));
908 			data++;
909 		}
910 	}
911 
912 	return 0;
913 }
914 #endif
915 
916 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
917 {
918 	int err;
919 	u32 apedata;
920 
921 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
922 	if (apedata != APE_SEG_SIG_MAGIC)
923 		return -EAGAIN;
924 
925 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
926 	if (!(apedata & APE_FW_STATUS_READY))
927 		return -EAGAIN;
928 
929 	/* Wait for up to 20 millisecond for APE to service previous event. */
930 	err = tg3_ape_event_lock(tp, 20000);
931 	if (err)
932 		return err;
933 
934 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
935 			event | APE_EVENT_STATUS_EVENT_PENDING);
936 
937 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
938 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
939 
940 	return 0;
941 }
942 
943 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
944 {
945 	u32 event;
946 	u32 apedata;
947 
948 	if (!tg3_flag(tp, ENABLE_APE))
949 		return;
950 
951 	switch (kind) {
952 	case RESET_KIND_INIT:
953 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
954 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
955 				APE_HOST_SEG_SIG_MAGIC);
956 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
957 				APE_HOST_SEG_LEN_MAGIC);
958 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
959 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
960 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
961 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
962 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
963 				APE_HOST_BEHAV_NO_PHYLOCK);
964 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
965 				    TG3_APE_HOST_DRVR_STATE_START);
966 
967 		event = APE_EVENT_STATUS_STATE_START;
968 		break;
969 	case RESET_KIND_SHUTDOWN:
970 		if (device_may_wakeup(&tp->pdev->dev) &&
971 		    tg3_flag(tp, WOL_ENABLE)) {
972 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
973 					    TG3_APE_HOST_WOL_SPEED_AUTO);
974 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
975 		} else
976 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
977 
978 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
979 
980 		event = APE_EVENT_STATUS_STATE_UNLOAD;
981 		break;
982 	default:
983 		return;
984 	}
985 
986 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
987 
988 	tg3_ape_send_event(tp, event);
989 }
990 
991 static void tg3_send_ape_heartbeat(struct tg3 *tp,
992 				   unsigned long interval)
993 {
994 	/* Check if hb interval has exceeded */
995 	if (!tg3_flag(tp, ENABLE_APE) ||
996 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
997 		return;
998 
999 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
1000 	tp->ape_hb_jiffies = jiffies;
1001 }
1002 
1003 static void tg3_disable_ints(struct tg3 *tp)
1004 {
1005 	int i;
1006 
1007 	tw32(TG3PCI_MISC_HOST_CTRL,
1008 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1009 	for (i = 0; i < tp->irq_max; i++)
1010 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1011 }
1012 
1013 static void tg3_enable_ints(struct tg3 *tp)
1014 {
1015 	int i;
1016 
1017 	tp->irq_sync = 0;
1018 	wmb();
1019 
1020 	tw32(TG3PCI_MISC_HOST_CTRL,
1021 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1022 
1023 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1024 	for (i = 0; i < tp->irq_cnt; i++) {
1025 		struct tg3_napi *tnapi = &tp->napi[i];
1026 
1027 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1028 		if (tg3_flag(tp, 1SHOT_MSI))
1029 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1030 
1031 		tp->coal_now |= tnapi->coal_now;
1032 	}
1033 
1034 	/* Force an initial interrupt */
1035 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1036 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1037 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1038 	else
1039 		tw32(HOSTCC_MODE, tp->coal_now);
1040 
1041 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1042 }
1043 
1044 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1045 {
1046 	struct tg3 *tp = tnapi->tp;
1047 	struct tg3_hw_status *sblk = tnapi->hw_status;
1048 	unsigned int work_exists = 0;
1049 
1050 	/* check for phy events */
1051 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1052 		if (sblk->status & SD_STATUS_LINK_CHG)
1053 			work_exists = 1;
1054 	}
1055 
1056 	/* check for TX work to do */
1057 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1058 		work_exists = 1;
1059 
1060 	/* check for RX work to do */
1061 	if (tnapi->rx_rcb_prod_idx &&
1062 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1063 		work_exists = 1;
1064 
1065 	return work_exists;
1066 }
1067 
1068 /* tg3_int_reenable
1069  *  similar to tg3_enable_ints, but it accurately determines whether there
1070  *  is new work pending and can return without flushing the PIO write
1071  *  which reenables interrupts
1072  */
1073 static void tg3_int_reenable(struct tg3_napi *tnapi)
1074 {
1075 	struct tg3 *tp = tnapi->tp;
1076 
1077 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1078 	mmiowb();
1079 
1080 	/* When doing tagged status, this work check is unnecessary.
1081 	 * The last_tag we write above tells the chip which piece of
1082 	 * work we've completed.
1083 	 */
1084 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1085 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1086 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1087 }
1088 
1089 static void tg3_switch_clocks(struct tg3 *tp)
1090 {
1091 	u32 clock_ctrl;
1092 	u32 orig_clock_ctrl;
1093 
1094 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1095 		return;
1096 
1097 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1098 
1099 	orig_clock_ctrl = clock_ctrl;
1100 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1101 		       CLOCK_CTRL_CLKRUN_OENABLE |
1102 		       0x1f);
1103 	tp->pci_clock_ctrl = clock_ctrl;
1104 
1105 	if (tg3_flag(tp, 5705_PLUS)) {
1106 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1107 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1108 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1109 		}
1110 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1111 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1112 			    clock_ctrl |
1113 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1114 			    40);
1115 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1116 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1117 			    40);
1118 	}
1119 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1120 }
1121 
1122 #define PHY_BUSY_LOOPS	5000
1123 
1124 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1125 			 u32 *val)
1126 {
1127 	u32 frame_val;
1128 	unsigned int loops;
1129 	int ret;
1130 
1131 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1132 		tw32_f(MAC_MI_MODE,
1133 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1134 		udelay(80);
1135 	}
1136 
1137 	tg3_ape_lock(tp, tp->phy_ape_lock);
1138 
1139 	*val = 0x0;
1140 
1141 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1142 		      MI_COM_PHY_ADDR_MASK);
1143 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1144 		      MI_COM_REG_ADDR_MASK);
1145 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1146 
1147 	tw32_f(MAC_MI_COM, frame_val);
1148 
1149 	loops = PHY_BUSY_LOOPS;
1150 	while (loops != 0) {
1151 		udelay(10);
1152 		frame_val = tr32(MAC_MI_COM);
1153 
1154 		if ((frame_val & MI_COM_BUSY) == 0) {
1155 			udelay(5);
1156 			frame_val = tr32(MAC_MI_COM);
1157 			break;
1158 		}
1159 		loops -= 1;
1160 	}
1161 
1162 	ret = -EBUSY;
1163 	if (loops != 0) {
1164 		*val = frame_val & MI_COM_DATA_MASK;
1165 		ret = 0;
1166 	}
1167 
1168 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1169 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1170 		udelay(80);
1171 	}
1172 
1173 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1174 
1175 	return ret;
1176 }
1177 
1178 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1179 {
1180 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1181 }
1182 
1183 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1184 			  u32 val)
1185 {
1186 	u32 frame_val;
1187 	unsigned int loops;
1188 	int ret;
1189 
1190 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1191 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1192 		return 0;
1193 
1194 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1195 		tw32_f(MAC_MI_MODE,
1196 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1197 		udelay(80);
1198 	}
1199 
1200 	tg3_ape_lock(tp, tp->phy_ape_lock);
1201 
1202 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1203 		      MI_COM_PHY_ADDR_MASK);
1204 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1205 		      MI_COM_REG_ADDR_MASK);
1206 	frame_val |= (val & MI_COM_DATA_MASK);
1207 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1208 
1209 	tw32_f(MAC_MI_COM, frame_val);
1210 
1211 	loops = PHY_BUSY_LOOPS;
1212 	while (loops != 0) {
1213 		udelay(10);
1214 		frame_val = tr32(MAC_MI_COM);
1215 		if ((frame_val & MI_COM_BUSY) == 0) {
1216 			udelay(5);
1217 			frame_val = tr32(MAC_MI_COM);
1218 			break;
1219 		}
1220 		loops -= 1;
1221 	}
1222 
1223 	ret = -EBUSY;
1224 	if (loops != 0)
1225 		ret = 0;
1226 
1227 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1228 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1229 		udelay(80);
1230 	}
1231 
1232 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1233 
1234 	return ret;
1235 }
1236 
1237 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1238 {
1239 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1240 }
1241 
1242 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1243 {
1244 	int err;
1245 
1246 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 	if (err)
1248 		goto done;
1249 
1250 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 	if (err)
1252 		goto done;
1253 
1254 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1255 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 	if (err)
1257 		goto done;
1258 
1259 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1260 
1261 done:
1262 	return err;
1263 }
1264 
1265 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1266 {
1267 	int err;
1268 
1269 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1270 	if (err)
1271 		goto done;
1272 
1273 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1274 	if (err)
1275 		goto done;
1276 
1277 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1278 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1279 	if (err)
1280 		goto done;
1281 
1282 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1283 
1284 done:
1285 	return err;
1286 }
1287 
1288 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1289 {
1290 	int err;
1291 
1292 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1293 	if (!err)
1294 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1295 
1296 	return err;
1297 }
1298 
1299 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1300 {
1301 	int err;
1302 
1303 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1304 	if (!err)
1305 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1306 
1307 	return err;
1308 }
1309 
1310 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1311 {
1312 	int err;
1313 
1314 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1315 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1316 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1317 	if (!err)
1318 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1319 
1320 	return err;
1321 }
1322 
1323 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1324 {
1325 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1326 		set |= MII_TG3_AUXCTL_MISC_WREN;
1327 
1328 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1329 }
1330 
1331 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1332 {
1333 	u32 val;
1334 	int err;
1335 
1336 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1337 
1338 	if (err)
1339 		return err;
1340 
1341 	if (enable)
1342 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1343 	else
1344 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1345 
1346 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1347 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1348 
1349 	return err;
1350 }
1351 
1352 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1353 {
1354 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1355 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1356 }
1357 
1358 static int tg3_bmcr_reset(struct tg3 *tp)
1359 {
1360 	u32 phy_control;
1361 	int limit, err;
1362 
1363 	/* OK, reset it, and poll the BMCR_RESET bit until it
1364 	 * clears or we time out.
1365 	 */
1366 	phy_control = BMCR_RESET;
1367 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1368 	if (err != 0)
1369 		return -EBUSY;
1370 
1371 	limit = 5000;
1372 	while (limit--) {
1373 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1374 		if (err != 0)
1375 			return -EBUSY;
1376 
1377 		if ((phy_control & BMCR_RESET) == 0) {
1378 			udelay(40);
1379 			break;
1380 		}
1381 		udelay(10);
1382 	}
1383 	if (limit < 0)
1384 		return -EBUSY;
1385 
1386 	return 0;
1387 }
1388 
1389 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1390 {
1391 	struct tg3 *tp = bp->priv;
1392 	u32 val;
1393 
1394 	spin_lock_bh(&tp->lock);
1395 
1396 	if (__tg3_readphy(tp, mii_id, reg, &val))
1397 		val = -EIO;
1398 
1399 	spin_unlock_bh(&tp->lock);
1400 
1401 	return val;
1402 }
1403 
1404 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1405 {
1406 	struct tg3 *tp = bp->priv;
1407 	u32 ret = 0;
1408 
1409 	spin_lock_bh(&tp->lock);
1410 
1411 	if (__tg3_writephy(tp, mii_id, reg, val))
1412 		ret = -EIO;
1413 
1414 	spin_unlock_bh(&tp->lock);
1415 
1416 	return ret;
1417 }
1418 
1419 static void tg3_mdio_config_5785(struct tg3 *tp)
1420 {
1421 	u32 val;
1422 	struct phy_device *phydev;
1423 
1424 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1425 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1426 	case PHY_ID_BCM50610:
1427 	case PHY_ID_BCM50610M:
1428 		val = MAC_PHYCFG2_50610_LED_MODES;
1429 		break;
1430 	case PHY_ID_BCMAC131:
1431 		val = MAC_PHYCFG2_AC131_LED_MODES;
1432 		break;
1433 	case PHY_ID_RTL8211C:
1434 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1435 		break;
1436 	case PHY_ID_RTL8201E:
1437 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1438 		break;
1439 	default:
1440 		return;
1441 	}
1442 
1443 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1444 		tw32(MAC_PHYCFG2, val);
1445 
1446 		val = tr32(MAC_PHYCFG1);
1447 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1448 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1449 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1450 		tw32(MAC_PHYCFG1, val);
1451 
1452 		return;
1453 	}
1454 
1455 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1456 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1457 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1458 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1459 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1460 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1461 		       MAC_PHYCFG2_INBAND_ENABLE;
1462 
1463 	tw32(MAC_PHYCFG2, val);
1464 
1465 	val = tr32(MAC_PHYCFG1);
1466 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1467 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1468 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1469 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1470 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1471 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1473 	}
1474 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1475 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1476 	tw32(MAC_PHYCFG1, val);
1477 
1478 	val = tr32(MAC_EXT_RGMII_MODE);
1479 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1480 		 MAC_RGMII_MODE_RX_QUALITY |
1481 		 MAC_RGMII_MODE_RX_ACTIVITY |
1482 		 MAC_RGMII_MODE_RX_ENG_DET |
1483 		 MAC_RGMII_MODE_TX_ENABLE |
1484 		 MAC_RGMII_MODE_TX_LOWPWR |
1485 		 MAC_RGMII_MODE_TX_RESET);
1486 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1487 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1488 			val |= MAC_RGMII_MODE_RX_INT_B |
1489 			       MAC_RGMII_MODE_RX_QUALITY |
1490 			       MAC_RGMII_MODE_RX_ACTIVITY |
1491 			       MAC_RGMII_MODE_RX_ENG_DET;
1492 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1493 			val |= MAC_RGMII_MODE_TX_ENABLE |
1494 			       MAC_RGMII_MODE_TX_LOWPWR |
1495 			       MAC_RGMII_MODE_TX_RESET;
1496 	}
1497 	tw32(MAC_EXT_RGMII_MODE, val);
1498 }
1499 
1500 static void tg3_mdio_start(struct tg3 *tp)
1501 {
1502 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1503 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1504 	udelay(80);
1505 
1506 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1507 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1508 		tg3_mdio_config_5785(tp);
1509 }
1510 
1511 static int tg3_mdio_init(struct tg3 *tp)
1512 {
1513 	int i;
1514 	u32 reg;
1515 	struct phy_device *phydev;
1516 
1517 	if (tg3_flag(tp, 5717_PLUS)) {
1518 		u32 is_serdes;
1519 
1520 		tp->phy_addr = tp->pci_fn + 1;
1521 
1522 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1523 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1524 		else
1525 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1526 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1527 		if (is_serdes)
1528 			tp->phy_addr += 7;
1529 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1530 		int addr;
1531 
1532 		addr = ssb_gige_get_phyaddr(tp->pdev);
1533 		if (addr < 0)
1534 			return addr;
1535 		tp->phy_addr = addr;
1536 	} else
1537 		tp->phy_addr = TG3_PHY_MII_ADDR;
1538 
1539 	tg3_mdio_start(tp);
1540 
1541 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1542 		return 0;
1543 
1544 	tp->mdio_bus = mdiobus_alloc();
1545 	if (tp->mdio_bus == NULL)
1546 		return -ENOMEM;
1547 
1548 	tp->mdio_bus->name     = "tg3 mdio bus";
1549 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1550 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1551 	tp->mdio_bus->priv     = tp;
1552 	tp->mdio_bus->parent   = &tp->pdev->dev;
1553 	tp->mdio_bus->read     = &tg3_mdio_read;
1554 	tp->mdio_bus->write    = &tg3_mdio_write;
1555 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1556 
1557 	/* The bus registration will look for all the PHYs on the mdio bus.
1558 	 * Unfortunately, it does not ensure the PHY is powered up before
1559 	 * accessing the PHY ID registers.  A chip reset is the
1560 	 * quickest way to bring the device back to an operational state..
1561 	 */
1562 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1563 		tg3_bmcr_reset(tp);
1564 
1565 	i = mdiobus_register(tp->mdio_bus);
1566 	if (i) {
1567 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1568 		mdiobus_free(tp->mdio_bus);
1569 		return i;
1570 	}
1571 
1572 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1573 
1574 	if (!phydev || !phydev->drv) {
1575 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1576 		mdiobus_unregister(tp->mdio_bus);
1577 		mdiobus_free(tp->mdio_bus);
1578 		return -ENODEV;
1579 	}
1580 
1581 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1582 	case PHY_ID_BCM57780:
1583 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1584 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585 		break;
1586 	case PHY_ID_BCM50610:
1587 	case PHY_ID_BCM50610M:
1588 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1589 				     PHY_BRCM_RX_REFCLK_UNUSED |
1590 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1591 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1593 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1594 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1595 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1596 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1597 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1598 		/* fallthru */
1599 	case PHY_ID_RTL8211C:
1600 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1601 		break;
1602 	case PHY_ID_RTL8201E:
1603 	case PHY_ID_BCMAC131:
1604 		phydev->interface = PHY_INTERFACE_MODE_MII;
1605 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1606 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1607 		break;
1608 	}
1609 
1610 	tg3_flag_set(tp, MDIOBUS_INITED);
1611 
1612 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1613 		tg3_mdio_config_5785(tp);
1614 
1615 	return 0;
1616 }
1617 
1618 static void tg3_mdio_fini(struct tg3 *tp)
1619 {
1620 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1621 		tg3_flag_clear(tp, MDIOBUS_INITED);
1622 		mdiobus_unregister(tp->mdio_bus);
1623 		mdiobus_free(tp->mdio_bus);
1624 	}
1625 }
1626 
1627 /* tp->lock is held. */
1628 static inline void tg3_generate_fw_event(struct tg3 *tp)
1629 {
1630 	u32 val;
1631 
1632 	val = tr32(GRC_RX_CPU_EVENT);
1633 	val |= GRC_RX_CPU_DRIVER_EVENT;
1634 	tw32_f(GRC_RX_CPU_EVENT, val);
1635 
1636 	tp->last_event_jiffies = jiffies;
1637 }
1638 
1639 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1640 
1641 /* tp->lock is held. */
1642 static void tg3_wait_for_event_ack(struct tg3 *tp)
1643 {
1644 	int i;
1645 	unsigned int delay_cnt;
1646 	long time_remain;
1647 
1648 	/* If enough time has passed, no wait is necessary. */
1649 	time_remain = (long)(tp->last_event_jiffies + 1 +
1650 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1651 		      (long)jiffies;
1652 	if (time_remain < 0)
1653 		return;
1654 
1655 	/* Check if we can shorten the wait time. */
1656 	delay_cnt = jiffies_to_usecs(time_remain);
1657 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1658 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1659 	delay_cnt = (delay_cnt >> 3) + 1;
1660 
1661 	for (i = 0; i < delay_cnt; i++) {
1662 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1663 			break;
1664 		if (pci_channel_offline(tp->pdev))
1665 			break;
1666 
1667 		udelay(8);
1668 	}
1669 }
1670 
1671 /* tp->lock is held. */
1672 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1673 {
1674 	u32 reg, val;
1675 
1676 	val = 0;
1677 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1678 		val = reg << 16;
1679 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1680 		val |= (reg & 0xffff);
1681 	*data++ = val;
1682 
1683 	val = 0;
1684 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1685 		val = reg << 16;
1686 	if (!tg3_readphy(tp, MII_LPA, &reg))
1687 		val |= (reg & 0xffff);
1688 	*data++ = val;
1689 
1690 	val = 0;
1691 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1692 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1693 			val = reg << 16;
1694 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1695 			val |= (reg & 0xffff);
1696 	}
1697 	*data++ = val;
1698 
1699 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1700 		val = reg << 16;
1701 	else
1702 		val = 0;
1703 	*data++ = val;
1704 }
1705 
1706 /* tp->lock is held. */
1707 static void tg3_ump_link_report(struct tg3 *tp)
1708 {
1709 	u32 data[4];
1710 
1711 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1712 		return;
1713 
1714 	tg3_phy_gather_ump_data(tp, data);
1715 
1716 	tg3_wait_for_event_ack(tp);
1717 
1718 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1719 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1720 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1721 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1722 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1723 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1724 
1725 	tg3_generate_fw_event(tp);
1726 }
1727 
1728 /* tp->lock is held. */
1729 static void tg3_stop_fw(struct tg3 *tp)
1730 {
1731 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1732 		/* Wait for RX cpu to ACK the previous event. */
1733 		tg3_wait_for_event_ack(tp);
1734 
1735 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1736 
1737 		tg3_generate_fw_event(tp);
1738 
1739 		/* Wait for RX cpu to ACK this event. */
1740 		tg3_wait_for_event_ack(tp);
1741 	}
1742 }
1743 
1744 /* tp->lock is held. */
1745 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1746 {
1747 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1748 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1749 
1750 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1751 		switch (kind) {
1752 		case RESET_KIND_INIT:
1753 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1754 				      DRV_STATE_START);
1755 			break;
1756 
1757 		case RESET_KIND_SHUTDOWN:
1758 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1759 				      DRV_STATE_UNLOAD);
1760 			break;
1761 
1762 		case RESET_KIND_SUSPEND:
1763 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1764 				      DRV_STATE_SUSPEND);
1765 			break;
1766 
1767 		default:
1768 			break;
1769 		}
1770 	}
1771 }
1772 
1773 /* tp->lock is held. */
1774 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1775 {
1776 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1777 		switch (kind) {
1778 		case RESET_KIND_INIT:
1779 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1780 				      DRV_STATE_START_DONE);
1781 			break;
1782 
1783 		case RESET_KIND_SHUTDOWN:
1784 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785 				      DRV_STATE_UNLOAD_DONE);
1786 			break;
1787 
1788 		default:
1789 			break;
1790 		}
1791 	}
1792 }
1793 
1794 /* tp->lock is held. */
1795 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1796 {
1797 	if (tg3_flag(tp, ENABLE_ASF)) {
1798 		switch (kind) {
1799 		case RESET_KIND_INIT:
1800 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1801 				      DRV_STATE_START);
1802 			break;
1803 
1804 		case RESET_KIND_SHUTDOWN:
1805 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1806 				      DRV_STATE_UNLOAD);
1807 			break;
1808 
1809 		case RESET_KIND_SUSPEND:
1810 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1811 				      DRV_STATE_SUSPEND);
1812 			break;
1813 
1814 		default:
1815 			break;
1816 		}
1817 	}
1818 }
1819 
1820 static int tg3_poll_fw(struct tg3 *tp)
1821 {
1822 	int i;
1823 	u32 val;
1824 
1825 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1826 		return 0;
1827 
1828 	if (tg3_flag(tp, IS_SSB_CORE)) {
1829 		/* We don't use firmware. */
1830 		return 0;
1831 	}
1832 
1833 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1834 		/* Wait up to 20ms for init done. */
1835 		for (i = 0; i < 200; i++) {
1836 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1837 				return 0;
1838 			if (pci_channel_offline(tp->pdev))
1839 				return -ENODEV;
1840 
1841 			udelay(100);
1842 		}
1843 		return -ENODEV;
1844 	}
1845 
1846 	/* Wait for firmware initialization to complete. */
1847 	for (i = 0; i < 100000; i++) {
1848 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1849 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1850 			break;
1851 		if (pci_channel_offline(tp->pdev)) {
1852 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1853 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1854 				netdev_info(tp->dev, "No firmware running\n");
1855 			}
1856 
1857 			break;
1858 		}
1859 
1860 		udelay(10);
1861 	}
1862 
1863 	/* Chip might not be fitted with firmware.  Some Sun onboard
1864 	 * parts are configured like that.  So don't signal the timeout
1865 	 * of the above loop as an error, but do report the lack of
1866 	 * running firmware once.
1867 	 */
1868 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1869 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1870 
1871 		netdev_info(tp->dev, "No firmware running\n");
1872 	}
1873 
1874 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1875 		/* The 57765 A0 needs a little more
1876 		 * time to do some important work.
1877 		 */
1878 		mdelay(10);
1879 	}
1880 
1881 	return 0;
1882 }
1883 
1884 static void tg3_link_report(struct tg3 *tp)
1885 {
1886 	if (!netif_carrier_ok(tp->dev)) {
1887 		netif_info(tp, link, tp->dev, "Link is down\n");
1888 		tg3_ump_link_report(tp);
1889 	} else if (netif_msg_link(tp)) {
1890 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1891 			    (tp->link_config.active_speed == SPEED_1000 ?
1892 			     1000 :
1893 			     (tp->link_config.active_speed == SPEED_100 ?
1894 			      100 : 10)),
1895 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1896 			     "full" : "half"));
1897 
1898 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1899 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1900 			    "on" : "off",
1901 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1902 			    "on" : "off");
1903 
1904 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1905 			netdev_info(tp->dev, "EEE is %s\n",
1906 				    tp->setlpicnt ? "enabled" : "disabled");
1907 
1908 		tg3_ump_link_report(tp);
1909 	}
1910 
1911 	tp->link_up = netif_carrier_ok(tp->dev);
1912 }
1913 
1914 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1915 {
1916 	u32 flowctrl = 0;
1917 
1918 	if (adv & ADVERTISE_PAUSE_CAP) {
1919 		flowctrl |= FLOW_CTRL_RX;
1920 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1921 			flowctrl |= FLOW_CTRL_TX;
1922 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1923 		flowctrl |= FLOW_CTRL_TX;
1924 
1925 	return flowctrl;
1926 }
1927 
1928 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1929 {
1930 	u16 miireg;
1931 
1932 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1933 		miireg = ADVERTISE_1000XPAUSE;
1934 	else if (flow_ctrl & FLOW_CTRL_TX)
1935 		miireg = ADVERTISE_1000XPSE_ASYM;
1936 	else if (flow_ctrl & FLOW_CTRL_RX)
1937 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1938 	else
1939 		miireg = 0;
1940 
1941 	return miireg;
1942 }
1943 
1944 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1945 {
1946 	u32 flowctrl = 0;
1947 
1948 	if (adv & ADVERTISE_1000XPAUSE) {
1949 		flowctrl |= FLOW_CTRL_RX;
1950 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1951 			flowctrl |= FLOW_CTRL_TX;
1952 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1953 		flowctrl |= FLOW_CTRL_TX;
1954 
1955 	return flowctrl;
1956 }
1957 
1958 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1959 {
1960 	u8 cap = 0;
1961 
1962 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1963 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1964 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1965 		if (lcladv & ADVERTISE_1000XPAUSE)
1966 			cap = FLOW_CTRL_RX;
1967 		if (rmtadv & ADVERTISE_1000XPAUSE)
1968 			cap = FLOW_CTRL_TX;
1969 	}
1970 
1971 	return cap;
1972 }
1973 
1974 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1975 {
1976 	u8 autoneg;
1977 	u8 flowctrl = 0;
1978 	u32 old_rx_mode = tp->rx_mode;
1979 	u32 old_tx_mode = tp->tx_mode;
1980 
1981 	if (tg3_flag(tp, USE_PHYLIB))
1982 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1983 	else
1984 		autoneg = tp->link_config.autoneg;
1985 
1986 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1987 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1988 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1989 		else
1990 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1991 	} else
1992 		flowctrl = tp->link_config.flowctrl;
1993 
1994 	tp->link_config.active_flowctrl = flowctrl;
1995 
1996 	if (flowctrl & FLOW_CTRL_RX)
1997 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1998 	else
1999 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
2000 
2001 	if (old_rx_mode != tp->rx_mode)
2002 		tw32_f(MAC_RX_MODE, tp->rx_mode);
2003 
2004 	if (flowctrl & FLOW_CTRL_TX)
2005 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2006 	else
2007 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2008 
2009 	if (old_tx_mode != tp->tx_mode)
2010 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2011 }
2012 
2013 static void tg3_adjust_link(struct net_device *dev)
2014 {
2015 	u8 oldflowctrl, linkmesg = 0;
2016 	u32 mac_mode, lcl_adv, rmt_adv;
2017 	struct tg3 *tp = netdev_priv(dev);
2018 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2019 
2020 	spin_lock_bh(&tp->lock);
2021 
2022 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2023 				    MAC_MODE_HALF_DUPLEX);
2024 
2025 	oldflowctrl = tp->link_config.active_flowctrl;
2026 
2027 	if (phydev->link) {
2028 		lcl_adv = 0;
2029 		rmt_adv = 0;
2030 
2031 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2032 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2033 		else if (phydev->speed == SPEED_1000 ||
2034 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2035 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2036 		else
2037 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2038 
2039 		if (phydev->duplex == DUPLEX_HALF)
2040 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2041 		else {
2042 			lcl_adv = mii_advertise_flowctrl(
2043 				  tp->link_config.flowctrl);
2044 
2045 			if (phydev->pause)
2046 				rmt_adv = LPA_PAUSE_CAP;
2047 			if (phydev->asym_pause)
2048 				rmt_adv |= LPA_PAUSE_ASYM;
2049 		}
2050 
2051 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2052 	} else
2053 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2054 
2055 	if (mac_mode != tp->mac_mode) {
2056 		tp->mac_mode = mac_mode;
2057 		tw32_f(MAC_MODE, tp->mac_mode);
2058 		udelay(40);
2059 	}
2060 
2061 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2062 		if (phydev->speed == SPEED_10)
2063 			tw32(MAC_MI_STAT,
2064 			     MAC_MI_STAT_10MBPS_MODE |
2065 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2066 		else
2067 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2068 	}
2069 
2070 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2071 		tw32(MAC_TX_LENGTHS,
2072 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2073 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2074 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2075 	else
2076 		tw32(MAC_TX_LENGTHS,
2077 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2078 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2079 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2080 
2081 	if (phydev->link != tp->old_link ||
2082 	    phydev->speed != tp->link_config.active_speed ||
2083 	    phydev->duplex != tp->link_config.active_duplex ||
2084 	    oldflowctrl != tp->link_config.active_flowctrl)
2085 		linkmesg = 1;
2086 
2087 	tp->old_link = phydev->link;
2088 	tp->link_config.active_speed = phydev->speed;
2089 	tp->link_config.active_duplex = phydev->duplex;
2090 
2091 	spin_unlock_bh(&tp->lock);
2092 
2093 	if (linkmesg)
2094 		tg3_link_report(tp);
2095 }
2096 
2097 static int tg3_phy_init(struct tg3 *tp)
2098 {
2099 	struct phy_device *phydev;
2100 
2101 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2102 		return 0;
2103 
2104 	/* Bring the PHY back to a known state. */
2105 	tg3_bmcr_reset(tp);
2106 
2107 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2108 
2109 	/* Attach the MAC to the PHY. */
2110 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2111 			     tg3_adjust_link, phydev->interface);
2112 	if (IS_ERR(phydev)) {
2113 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2114 		return PTR_ERR(phydev);
2115 	}
2116 
2117 	/* Mask with MAC supported features. */
2118 	switch (phydev->interface) {
2119 	case PHY_INTERFACE_MODE_GMII:
2120 	case PHY_INTERFACE_MODE_RGMII:
2121 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2122 			phydev->supported &= (PHY_GBIT_FEATURES |
2123 					      SUPPORTED_Pause |
2124 					      SUPPORTED_Asym_Pause);
2125 			break;
2126 		}
2127 		/* fallthru */
2128 	case PHY_INTERFACE_MODE_MII:
2129 		phydev->supported &= (PHY_BASIC_FEATURES |
2130 				      SUPPORTED_Pause |
2131 				      SUPPORTED_Asym_Pause);
2132 		break;
2133 	default:
2134 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2135 		return -EINVAL;
2136 	}
2137 
2138 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2139 
2140 	phydev->advertising = phydev->supported;
2141 
2142 	phy_attached_info(phydev);
2143 
2144 	return 0;
2145 }
2146 
2147 static void tg3_phy_start(struct tg3 *tp)
2148 {
2149 	struct phy_device *phydev;
2150 
2151 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2152 		return;
2153 
2154 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2155 
2156 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2157 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2158 		phydev->speed = tp->link_config.speed;
2159 		phydev->duplex = tp->link_config.duplex;
2160 		phydev->autoneg = tp->link_config.autoneg;
2161 		phydev->advertising = tp->link_config.advertising;
2162 	}
2163 
2164 	phy_start(phydev);
2165 
2166 	phy_start_aneg(phydev);
2167 }
2168 
2169 static void tg3_phy_stop(struct tg3 *tp)
2170 {
2171 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2172 		return;
2173 
2174 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2175 }
2176 
2177 static void tg3_phy_fini(struct tg3 *tp)
2178 {
2179 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2180 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2181 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2182 	}
2183 }
2184 
2185 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2186 {
2187 	int err;
2188 	u32 val;
2189 
2190 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2191 		return 0;
2192 
2193 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2194 		/* Cannot do read-modify-write on 5401 */
2195 		err = tg3_phy_auxctl_write(tp,
2196 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2197 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2198 					   0x4c20);
2199 		goto done;
2200 	}
2201 
2202 	err = tg3_phy_auxctl_read(tp,
2203 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2204 	if (err)
2205 		return err;
2206 
2207 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2208 	err = tg3_phy_auxctl_write(tp,
2209 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2210 
2211 done:
2212 	return err;
2213 }
2214 
2215 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2216 {
2217 	u32 phytest;
2218 
2219 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2220 		u32 phy;
2221 
2222 		tg3_writephy(tp, MII_TG3_FET_TEST,
2223 			     phytest | MII_TG3_FET_SHADOW_EN);
2224 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2225 			if (enable)
2226 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2227 			else
2228 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2229 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2230 		}
2231 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2232 	}
2233 }
2234 
2235 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2236 {
2237 	u32 reg;
2238 
2239 	if (!tg3_flag(tp, 5705_PLUS) ||
2240 	    (tg3_flag(tp, 5717_PLUS) &&
2241 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2242 		return;
2243 
2244 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2245 		tg3_phy_fet_toggle_apd(tp, enable);
2246 		return;
2247 	}
2248 
2249 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2250 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2251 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2252 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2253 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2254 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2255 
2256 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2257 
2258 
2259 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2260 	if (enable)
2261 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2262 
2263 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2264 }
2265 
2266 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2267 {
2268 	u32 phy;
2269 
2270 	if (!tg3_flag(tp, 5705_PLUS) ||
2271 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2272 		return;
2273 
2274 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2275 		u32 ephy;
2276 
2277 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2278 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2279 
2280 			tg3_writephy(tp, MII_TG3_FET_TEST,
2281 				     ephy | MII_TG3_FET_SHADOW_EN);
2282 			if (!tg3_readphy(tp, reg, &phy)) {
2283 				if (enable)
2284 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2285 				else
2286 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2287 				tg3_writephy(tp, reg, phy);
2288 			}
2289 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2290 		}
2291 	} else {
2292 		int ret;
2293 
2294 		ret = tg3_phy_auxctl_read(tp,
2295 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2296 		if (!ret) {
2297 			if (enable)
2298 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2299 			else
2300 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2301 			tg3_phy_auxctl_write(tp,
2302 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2303 		}
2304 	}
2305 }
2306 
2307 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2308 {
2309 	int ret;
2310 	u32 val;
2311 
2312 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2313 		return;
2314 
2315 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2316 	if (!ret)
2317 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2318 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2319 }
2320 
2321 static void tg3_phy_apply_otp(struct tg3 *tp)
2322 {
2323 	u32 otp, phy;
2324 
2325 	if (!tp->phy_otp)
2326 		return;
2327 
2328 	otp = tp->phy_otp;
2329 
2330 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2331 		return;
2332 
2333 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2334 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2335 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2336 
2337 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2338 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2339 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2340 
2341 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2342 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2343 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2344 
2345 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2346 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2347 
2348 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2349 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2350 
2351 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2352 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2353 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2354 
2355 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2356 }
2357 
2358 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2359 {
2360 	u32 val;
2361 	struct ethtool_eee *dest = &tp->eee;
2362 
2363 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2364 		return;
2365 
2366 	if (eee)
2367 		dest = eee;
2368 
2369 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2370 		return;
2371 
2372 	/* Pull eee_active */
2373 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2374 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2375 		dest->eee_active = 1;
2376 	} else
2377 		dest->eee_active = 0;
2378 
2379 	/* Pull lp advertised settings */
2380 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2381 		return;
2382 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2383 
2384 	/* Pull advertised and eee_enabled settings */
2385 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2386 		return;
2387 	dest->eee_enabled = !!val;
2388 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2389 
2390 	/* Pull tx_lpi_enabled */
2391 	val = tr32(TG3_CPMU_EEE_MODE);
2392 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2393 
2394 	/* Pull lpi timer value */
2395 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2396 }
2397 
2398 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2399 {
2400 	u32 val;
2401 
2402 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2403 		return;
2404 
2405 	tp->setlpicnt = 0;
2406 
2407 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2408 	    current_link_up &&
2409 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2410 	    (tp->link_config.active_speed == SPEED_100 ||
2411 	     tp->link_config.active_speed == SPEED_1000)) {
2412 		u32 eeectl;
2413 
2414 		if (tp->link_config.active_speed == SPEED_1000)
2415 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2416 		else
2417 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2418 
2419 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2420 
2421 		tg3_eee_pull_config(tp, NULL);
2422 		if (tp->eee.eee_active)
2423 			tp->setlpicnt = 2;
2424 	}
2425 
2426 	if (!tp->setlpicnt) {
2427 		if (current_link_up &&
2428 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2430 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2431 		}
2432 
2433 		val = tr32(TG3_CPMU_EEE_MODE);
2434 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2435 	}
2436 }
2437 
2438 static void tg3_phy_eee_enable(struct tg3 *tp)
2439 {
2440 	u32 val;
2441 
2442 	if (tp->link_config.active_speed == SPEED_1000 &&
2443 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2444 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2445 	     tg3_flag(tp, 57765_CLASS)) &&
2446 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2447 		val = MII_TG3_DSP_TAP26_ALNOKO |
2448 		      MII_TG3_DSP_TAP26_RMRXSTO;
2449 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2450 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2451 	}
2452 
2453 	val = tr32(TG3_CPMU_EEE_MODE);
2454 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2455 }
2456 
2457 static int tg3_wait_macro_done(struct tg3 *tp)
2458 {
2459 	int limit = 100;
2460 
2461 	while (limit--) {
2462 		u32 tmp32;
2463 
2464 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2465 			if ((tmp32 & 0x1000) == 0)
2466 				break;
2467 		}
2468 	}
2469 	if (limit < 0)
2470 		return -EBUSY;
2471 
2472 	return 0;
2473 }
2474 
2475 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2476 {
2477 	static const u32 test_pat[4][6] = {
2478 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2479 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2480 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2481 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2482 	};
2483 	int chan;
2484 
2485 	for (chan = 0; chan < 4; chan++) {
2486 		int i;
2487 
2488 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2489 			     (chan * 0x2000) | 0x0200);
2490 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2491 
2492 		for (i = 0; i < 6; i++)
2493 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2494 				     test_pat[chan][i]);
2495 
2496 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2497 		if (tg3_wait_macro_done(tp)) {
2498 			*resetp = 1;
2499 			return -EBUSY;
2500 		}
2501 
2502 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2503 			     (chan * 0x2000) | 0x0200);
2504 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2505 		if (tg3_wait_macro_done(tp)) {
2506 			*resetp = 1;
2507 			return -EBUSY;
2508 		}
2509 
2510 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2511 		if (tg3_wait_macro_done(tp)) {
2512 			*resetp = 1;
2513 			return -EBUSY;
2514 		}
2515 
2516 		for (i = 0; i < 6; i += 2) {
2517 			u32 low, high;
2518 
2519 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2520 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2521 			    tg3_wait_macro_done(tp)) {
2522 				*resetp = 1;
2523 				return -EBUSY;
2524 			}
2525 			low &= 0x7fff;
2526 			high &= 0x000f;
2527 			if (low != test_pat[chan][i] ||
2528 			    high != test_pat[chan][i+1]) {
2529 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2530 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2531 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2532 
2533 				return -EBUSY;
2534 			}
2535 		}
2536 	}
2537 
2538 	return 0;
2539 }
2540 
2541 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2542 {
2543 	int chan;
2544 
2545 	for (chan = 0; chan < 4; chan++) {
2546 		int i;
2547 
2548 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2549 			     (chan * 0x2000) | 0x0200);
2550 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2551 		for (i = 0; i < 6; i++)
2552 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2553 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2554 		if (tg3_wait_macro_done(tp))
2555 			return -EBUSY;
2556 	}
2557 
2558 	return 0;
2559 }
2560 
2561 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2562 {
2563 	u32 reg32, phy9_orig;
2564 	int retries, do_phy_reset, err;
2565 
2566 	retries = 10;
2567 	do_phy_reset = 1;
2568 	do {
2569 		if (do_phy_reset) {
2570 			err = tg3_bmcr_reset(tp);
2571 			if (err)
2572 				return err;
2573 			do_phy_reset = 0;
2574 		}
2575 
2576 		/* Disable transmitter and interrupt.  */
2577 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2578 			continue;
2579 
2580 		reg32 |= 0x3000;
2581 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2582 
2583 		/* Set full-duplex, 1000 mbps.  */
2584 		tg3_writephy(tp, MII_BMCR,
2585 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2586 
2587 		/* Set to master mode.  */
2588 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2589 			continue;
2590 
2591 		tg3_writephy(tp, MII_CTRL1000,
2592 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2593 
2594 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2595 		if (err)
2596 			return err;
2597 
2598 		/* Block the PHY control access.  */
2599 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2600 
2601 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2602 		if (!err)
2603 			break;
2604 	} while (--retries);
2605 
2606 	err = tg3_phy_reset_chanpat(tp);
2607 	if (err)
2608 		return err;
2609 
2610 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2611 
2612 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2613 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2614 
2615 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2616 
2617 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2618 
2619 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2620 	if (err)
2621 		return err;
2622 
2623 	reg32 &= ~0x3000;
2624 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2625 
2626 	return 0;
2627 }
2628 
2629 static void tg3_carrier_off(struct tg3 *tp)
2630 {
2631 	netif_carrier_off(tp->dev);
2632 	tp->link_up = false;
2633 }
2634 
2635 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2636 {
2637 	if (tg3_flag(tp, ENABLE_ASF))
2638 		netdev_warn(tp->dev,
2639 			    "Management side-band traffic will be interrupted during phy settings change\n");
2640 }
2641 
2642 /* This will reset the tigon3 PHY if there is no valid
2643  * link unless the FORCE argument is non-zero.
2644  */
2645 static int tg3_phy_reset(struct tg3 *tp)
2646 {
2647 	u32 val, cpmuctrl;
2648 	int err;
2649 
2650 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2651 		val = tr32(GRC_MISC_CFG);
2652 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2653 		udelay(40);
2654 	}
2655 	err  = tg3_readphy(tp, MII_BMSR, &val);
2656 	err |= tg3_readphy(tp, MII_BMSR, &val);
2657 	if (err != 0)
2658 		return -EBUSY;
2659 
2660 	if (netif_running(tp->dev) && tp->link_up) {
2661 		netif_carrier_off(tp->dev);
2662 		tg3_link_report(tp);
2663 	}
2664 
2665 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2666 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2667 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2668 		err = tg3_phy_reset_5703_4_5(tp);
2669 		if (err)
2670 			return err;
2671 		goto out;
2672 	}
2673 
2674 	cpmuctrl = 0;
2675 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2676 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2677 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2678 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2679 			tw32(TG3_CPMU_CTRL,
2680 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2681 	}
2682 
2683 	err = tg3_bmcr_reset(tp);
2684 	if (err)
2685 		return err;
2686 
2687 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2688 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2689 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2690 
2691 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2692 	}
2693 
2694 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2695 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2696 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2697 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2698 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2699 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2700 			udelay(40);
2701 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2702 		}
2703 	}
2704 
2705 	if (tg3_flag(tp, 5717_PLUS) &&
2706 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2707 		return 0;
2708 
2709 	tg3_phy_apply_otp(tp);
2710 
2711 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2712 		tg3_phy_toggle_apd(tp, true);
2713 	else
2714 		tg3_phy_toggle_apd(tp, false);
2715 
2716 out:
2717 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2718 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2720 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2721 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2722 	}
2723 
2724 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2725 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2726 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2727 	}
2728 
2729 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2730 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2731 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2732 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2733 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2734 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2735 		}
2736 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2737 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2738 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2739 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2740 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2741 				tg3_writephy(tp, MII_TG3_TEST1,
2742 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2743 			} else
2744 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2745 
2746 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2747 		}
2748 	}
2749 
2750 	/* Set Extended packet length bit (bit 14) on all chips that */
2751 	/* support jumbo frames */
2752 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2753 		/* Cannot do read-modify-write on 5401 */
2754 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2755 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756 		/* Set bit 14 with read-modify-write to preserve other bits */
2757 		err = tg3_phy_auxctl_read(tp,
2758 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2759 		if (!err)
2760 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2761 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2762 	}
2763 
2764 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2765 	 * jumbo frames transmission.
2766 	 */
2767 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2768 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2769 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2770 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2771 	}
2772 
2773 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2774 		/* adjust output voltage */
2775 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2776 	}
2777 
2778 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2779 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2780 
2781 	tg3_phy_toggle_automdix(tp, true);
2782 	tg3_phy_set_wirespeed(tp);
2783 	return 0;
2784 }
2785 
2786 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2787 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2788 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2789 					  TG3_GPIO_MSG_NEED_VAUX)
2790 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2791 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2792 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2793 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2794 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2795 
2796 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2797 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2798 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2799 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2800 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2801 
2802 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2803 {
2804 	u32 status, shift;
2805 
2806 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2807 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2808 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2809 	else
2810 		status = tr32(TG3_CPMU_DRV_STATUS);
2811 
2812 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2813 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2814 	status |= (newstat << shift);
2815 
2816 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2817 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2818 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2819 	else
2820 		tw32(TG3_CPMU_DRV_STATUS, status);
2821 
2822 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2823 }
2824 
2825 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2826 {
2827 	if (!tg3_flag(tp, IS_NIC))
2828 		return 0;
2829 
2830 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2831 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2832 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2833 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2834 			return -EIO;
2835 
2836 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2837 
2838 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2839 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2840 
2841 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2842 	} else {
2843 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2844 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2845 	}
2846 
2847 	return 0;
2848 }
2849 
2850 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2851 {
2852 	u32 grc_local_ctrl;
2853 
2854 	if (!tg3_flag(tp, IS_NIC) ||
2855 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2856 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2857 		return;
2858 
2859 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2860 
2861 	tw32_wait_f(GRC_LOCAL_CTRL,
2862 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2863 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2864 
2865 	tw32_wait_f(GRC_LOCAL_CTRL,
2866 		    grc_local_ctrl,
2867 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2868 
2869 	tw32_wait_f(GRC_LOCAL_CTRL,
2870 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2871 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 }
2873 
2874 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2875 {
2876 	if (!tg3_flag(tp, IS_NIC))
2877 		return;
2878 
2879 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2880 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2881 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2882 			    (GRC_LCLCTRL_GPIO_OE0 |
2883 			     GRC_LCLCTRL_GPIO_OE1 |
2884 			     GRC_LCLCTRL_GPIO_OE2 |
2885 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2886 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2887 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2889 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2890 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2891 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2892 				     GRC_LCLCTRL_GPIO_OE1 |
2893 				     GRC_LCLCTRL_GPIO_OE2 |
2894 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2895 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2896 				     tp->grc_local_ctrl;
2897 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2898 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2899 
2900 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2901 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2902 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2903 
2904 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2905 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2906 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2907 	} else {
2908 		u32 no_gpio2;
2909 		u32 grc_local_ctrl = 0;
2910 
2911 		/* Workaround to prevent overdrawing Amps. */
2912 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2913 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2914 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2915 				    grc_local_ctrl,
2916 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2917 		}
2918 
2919 		/* On 5753 and variants, GPIO2 cannot be used. */
2920 		no_gpio2 = tp->nic_sram_data_cfg &
2921 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2922 
2923 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2924 				  GRC_LCLCTRL_GPIO_OE1 |
2925 				  GRC_LCLCTRL_GPIO_OE2 |
2926 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2927 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2928 		if (no_gpio2) {
2929 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2930 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2931 		}
2932 		tw32_wait_f(GRC_LOCAL_CTRL,
2933 			    tp->grc_local_ctrl | grc_local_ctrl,
2934 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2935 
2936 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2937 
2938 		tw32_wait_f(GRC_LOCAL_CTRL,
2939 			    tp->grc_local_ctrl | grc_local_ctrl,
2940 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2941 
2942 		if (!no_gpio2) {
2943 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2944 			tw32_wait_f(GRC_LOCAL_CTRL,
2945 				    tp->grc_local_ctrl | grc_local_ctrl,
2946 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2947 		}
2948 	}
2949 }
2950 
2951 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2952 {
2953 	u32 msg = 0;
2954 
2955 	/* Serialize power state transitions */
2956 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2957 		return;
2958 
2959 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2960 		msg = TG3_GPIO_MSG_NEED_VAUX;
2961 
2962 	msg = tg3_set_function_status(tp, msg);
2963 
2964 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2965 		goto done;
2966 
2967 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2968 		tg3_pwrsrc_switch_to_vaux(tp);
2969 	else
2970 		tg3_pwrsrc_die_with_vmain(tp);
2971 
2972 done:
2973 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2974 }
2975 
2976 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2977 {
2978 	bool need_vaux = false;
2979 
2980 	/* The GPIOs do something completely different on 57765. */
2981 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2982 		return;
2983 
2984 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2985 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2986 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2987 		tg3_frob_aux_power_5717(tp, include_wol ?
2988 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2989 		return;
2990 	}
2991 
2992 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2993 		struct net_device *dev_peer;
2994 
2995 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2996 
2997 		/* remove_one() may have been run on the peer. */
2998 		if (dev_peer) {
2999 			struct tg3 *tp_peer = netdev_priv(dev_peer);
3000 
3001 			if (tg3_flag(tp_peer, INIT_COMPLETE))
3002 				return;
3003 
3004 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3005 			    tg3_flag(tp_peer, ENABLE_ASF))
3006 				need_vaux = true;
3007 		}
3008 	}
3009 
3010 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3011 	    tg3_flag(tp, ENABLE_ASF))
3012 		need_vaux = true;
3013 
3014 	if (need_vaux)
3015 		tg3_pwrsrc_switch_to_vaux(tp);
3016 	else
3017 		tg3_pwrsrc_die_with_vmain(tp);
3018 }
3019 
3020 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3021 {
3022 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3023 		return 1;
3024 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3025 		if (speed != SPEED_10)
3026 			return 1;
3027 	} else if (speed == SPEED_10)
3028 		return 1;
3029 
3030 	return 0;
3031 }
3032 
3033 static bool tg3_phy_power_bug(struct tg3 *tp)
3034 {
3035 	switch (tg3_asic_rev(tp)) {
3036 	case ASIC_REV_5700:
3037 	case ASIC_REV_5704:
3038 		return true;
3039 	case ASIC_REV_5780:
3040 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3041 			return true;
3042 		return false;
3043 	case ASIC_REV_5717:
3044 		if (!tp->pci_fn)
3045 			return true;
3046 		return false;
3047 	case ASIC_REV_5719:
3048 	case ASIC_REV_5720:
3049 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3050 		    !tp->pci_fn)
3051 			return true;
3052 		return false;
3053 	}
3054 
3055 	return false;
3056 }
3057 
3058 static bool tg3_phy_led_bug(struct tg3 *tp)
3059 {
3060 	switch (tg3_asic_rev(tp)) {
3061 	case ASIC_REV_5719:
3062 	case ASIC_REV_5720:
3063 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3064 		    !tp->pci_fn)
3065 			return true;
3066 		return false;
3067 	}
3068 
3069 	return false;
3070 }
3071 
3072 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3073 {
3074 	u32 val;
3075 
3076 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3077 		return;
3078 
3079 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3080 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3081 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3082 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3083 
3084 			sg_dig_ctrl |=
3085 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3086 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3087 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3088 		}
3089 		return;
3090 	}
3091 
3092 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3093 		tg3_bmcr_reset(tp);
3094 		val = tr32(GRC_MISC_CFG);
3095 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3096 		udelay(40);
3097 		return;
3098 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3099 		u32 phytest;
3100 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3101 			u32 phy;
3102 
3103 			tg3_writephy(tp, MII_ADVERTISE, 0);
3104 			tg3_writephy(tp, MII_BMCR,
3105 				     BMCR_ANENABLE | BMCR_ANRESTART);
3106 
3107 			tg3_writephy(tp, MII_TG3_FET_TEST,
3108 				     phytest | MII_TG3_FET_SHADOW_EN);
3109 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3110 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3111 				tg3_writephy(tp,
3112 					     MII_TG3_FET_SHDW_AUXMODE4,
3113 					     phy);
3114 			}
3115 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3116 		}
3117 		return;
3118 	} else if (do_low_power) {
3119 		if (!tg3_phy_led_bug(tp))
3120 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3121 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3122 
3123 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3124 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3125 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3126 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3127 	}
3128 
3129 	/* The PHY should not be powered down on some chips because
3130 	 * of bugs.
3131 	 */
3132 	if (tg3_phy_power_bug(tp))
3133 		return;
3134 
3135 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3136 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3137 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3138 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3139 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3140 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3141 	}
3142 
3143 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3144 }
3145 
3146 /* tp->lock is held. */
3147 static int tg3_nvram_lock(struct tg3 *tp)
3148 {
3149 	if (tg3_flag(tp, NVRAM)) {
3150 		int i;
3151 
3152 		if (tp->nvram_lock_cnt == 0) {
3153 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3154 			for (i = 0; i < 8000; i++) {
3155 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3156 					break;
3157 				udelay(20);
3158 			}
3159 			if (i == 8000) {
3160 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 				return -ENODEV;
3162 			}
3163 		}
3164 		tp->nvram_lock_cnt++;
3165 	}
3166 	return 0;
3167 }
3168 
3169 /* tp->lock is held. */
3170 static void tg3_nvram_unlock(struct tg3 *tp)
3171 {
3172 	if (tg3_flag(tp, NVRAM)) {
3173 		if (tp->nvram_lock_cnt > 0)
3174 			tp->nvram_lock_cnt--;
3175 		if (tp->nvram_lock_cnt == 0)
3176 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3177 	}
3178 }
3179 
3180 /* tp->lock is held. */
3181 static void tg3_enable_nvram_access(struct tg3 *tp)
3182 {
3183 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3184 		u32 nvaccess = tr32(NVRAM_ACCESS);
3185 
3186 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3187 	}
3188 }
3189 
3190 /* tp->lock is held. */
3191 static void tg3_disable_nvram_access(struct tg3 *tp)
3192 {
3193 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3194 		u32 nvaccess = tr32(NVRAM_ACCESS);
3195 
3196 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3197 	}
3198 }
3199 
3200 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3201 					u32 offset, u32 *val)
3202 {
3203 	u32 tmp;
3204 	int i;
3205 
3206 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3207 		return -EINVAL;
3208 
3209 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3210 					EEPROM_ADDR_DEVID_MASK |
3211 					EEPROM_ADDR_READ);
3212 	tw32(GRC_EEPROM_ADDR,
3213 	     tmp |
3214 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3215 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3216 	      EEPROM_ADDR_ADDR_MASK) |
3217 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3218 
3219 	for (i = 0; i < 1000; i++) {
3220 		tmp = tr32(GRC_EEPROM_ADDR);
3221 
3222 		if (tmp & EEPROM_ADDR_COMPLETE)
3223 			break;
3224 		msleep(1);
3225 	}
3226 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3227 		return -EBUSY;
3228 
3229 	tmp = tr32(GRC_EEPROM_DATA);
3230 
3231 	/*
3232 	 * The data will always be opposite the native endian
3233 	 * format.  Perform a blind byteswap to compensate.
3234 	 */
3235 	*val = swab32(tmp);
3236 
3237 	return 0;
3238 }
3239 
3240 #define NVRAM_CMD_TIMEOUT 10000
3241 
3242 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3243 {
3244 	int i;
3245 
3246 	tw32(NVRAM_CMD, nvram_cmd);
3247 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3248 		usleep_range(10, 40);
3249 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3250 			udelay(10);
3251 			break;
3252 		}
3253 	}
3254 
3255 	if (i == NVRAM_CMD_TIMEOUT)
3256 		return -EBUSY;
3257 
3258 	return 0;
3259 }
3260 
3261 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3262 {
3263 	if (tg3_flag(tp, NVRAM) &&
3264 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3265 	    tg3_flag(tp, FLASH) &&
3266 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3267 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3268 
3269 		addr = ((addr / tp->nvram_pagesize) <<
3270 			ATMEL_AT45DB0X1B_PAGE_POS) +
3271 		       (addr % tp->nvram_pagesize);
3272 
3273 	return addr;
3274 }
3275 
3276 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3277 {
3278 	if (tg3_flag(tp, NVRAM) &&
3279 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3280 	    tg3_flag(tp, FLASH) &&
3281 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3282 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3283 
3284 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3285 			tp->nvram_pagesize) +
3286 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3287 
3288 	return addr;
3289 }
3290 
3291 /* NOTE: Data read in from NVRAM is byteswapped according to
3292  * the byteswapping settings for all other register accesses.
3293  * tg3 devices are BE devices, so on a BE machine, the data
3294  * returned will be exactly as it is seen in NVRAM.  On a LE
3295  * machine, the 32-bit value will be byteswapped.
3296  */
3297 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3298 {
3299 	int ret;
3300 
3301 	if (!tg3_flag(tp, NVRAM))
3302 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3303 
3304 	offset = tg3_nvram_phys_addr(tp, offset);
3305 
3306 	if (offset > NVRAM_ADDR_MSK)
3307 		return -EINVAL;
3308 
3309 	ret = tg3_nvram_lock(tp);
3310 	if (ret)
3311 		return ret;
3312 
3313 	tg3_enable_nvram_access(tp);
3314 
3315 	tw32(NVRAM_ADDR, offset);
3316 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3317 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3318 
3319 	if (ret == 0)
3320 		*val = tr32(NVRAM_RDDATA);
3321 
3322 	tg3_disable_nvram_access(tp);
3323 
3324 	tg3_nvram_unlock(tp);
3325 
3326 	return ret;
3327 }
3328 
3329 /* Ensures NVRAM data is in bytestream format. */
3330 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3331 {
3332 	u32 v;
3333 	int res = tg3_nvram_read(tp, offset, &v);
3334 	if (!res)
3335 		*val = cpu_to_be32(v);
3336 	return res;
3337 }
3338 
3339 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3340 				    u32 offset, u32 len, u8 *buf)
3341 {
3342 	int i, j, rc = 0;
3343 	u32 val;
3344 
3345 	for (i = 0; i < len; i += 4) {
3346 		u32 addr;
3347 		__be32 data;
3348 
3349 		addr = offset + i;
3350 
3351 		memcpy(&data, buf + i, 4);
3352 
3353 		/*
3354 		 * The SEEPROM interface expects the data to always be opposite
3355 		 * the native endian format.  We accomplish this by reversing
3356 		 * all the operations that would have been performed on the
3357 		 * data from a call to tg3_nvram_read_be32().
3358 		 */
3359 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3360 
3361 		val = tr32(GRC_EEPROM_ADDR);
3362 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3363 
3364 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3365 			EEPROM_ADDR_READ);
3366 		tw32(GRC_EEPROM_ADDR, val |
3367 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3368 			(addr & EEPROM_ADDR_ADDR_MASK) |
3369 			EEPROM_ADDR_START |
3370 			EEPROM_ADDR_WRITE);
3371 
3372 		for (j = 0; j < 1000; j++) {
3373 			val = tr32(GRC_EEPROM_ADDR);
3374 
3375 			if (val & EEPROM_ADDR_COMPLETE)
3376 				break;
3377 			msleep(1);
3378 		}
3379 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3380 			rc = -EBUSY;
3381 			break;
3382 		}
3383 	}
3384 
3385 	return rc;
3386 }
3387 
3388 /* offset and length are dword aligned */
3389 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3390 		u8 *buf)
3391 {
3392 	int ret = 0;
3393 	u32 pagesize = tp->nvram_pagesize;
3394 	u32 pagemask = pagesize - 1;
3395 	u32 nvram_cmd;
3396 	u8 *tmp;
3397 
3398 	tmp = kmalloc(pagesize, GFP_KERNEL);
3399 	if (tmp == NULL)
3400 		return -ENOMEM;
3401 
3402 	while (len) {
3403 		int j;
3404 		u32 phy_addr, page_off, size;
3405 
3406 		phy_addr = offset & ~pagemask;
3407 
3408 		for (j = 0; j < pagesize; j += 4) {
3409 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3410 						  (__be32 *) (tmp + j));
3411 			if (ret)
3412 				break;
3413 		}
3414 		if (ret)
3415 			break;
3416 
3417 		page_off = offset & pagemask;
3418 		size = pagesize;
3419 		if (len < size)
3420 			size = len;
3421 
3422 		len -= size;
3423 
3424 		memcpy(tmp + page_off, buf, size);
3425 
3426 		offset = offset + (pagesize - page_off);
3427 
3428 		tg3_enable_nvram_access(tp);
3429 
3430 		/*
3431 		 * Before we can erase the flash page, we need
3432 		 * to issue a special "write enable" command.
3433 		 */
3434 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3435 
3436 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437 			break;
3438 
3439 		/* Erase the target page */
3440 		tw32(NVRAM_ADDR, phy_addr);
3441 
3442 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3443 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3444 
3445 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3446 			break;
3447 
3448 		/* Issue another write enable to start the write. */
3449 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3450 
3451 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3452 			break;
3453 
3454 		for (j = 0; j < pagesize; j += 4) {
3455 			__be32 data;
3456 
3457 			data = *((__be32 *) (tmp + j));
3458 
3459 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3460 
3461 			tw32(NVRAM_ADDR, phy_addr + j);
3462 
3463 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3464 				NVRAM_CMD_WR;
3465 
3466 			if (j == 0)
3467 				nvram_cmd |= NVRAM_CMD_FIRST;
3468 			else if (j == (pagesize - 4))
3469 				nvram_cmd |= NVRAM_CMD_LAST;
3470 
3471 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3472 			if (ret)
3473 				break;
3474 		}
3475 		if (ret)
3476 			break;
3477 	}
3478 
3479 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3480 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3481 
3482 	kfree(tmp);
3483 
3484 	return ret;
3485 }
3486 
3487 /* offset and length are dword aligned */
3488 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3489 		u8 *buf)
3490 {
3491 	int i, ret = 0;
3492 
3493 	for (i = 0; i < len; i += 4, offset += 4) {
3494 		u32 page_off, phy_addr, nvram_cmd;
3495 		__be32 data;
3496 
3497 		memcpy(&data, buf + i, 4);
3498 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3499 
3500 		page_off = offset % tp->nvram_pagesize;
3501 
3502 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3503 
3504 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3505 
3506 		if (page_off == 0 || i == 0)
3507 			nvram_cmd |= NVRAM_CMD_FIRST;
3508 		if (page_off == (tp->nvram_pagesize - 4))
3509 			nvram_cmd |= NVRAM_CMD_LAST;
3510 
3511 		if (i == (len - 4))
3512 			nvram_cmd |= NVRAM_CMD_LAST;
3513 
3514 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3515 		    !tg3_flag(tp, FLASH) ||
3516 		    !tg3_flag(tp, 57765_PLUS))
3517 			tw32(NVRAM_ADDR, phy_addr);
3518 
3519 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3520 		    !tg3_flag(tp, 5755_PLUS) &&
3521 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3522 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3523 			u32 cmd;
3524 
3525 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3526 			ret = tg3_nvram_exec_cmd(tp, cmd);
3527 			if (ret)
3528 				break;
3529 		}
3530 		if (!tg3_flag(tp, FLASH)) {
3531 			/* We always do complete word writes to eeprom. */
3532 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3533 		}
3534 
3535 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3536 		if (ret)
3537 			break;
3538 	}
3539 	return ret;
3540 }
3541 
3542 /* offset and length are dword aligned */
3543 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3544 {
3545 	int ret;
3546 
3547 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3548 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3549 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3550 		udelay(40);
3551 	}
3552 
3553 	if (!tg3_flag(tp, NVRAM)) {
3554 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3555 	} else {
3556 		u32 grc_mode;
3557 
3558 		ret = tg3_nvram_lock(tp);
3559 		if (ret)
3560 			return ret;
3561 
3562 		tg3_enable_nvram_access(tp);
3563 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3564 			tw32(NVRAM_WRITE1, 0x406);
3565 
3566 		grc_mode = tr32(GRC_MODE);
3567 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3568 
3569 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3570 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3571 				buf);
3572 		} else {
3573 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3574 				buf);
3575 		}
3576 
3577 		grc_mode = tr32(GRC_MODE);
3578 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3579 
3580 		tg3_disable_nvram_access(tp);
3581 		tg3_nvram_unlock(tp);
3582 	}
3583 
3584 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3585 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3586 		udelay(40);
3587 	}
3588 
3589 	return ret;
3590 }
3591 
3592 #define RX_CPU_SCRATCH_BASE	0x30000
3593 #define RX_CPU_SCRATCH_SIZE	0x04000
3594 #define TX_CPU_SCRATCH_BASE	0x34000
3595 #define TX_CPU_SCRATCH_SIZE	0x04000
3596 
3597 /* tp->lock is held. */
3598 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3599 {
3600 	int i;
3601 	const int iters = 10000;
3602 
3603 	for (i = 0; i < iters; i++) {
3604 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3605 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3606 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3607 			break;
3608 		if (pci_channel_offline(tp->pdev))
3609 			return -EBUSY;
3610 	}
3611 
3612 	return (i == iters) ? -EBUSY : 0;
3613 }
3614 
3615 /* tp->lock is held. */
3616 static int tg3_rxcpu_pause(struct tg3 *tp)
3617 {
3618 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3619 
3620 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3621 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3622 	udelay(10);
3623 
3624 	return rc;
3625 }
3626 
3627 /* tp->lock is held. */
3628 static int tg3_txcpu_pause(struct tg3 *tp)
3629 {
3630 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3631 }
3632 
3633 /* tp->lock is held. */
3634 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3635 {
3636 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3637 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3638 }
3639 
3640 /* tp->lock is held. */
3641 static void tg3_rxcpu_resume(struct tg3 *tp)
3642 {
3643 	tg3_resume_cpu(tp, RX_CPU_BASE);
3644 }
3645 
3646 /* tp->lock is held. */
3647 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3648 {
3649 	int rc;
3650 
3651 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3652 
3653 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3654 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3655 
3656 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3657 		return 0;
3658 	}
3659 	if (cpu_base == RX_CPU_BASE) {
3660 		rc = tg3_rxcpu_pause(tp);
3661 	} else {
3662 		/*
3663 		 * There is only an Rx CPU for the 5750 derivative in the
3664 		 * BCM4785.
3665 		 */
3666 		if (tg3_flag(tp, IS_SSB_CORE))
3667 			return 0;
3668 
3669 		rc = tg3_txcpu_pause(tp);
3670 	}
3671 
3672 	if (rc) {
3673 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3674 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3675 		return -ENODEV;
3676 	}
3677 
3678 	/* Clear firmware's nvram arbitration. */
3679 	if (tg3_flag(tp, NVRAM))
3680 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3681 	return 0;
3682 }
3683 
3684 static int tg3_fw_data_len(struct tg3 *tp,
3685 			   const struct tg3_firmware_hdr *fw_hdr)
3686 {
3687 	int fw_len;
3688 
3689 	/* Non fragmented firmware have one firmware header followed by a
3690 	 * contiguous chunk of data to be written. The length field in that
3691 	 * header is not the length of data to be written but the complete
3692 	 * length of the bss. The data length is determined based on
3693 	 * tp->fw->size minus headers.
3694 	 *
3695 	 * Fragmented firmware have a main header followed by multiple
3696 	 * fragments. Each fragment is identical to non fragmented firmware
3697 	 * with a firmware header followed by a contiguous chunk of data. In
3698 	 * the main header, the length field is unused and set to 0xffffffff.
3699 	 * In each fragment header the length is the entire size of that
3700 	 * fragment i.e. fragment data + header length. Data length is
3701 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3702 	 */
3703 	if (tp->fw_len == 0xffffffff)
3704 		fw_len = be32_to_cpu(fw_hdr->len);
3705 	else
3706 		fw_len = tp->fw->size;
3707 
3708 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3709 }
3710 
3711 /* tp->lock is held. */
3712 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3713 				 u32 cpu_scratch_base, int cpu_scratch_size,
3714 				 const struct tg3_firmware_hdr *fw_hdr)
3715 {
3716 	int err, i;
3717 	void (*write_op)(struct tg3 *, u32, u32);
3718 	int total_len = tp->fw->size;
3719 
3720 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3721 		netdev_err(tp->dev,
3722 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3723 			   __func__);
3724 		return -EINVAL;
3725 	}
3726 
3727 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3728 		write_op = tg3_write_mem;
3729 	else
3730 		write_op = tg3_write_indirect_reg32;
3731 
3732 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3733 		/* It is possible that bootcode is still loading at this point.
3734 		 * Get the nvram lock first before halting the cpu.
3735 		 */
3736 		int lock_err = tg3_nvram_lock(tp);
3737 		err = tg3_halt_cpu(tp, cpu_base);
3738 		if (!lock_err)
3739 			tg3_nvram_unlock(tp);
3740 		if (err)
3741 			goto out;
3742 
3743 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3744 			write_op(tp, cpu_scratch_base + i, 0);
3745 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3746 		tw32(cpu_base + CPU_MODE,
3747 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3748 	} else {
3749 		/* Subtract additional main header for fragmented firmware and
3750 		 * advance to the first fragment
3751 		 */
3752 		total_len -= TG3_FW_HDR_LEN;
3753 		fw_hdr++;
3754 	}
3755 
3756 	do {
3757 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3758 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3759 			write_op(tp, cpu_scratch_base +
3760 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3761 				     (i * sizeof(u32)),
3762 				 be32_to_cpu(fw_data[i]));
3763 
3764 		total_len -= be32_to_cpu(fw_hdr->len);
3765 
3766 		/* Advance to next fragment */
3767 		fw_hdr = (struct tg3_firmware_hdr *)
3768 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3769 	} while (total_len > 0);
3770 
3771 	err = 0;
3772 
3773 out:
3774 	return err;
3775 }
3776 
3777 /* tp->lock is held. */
3778 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3779 {
3780 	int i;
3781 	const int iters = 5;
3782 
3783 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3784 	tw32_f(cpu_base + CPU_PC, pc);
3785 
3786 	for (i = 0; i < iters; i++) {
3787 		if (tr32(cpu_base + CPU_PC) == pc)
3788 			break;
3789 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3790 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3791 		tw32_f(cpu_base + CPU_PC, pc);
3792 		udelay(1000);
3793 	}
3794 
3795 	return (i == iters) ? -EBUSY : 0;
3796 }
3797 
3798 /* tp->lock is held. */
3799 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3800 {
3801 	const struct tg3_firmware_hdr *fw_hdr;
3802 	int err;
3803 
3804 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3805 
3806 	/* Firmware blob starts with version numbers, followed by
3807 	   start address and length. We are setting complete length.
3808 	   length = end_address_of_bss - start_address_of_text.
3809 	   Remainder is the blob to be loaded contiguously
3810 	   from start address. */
3811 
3812 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3813 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3814 				    fw_hdr);
3815 	if (err)
3816 		return err;
3817 
3818 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3819 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3820 				    fw_hdr);
3821 	if (err)
3822 		return err;
3823 
3824 	/* Now startup only the RX cpu. */
3825 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3826 				       be32_to_cpu(fw_hdr->base_addr));
3827 	if (err) {
3828 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3829 			   "should be %08x\n", __func__,
3830 			   tr32(RX_CPU_BASE + CPU_PC),
3831 				be32_to_cpu(fw_hdr->base_addr));
3832 		return -ENODEV;
3833 	}
3834 
3835 	tg3_rxcpu_resume(tp);
3836 
3837 	return 0;
3838 }
3839 
3840 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3841 {
3842 	const int iters = 1000;
3843 	int i;
3844 	u32 val;
3845 
3846 	/* Wait for boot code to complete initialization and enter service
3847 	 * loop. It is then safe to download service patches
3848 	 */
3849 	for (i = 0; i < iters; i++) {
3850 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3851 			break;
3852 
3853 		udelay(10);
3854 	}
3855 
3856 	if (i == iters) {
3857 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3858 		return -EBUSY;
3859 	}
3860 
3861 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3862 	if (val & 0xff) {
3863 		netdev_warn(tp->dev,
3864 			    "Other patches exist. Not downloading EEE patch\n");
3865 		return -EEXIST;
3866 	}
3867 
3868 	return 0;
3869 }
3870 
3871 /* tp->lock is held. */
3872 static void tg3_load_57766_firmware(struct tg3 *tp)
3873 {
3874 	struct tg3_firmware_hdr *fw_hdr;
3875 
3876 	if (!tg3_flag(tp, NO_NVRAM))
3877 		return;
3878 
3879 	if (tg3_validate_rxcpu_state(tp))
3880 		return;
3881 
3882 	if (!tp->fw)
3883 		return;
3884 
3885 	/* This firmware blob has a different format than older firmware
3886 	 * releases as given below. The main difference is we have fragmented
3887 	 * data to be written to non-contiguous locations.
3888 	 *
3889 	 * In the beginning we have a firmware header identical to other
3890 	 * firmware which consists of version, base addr and length. The length
3891 	 * here is unused and set to 0xffffffff.
3892 	 *
3893 	 * This is followed by a series of firmware fragments which are
3894 	 * individually identical to previous firmware. i.e. they have the
3895 	 * firmware header and followed by data for that fragment. The version
3896 	 * field of the individual fragment header is unused.
3897 	 */
3898 
3899 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3900 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3901 		return;
3902 
3903 	if (tg3_rxcpu_pause(tp))
3904 		return;
3905 
3906 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3907 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3908 
3909 	tg3_rxcpu_resume(tp);
3910 }
3911 
3912 /* tp->lock is held. */
3913 static int tg3_load_tso_firmware(struct tg3 *tp)
3914 {
3915 	const struct tg3_firmware_hdr *fw_hdr;
3916 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3917 	int err;
3918 
3919 	if (!tg3_flag(tp, FW_TSO))
3920 		return 0;
3921 
3922 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3923 
3924 	/* Firmware blob starts with version numbers, followed by
3925 	   start address and length. We are setting complete length.
3926 	   length = end_address_of_bss - start_address_of_text.
3927 	   Remainder is the blob to be loaded contiguously
3928 	   from start address. */
3929 
3930 	cpu_scratch_size = tp->fw_len;
3931 
3932 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3933 		cpu_base = RX_CPU_BASE;
3934 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3935 	} else {
3936 		cpu_base = TX_CPU_BASE;
3937 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3938 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3939 	}
3940 
3941 	err = tg3_load_firmware_cpu(tp, cpu_base,
3942 				    cpu_scratch_base, cpu_scratch_size,
3943 				    fw_hdr);
3944 	if (err)
3945 		return err;
3946 
3947 	/* Now startup the cpu. */
3948 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3949 				       be32_to_cpu(fw_hdr->base_addr));
3950 	if (err) {
3951 		netdev_err(tp->dev,
3952 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3953 			   __func__, tr32(cpu_base + CPU_PC),
3954 			   be32_to_cpu(fw_hdr->base_addr));
3955 		return -ENODEV;
3956 	}
3957 
3958 	tg3_resume_cpu(tp, cpu_base);
3959 	return 0;
3960 }
3961 
3962 /* tp->lock is held. */
3963 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3964 {
3965 	u32 addr_high, addr_low;
3966 
3967 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3968 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3969 		    (mac_addr[4] <<  8) | mac_addr[5]);
3970 
3971 	if (index < 4) {
3972 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3973 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3974 	} else {
3975 		index -= 4;
3976 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3977 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3978 	}
3979 }
3980 
3981 /* tp->lock is held. */
3982 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3983 {
3984 	u32 addr_high;
3985 	int i;
3986 
3987 	for (i = 0; i < 4; i++) {
3988 		if (i == 1 && skip_mac_1)
3989 			continue;
3990 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3991 	}
3992 
3993 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3994 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3995 		for (i = 4; i < 16; i++)
3996 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3997 	}
3998 
3999 	addr_high = (tp->dev->dev_addr[0] +
4000 		     tp->dev->dev_addr[1] +
4001 		     tp->dev->dev_addr[2] +
4002 		     tp->dev->dev_addr[3] +
4003 		     tp->dev->dev_addr[4] +
4004 		     tp->dev->dev_addr[5]) &
4005 		TX_BACKOFF_SEED_MASK;
4006 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
4007 }
4008 
4009 static void tg3_enable_register_access(struct tg3 *tp)
4010 {
4011 	/*
4012 	 * Make sure register accesses (indirect or otherwise) will function
4013 	 * correctly.
4014 	 */
4015 	pci_write_config_dword(tp->pdev,
4016 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4017 }
4018 
4019 static int tg3_power_up(struct tg3 *tp)
4020 {
4021 	int err;
4022 
4023 	tg3_enable_register_access(tp);
4024 
4025 	err = pci_set_power_state(tp->pdev, PCI_D0);
4026 	if (!err) {
4027 		/* Switch out of Vaux if it is a NIC */
4028 		tg3_pwrsrc_switch_to_vmain(tp);
4029 	} else {
4030 		netdev_err(tp->dev, "Transition to D0 failed\n");
4031 	}
4032 
4033 	return err;
4034 }
4035 
4036 static int tg3_setup_phy(struct tg3 *, bool);
4037 
4038 static int tg3_power_down_prepare(struct tg3 *tp)
4039 {
4040 	u32 misc_host_ctrl;
4041 	bool device_should_wake, do_low_power;
4042 
4043 	tg3_enable_register_access(tp);
4044 
4045 	/* Restore the CLKREQ setting. */
4046 	if (tg3_flag(tp, CLKREQ_BUG))
4047 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4048 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4049 
4050 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4051 	tw32(TG3PCI_MISC_HOST_CTRL,
4052 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4053 
4054 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4055 			     tg3_flag(tp, WOL_ENABLE);
4056 
4057 	if (tg3_flag(tp, USE_PHYLIB)) {
4058 		do_low_power = false;
4059 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4060 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4061 			struct phy_device *phydev;
4062 			u32 phyid, advertising;
4063 
4064 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4065 
4066 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4067 
4068 			tp->link_config.speed = phydev->speed;
4069 			tp->link_config.duplex = phydev->duplex;
4070 			tp->link_config.autoneg = phydev->autoneg;
4071 			tp->link_config.advertising = phydev->advertising;
4072 
4073 			advertising = ADVERTISED_TP |
4074 				      ADVERTISED_Pause |
4075 				      ADVERTISED_Autoneg |
4076 				      ADVERTISED_10baseT_Half;
4077 
4078 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4079 				if (tg3_flag(tp, WOL_SPEED_100MB))
4080 					advertising |=
4081 						ADVERTISED_100baseT_Half |
4082 						ADVERTISED_100baseT_Full |
4083 						ADVERTISED_10baseT_Full;
4084 				else
4085 					advertising |= ADVERTISED_10baseT_Full;
4086 			}
4087 
4088 			phydev->advertising = advertising;
4089 
4090 			phy_start_aneg(phydev);
4091 
4092 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4093 			if (phyid != PHY_ID_BCMAC131) {
4094 				phyid &= PHY_BCM_OUI_MASK;
4095 				if (phyid == PHY_BCM_OUI_1 ||
4096 				    phyid == PHY_BCM_OUI_2 ||
4097 				    phyid == PHY_BCM_OUI_3)
4098 					do_low_power = true;
4099 			}
4100 		}
4101 	} else {
4102 		do_low_power = true;
4103 
4104 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4105 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4106 
4107 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4108 			tg3_setup_phy(tp, false);
4109 	}
4110 
4111 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4112 		u32 val;
4113 
4114 		val = tr32(GRC_VCPU_EXT_CTRL);
4115 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4116 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4117 		int i;
4118 		u32 val;
4119 
4120 		for (i = 0; i < 200; i++) {
4121 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4122 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4123 				break;
4124 			msleep(1);
4125 		}
4126 	}
4127 	if (tg3_flag(tp, WOL_CAP))
4128 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4129 						     WOL_DRV_STATE_SHUTDOWN |
4130 						     WOL_DRV_WOL |
4131 						     WOL_SET_MAGIC_PKT);
4132 
4133 	if (device_should_wake) {
4134 		u32 mac_mode;
4135 
4136 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4137 			if (do_low_power &&
4138 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4139 				tg3_phy_auxctl_write(tp,
4140 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4141 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4142 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4143 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4144 				udelay(40);
4145 			}
4146 
4147 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4148 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4149 			else if (tp->phy_flags &
4150 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4151 				if (tp->link_config.active_speed == SPEED_1000)
4152 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4153 				else
4154 					mac_mode = MAC_MODE_PORT_MODE_MII;
4155 			} else
4156 				mac_mode = MAC_MODE_PORT_MODE_MII;
4157 
4158 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4159 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4160 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4161 					     SPEED_100 : SPEED_10;
4162 				if (tg3_5700_link_polarity(tp, speed))
4163 					mac_mode |= MAC_MODE_LINK_POLARITY;
4164 				else
4165 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4166 			}
4167 		} else {
4168 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4169 		}
4170 
4171 		if (!tg3_flag(tp, 5750_PLUS))
4172 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4173 
4174 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4175 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4176 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4177 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4178 
4179 		if (tg3_flag(tp, ENABLE_APE))
4180 			mac_mode |= MAC_MODE_APE_TX_EN |
4181 				    MAC_MODE_APE_RX_EN |
4182 				    MAC_MODE_TDE_ENABLE;
4183 
4184 		tw32_f(MAC_MODE, mac_mode);
4185 		udelay(100);
4186 
4187 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4188 		udelay(10);
4189 	}
4190 
4191 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4192 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4193 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4194 		u32 base_val;
4195 
4196 		base_val = tp->pci_clock_ctrl;
4197 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4198 			     CLOCK_CTRL_TXCLK_DISABLE);
4199 
4200 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4201 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4202 	} else if (tg3_flag(tp, 5780_CLASS) ||
4203 		   tg3_flag(tp, CPMU_PRESENT) ||
4204 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4205 		/* do nothing */
4206 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4207 		u32 newbits1, newbits2;
4208 
4209 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4210 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4211 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4212 				    CLOCK_CTRL_TXCLK_DISABLE |
4213 				    CLOCK_CTRL_ALTCLK);
4214 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 		} else if (tg3_flag(tp, 5705_PLUS)) {
4216 			newbits1 = CLOCK_CTRL_625_CORE;
4217 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4218 		} else {
4219 			newbits1 = CLOCK_CTRL_ALTCLK;
4220 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4221 		}
4222 
4223 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4224 			    40);
4225 
4226 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4227 			    40);
4228 
4229 		if (!tg3_flag(tp, 5705_PLUS)) {
4230 			u32 newbits3;
4231 
4232 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4233 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4234 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4235 					    CLOCK_CTRL_TXCLK_DISABLE |
4236 					    CLOCK_CTRL_44MHZ_CORE);
4237 			} else {
4238 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4239 			}
4240 
4241 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4242 				    tp->pci_clock_ctrl | newbits3, 40);
4243 		}
4244 	}
4245 
4246 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4247 		tg3_power_down_phy(tp, do_low_power);
4248 
4249 	tg3_frob_aux_power(tp, true);
4250 
4251 	/* Workaround for unstable PLL clock */
4252 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4253 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4254 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4255 		u32 val = tr32(0x7d00);
4256 
4257 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4258 		tw32(0x7d00, val);
4259 		if (!tg3_flag(tp, ENABLE_ASF)) {
4260 			int err;
4261 
4262 			err = tg3_nvram_lock(tp);
4263 			tg3_halt_cpu(tp, RX_CPU_BASE);
4264 			if (!err)
4265 				tg3_nvram_unlock(tp);
4266 		}
4267 	}
4268 
4269 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4270 
4271 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4272 
4273 	return 0;
4274 }
4275 
4276 static void tg3_power_down(struct tg3 *tp)
4277 {
4278 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4279 	pci_set_power_state(tp->pdev, PCI_D3hot);
4280 }
4281 
4282 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4283 {
4284 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4285 	case MII_TG3_AUX_STAT_10HALF:
4286 		*speed = SPEED_10;
4287 		*duplex = DUPLEX_HALF;
4288 		break;
4289 
4290 	case MII_TG3_AUX_STAT_10FULL:
4291 		*speed = SPEED_10;
4292 		*duplex = DUPLEX_FULL;
4293 		break;
4294 
4295 	case MII_TG3_AUX_STAT_100HALF:
4296 		*speed = SPEED_100;
4297 		*duplex = DUPLEX_HALF;
4298 		break;
4299 
4300 	case MII_TG3_AUX_STAT_100FULL:
4301 		*speed = SPEED_100;
4302 		*duplex = DUPLEX_FULL;
4303 		break;
4304 
4305 	case MII_TG3_AUX_STAT_1000HALF:
4306 		*speed = SPEED_1000;
4307 		*duplex = DUPLEX_HALF;
4308 		break;
4309 
4310 	case MII_TG3_AUX_STAT_1000FULL:
4311 		*speed = SPEED_1000;
4312 		*duplex = DUPLEX_FULL;
4313 		break;
4314 
4315 	default:
4316 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4317 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4318 				 SPEED_10;
4319 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4320 				  DUPLEX_HALF;
4321 			break;
4322 		}
4323 		*speed = SPEED_UNKNOWN;
4324 		*duplex = DUPLEX_UNKNOWN;
4325 		break;
4326 	}
4327 }
4328 
4329 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4330 {
4331 	int err = 0;
4332 	u32 val, new_adv;
4333 
4334 	new_adv = ADVERTISE_CSMA;
4335 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4336 	new_adv |= mii_advertise_flowctrl(flowctrl);
4337 
4338 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4339 	if (err)
4340 		goto done;
4341 
4342 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4343 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4344 
4345 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4346 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4347 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4348 
4349 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4350 		if (err)
4351 			goto done;
4352 	}
4353 
4354 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4355 		goto done;
4356 
4357 	tw32(TG3_CPMU_EEE_MODE,
4358 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4359 
4360 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4361 	if (!err) {
4362 		u32 err2;
4363 
4364 		val = 0;
4365 		/* Advertise 100-BaseTX EEE ability */
4366 		if (advertise & ADVERTISED_100baseT_Full)
4367 			val |= MDIO_AN_EEE_ADV_100TX;
4368 		/* Advertise 1000-BaseT EEE ability */
4369 		if (advertise & ADVERTISED_1000baseT_Full)
4370 			val |= MDIO_AN_EEE_ADV_1000T;
4371 
4372 		if (!tp->eee.eee_enabled) {
4373 			val = 0;
4374 			tp->eee.advertised = 0;
4375 		} else {
4376 			tp->eee.advertised = advertise &
4377 					     (ADVERTISED_100baseT_Full |
4378 					      ADVERTISED_1000baseT_Full);
4379 		}
4380 
4381 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4382 		if (err)
4383 			val = 0;
4384 
4385 		switch (tg3_asic_rev(tp)) {
4386 		case ASIC_REV_5717:
4387 		case ASIC_REV_57765:
4388 		case ASIC_REV_57766:
4389 		case ASIC_REV_5719:
4390 			/* If we advertised any eee advertisements above... */
4391 			if (val)
4392 				val = MII_TG3_DSP_TAP26_ALNOKO |
4393 				      MII_TG3_DSP_TAP26_RMRXSTO |
4394 				      MII_TG3_DSP_TAP26_OPCSINPT;
4395 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4396 			/* Fall through */
4397 		case ASIC_REV_5720:
4398 		case ASIC_REV_5762:
4399 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4400 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4401 						 MII_TG3_DSP_CH34TP2_HIBW01);
4402 		}
4403 
4404 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4405 		if (!err)
4406 			err = err2;
4407 	}
4408 
4409 done:
4410 	return err;
4411 }
4412 
4413 static void tg3_phy_copper_begin(struct tg3 *tp)
4414 {
4415 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4416 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4417 		u32 adv, fc;
4418 
4419 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4420 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4421 			adv = ADVERTISED_10baseT_Half |
4422 			      ADVERTISED_10baseT_Full;
4423 			if (tg3_flag(tp, WOL_SPEED_100MB))
4424 				adv |= ADVERTISED_100baseT_Half |
4425 				       ADVERTISED_100baseT_Full;
4426 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4427 				if (!(tp->phy_flags &
4428 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4429 					adv |= ADVERTISED_1000baseT_Half;
4430 				adv |= ADVERTISED_1000baseT_Full;
4431 			}
4432 
4433 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4434 		} else {
4435 			adv = tp->link_config.advertising;
4436 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4437 				adv &= ~(ADVERTISED_1000baseT_Half |
4438 					 ADVERTISED_1000baseT_Full);
4439 
4440 			fc = tp->link_config.flowctrl;
4441 		}
4442 
4443 		tg3_phy_autoneg_cfg(tp, adv, fc);
4444 
4445 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4446 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4447 			/* Normally during power down we want to autonegotiate
4448 			 * the lowest possible speed for WOL. However, to avoid
4449 			 * link flap, we leave it untouched.
4450 			 */
4451 			return;
4452 		}
4453 
4454 		tg3_writephy(tp, MII_BMCR,
4455 			     BMCR_ANENABLE | BMCR_ANRESTART);
4456 	} else {
4457 		int i;
4458 		u32 bmcr, orig_bmcr;
4459 
4460 		tp->link_config.active_speed = tp->link_config.speed;
4461 		tp->link_config.active_duplex = tp->link_config.duplex;
4462 
4463 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4464 			/* With autoneg disabled, 5715 only links up when the
4465 			 * advertisement register has the configured speed
4466 			 * enabled.
4467 			 */
4468 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4469 		}
4470 
4471 		bmcr = 0;
4472 		switch (tp->link_config.speed) {
4473 		default:
4474 		case SPEED_10:
4475 			break;
4476 
4477 		case SPEED_100:
4478 			bmcr |= BMCR_SPEED100;
4479 			break;
4480 
4481 		case SPEED_1000:
4482 			bmcr |= BMCR_SPEED1000;
4483 			break;
4484 		}
4485 
4486 		if (tp->link_config.duplex == DUPLEX_FULL)
4487 			bmcr |= BMCR_FULLDPLX;
4488 
4489 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4490 		    (bmcr != orig_bmcr)) {
4491 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4492 			for (i = 0; i < 1500; i++) {
4493 				u32 tmp;
4494 
4495 				udelay(10);
4496 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4497 				    tg3_readphy(tp, MII_BMSR, &tmp))
4498 					continue;
4499 				if (!(tmp & BMSR_LSTATUS)) {
4500 					udelay(40);
4501 					break;
4502 				}
4503 			}
4504 			tg3_writephy(tp, MII_BMCR, bmcr);
4505 			udelay(40);
4506 		}
4507 	}
4508 }
4509 
4510 static int tg3_phy_pull_config(struct tg3 *tp)
4511 {
4512 	int err;
4513 	u32 val;
4514 
4515 	err = tg3_readphy(tp, MII_BMCR, &val);
4516 	if (err)
4517 		goto done;
4518 
4519 	if (!(val & BMCR_ANENABLE)) {
4520 		tp->link_config.autoneg = AUTONEG_DISABLE;
4521 		tp->link_config.advertising = 0;
4522 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4523 
4524 		err = -EIO;
4525 
4526 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4527 		case 0:
4528 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4529 				goto done;
4530 
4531 			tp->link_config.speed = SPEED_10;
4532 			break;
4533 		case BMCR_SPEED100:
4534 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4535 				goto done;
4536 
4537 			tp->link_config.speed = SPEED_100;
4538 			break;
4539 		case BMCR_SPEED1000:
4540 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4541 				tp->link_config.speed = SPEED_1000;
4542 				break;
4543 			}
4544 			/* Fall through */
4545 		default:
4546 			goto done;
4547 		}
4548 
4549 		if (val & BMCR_FULLDPLX)
4550 			tp->link_config.duplex = DUPLEX_FULL;
4551 		else
4552 			tp->link_config.duplex = DUPLEX_HALF;
4553 
4554 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4555 
4556 		err = 0;
4557 		goto done;
4558 	}
4559 
4560 	tp->link_config.autoneg = AUTONEG_ENABLE;
4561 	tp->link_config.advertising = ADVERTISED_Autoneg;
4562 	tg3_flag_set(tp, PAUSE_AUTONEG);
4563 
4564 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4565 		u32 adv;
4566 
4567 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4568 		if (err)
4569 			goto done;
4570 
4571 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4572 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4573 
4574 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4575 	} else {
4576 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4577 	}
4578 
4579 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4580 		u32 adv;
4581 
4582 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4583 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4584 			if (err)
4585 				goto done;
4586 
4587 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4588 		} else {
4589 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4590 			if (err)
4591 				goto done;
4592 
4593 			adv = tg3_decode_flowctrl_1000X(val);
4594 			tp->link_config.flowctrl = adv;
4595 
4596 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4597 			adv = mii_adv_to_ethtool_adv_x(val);
4598 		}
4599 
4600 		tp->link_config.advertising |= adv;
4601 	}
4602 
4603 done:
4604 	return err;
4605 }
4606 
4607 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4608 {
4609 	int err;
4610 
4611 	/* Turn off tap power management. */
4612 	/* Set Extended packet length bit */
4613 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4614 
4615 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4616 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4617 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4618 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4619 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4620 
4621 	udelay(40);
4622 
4623 	return err;
4624 }
4625 
4626 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4627 {
4628 	struct ethtool_eee eee;
4629 
4630 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4631 		return true;
4632 
4633 	tg3_eee_pull_config(tp, &eee);
4634 
4635 	if (tp->eee.eee_enabled) {
4636 		if (tp->eee.advertised != eee.advertised ||
4637 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4638 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4639 			return false;
4640 	} else {
4641 		/* EEE is disabled but we're advertising */
4642 		if (eee.advertised)
4643 			return false;
4644 	}
4645 
4646 	return true;
4647 }
4648 
4649 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4650 {
4651 	u32 advmsk, tgtadv, advertising;
4652 
4653 	advertising = tp->link_config.advertising;
4654 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4655 
4656 	advmsk = ADVERTISE_ALL;
4657 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4658 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4659 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4660 	}
4661 
4662 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4663 		return false;
4664 
4665 	if ((*lcladv & advmsk) != tgtadv)
4666 		return false;
4667 
4668 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4669 		u32 tg3_ctrl;
4670 
4671 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4672 
4673 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4674 			return false;
4675 
4676 		if (tgtadv &&
4677 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4678 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4679 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4680 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4681 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4682 		} else {
4683 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4684 		}
4685 
4686 		if (tg3_ctrl != tgtadv)
4687 			return false;
4688 	}
4689 
4690 	return true;
4691 }
4692 
4693 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4694 {
4695 	u32 lpeth = 0;
4696 
4697 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4698 		u32 val;
4699 
4700 		if (tg3_readphy(tp, MII_STAT1000, &val))
4701 			return false;
4702 
4703 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4704 	}
4705 
4706 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4707 		return false;
4708 
4709 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4710 	tp->link_config.rmt_adv = lpeth;
4711 
4712 	return true;
4713 }
4714 
4715 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4716 {
4717 	if (curr_link_up != tp->link_up) {
4718 		if (curr_link_up) {
4719 			netif_carrier_on(tp->dev);
4720 		} else {
4721 			netif_carrier_off(tp->dev);
4722 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4723 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4724 		}
4725 
4726 		tg3_link_report(tp);
4727 		return true;
4728 	}
4729 
4730 	return false;
4731 }
4732 
4733 static void tg3_clear_mac_status(struct tg3 *tp)
4734 {
4735 	tw32(MAC_EVENT, 0);
4736 
4737 	tw32_f(MAC_STATUS,
4738 	       MAC_STATUS_SYNC_CHANGED |
4739 	       MAC_STATUS_CFG_CHANGED |
4740 	       MAC_STATUS_MI_COMPLETION |
4741 	       MAC_STATUS_LNKSTATE_CHANGED);
4742 	udelay(40);
4743 }
4744 
4745 static void tg3_setup_eee(struct tg3 *tp)
4746 {
4747 	u32 val;
4748 
4749 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4750 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4751 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4752 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4753 
4754 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4755 
4756 	tw32_f(TG3_CPMU_EEE_CTRL,
4757 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4758 
4759 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4760 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4761 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4762 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4763 
4764 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4765 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4766 
4767 	if (tg3_flag(tp, ENABLE_APE))
4768 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4769 
4770 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4771 
4772 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4773 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4774 	       (tp->eee.tx_lpi_timer & 0xffff));
4775 
4776 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4777 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4778 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4779 }
4780 
4781 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4782 {
4783 	bool current_link_up;
4784 	u32 bmsr, val;
4785 	u32 lcl_adv, rmt_adv;
4786 	u16 current_speed;
4787 	u8 current_duplex;
4788 	int i, err;
4789 
4790 	tg3_clear_mac_status(tp);
4791 
4792 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4793 		tw32_f(MAC_MI_MODE,
4794 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4795 		udelay(80);
4796 	}
4797 
4798 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4799 
4800 	/* Some third-party PHYs need to be reset on link going
4801 	 * down.
4802 	 */
4803 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4804 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4805 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4806 	    tp->link_up) {
4807 		tg3_readphy(tp, MII_BMSR, &bmsr);
4808 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4809 		    !(bmsr & BMSR_LSTATUS))
4810 			force_reset = true;
4811 	}
4812 	if (force_reset)
4813 		tg3_phy_reset(tp);
4814 
4815 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4816 		tg3_readphy(tp, MII_BMSR, &bmsr);
4817 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4818 		    !tg3_flag(tp, INIT_COMPLETE))
4819 			bmsr = 0;
4820 
4821 		if (!(bmsr & BMSR_LSTATUS)) {
4822 			err = tg3_init_5401phy_dsp(tp);
4823 			if (err)
4824 				return err;
4825 
4826 			tg3_readphy(tp, MII_BMSR, &bmsr);
4827 			for (i = 0; i < 1000; i++) {
4828 				udelay(10);
4829 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4830 				    (bmsr & BMSR_LSTATUS)) {
4831 					udelay(40);
4832 					break;
4833 				}
4834 			}
4835 
4836 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4837 			    TG3_PHY_REV_BCM5401_B0 &&
4838 			    !(bmsr & BMSR_LSTATUS) &&
4839 			    tp->link_config.active_speed == SPEED_1000) {
4840 				err = tg3_phy_reset(tp);
4841 				if (!err)
4842 					err = tg3_init_5401phy_dsp(tp);
4843 				if (err)
4844 					return err;
4845 			}
4846 		}
4847 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4848 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4849 		/* 5701 {A0,B0} CRC bug workaround */
4850 		tg3_writephy(tp, 0x15, 0x0a75);
4851 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4852 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4853 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4854 	}
4855 
4856 	/* Clear pending interrupts... */
4857 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4858 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4859 
4860 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4861 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4862 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4863 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4864 
4865 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4866 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4867 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4868 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4869 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4870 		else
4871 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4872 	}
4873 
4874 	current_link_up = false;
4875 	current_speed = SPEED_UNKNOWN;
4876 	current_duplex = DUPLEX_UNKNOWN;
4877 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4878 	tp->link_config.rmt_adv = 0;
4879 
4880 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4881 		err = tg3_phy_auxctl_read(tp,
4882 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4883 					  &val);
4884 		if (!err && !(val & (1 << 10))) {
4885 			tg3_phy_auxctl_write(tp,
4886 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4887 					     val | (1 << 10));
4888 			goto relink;
4889 		}
4890 	}
4891 
4892 	bmsr = 0;
4893 	for (i = 0; i < 100; i++) {
4894 		tg3_readphy(tp, MII_BMSR, &bmsr);
4895 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4896 		    (bmsr & BMSR_LSTATUS))
4897 			break;
4898 		udelay(40);
4899 	}
4900 
4901 	if (bmsr & BMSR_LSTATUS) {
4902 		u32 aux_stat, bmcr;
4903 
4904 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4905 		for (i = 0; i < 2000; i++) {
4906 			udelay(10);
4907 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4908 			    aux_stat)
4909 				break;
4910 		}
4911 
4912 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4913 					     &current_speed,
4914 					     &current_duplex);
4915 
4916 		bmcr = 0;
4917 		for (i = 0; i < 200; i++) {
4918 			tg3_readphy(tp, MII_BMCR, &bmcr);
4919 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4920 				continue;
4921 			if (bmcr && bmcr != 0x7fff)
4922 				break;
4923 			udelay(10);
4924 		}
4925 
4926 		lcl_adv = 0;
4927 		rmt_adv = 0;
4928 
4929 		tp->link_config.active_speed = current_speed;
4930 		tp->link_config.active_duplex = current_duplex;
4931 
4932 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4933 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4934 
4935 			if ((bmcr & BMCR_ANENABLE) &&
4936 			    eee_config_ok &&
4937 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4938 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4939 				current_link_up = true;
4940 
4941 			/* EEE settings changes take effect only after a phy
4942 			 * reset.  If we have skipped a reset due to Link Flap
4943 			 * Avoidance being enabled, do it now.
4944 			 */
4945 			if (!eee_config_ok &&
4946 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4947 			    !force_reset) {
4948 				tg3_setup_eee(tp);
4949 				tg3_phy_reset(tp);
4950 			}
4951 		} else {
4952 			if (!(bmcr & BMCR_ANENABLE) &&
4953 			    tp->link_config.speed == current_speed &&
4954 			    tp->link_config.duplex == current_duplex) {
4955 				current_link_up = true;
4956 			}
4957 		}
4958 
4959 		if (current_link_up &&
4960 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4961 			u32 reg, bit;
4962 
4963 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4964 				reg = MII_TG3_FET_GEN_STAT;
4965 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4966 			} else {
4967 				reg = MII_TG3_EXT_STAT;
4968 				bit = MII_TG3_EXT_STAT_MDIX;
4969 			}
4970 
4971 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4972 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4973 
4974 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4975 		}
4976 	}
4977 
4978 relink:
4979 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4980 		tg3_phy_copper_begin(tp);
4981 
4982 		if (tg3_flag(tp, ROBOSWITCH)) {
4983 			current_link_up = true;
4984 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4985 			current_speed = SPEED_1000;
4986 			current_duplex = DUPLEX_FULL;
4987 			tp->link_config.active_speed = current_speed;
4988 			tp->link_config.active_duplex = current_duplex;
4989 		}
4990 
4991 		tg3_readphy(tp, MII_BMSR, &bmsr);
4992 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4993 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4994 			current_link_up = true;
4995 	}
4996 
4997 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4998 	if (current_link_up) {
4999 		if (tp->link_config.active_speed == SPEED_100 ||
5000 		    tp->link_config.active_speed == SPEED_10)
5001 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5002 		else
5003 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5004 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5005 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5006 	else
5007 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5008 
5009 	/* In order for the 5750 core in BCM4785 chip to work properly
5010 	 * in RGMII mode, the Led Control Register must be set up.
5011 	 */
5012 	if (tg3_flag(tp, RGMII_MODE)) {
5013 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5014 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5015 
5016 		if (tp->link_config.active_speed == SPEED_10)
5017 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5018 		else if (tp->link_config.active_speed == SPEED_100)
5019 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5020 				     LED_CTRL_100MBPS_ON);
5021 		else if (tp->link_config.active_speed == SPEED_1000)
5022 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5023 				     LED_CTRL_1000MBPS_ON);
5024 
5025 		tw32(MAC_LED_CTRL, led_ctrl);
5026 		udelay(40);
5027 	}
5028 
5029 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5030 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5031 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5032 
5033 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5034 		if (current_link_up &&
5035 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5036 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5037 		else
5038 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5039 	}
5040 
5041 	/* ??? Without this setting Netgear GA302T PHY does not
5042 	 * ??? send/receive packets...
5043 	 */
5044 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5045 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5046 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5047 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5048 		udelay(80);
5049 	}
5050 
5051 	tw32_f(MAC_MODE, tp->mac_mode);
5052 	udelay(40);
5053 
5054 	tg3_phy_eee_adjust(tp, current_link_up);
5055 
5056 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5057 		/* Polled via timer. */
5058 		tw32_f(MAC_EVENT, 0);
5059 	} else {
5060 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5061 	}
5062 	udelay(40);
5063 
5064 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5065 	    current_link_up &&
5066 	    tp->link_config.active_speed == SPEED_1000 &&
5067 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5068 		udelay(120);
5069 		tw32_f(MAC_STATUS,
5070 		     (MAC_STATUS_SYNC_CHANGED |
5071 		      MAC_STATUS_CFG_CHANGED));
5072 		udelay(40);
5073 		tg3_write_mem(tp,
5074 			      NIC_SRAM_FIRMWARE_MBOX,
5075 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5076 	}
5077 
5078 	/* Prevent send BD corruption. */
5079 	if (tg3_flag(tp, CLKREQ_BUG)) {
5080 		if (tp->link_config.active_speed == SPEED_100 ||
5081 		    tp->link_config.active_speed == SPEED_10)
5082 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5083 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5084 		else
5085 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5086 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5087 	}
5088 
5089 	tg3_test_and_report_link_chg(tp, current_link_up);
5090 
5091 	return 0;
5092 }
5093 
5094 struct tg3_fiber_aneginfo {
5095 	int state;
5096 #define ANEG_STATE_UNKNOWN		0
5097 #define ANEG_STATE_AN_ENABLE		1
5098 #define ANEG_STATE_RESTART_INIT		2
5099 #define ANEG_STATE_RESTART		3
5100 #define ANEG_STATE_DISABLE_LINK_OK	4
5101 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5102 #define ANEG_STATE_ABILITY_DETECT	6
5103 #define ANEG_STATE_ACK_DETECT_INIT	7
5104 #define ANEG_STATE_ACK_DETECT		8
5105 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5106 #define ANEG_STATE_COMPLETE_ACK		10
5107 #define ANEG_STATE_IDLE_DETECT_INIT	11
5108 #define ANEG_STATE_IDLE_DETECT		12
5109 #define ANEG_STATE_LINK_OK		13
5110 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5111 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5112 
5113 	u32 flags;
5114 #define MR_AN_ENABLE		0x00000001
5115 #define MR_RESTART_AN		0x00000002
5116 #define MR_AN_COMPLETE		0x00000004
5117 #define MR_PAGE_RX		0x00000008
5118 #define MR_NP_LOADED		0x00000010
5119 #define MR_TOGGLE_TX		0x00000020
5120 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5121 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5122 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5123 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5124 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5125 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5126 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5127 #define MR_TOGGLE_RX		0x00002000
5128 #define MR_NP_RX		0x00004000
5129 
5130 #define MR_LINK_OK		0x80000000
5131 
5132 	unsigned long link_time, cur_time;
5133 
5134 	u32 ability_match_cfg;
5135 	int ability_match_count;
5136 
5137 	char ability_match, idle_match, ack_match;
5138 
5139 	u32 txconfig, rxconfig;
5140 #define ANEG_CFG_NP		0x00000080
5141 #define ANEG_CFG_ACK		0x00000040
5142 #define ANEG_CFG_RF2		0x00000020
5143 #define ANEG_CFG_RF1		0x00000010
5144 #define ANEG_CFG_PS2		0x00000001
5145 #define ANEG_CFG_PS1		0x00008000
5146 #define ANEG_CFG_HD		0x00004000
5147 #define ANEG_CFG_FD		0x00002000
5148 #define ANEG_CFG_INVAL		0x00001f06
5149 
5150 };
5151 #define ANEG_OK		0
5152 #define ANEG_DONE	1
5153 #define ANEG_TIMER_ENAB	2
5154 #define ANEG_FAILED	-1
5155 
5156 #define ANEG_STATE_SETTLE_TIME	10000
5157 
5158 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5159 				   struct tg3_fiber_aneginfo *ap)
5160 {
5161 	u16 flowctrl;
5162 	unsigned long delta;
5163 	u32 rx_cfg_reg;
5164 	int ret;
5165 
5166 	if (ap->state == ANEG_STATE_UNKNOWN) {
5167 		ap->rxconfig = 0;
5168 		ap->link_time = 0;
5169 		ap->cur_time = 0;
5170 		ap->ability_match_cfg = 0;
5171 		ap->ability_match_count = 0;
5172 		ap->ability_match = 0;
5173 		ap->idle_match = 0;
5174 		ap->ack_match = 0;
5175 	}
5176 	ap->cur_time++;
5177 
5178 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5179 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5180 
5181 		if (rx_cfg_reg != ap->ability_match_cfg) {
5182 			ap->ability_match_cfg = rx_cfg_reg;
5183 			ap->ability_match = 0;
5184 			ap->ability_match_count = 0;
5185 		} else {
5186 			if (++ap->ability_match_count > 1) {
5187 				ap->ability_match = 1;
5188 				ap->ability_match_cfg = rx_cfg_reg;
5189 			}
5190 		}
5191 		if (rx_cfg_reg & ANEG_CFG_ACK)
5192 			ap->ack_match = 1;
5193 		else
5194 			ap->ack_match = 0;
5195 
5196 		ap->idle_match = 0;
5197 	} else {
5198 		ap->idle_match = 1;
5199 		ap->ability_match_cfg = 0;
5200 		ap->ability_match_count = 0;
5201 		ap->ability_match = 0;
5202 		ap->ack_match = 0;
5203 
5204 		rx_cfg_reg = 0;
5205 	}
5206 
5207 	ap->rxconfig = rx_cfg_reg;
5208 	ret = ANEG_OK;
5209 
5210 	switch (ap->state) {
5211 	case ANEG_STATE_UNKNOWN:
5212 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5213 			ap->state = ANEG_STATE_AN_ENABLE;
5214 
5215 		/* fallthru */
5216 	case ANEG_STATE_AN_ENABLE:
5217 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5218 		if (ap->flags & MR_AN_ENABLE) {
5219 			ap->link_time = 0;
5220 			ap->cur_time = 0;
5221 			ap->ability_match_cfg = 0;
5222 			ap->ability_match_count = 0;
5223 			ap->ability_match = 0;
5224 			ap->idle_match = 0;
5225 			ap->ack_match = 0;
5226 
5227 			ap->state = ANEG_STATE_RESTART_INIT;
5228 		} else {
5229 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5230 		}
5231 		break;
5232 
5233 	case ANEG_STATE_RESTART_INIT:
5234 		ap->link_time = ap->cur_time;
5235 		ap->flags &= ~(MR_NP_LOADED);
5236 		ap->txconfig = 0;
5237 		tw32(MAC_TX_AUTO_NEG, 0);
5238 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5239 		tw32_f(MAC_MODE, tp->mac_mode);
5240 		udelay(40);
5241 
5242 		ret = ANEG_TIMER_ENAB;
5243 		ap->state = ANEG_STATE_RESTART;
5244 
5245 		/* fallthru */
5246 	case ANEG_STATE_RESTART:
5247 		delta = ap->cur_time - ap->link_time;
5248 		if (delta > ANEG_STATE_SETTLE_TIME)
5249 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5250 		else
5251 			ret = ANEG_TIMER_ENAB;
5252 		break;
5253 
5254 	case ANEG_STATE_DISABLE_LINK_OK:
5255 		ret = ANEG_DONE;
5256 		break;
5257 
5258 	case ANEG_STATE_ABILITY_DETECT_INIT:
5259 		ap->flags &= ~(MR_TOGGLE_TX);
5260 		ap->txconfig = ANEG_CFG_FD;
5261 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5262 		if (flowctrl & ADVERTISE_1000XPAUSE)
5263 			ap->txconfig |= ANEG_CFG_PS1;
5264 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5265 			ap->txconfig |= ANEG_CFG_PS2;
5266 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5267 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5268 		tw32_f(MAC_MODE, tp->mac_mode);
5269 		udelay(40);
5270 
5271 		ap->state = ANEG_STATE_ABILITY_DETECT;
5272 		break;
5273 
5274 	case ANEG_STATE_ABILITY_DETECT:
5275 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5276 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5277 		break;
5278 
5279 	case ANEG_STATE_ACK_DETECT_INIT:
5280 		ap->txconfig |= ANEG_CFG_ACK;
5281 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5282 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5283 		tw32_f(MAC_MODE, tp->mac_mode);
5284 		udelay(40);
5285 
5286 		ap->state = ANEG_STATE_ACK_DETECT;
5287 
5288 		/* fallthru */
5289 	case ANEG_STATE_ACK_DETECT:
5290 		if (ap->ack_match != 0) {
5291 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5292 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5293 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5294 			} else {
5295 				ap->state = ANEG_STATE_AN_ENABLE;
5296 			}
5297 		} else if (ap->ability_match != 0 &&
5298 			   ap->rxconfig == 0) {
5299 			ap->state = ANEG_STATE_AN_ENABLE;
5300 		}
5301 		break;
5302 
5303 	case ANEG_STATE_COMPLETE_ACK_INIT:
5304 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5305 			ret = ANEG_FAILED;
5306 			break;
5307 		}
5308 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5309 			       MR_LP_ADV_HALF_DUPLEX |
5310 			       MR_LP_ADV_SYM_PAUSE |
5311 			       MR_LP_ADV_ASYM_PAUSE |
5312 			       MR_LP_ADV_REMOTE_FAULT1 |
5313 			       MR_LP_ADV_REMOTE_FAULT2 |
5314 			       MR_LP_ADV_NEXT_PAGE |
5315 			       MR_TOGGLE_RX |
5316 			       MR_NP_RX);
5317 		if (ap->rxconfig & ANEG_CFG_FD)
5318 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5319 		if (ap->rxconfig & ANEG_CFG_HD)
5320 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5321 		if (ap->rxconfig & ANEG_CFG_PS1)
5322 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5323 		if (ap->rxconfig & ANEG_CFG_PS2)
5324 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5325 		if (ap->rxconfig & ANEG_CFG_RF1)
5326 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5327 		if (ap->rxconfig & ANEG_CFG_RF2)
5328 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5329 		if (ap->rxconfig & ANEG_CFG_NP)
5330 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5331 
5332 		ap->link_time = ap->cur_time;
5333 
5334 		ap->flags ^= (MR_TOGGLE_TX);
5335 		if (ap->rxconfig & 0x0008)
5336 			ap->flags |= MR_TOGGLE_RX;
5337 		if (ap->rxconfig & ANEG_CFG_NP)
5338 			ap->flags |= MR_NP_RX;
5339 		ap->flags |= MR_PAGE_RX;
5340 
5341 		ap->state = ANEG_STATE_COMPLETE_ACK;
5342 		ret = ANEG_TIMER_ENAB;
5343 		break;
5344 
5345 	case ANEG_STATE_COMPLETE_ACK:
5346 		if (ap->ability_match != 0 &&
5347 		    ap->rxconfig == 0) {
5348 			ap->state = ANEG_STATE_AN_ENABLE;
5349 			break;
5350 		}
5351 		delta = ap->cur_time - ap->link_time;
5352 		if (delta > ANEG_STATE_SETTLE_TIME) {
5353 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5354 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5355 			} else {
5356 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5357 				    !(ap->flags & MR_NP_RX)) {
5358 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5359 				} else {
5360 					ret = ANEG_FAILED;
5361 				}
5362 			}
5363 		}
5364 		break;
5365 
5366 	case ANEG_STATE_IDLE_DETECT_INIT:
5367 		ap->link_time = ap->cur_time;
5368 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5369 		tw32_f(MAC_MODE, tp->mac_mode);
5370 		udelay(40);
5371 
5372 		ap->state = ANEG_STATE_IDLE_DETECT;
5373 		ret = ANEG_TIMER_ENAB;
5374 		break;
5375 
5376 	case ANEG_STATE_IDLE_DETECT:
5377 		if (ap->ability_match != 0 &&
5378 		    ap->rxconfig == 0) {
5379 			ap->state = ANEG_STATE_AN_ENABLE;
5380 			break;
5381 		}
5382 		delta = ap->cur_time - ap->link_time;
5383 		if (delta > ANEG_STATE_SETTLE_TIME) {
5384 			/* XXX another gem from the Broadcom driver :( */
5385 			ap->state = ANEG_STATE_LINK_OK;
5386 		}
5387 		break;
5388 
5389 	case ANEG_STATE_LINK_OK:
5390 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5391 		ret = ANEG_DONE;
5392 		break;
5393 
5394 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5395 		/* ??? unimplemented */
5396 		break;
5397 
5398 	case ANEG_STATE_NEXT_PAGE_WAIT:
5399 		/* ??? unimplemented */
5400 		break;
5401 
5402 	default:
5403 		ret = ANEG_FAILED;
5404 		break;
5405 	}
5406 
5407 	return ret;
5408 }
5409 
5410 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5411 {
5412 	int res = 0;
5413 	struct tg3_fiber_aneginfo aninfo;
5414 	int status = ANEG_FAILED;
5415 	unsigned int tick;
5416 	u32 tmp;
5417 
5418 	tw32_f(MAC_TX_AUTO_NEG, 0);
5419 
5420 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5421 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5422 	udelay(40);
5423 
5424 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5425 	udelay(40);
5426 
5427 	memset(&aninfo, 0, sizeof(aninfo));
5428 	aninfo.flags |= MR_AN_ENABLE;
5429 	aninfo.state = ANEG_STATE_UNKNOWN;
5430 	aninfo.cur_time = 0;
5431 	tick = 0;
5432 	while (++tick < 195000) {
5433 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5434 		if (status == ANEG_DONE || status == ANEG_FAILED)
5435 			break;
5436 
5437 		udelay(1);
5438 	}
5439 
5440 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5441 	tw32_f(MAC_MODE, tp->mac_mode);
5442 	udelay(40);
5443 
5444 	*txflags = aninfo.txconfig;
5445 	*rxflags = aninfo.flags;
5446 
5447 	if (status == ANEG_DONE &&
5448 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5449 			     MR_LP_ADV_FULL_DUPLEX)))
5450 		res = 1;
5451 
5452 	return res;
5453 }
5454 
5455 static void tg3_init_bcm8002(struct tg3 *tp)
5456 {
5457 	u32 mac_status = tr32(MAC_STATUS);
5458 	int i;
5459 
5460 	/* Reset when initting first time or we have a link. */
5461 	if (tg3_flag(tp, INIT_COMPLETE) &&
5462 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5463 		return;
5464 
5465 	/* Set PLL lock range. */
5466 	tg3_writephy(tp, 0x16, 0x8007);
5467 
5468 	/* SW reset */
5469 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5470 
5471 	/* Wait for reset to complete. */
5472 	/* XXX schedule_timeout() ... */
5473 	for (i = 0; i < 500; i++)
5474 		udelay(10);
5475 
5476 	/* Config mode; select PMA/Ch 1 regs. */
5477 	tg3_writephy(tp, 0x10, 0x8411);
5478 
5479 	/* Enable auto-lock and comdet, select txclk for tx. */
5480 	tg3_writephy(tp, 0x11, 0x0a10);
5481 
5482 	tg3_writephy(tp, 0x18, 0x00a0);
5483 	tg3_writephy(tp, 0x16, 0x41ff);
5484 
5485 	/* Assert and deassert POR. */
5486 	tg3_writephy(tp, 0x13, 0x0400);
5487 	udelay(40);
5488 	tg3_writephy(tp, 0x13, 0x0000);
5489 
5490 	tg3_writephy(tp, 0x11, 0x0a50);
5491 	udelay(40);
5492 	tg3_writephy(tp, 0x11, 0x0a10);
5493 
5494 	/* Wait for signal to stabilize */
5495 	/* XXX schedule_timeout() ... */
5496 	for (i = 0; i < 15000; i++)
5497 		udelay(10);
5498 
5499 	/* Deselect the channel register so we can read the PHYID
5500 	 * later.
5501 	 */
5502 	tg3_writephy(tp, 0x10, 0x8011);
5503 }
5504 
5505 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5506 {
5507 	u16 flowctrl;
5508 	bool current_link_up;
5509 	u32 sg_dig_ctrl, sg_dig_status;
5510 	u32 serdes_cfg, expected_sg_dig_ctrl;
5511 	int workaround, port_a;
5512 
5513 	serdes_cfg = 0;
5514 	expected_sg_dig_ctrl = 0;
5515 	workaround = 0;
5516 	port_a = 1;
5517 	current_link_up = false;
5518 
5519 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5520 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5521 		workaround = 1;
5522 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5523 			port_a = 0;
5524 
5525 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5526 		/* preserve bits 20-23 for voltage regulator */
5527 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5528 	}
5529 
5530 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5531 
5532 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5533 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5534 			if (workaround) {
5535 				u32 val = serdes_cfg;
5536 
5537 				if (port_a)
5538 					val |= 0xc010000;
5539 				else
5540 					val |= 0x4010000;
5541 				tw32_f(MAC_SERDES_CFG, val);
5542 			}
5543 
5544 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5545 		}
5546 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5547 			tg3_setup_flow_control(tp, 0, 0);
5548 			current_link_up = true;
5549 		}
5550 		goto out;
5551 	}
5552 
5553 	/* Want auto-negotiation.  */
5554 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5555 
5556 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5557 	if (flowctrl & ADVERTISE_1000XPAUSE)
5558 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5559 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5560 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5561 
5562 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5563 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5564 		    tp->serdes_counter &&
5565 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5566 				    MAC_STATUS_RCVD_CFG)) ==
5567 		     MAC_STATUS_PCS_SYNCED)) {
5568 			tp->serdes_counter--;
5569 			current_link_up = true;
5570 			goto out;
5571 		}
5572 restart_autoneg:
5573 		if (workaround)
5574 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5575 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5576 		udelay(5);
5577 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5578 
5579 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5580 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5581 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5582 				 MAC_STATUS_SIGNAL_DET)) {
5583 		sg_dig_status = tr32(SG_DIG_STATUS);
5584 		mac_status = tr32(MAC_STATUS);
5585 
5586 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5587 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5588 			u32 local_adv = 0, remote_adv = 0;
5589 
5590 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5591 				local_adv |= ADVERTISE_1000XPAUSE;
5592 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5593 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5594 
5595 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5596 				remote_adv |= LPA_1000XPAUSE;
5597 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5598 				remote_adv |= LPA_1000XPAUSE_ASYM;
5599 
5600 			tp->link_config.rmt_adv =
5601 					   mii_adv_to_ethtool_adv_x(remote_adv);
5602 
5603 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5604 			current_link_up = true;
5605 			tp->serdes_counter = 0;
5606 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5607 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5608 			if (tp->serdes_counter)
5609 				tp->serdes_counter--;
5610 			else {
5611 				if (workaround) {
5612 					u32 val = serdes_cfg;
5613 
5614 					if (port_a)
5615 						val |= 0xc010000;
5616 					else
5617 						val |= 0x4010000;
5618 
5619 					tw32_f(MAC_SERDES_CFG, val);
5620 				}
5621 
5622 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5623 				udelay(40);
5624 
5625 				/* Link parallel detection - link is up */
5626 				/* only if we have PCS_SYNC and not */
5627 				/* receiving config code words */
5628 				mac_status = tr32(MAC_STATUS);
5629 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5630 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5631 					tg3_setup_flow_control(tp, 0, 0);
5632 					current_link_up = true;
5633 					tp->phy_flags |=
5634 						TG3_PHYFLG_PARALLEL_DETECT;
5635 					tp->serdes_counter =
5636 						SERDES_PARALLEL_DET_TIMEOUT;
5637 				} else
5638 					goto restart_autoneg;
5639 			}
5640 		}
5641 	} else {
5642 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5643 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5644 	}
5645 
5646 out:
5647 	return current_link_up;
5648 }
5649 
5650 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5651 {
5652 	bool current_link_up = false;
5653 
5654 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5655 		goto out;
5656 
5657 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5658 		u32 txflags, rxflags;
5659 		int i;
5660 
5661 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5662 			u32 local_adv = 0, remote_adv = 0;
5663 
5664 			if (txflags & ANEG_CFG_PS1)
5665 				local_adv |= ADVERTISE_1000XPAUSE;
5666 			if (txflags & ANEG_CFG_PS2)
5667 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5668 
5669 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5670 				remote_adv |= LPA_1000XPAUSE;
5671 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5672 				remote_adv |= LPA_1000XPAUSE_ASYM;
5673 
5674 			tp->link_config.rmt_adv =
5675 					   mii_adv_to_ethtool_adv_x(remote_adv);
5676 
5677 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5678 
5679 			current_link_up = true;
5680 		}
5681 		for (i = 0; i < 30; i++) {
5682 			udelay(20);
5683 			tw32_f(MAC_STATUS,
5684 			       (MAC_STATUS_SYNC_CHANGED |
5685 				MAC_STATUS_CFG_CHANGED));
5686 			udelay(40);
5687 			if ((tr32(MAC_STATUS) &
5688 			     (MAC_STATUS_SYNC_CHANGED |
5689 			      MAC_STATUS_CFG_CHANGED)) == 0)
5690 				break;
5691 		}
5692 
5693 		mac_status = tr32(MAC_STATUS);
5694 		if (!current_link_up &&
5695 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5696 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5697 			current_link_up = true;
5698 	} else {
5699 		tg3_setup_flow_control(tp, 0, 0);
5700 
5701 		/* Forcing 1000FD link up. */
5702 		current_link_up = true;
5703 
5704 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5705 		udelay(40);
5706 
5707 		tw32_f(MAC_MODE, tp->mac_mode);
5708 		udelay(40);
5709 	}
5710 
5711 out:
5712 	return current_link_up;
5713 }
5714 
5715 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5716 {
5717 	u32 orig_pause_cfg;
5718 	u16 orig_active_speed;
5719 	u8 orig_active_duplex;
5720 	u32 mac_status;
5721 	bool current_link_up;
5722 	int i;
5723 
5724 	orig_pause_cfg = tp->link_config.active_flowctrl;
5725 	orig_active_speed = tp->link_config.active_speed;
5726 	orig_active_duplex = tp->link_config.active_duplex;
5727 
5728 	if (!tg3_flag(tp, HW_AUTONEG) &&
5729 	    tp->link_up &&
5730 	    tg3_flag(tp, INIT_COMPLETE)) {
5731 		mac_status = tr32(MAC_STATUS);
5732 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5733 			       MAC_STATUS_SIGNAL_DET |
5734 			       MAC_STATUS_CFG_CHANGED |
5735 			       MAC_STATUS_RCVD_CFG);
5736 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5737 				   MAC_STATUS_SIGNAL_DET)) {
5738 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5739 					    MAC_STATUS_CFG_CHANGED));
5740 			return 0;
5741 		}
5742 	}
5743 
5744 	tw32_f(MAC_TX_AUTO_NEG, 0);
5745 
5746 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5747 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5748 	tw32_f(MAC_MODE, tp->mac_mode);
5749 	udelay(40);
5750 
5751 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5752 		tg3_init_bcm8002(tp);
5753 
5754 	/* Enable link change event even when serdes polling.  */
5755 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5756 	udelay(40);
5757 
5758 	current_link_up = false;
5759 	tp->link_config.rmt_adv = 0;
5760 	mac_status = tr32(MAC_STATUS);
5761 
5762 	if (tg3_flag(tp, HW_AUTONEG))
5763 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5764 	else
5765 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5766 
5767 	tp->napi[0].hw_status->status =
5768 		(SD_STATUS_UPDATED |
5769 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5770 
5771 	for (i = 0; i < 100; i++) {
5772 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5773 				    MAC_STATUS_CFG_CHANGED));
5774 		udelay(5);
5775 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5776 					 MAC_STATUS_CFG_CHANGED |
5777 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5778 			break;
5779 	}
5780 
5781 	mac_status = tr32(MAC_STATUS);
5782 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5783 		current_link_up = false;
5784 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5785 		    tp->serdes_counter == 0) {
5786 			tw32_f(MAC_MODE, (tp->mac_mode |
5787 					  MAC_MODE_SEND_CONFIGS));
5788 			udelay(1);
5789 			tw32_f(MAC_MODE, tp->mac_mode);
5790 		}
5791 	}
5792 
5793 	if (current_link_up) {
5794 		tp->link_config.active_speed = SPEED_1000;
5795 		tp->link_config.active_duplex = DUPLEX_FULL;
5796 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5797 				    LED_CTRL_LNKLED_OVERRIDE |
5798 				    LED_CTRL_1000MBPS_ON));
5799 	} else {
5800 		tp->link_config.active_speed = SPEED_UNKNOWN;
5801 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5802 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5803 				    LED_CTRL_LNKLED_OVERRIDE |
5804 				    LED_CTRL_TRAFFIC_OVERRIDE));
5805 	}
5806 
5807 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5808 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5809 		if (orig_pause_cfg != now_pause_cfg ||
5810 		    orig_active_speed != tp->link_config.active_speed ||
5811 		    orig_active_duplex != tp->link_config.active_duplex)
5812 			tg3_link_report(tp);
5813 	}
5814 
5815 	return 0;
5816 }
5817 
5818 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5819 {
5820 	int err = 0;
5821 	u32 bmsr, bmcr;
5822 	u16 current_speed = SPEED_UNKNOWN;
5823 	u8 current_duplex = DUPLEX_UNKNOWN;
5824 	bool current_link_up = false;
5825 	u32 local_adv, remote_adv, sgsr;
5826 
5827 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5828 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5829 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5830 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5831 
5832 		if (force_reset)
5833 			tg3_phy_reset(tp);
5834 
5835 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5836 
5837 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5838 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5839 		} else {
5840 			current_link_up = true;
5841 			if (sgsr & SERDES_TG3_SPEED_1000) {
5842 				current_speed = SPEED_1000;
5843 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5844 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5845 				current_speed = SPEED_100;
5846 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5847 			} else {
5848 				current_speed = SPEED_10;
5849 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5850 			}
5851 
5852 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5853 				current_duplex = DUPLEX_FULL;
5854 			else
5855 				current_duplex = DUPLEX_HALF;
5856 		}
5857 
5858 		tw32_f(MAC_MODE, tp->mac_mode);
5859 		udelay(40);
5860 
5861 		tg3_clear_mac_status(tp);
5862 
5863 		goto fiber_setup_done;
5864 	}
5865 
5866 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5867 	tw32_f(MAC_MODE, tp->mac_mode);
5868 	udelay(40);
5869 
5870 	tg3_clear_mac_status(tp);
5871 
5872 	if (force_reset)
5873 		tg3_phy_reset(tp);
5874 
5875 	tp->link_config.rmt_adv = 0;
5876 
5877 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5878 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5879 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5880 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5881 			bmsr |= BMSR_LSTATUS;
5882 		else
5883 			bmsr &= ~BMSR_LSTATUS;
5884 	}
5885 
5886 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5887 
5888 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5889 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5890 		/* do nothing, just check for link up at the end */
5891 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5892 		u32 adv, newadv;
5893 
5894 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5895 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5896 				 ADVERTISE_1000XPAUSE |
5897 				 ADVERTISE_1000XPSE_ASYM |
5898 				 ADVERTISE_SLCT);
5899 
5900 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5901 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5902 
5903 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5904 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5905 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5906 			tg3_writephy(tp, MII_BMCR, bmcr);
5907 
5908 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5909 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5910 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5911 
5912 			return err;
5913 		}
5914 	} else {
5915 		u32 new_bmcr;
5916 
5917 		bmcr &= ~BMCR_SPEED1000;
5918 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5919 
5920 		if (tp->link_config.duplex == DUPLEX_FULL)
5921 			new_bmcr |= BMCR_FULLDPLX;
5922 
5923 		if (new_bmcr != bmcr) {
5924 			/* BMCR_SPEED1000 is a reserved bit that needs
5925 			 * to be set on write.
5926 			 */
5927 			new_bmcr |= BMCR_SPEED1000;
5928 
5929 			/* Force a linkdown */
5930 			if (tp->link_up) {
5931 				u32 adv;
5932 
5933 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5934 				adv &= ~(ADVERTISE_1000XFULL |
5935 					 ADVERTISE_1000XHALF |
5936 					 ADVERTISE_SLCT);
5937 				tg3_writephy(tp, MII_ADVERTISE, adv);
5938 				tg3_writephy(tp, MII_BMCR, bmcr |
5939 							   BMCR_ANRESTART |
5940 							   BMCR_ANENABLE);
5941 				udelay(10);
5942 				tg3_carrier_off(tp);
5943 			}
5944 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5945 			bmcr = new_bmcr;
5946 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5947 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5948 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5949 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5950 					bmsr |= BMSR_LSTATUS;
5951 				else
5952 					bmsr &= ~BMSR_LSTATUS;
5953 			}
5954 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5955 		}
5956 	}
5957 
5958 	if (bmsr & BMSR_LSTATUS) {
5959 		current_speed = SPEED_1000;
5960 		current_link_up = true;
5961 		if (bmcr & BMCR_FULLDPLX)
5962 			current_duplex = DUPLEX_FULL;
5963 		else
5964 			current_duplex = DUPLEX_HALF;
5965 
5966 		local_adv = 0;
5967 		remote_adv = 0;
5968 
5969 		if (bmcr & BMCR_ANENABLE) {
5970 			u32 common;
5971 
5972 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5973 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5974 			common = local_adv & remote_adv;
5975 			if (common & (ADVERTISE_1000XHALF |
5976 				      ADVERTISE_1000XFULL)) {
5977 				if (common & ADVERTISE_1000XFULL)
5978 					current_duplex = DUPLEX_FULL;
5979 				else
5980 					current_duplex = DUPLEX_HALF;
5981 
5982 				tp->link_config.rmt_adv =
5983 					   mii_adv_to_ethtool_adv_x(remote_adv);
5984 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5985 				/* Link is up via parallel detect */
5986 			} else {
5987 				current_link_up = false;
5988 			}
5989 		}
5990 	}
5991 
5992 fiber_setup_done:
5993 	if (current_link_up && current_duplex == DUPLEX_FULL)
5994 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5995 
5996 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5997 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5998 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5999 
6000 	tw32_f(MAC_MODE, tp->mac_mode);
6001 	udelay(40);
6002 
6003 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6004 
6005 	tp->link_config.active_speed = current_speed;
6006 	tp->link_config.active_duplex = current_duplex;
6007 
6008 	tg3_test_and_report_link_chg(tp, current_link_up);
6009 	return err;
6010 }
6011 
6012 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6013 {
6014 	if (tp->serdes_counter) {
6015 		/* Give autoneg time to complete. */
6016 		tp->serdes_counter--;
6017 		return;
6018 	}
6019 
6020 	if (!tp->link_up &&
6021 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6022 		u32 bmcr;
6023 
6024 		tg3_readphy(tp, MII_BMCR, &bmcr);
6025 		if (bmcr & BMCR_ANENABLE) {
6026 			u32 phy1, phy2;
6027 
6028 			/* Select shadow register 0x1f */
6029 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6030 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6031 
6032 			/* Select expansion interrupt status register */
6033 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6034 					 MII_TG3_DSP_EXP1_INT_STAT);
6035 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6036 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6037 
6038 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6039 				/* We have signal detect and not receiving
6040 				 * config code words, link is up by parallel
6041 				 * detection.
6042 				 */
6043 
6044 				bmcr &= ~BMCR_ANENABLE;
6045 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6046 				tg3_writephy(tp, MII_BMCR, bmcr);
6047 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6048 			}
6049 		}
6050 	} else if (tp->link_up &&
6051 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6052 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6053 		u32 phy2;
6054 
6055 		/* Select expansion interrupt status register */
6056 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6057 				 MII_TG3_DSP_EXP1_INT_STAT);
6058 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6059 		if (phy2 & 0x20) {
6060 			u32 bmcr;
6061 
6062 			/* Config code words received, turn on autoneg. */
6063 			tg3_readphy(tp, MII_BMCR, &bmcr);
6064 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6065 
6066 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6067 
6068 		}
6069 	}
6070 }
6071 
6072 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6073 {
6074 	u32 val;
6075 	int err;
6076 
6077 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6078 		err = tg3_setup_fiber_phy(tp, force_reset);
6079 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6080 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6081 	else
6082 		err = tg3_setup_copper_phy(tp, force_reset);
6083 
6084 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6085 		u32 scale;
6086 
6087 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6088 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6089 			scale = 65;
6090 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6091 			scale = 6;
6092 		else
6093 			scale = 12;
6094 
6095 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6096 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6097 		tw32(GRC_MISC_CFG, val);
6098 	}
6099 
6100 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6101 	      (6 << TX_LENGTHS_IPG_SHIFT);
6102 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6103 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6104 		val |= tr32(MAC_TX_LENGTHS) &
6105 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6106 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6107 
6108 	if (tp->link_config.active_speed == SPEED_1000 &&
6109 	    tp->link_config.active_duplex == DUPLEX_HALF)
6110 		tw32(MAC_TX_LENGTHS, val |
6111 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6112 	else
6113 		tw32(MAC_TX_LENGTHS, val |
6114 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6115 
6116 	if (!tg3_flag(tp, 5705_PLUS)) {
6117 		if (tp->link_up) {
6118 			tw32(HOSTCC_STAT_COAL_TICKS,
6119 			     tp->coal.stats_block_coalesce_usecs);
6120 		} else {
6121 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6122 		}
6123 	}
6124 
6125 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6126 		val = tr32(PCIE_PWR_MGMT_THRESH);
6127 		if (!tp->link_up)
6128 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6129 			      tp->pwrmgmt_thresh;
6130 		else
6131 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6132 		tw32(PCIE_PWR_MGMT_THRESH, val);
6133 	}
6134 
6135 	return err;
6136 }
6137 
6138 /* tp->lock must be held */
6139 static u64 tg3_refclk_read(struct tg3 *tp)
6140 {
6141 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6142 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6143 }
6144 
6145 /* tp->lock must be held */
6146 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6147 {
6148 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6149 
6150 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6151 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6152 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6153 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6154 }
6155 
6156 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6157 static inline void tg3_full_unlock(struct tg3 *tp);
6158 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6159 {
6160 	struct tg3 *tp = netdev_priv(dev);
6161 
6162 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6163 				SOF_TIMESTAMPING_RX_SOFTWARE |
6164 				SOF_TIMESTAMPING_SOFTWARE;
6165 
6166 	if (tg3_flag(tp, PTP_CAPABLE)) {
6167 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6168 					SOF_TIMESTAMPING_RX_HARDWARE |
6169 					SOF_TIMESTAMPING_RAW_HARDWARE;
6170 	}
6171 
6172 	if (tp->ptp_clock)
6173 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6174 	else
6175 		info->phc_index = -1;
6176 
6177 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6178 
6179 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6180 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6181 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6182 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6183 	return 0;
6184 }
6185 
6186 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6187 {
6188 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6189 	bool neg_adj = false;
6190 	u32 correction = 0;
6191 
6192 	if (ppb < 0) {
6193 		neg_adj = true;
6194 		ppb = -ppb;
6195 	}
6196 
6197 	/* Frequency adjustment is performed using hardware with a 24 bit
6198 	 * accumulator and a programmable correction value. On each clk, the
6199 	 * correction value gets added to the accumulator and when it
6200 	 * overflows, the time counter is incremented/decremented.
6201 	 *
6202 	 * So conversion from ppb to correction value is
6203 	 *		ppb * (1 << 24) / 1000000000
6204 	 */
6205 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6206 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6207 
6208 	tg3_full_lock(tp, 0);
6209 
6210 	if (correction)
6211 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6212 		     TG3_EAV_REF_CLK_CORRECT_EN |
6213 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6214 	else
6215 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6216 
6217 	tg3_full_unlock(tp);
6218 
6219 	return 0;
6220 }
6221 
6222 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6223 {
6224 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225 
6226 	tg3_full_lock(tp, 0);
6227 	tp->ptp_adjust += delta;
6228 	tg3_full_unlock(tp);
6229 
6230 	return 0;
6231 }
6232 
6233 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6234 {
6235 	u64 ns;
6236 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6237 
6238 	tg3_full_lock(tp, 0);
6239 	ns = tg3_refclk_read(tp);
6240 	ns += tp->ptp_adjust;
6241 	tg3_full_unlock(tp);
6242 
6243 	*ts = ns_to_timespec64(ns);
6244 
6245 	return 0;
6246 }
6247 
6248 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6249 			   const struct timespec64 *ts)
6250 {
6251 	u64 ns;
6252 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6253 
6254 	ns = timespec64_to_ns(ts);
6255 
6256 	tg3_full_lock(tp, 0);
6257 	tg3_refclk_write(tp, ns);
6258 	tp->ptp_adjust = 0;
6259 	tg3_full_unlock(tp);
6260 
6261 	return 0;
6262 }
6263 
6264 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6265 			  struct ptp_clock_request *rq, int on)
6266 {
6267 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6268 	u32 clock_ctl;
6269 	int rval = 0;
6270 
6271 	switch (rq->type) {
6272 	case PTP_CLK_REQ_PEROUT:
6273 		if (rq->perout.index != 0)
6274 			return -EINVAL;
6275 
6276 		tg3_full_lock(tp, 0);
6277 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6278 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6279 
6280 		if (on) {
6281 			u64 nsec;
6282 
6283 			nsec = rq->perout.start.sec * 1000000000ULL +
6284 			       rq->perout.start.nsec;
6285 
6286 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6287 				netdev_warn(tp->dev,
6288 					    "Device supports only a one-shot timesync output, period must be 0\n");
6289 				rval = -EINVAL;
6290 				goto err_out;
6291 			}
6292 
6293 			if (nsec & (1ULL << 63)) {
6294 				netdev_warn(tp->dev,
6295 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6296 				rval = -EINVAL;
6297 				goto err_out;
6298 			}
6299 
6300 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6301 			tw32(TG3_EAV_WATCHDOG0_MSB,
6302 			     TG3_EAV_WATCHDOG0_EN |
6303 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6304 
6305 			tw32(TG3_EAV_REF_CLCK_CTL,
6306 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6307 		} else {
6308 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6309 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6310 		}
6311 
6312 err_out:
6313 		tg3_full_unlock(tp);
6314 		return rval;
6315 
6316 	default:
6317 		break;
6318 	}
6319 
6320 	return -EOPNOTSUPP;
6321 }
6322 
6323 static const struct ptp_clock_info tg3_ptp_caps = {
6324 	.owner		= THIS_MODULE,
6325 	.name		= "tg3 clock",
6326 	.max_adj	= 250000000,
6327 	.n_alarm	= 0,
6328 	.n_ext_ts	= 0,
6329 	.n_per_out	= 1,
6330 	.n_pins		= 0,
6331 	.pps		= 0,
6332 	.adjfreq	= tg3_ptp_adjfreq,
6333 	.adjtime	= tg3_ptp_adjtime,
6334 	.gettime64	= tg3_ptp_gettime,
6335 	.settime64	= tg3_ptp_settime,
6336 	.enable		= tg3_ptp_enable,
6337 };
6338 
6339 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6340 				     struct skb_shared_hwtstamps *timestamp)
6341 {
6342 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6343 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6344 					   tp->ptp_adjust);
6345 }
6346 
6347 /* tp->lock must be held */
6348 static void tg3_ptp_init(struct tg3 *tp)
6349 {
6350 	if (!tg3_flag(tp, PTP_CAPABLE))
6351 		return;
6352 
6353 	/* Initialize the hardware clock to the system time. */
6354 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6355 	tp->ptp_adjust = 0;
6356 	tp->ptp_info = tg3_ptp_caps;
6357 }
6358 
6359 /* tp->lock must be held */
6360 static void tg3_ptp_resume(struct tg3 *tp)
6361 {
6362 	if (!tg3_flag(tp, PTP_CAPABLE))
6363 		return;
6364 
6365 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6366 	tp->ptp_adjust = 0;
6367 }
6368 
6369 static void tg3_ptp_fini(struct tg3 *tp)
6370 {
6371 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6372 		return;
6373 
6374 	ptp_clock_unregister(tp->ptp_clock);
6375 	tp->ptp_clock = NULL;
6376 	tp->ptp_adjust = 0;
6377 }
6378 
6379 static inline int tg3_irq_sync(struct tg3 *tp)
6380 {
6381 	return tp->irq_sync;
6382 }
6383 
6384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6385 {
6386 	int i;
6387 
6388 	dst = (u32 *)((u8 *)dst + off);
6389 	for (i = 0; i < len; i += sizeof(u32))
6390 		*dst++ = tr32(off + i);
6391 }
6392 
6393 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6394 {
6395 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6396 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6397 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6398 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6399 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6400 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6401 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6402 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6403 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6404 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6405 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6406 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6407 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6408 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6409 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6410 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6411 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6412 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6413 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6414 
6415 	if (tg3_flag(tp, SUPPORT_MSIX))
6416 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6417 
6418 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6419 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6420 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6421 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6422 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6423 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6424 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6425 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6426 
6427 	if (!tg3_flag(tp, 5705_PLUS)) {
6428 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6429 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6430 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6431 	}
6432 
6433 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6434 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6435 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6436 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6437 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6438 
6439 	if (tg3_flag(tp, NVRAM))
6440 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6441 }
6442 
6443 static void tg3_dump_state(struct tg3 *tp)
6444 {
6445 	int i;
6446 	u32 *regs;
6447 
6448 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6449 	if (!regs)
6450 		return;
6451 
6452 	if (tg3_flag(tp, PCI_EXPRESS)) {
6453 		/* Read up to but not including private PCI registers */
6454 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6455 			regs[i / sizeof(u32)] = tr32(i);
6456 	} else
6457 		tg3_dump_legacy_regs(tp, regs);
6458 
6459 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6460 		if (!regs[i + 0] && !regs[i + 1] &&
6461 		    !regs[i + 2] && !regs[i + 3])
6462 			continue;
6463 
6464 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6465 			   i * 4,
6466 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6467 	}
6468 
6469 	kfree(regs);
6470 
6471 	for (i = 0; i < tp->irq_cnt; i++) {
6472 		struct tg3_napi *tnapi = &tp->napi[i];
6473 
6474 		/* SW status block */
6475 		netdev_err(tp->dev,
6476 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6477 			   i,
6478 			   tnapi->hw_status->status,
6479 			   tnapi->hw_status->status_tag,
6480 			   tnapi->hw_status->rx_jumbo_consumer,
6481 			   tnapi->hw_status->rx_consumer,
6482 			   tnapi->hw_status->rx_mini_consumer,
6483 			   tnapi->hw_status->idx[0].rx_producer,
6484 			   tnapi->hw_status->idx[0].tx_consumer);
6485 
6486 		netdev_err(tp->dev,
6487 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6488 			   i,
6489 			   tnapi->last_tag, tnapi->last_irq_tag,
6490 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6491 			   tnapi->rx_rcb_ptr,
6492 			   tnapi->prodring.rx_std_prod_idx,
6493 			   tnapi->prodring.rx_std_cons_idx,
6494 			   tnapi->prodring.rx_jmb_prod_idx,
6495 			   tnapi->prodring.rx_jmb_cons_idx);
6496 	}
6497 }
6498 
6499 /* This is called whenever we suspect that the system chipset is re-
6500  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6501  * is bogus tx completions. We try to recover by setting the
6502  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6503  * in the workqueue.
6504  */
6505 static void tg3_tx_recover(struct tg3 *tp)
6506 {
6507 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6508 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6509 
6510 	netdev_warn(tp->dev,
6511 		    "The system may be re-ordering memory-mapped I/O "
6512 		    "cycles to the network device, attempting to recover. "
6513 		    "Please report the problem to the driver maintainer "
6514 		    "and include system chipset information.\n");
6515 
6516 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6517 }
6518 
6519 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6520 {
6521 	/* Tell compiler to fetch tx indices from memory. */
6522 	barrier();
6523 	return tnapi->tx_pending -
6524 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6525 }
6526 
6527 /* Tigon3 never reports partial packet sends.  So we do not
6528  * need special logic to handle SKBs that have not had all
6529  * of their frags sent yet, like SunGEM does.
6530  */
6531 static void tg3_tx(struct tg3_napi *tnapi)
6532 {
6533 	struct tg3 *tp = tnapi->tp;
6534 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6535 	u32 sw_idx = tnapi->tx_cons;
6536 	struct netdev_queue *txq;
6537 	int index = tnapi - tp->napi;
6538 	unsigned int pkts_compl = 0, bytes_compl = 0;
6539 
6540 	if (tg3_flag(tp, ENABLE_TSS))
6541 		index--;
6542 
6543 	txq = netdev_get_tx_queue(tp->dev, index);
6544 
6545 	while (sw_idx != hw_idx) {
6546 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6547 		struct sk_buff *skb = ri->skb;
6548 		int i, tx_bug = 0;
6549 
6550 		if (unlikely(skb == NULL)) {
6551 			tg3_tx_recover(tp);
6552 			return;
6553 		}
6554 
6555 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6556 			struct skb_shared_hwtstamps timestamp;
6557 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6558 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6559 
6560 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6561 
6562 			skb_tstamp_tx(skb, &timestamp);
6563 		}
6564 
6565 		pci_unmap_single(tp->pdev,
6566 				 dma_unmap_addr(ri, mapping),
6567 				 skb_headlen(skb),
6568 				 PCI_DMA_TODEVICE);
6569 
6570 		ri->skb = NULL;
6571 
6572 		while (ri->fragmented) {
6573 			ri->fragmented = false;
6574 			sw_idx = NEXT_TX(sw_idx);
6575 			ri = &tnapi->tx_buffers[sw_idx];
6576 		}
6577 
6578 		sw_idx = NEXT_TX(sw_idx);
6579 
6580 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6581 			ri = &tnapi->tx_buffers[sw_idx];
6582 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6583 				tx_bug = 1;
6584 
6585 			pci_unmap_page(tp->pdev,
6586 				       dma_unmap_addr(ri, mapping),
6587 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6588 				       PCI_DMA_TODEVICE);
6589 
6590 			while (ri->fragmented) {
6591 				ri->fragmented = false;
6592 				sw_idx = NEXT_TX(sw_idx);
6593 				ri = &tnapi->tx_buffers[sw_idx];
6594 			}
6595 
6596 			sw_idx = NEXT_TX(sw_idx);
6597 		}
6598 
6599 		pkts_compl++;
6600 		bytes_compl += skb->len;
6601 
6602 		dev_consume_skb_any(skb);
6603 
6604 		if (unlikely(tx_bug)) {
6605 			tg3_tx_recover(tp);
6606 			return;
6607 		}
6608 	}
6609 
6610 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6611 
6612 	tnapi->tx_cons = sw_idx;
6613 
6614 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6615 	 * before checking for netif_queue_stopped().  Without the
6616 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6617 	 * will miss it and cause the queue to be stopped forever.
6618 	 */
6619 	smp_mb();
6620 
6621 	if (unlikely(netif_tx_queue_stopped(txq) &&
6622 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6623 		__netif_tx_lock(txq, smp_processor_id());
6624 		if (netif_tx_queue_stopped(txq) &&
6625 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6626 			netif_tx_wake_queue(txq);
6627 		__netif_tx_unlock(txq);
6628 	}
6629 }
6630 
6631 static void tg3_frag_free(bool is_frag, void *data)
6632 {
6633 	if (is_frag)
6634 		skb_free_frag(data);
6635 	else
6636 		kfree(data);
6637 }
6638 
6639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6640 {
6641 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6642 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6643 
6644 	if (!ri->data)
6645 		return;
6646 
6647 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6648 			 map_sz, PCI_DMA_FROMDEVICE);
6649 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6650 	ri->data = NULL;
6651 }
6652 
6653 
6654 /* Returns size of skb allocated or < 0 on error.
6655  *
6656  * We only need to fill in the address because the other members
6657  * of the RX descriptor are invariant, see tg3_init_rings.
6658  *
6659  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6660  * posting buffers we only dirty the first cache line of the RX
6661  * descriptor (containing the address).  Whereas for the RX status
6662  * buffers the cpu only reads the last cacheline of the RX descriptor
6663  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6664  */
6665 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6666 			     u32 opaque_key, u32 dest_idx_unmasked,
6667 			     unsigned int *frag_size)
6668 {
6669 	struct tg3_rx_buffer_desc *desc;
6670 	struct ring_info *map;
6671 	u8 *data;
6672 	dma_addr_t mapping;
6673 	int skb_size, data_size, dest_idx;
6674 
6675 	switch (opaque_key) {
6676 	case RXD_OPAQUE_RING_STD:
6677 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6678 		desc = &tpr->rx_std[dest_idx];
6679 		map = &tpr->rx_std_buffers[dest_idx];
6680 		data_size = tp->rx_pkt_map_sz;
6681 		break;
6682 
6683 	case RXD_OPAQUE_RING_JUMBO:
6684 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6685 		desc = &tpr->rx_jmb[dest_idx].std;
6686 		map = &tpr->rx_jmb_buffers[dest_idx];
6687 		data_size = TG3_RX_JMB_MAP_SZ;
6688 		break;
6689 
6690 	default:
6691 		return -EINVAL;
6692 	}
6693 
6694 	/* Do not overwrite any of the map or rp information
6695 	 * until we are sure we can commit to a new buffer.
6696 	 *
6697 	 * Callers depend upon this behavior and assume that
6698 	 * we leave everything unchanged if we fail.
6699 	 */
6700 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6701 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6702 	if (skb_size <= PAGE_SIZE) {
6703 		data = netdev_alloc_frag(skb_size);
6704 		*frag_size = skb_size;
6705 	} else {
6706 		data = kmalloc(skb_size, GFP_ATOMIC);
6707 		*frag_size = 0;
6708 	}
6709 	if (!data)
6710 		return -ENOMEM;
6711 
6712 	mapping = pci_map_single(tp->pdev,
6713 				 data + TG3_RX_OFFSET(tp),
6714 				 data_size,
6715 				 PCI_DMA_FROMDEVICE);
6716 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6717 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6718 		return -EIO;
6719 	}
6720 
6721 	map->data = data;
6722 	dma_unmap_addr_set(map, mapping, mapping);
6723 
6724 	desc->addr_hi = ((u64)mapping >> 32);
6725 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6726 
6727 	return data_size;
6728 }
6729 
6730 /* We only need to move over in the address because the other
6731  * members of the RX descriptor are invariant.  See notes above
6732  * tg3_alloc_rx_data for full details.
6733  */
6734 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6735 			   struct tg3_rx_prodring_set *dpr,
6736 			   u32 opaque_key, int src_idx,
6737 			   u32 dest_idx_unmasked)
6738 {
6739 	struct tg3 *tp = tnapi->tp;
6740 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6741 	struct ring_info *src_map, *dest_map;
6742 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6743 	int dest_idx;
6744 
6745 	switch (opaque_key) {
6746 	case RXD_OPAQUE_RING_STD:
6747 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6748 		dest_desc = &dpr->rx_std[dest_idx];
6749 		dest_map = &dpr->rx_std_buffers[dest_idx];
6750 		src_desc = &spr->rx_std[src_idx];
6751 		src_map = &spr->rx_std_buffers[src_idx];
6752 		break;
6753 
6754 	case RXD_OPAQUE_RING_JUMBO:
6755 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6756 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6757 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6758 		src_desc = &spr->rx_jmb[src_idx].std;
6759 		src_map = &spr->rx_jmb_buffers[src_idx];
6760 		break;
6761 
6762 	default:
6763 		return;
6764 	}
6765 
6766 	dest_map->data = src_map->data;
6767 	dma_unmap_addr_set(dest_map, mapping,
6768 			   dma_unmap_addr(src_map, mapping));
6769 	dest_desc->addr_hi = src_desc->addr_hi;
6770 	dest_desc->addr_lo = src_desc->addr_lo;
6771 
6772 	/* Ensure that the update to the skb happens after the physical
6773 	 * addresses have been transferred to the new BD location.
6774 	 */
6775 	smp_wmb();
6776 
6777 	src_map->data = NULL;
6778 }
6779 
6780 /* The RX ring scheme is composed of multiple rings which post fresh
6781  * buffers to the chip, and one special ring the chip uses to report
6782  * status back to the host.
6783  *
6784  * The special ring reports the status of received packets to the
6785  * host.  The chip does not write into the original descriptor the
6786  * RX buffer was obtained from.  The chip simply takes the original
6787  * descriptor as provided by the host, updates the status and length
6788  * field, then writes this into the next status ring entry.
6789  *
6790  * Each ring the host uses to post buffers to the chip is described
6791  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6792  * it is first placed into the on-chip ram.  When the packet's length
6793  * is known, it walks down the TG3_BDINFO entries to select the ring.
6794  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6795  * which is within the range of the new packet's length is chosen.
6796  *
6797  * The "separate ring for rx status" scheme may sound queer, but it makes
6798  * sense from a cache coherency perspective.  If only the host writes
6799  * to the buffer post rings, and only the chip writes to the rx status
6800  * rings, then cache lines never move beyond shared-modified state.
6801  * If both the host and chip were to write into the same ring, cache line
6802  * eviction could occur since both entities want it in an exclusive state.
6803  */
6804 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6805 {
6806 	struct tg3 *tp = tnapi->tp;
6807 	u32 work_mask, rx_std_posted = 0;
6808 	u32 std_prod_idx, jmb_prod_idx;
6809 	u32 sw_idx = tnapi->rx_rcb_ptr;
6810 	u16 hw_idx;
6811 	int received;
6812 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6813 
6814 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6815 	/*
6816 	 * We need to order the read of hw_idx and the read of
6817 	 * the opaque cookie.
6818 	 */
6819 	rmb();
6820 	work_mask = 0;
6821 	received = 0;
6822 	std_prod_idx = tpr->rx_std_prod_idx;
6823 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6824 	while (sw_idx != hw_idx && budget > 0) {
6825 		struct ring_info *ri;
6826 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6827 		unsigned int len;
6828 		struct sk_buff *skb;
6829 		dma_addr_t dma_addr;
6830 		u32 opaque_key, desc_idx, *post_ptr;
6831 		u8 *data;
6832 		u64 tstamp = 0;
6833 
6834 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6835 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6836 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6837 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6838 			dma_addr = dma_unmap_addr(ri, mapping);
6839 			data = ri->data;
6840 			post_ptr = &std_prod_idx;
6841 			rx_std_posted++;
6842 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6843 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6844 			dma_addr = dma_unmap_addr(ri, mapping);
6845 			data = ri->data;
6846 			post_ptr = &jmb_prod_idx;
6847 		} else
6848 			goto next_pkt_nopost;
6849 
6850 		work_mask |= opaque_key;
6851 
6852 		if (desc->err_vlan & RXD_ERR_MASK) {
6853 		drop_it:
6854 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6855 				       desc_idx, *post_ptr);
6856 		drop_it_no_recycle:
6857 			/* Other statistics kept track of by card. */
6858 			tp->rx_dropped++;
6859 			goto next_pkt;
6860 		}
6861 
6862 		prefetch(data + TG3_RX_OFFSET(tp));
6863 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6864 		      ETH_FCS_LEN;
6865 
6866 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6867 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6868 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6869 		     RXD_FLAG_PTPSTAT_PTPV2) {
6870 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6871 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6872 		}
6873 
6874 		if (len > TG3_RX_COPY_THRESH(tp)) {
6875 			int skb_size;
6876 			unsigned int frag_size;
6877 
6878 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6879 						    *post_ptr, &frag_size);
6880 			if (skb_size < 0)
6881 				goto drop_it;
6882 
6883 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6884 					 PCI_DMA_FROMDEVICE);
6885 
6886 			/* Ensure that the update to the data happens
6887 			 * after the usage of the old DMA mapping.
6888 			 */
6889 			smp_wmb();
6890 
6891 			ri->data = NULL;
6892 
6893 			skb = build_skb(data, frag_size);
6894 			if (!skb) {
6895 				tg3_frag_free(frag_size != 0, data);
6896 				goto drop_it_no_recycle;
6897 			}
6898 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6899 		} else {
6900 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6901 				       desc_idx, *post_ptr);
6902 
6903 			skb = netdev_alloc_skb(tp->dev,
6904 					       len + TG3_RAW_IP_ALIGN);
6905 			if (skb == NULL)
6906 				goto drop_it_no_recycle;
6907 
6908 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6909 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6910 			memcpy(skb->data,
6911 			       data + TG3_RX_OFFSET(tp),
6912 			       len);
6913 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6914 		}
6915 
6916 		skb_put(skb, len);
6917 		if (tstamp)
6918 			tg3_hwclock_to_timestamp(tp, tstamp,
6919 						 skb_hwtstamps(skb));
6920 
6921 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6922 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6923 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6924 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6925 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6926 		else
6927 			skb_checksum_none_assert(skb);
6928 
6929 		skb->protocol = eth_type_trans(skb, tp->dev);
6930 
6931 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6932 		    skb->protocol != htons(ETH_P_8021Q) &&
6933 		    skb->protocol != htons(ETH_P_8021AD)) {
6934 			dev_kfree_skb_any(skb);
6935 			goto drop_it_no_recycle;
6936 		}
6937 
6938 		if (desc->type_flags & RXD_FLAG_VLAN &&
6939 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6940 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6941 					       desc->err_vlan & RXD_VLAN_MASK);
6942 
6943 		napi_gro_receive(&tnapi->napi, skb);
6944 
6945 		received++;
6946 		budget--;
6947 
6948 next_pkt:
6949 		(*post_ptr)++;
6950 
6951 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6952 			tpr->rx_std_prod_idx = std_prod_idx &
6953 					       tp->rx_std_ring_mask;
6954 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6955 				     tpr->rx_std_prod_idx);
6956 			work_mask &= ~RXD_OPAQUE_RING_STD;
6957 			rx_std_posted = 0;
6958 		}
6959 next_pkt_nopost:
6960 		sw_idx++;
6961 		sw_idx &= tp->rx_ret_ring_mask;
6962 
6963 		/* Refresh hw_idx to see if there is new work */
6964 		if (sw_idx == hw_idx) {
6965 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6966 			rmb();
6967 		}
6968 	}
6969 
6970 	/* ACK the status ring. */
6971 	tnapi->rx_rcb_ptr = sw_idx;
6972 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6973 
6974 	/* Refill RX ring(s). */
6975 	if (!tg3_flag(tp, ENABLE_RSS)) {
6976 		/* Sync BD data before updating mailbox */
6977 		wmb();
6978 
6979 		if (work_mask & RXD_OPAQUE_RING_STD) {
6980 			tpr->rx_std_prod_idx = std_prod_idx &
6981 					       tp->rx_std_ring_mask;
6982 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6983 				     tpr->rx_std_prod_idx);
6984 		}
6985 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6986 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6987 					       tp->rx_jmb_ring_mask;
6988 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6989 				     tpr->rx_jmb_prod_idx);
6990 		}
6991 		mmiowb();
6992 	} else if (work_mask) {
6993 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6994 		 * updated before the producer indices can be updated.
6995 		 */
6996 		smp_wmb();
6997 
6998 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6999 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7000 
7001 		if (tnapi != &tp->napi[1]) {
7002 			tp->rx_refill = true;
7003 			napi_schedule(&tp->napi[1].napi);
7004 		}
7005 	}
7006 
7007 	return received;
7008 }
7009 
7010 static void tg3_poll_link(struct tg3 *tp)
7011 {
7012 	/* handle link change and other phy events */
7013 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7014 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7015 
7016 		if (sblk->status & SD_STATUS_LINK_CHG) {
7017 			sblk->status = SD_STATUS_UPDATED |
7018 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7019 			spin_lock(&tp->lock);
7020 			if (tg3_flag(tp, USE_PHYLIB)) {
7021 				tw32_f(MAC_STATUS,
7022 				     (MAC_STATUS_SYNC_CHANGED |
7023 				      MAC_STATUS_CFG_CHANGED |
7024 				      MAC_STATUS_MI_COMPLETION |
7025 				      MAC_STATUS_LNKSTATE_CHANGED));
7026 				udelay(40);
7027 			} else
7028 				tg3_setup_phy(tp, false);
7029 			spin_unlock(&tp->lock);
7030 		}
7031 	}
7032 }
7033 
7034 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7035 				struct tg3_rx_prodring_set *dpr,
7036 				struct tg3_rx_prodring_set *spr)
7037 {
7038 	u32 si, di, cpycnt, src_prod_idx;
7039 	int i, err = 0;
7040 
7041 	while (1) {
7042 		src_prod_idx = spr->rx_std_prod_idx;
7043 
7044 		/* Make sure updates to the rx_std_buffers[] entries and the
7045 		 * standard producer index are seen in the correct order.
7046 		 */
7047 		smp_rmb();
7048 
7049 		if (spr->rx_std_cons_idx == src_prod_idx)
7050 			break;
7051 
7052 		if (spr->rx_std_cons_idx < src_prod_idx)
7053 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7054 		else
7055 			cpycnt = tp->rx_std_ring_mask + 1 -
7056 				 spr->rx_std_cons_idx;
7057 
7058 		cpycnt = min(cpycnt,
7059 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7060 
7061 		si = spr->rx_std_cons_idx;
7062 		di = dpr->rx_std_prod_idx;
7063 
7064 		for (i = di; i < di + cpycnt; i++) {
7065 			if (dpr->rx_std_buffers[i].data) {
7066 				cpycnt = i - di;
7067 				err = -ENOSPC;
7068 				break;
7069 			}
7070 		}
7071 
7072 		if (!cpycnt)
7073 			break;
7074 
7075 		/* Ensure that updates to the rx_std_buffers ring and the
7076 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7077 		 * ordered correctly WRT the skb check above.
7078 		 */
7079 		smp_rmb();
7080 
7081 		memcpy(&dpr->rx_std_buffers[di],
7082 		       &spr->rx_std_buffers[si],
7083 		       cpycnt * sizeof(struct ring_info));
7084 
7085 		for (i = 0; i < cpycnt; i++, di++, si++) {
7086 			struct tg3_rx_buffer_desc *sbd, *dbd;
7087 			sbd = &spr->rx_std[si];
7088 			dbd = &dpr->rx_std[di];
7089 			dbd->addr_hi = sbd->addr_hi;
7090 			dbd->addr_lo = sbd->addr_lo;
7091 		}
7092 
7093 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7094 				       tp->rx_std_ring_mask;
7095 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7096 				       tp->rx_std_ring_mask;
7097 	}
7098 
7099 	while (1) {
7100 		src_prod_idx = spr->rx_jmb_prod_idx;
7101 
7102 		/* Make sure updates to the rx_jmb_buffers[] entries and
7103 		 * the jumbo producer index are seen in the correct order.
7104 		 */
7105 		smp_rmb();
7106 
7107 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7108 			break;
7109 
7110 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7111 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7112 		else
7113 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7114 				 spr->rx_jmb_cons_idx;
7115 
7116 		cpycnt = min(cpycnt,
7117 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7118 
7119 		si = spr->rx_jmb_cons_idx;
7120 		di = dpr->rx_jmb_prod_idx;
7121 
7122 		for (i = di; i < di + cpycnt; i++) {
7123 			if (dpr->rx_jmb_buffers[i].data) {
7124 				cpycnt = i - di;
7125 				err = -ENOSPC;
7126 				break;
7127 			}
7128 		}
7129 
7130 		if (!cpycnt)
7131 			break;
7132 
7133 		/* Ensure that updates to the rx_jmb_buffers ring and the
7134 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7135 		 * ordered correctly WRT the skb check above.
7136 		 */
7137 		smp_rmb();
7138 
7139 		memcpy(&dpr->rx_jmb_buffers[di],
7140 		       &spr->rx_jmb_buffers[si],
7141 		       cpycnt * sizeof(struct ring_info));
7142 
7143 		for (i = 0; i < cpycnt; i++, di++, si++) {
7144 			struct tg3_rx_buffer_desc *sbd, *dbd;
7145 			sbd = &spr->rx_jmb[si].std;
7146 			dbd = &dpr->rx_jmb[di].std;
7147 			dbd->addr_hi = sbd->addr_hi;
7148 			dbd->addr_lo = sbd->addr_lo;
7149 		}
7150 
7151 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7152 				       tp->rx_jmb_ring_mask;
7153 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7154 				       tp->rx_jmb_ring_mask;
7155 	}
7156 
7157 	return err;
7158 }
7159 
7160 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7161 {
7162 	struct tg3 *tp = tnapi->tp;
7163 
7164 	/* run TX completion thread */
7165 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7166 		tg3_tx(tnapi);
7167 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7168 			return work_done;
7169 	}
7170 
7171 	if (!tnapi->rx_rcb_prod_idx)
7172 		return work_done;
7173 
7174 	/* run RX thread, within the bounds set by NAPI.
7175 	 * All RX "locking" is done by ensuring outside
7176 	 * code synchronizes with tg3->napi.poll()
7177 	 */
7178 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7179 		work_done += tg3_rx(tnapi, budget - work_done);
7180 
7181 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7182 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7183 		int i, err = 0;
7184 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7185 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7186 
7187 		tp->rx_refill = false;
7188 		for (i = 1; i <= tp->rxq_cnt; i++)
7189 			err |= tg3_rx_prodring_xfer(tp, dpr,
7190 						    &tp->napi[i].prodring);
7191 
7192 		wmb();
7193 
7194 		if (std_prod_idx != dpr->rx_std_prod_idx)
7195 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7196 				     dpr->rx_std_prod_idx);
7197 
7198 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7199 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7200 				     dpr->rx_jmb_prod_idx);
7201 
7202 		mmiowb();
7203 
7204 		if (err)
7205 			tw32_f(HOSTCC_MODE, tp->coal_now);
7206 	}
7207 
7208 	return work_done;
7209 }
7210 
7211 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7212 {
7213 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7214 		schedule_work(&tp->reset_task);
7215 }
7216 
7217 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7218 {
7219 	cancel_work_sync(&tp->reset_task);
7220 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7221 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7222 }
7223 
7224 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7225 {
7226 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7227 	struct tg3 *tp = tnapi->tp;
7228 	int work_done = 0;
7229 	struct tg3_hw_status *sblk = tnapi->hw_status;
7230 
7231 	while (1) {
7232 		work_done = tg3_poll_work(tnapi, work_done, budget);
7233 
7234 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7235 			goto tx_recovery;
7236 
7237 		if (unlikely(work_done >= budget))
7238 			break;
7239 
7240 		/* tp->last_tag is used in tg3_int_reenable() below
7241 		 * to tell the hw how much work has been processed,
7242 		 * so we must read it before checking for more work.
7243 		 */
7244 		tnapi->last_tag = sblk->status_tag;
7245 		tnapi->last_irq_tag = tnapi->last_tag;
7246 		rmb();
7247 
7248 		/* check for RX/TX work to do */
7249 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7250 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7251 
7252 			/* This test here is not race free, but will reduce
7253 			 * the number of interrupts by looping again.
7254 			 */
7255 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7256 				continue;
7257 
7258 			napi_complete_done(napi, work_done);
7259 			/* Reenable interrupts. */
7260 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7261 
7262 			/* This test here is synchronized by napi_schedule()
7263 			 * and napi_complete() to close the race condition.
7264 			 */
7265 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7266 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7267 						  HOSTCC_MODE_ENABLE |
7268 						  tnapi->coal_now);
7269 			}
7270 			mmiowb();
7271 			break;
7272 		}
7273 	}
7274 
7275 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7276 	return work_done;
7277 
7278 tx_recovery:
7279 	/* work_done is guaranteed to be less than budget. */
7280 	napi_complete(napi);
7281 	tg3_reset_task_schedule(tp);
7282 	return work_done;
7283 }
7284 
7285 static void tg3_process_error(struct tg3 *tp)
7286 {
7287 	u32 val;
7288 	bool real_error = false;
7289 
7290 	if (tg3_flag(tp, ERROR_PROCESSED))
7291 		return;
7292 
7293 	/* Check Flow Attention register */
7294 	val = tr32(HOSTCC_FLOW_ATTN);
7295 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7296 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7297 		real_error = true;
7298 	}
7299 
7300 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7301 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7302 		real_error = true;
7303 	}
7304 
7305 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7306 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7307 		real_error = true;
7308 	}
7309 
7310 	if (!real_error)
7311 		return;
7312 
7313 	tg3_dump_state(tp);
7314 
7315 	tg3_flag_set(tp, ERROR_PROCESSED);
7316 	tg3_reset_task_schedule(tp);
7317 }
7318 
7319 static int tg3_poll(struct napi_struct *napi, int budget)
7320 {
7321 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7322 	struct tg3 *tp = tnapi->tp;
7323 	int work_done = 0;
7324 	struct tg3_hw_status *sblk = tnapi->hw_status;
7325 
7326 	while (1) {
7327 		if (sblk->status & SD_STATUS_ERROR)
7328 			tg3_process_error(tp);
7329 
7330 		tg3_poll_link(tp);
7331 
7332 		work_done = tg3_poll_work(tnapi, work_done, budget);
7333 
7334 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7335 			goto tx_recovery;
7336 
7337 		if (unlikely(work_done >= budget))
7338 			break;
7339 
7340 		if (tg3_flag(tp, TAGGED_STATUS)) {
7341 			/* tp->last_tag is used in tg3_int_reenable() below
7342 			 * to tell the hw how much work has been processed,
7343 			 * so we must read it before checking for more work.
7344 			 */
7345 			tnapi->last_tag = sblk->status_tag;
7346 			tnapi->last_irq_tag = tnapi->last_tag;
7347 			rmb();
7348 		} else
7349 			sblk->status &= ~SD_STATUS_UPDATED;
7350 
7351 		if (likely(!tg3_has_work(tnapi))) {
7352 			napi_complete_done(napi, work_done);
7353 			tg3_int_reenable(tnapi);
7354 			break;
7355 		}
7356 	}
7357 
7358 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7359 	return work_done;
7360 
7361 tx_recovery:
7362 	/* work_done is guaranteed to be less than budget. */
7363 	napi_complete(napi);
7364 	tg3_reset_task_schedule(tp);
7365 	return work_done;
7366 }
7367 
7368 static void tg3_napi_disable(struct tg3 *tp)
7369 {
7370 	int i;
7371 
7372 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7373 		napi_disable(&tp->napi[i].napi);
7374 }
7375 
7376 static void tg3_napi_enable(struct tg3 *tp)
7377 {
7378 	int i;
7379 
7380 	for (i = 0; i < tp->irq_cnt; i++)
7381 		napi_enable(&tp->napi[i].napi);
7382 }
7383 
7384 static void tg3_napi_init(struct tg3 *tp)
7385 {
7386 	int i;
7387 
7388 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7389 	for (i = 1; i < tp->irq_cnt; i++)
7390 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7391 }
7392 
7393 static void tg3_napi_fini(struct tg3 *tp)
7394 {
7395 	int i;
7396 
7397 	for (i = 0; i < tp->irq_cnt; i++)
7398 		netif_napi_del(&tp->napi[i].napi);
7399 }
7400 
7401 static inline void tg3_netif_stop(struct tg3 *tp)
7402 {
7403 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7404 	tg3_napi_disable(tp);
7405 	netif_carrier_off(tp->dev);
7406 	netif_tx_disable(tp->dev);
7407 }
7408 
7409 /* tp->lock must be held */
7410 static inline void tg3_netif_start(struct tg3 *tp)
7411 {
7412 	tg3_ptp_resume(tp);
7413 
7414 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7415 	 * appropriate so long as all callers are assured to
7416 	 * have free tx slots (such as after tg3_init_hw)
7417 	 */
7418 	netif_tx_wake_all_queues(tp->dev);
7419 
7420 	if (tp->link_up)
7421 		netif_carrier_on(tp->dev);
7422 
7423 	tg3_napi_enable(tp);
7424 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7425 	tg3_enable_ints(tp);
7426 }
7427 
7428 static void tg3_irq_quiesce(struct tg3 *tp)
7429 	__releases(tp->lock)
7430 	__acquires(tp->lock)
7431 {
7432 	int i;
7433 
7434 	BUG_ON(tp->irq_sync);
7435 
7436 	tp->irq_sync = 1;
7437 	smp_mb();
7438 
7439 	spin_unlock_bh(&tp->lock);
7440 
7441 	for (i = 0; i < tp->irq_cnt; i++)
7442 		synchronize_irq(tp->napi[i].irq_vec);
7443 
7444 	spin_lock_bh(&tp->lock);
7445 }
7446 
7447 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7448  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7449  * with as well.  Most of the time, this is not necessary except when
7450  * shutting down the device.
7451  */
7452 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7453 {
7454 	spin_lock_bh(&tp->lock);
7455 	if (irq_sync)
7456 		tg3_irq_quiesce(tp);
7457 }
7458 
7459 static inline void tg3_full_unlock(struct tg3 *tp)
7460 {
7461 	spin_unlock_bh(&tp->lock);
7462 }
7463 
7464 /* One-shot MSI handler - Chip automatically disables interrupt
7465  * after sending MSI so driver doesn't have to do it.
7466  */
7467 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7468 {
7469 	struct tg3_napi *tnapi = dev_id;
7470 	struct tg3 *tp = tnapi->tp;
7471 
7472 	prefetch(tnapi->hw_status);
7473 	if (tnapi->rx_rcb)
7474 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7475 
7476 	if (likely(!tg3_irq_sync(tp)))
7477 		napi_schedule(&tnapi->napi);
7478 
7479 	return IRQ_HANDLED;
7480 }
7481 
7482 /* MSI ISR - No need to check for interrupt sharing and no need to
7483  * flush status block and interrupt mailbox. PCI ordering rules
7484  * guarantee that MSI will arrive after the status block.
7485  */
7486 static irqreturn_t tg3_msi(int irq, void *dev_id)
7487 {
7488 	struct tg3_napi *tnapi = dev_id;
7489 	struct tg3 *tp = tnapi->tp;
7490 
7491 	prefetch(tnapi->hw_status);
7492 	if (tnapi->rx_rcb)
7493 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7494 	/*
7495 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7496 	 * chip-internal interrupt pending events.
7497 	 * Writing non-zero to intr-mbox-0 additional tells the
7498 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7499 	 * event coalescing.
7500 	 */
7501 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7502 	if (likely(!tg3_irq_sync(tp)))
7503 		napi_schedule(&tnapi->napi);
7504 
7505 	return IRQ_RETVAL(1);
7506 }
7507 
7508 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7509 {
7510 	struct tg3_napi *tnapi = dev_id;
7511 	struct tg3 *tp = tnapi->tp;
7512 	struct tg3_hw_status *sblk = tnapi->hw_status;
7513 	unsigned int handled = 1;
7514 
7515 	/* In INTx mode, it is possible for the interrupt to arrive at
7516 	 * the CPU before the status block posted prior to the interrupt.
7517 	 * Reading the PCI State register will confirm whether the
7518 	 * interrupt is ours and will flush the status block.
7519 	 */
7520 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7521 		if (tg3_flag(tp, CHIP_RESETTING) ||
7522 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7523 			handled = 0;
7524 			goto out;
7525 		}
7526 	}
7527 
7528 	/*
7529 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7530 	 * chip-internal interrupt pending events.
7531 	 * Writing non-zero to intr-mbox-0 additional tells the
7532 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7533 	 * event coalescing.
7534 	 *
7535 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7536 	 * spurious interrupts.  The flush impacts performance but
7537 	 * excessive spurious interrupts can be worse in some cases.
7538 	 */
7539 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7540 	if (tg3_irq_sync(tp))
7541 		goto out;
7542 	sblk->status &= ~SD_STATUS_UPDATED;
7543 	if (likely(tg3_has_work(tnapi))) {
7544 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7545 		napi_schedule(&tnapi->napi);
7546 	} else {
7547 		/* No work, shared interrupt perhaps?  re-enable
7548 		 * interrupts, and flush that PCI write
7549 		 */
7550 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7551 			       0x00000000);
7552 	}
7553 out:
7554 	return IRQ_RETVAL(handled);
7555 }
7556 
7557 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7558 {
7559 	struct tg3_napi *tnapi = dev_id;
7560 	struct tg3 *tp = tnapi->tp;
7561 	struct tg3_hw_status *sblk = tnapi->hw_status;
7562 	unsigned int handled = 1;
7563 
7564 	/* In INTx mode, it is possible for the interrupt to arrive at
7565 	 * the CPU before the status block posted prior to the interrupt.
7566 	 * Reading the PCI State register will confirm whether the
7567 	 * interrupt is ours and will flush the status block.
7568 	 */
7569 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7570 		if (tg3_flag(tp, CHIP_RESETTING) ||
7571 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7572 			handled = 0;
7573 			goto out;
7574 		}
7575 	}
7576 
7577 	/*
7578 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7579 	 * chip-internal interrupt pending events.
7580 	 * writing non-zero to intr-mbox-0 additional tells the
7581 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7582 	 * event coalescing.
7583 	 *
7584 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7585 	 * spurious interrupts.  The flush impacts performance but
7586 	 * excessive spurious interrupts can be worse in some cases.
7587 	 */
7588 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7589 
7590 	/*
7591 	 * In a shared interrupt configuration, sometimes other devices'
7592 	 * interrupts will scream.  We record the current status tag here
7593 	 * so that the above check can report that the screaming interrupts
7594 	 * are unhandled.  Eventually they will be silenced.
7595 	 */
7596 	tnapi->last_irq_tag = sblk->status_tag;
7597 
7598 	if (tg3_irq_sync(tp))
7599 		goto out;
7600 
7601 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7602 
7603 	napi_schedule(&tnapi->napi);
7604 
7605 out:
7606 	return IRQ_RETVAL(handled);
7607 }
7608 
7609 /* ISR for interrupt test */
7610 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7611 {
7612 	struct tg3_napi *tnapi = dev_id;
7613 	struct tg3 *tp = tnapi->tp;
7614 	struct tg3_hw_status *sblk = tnapi->hw_status;
7615 
7616 	if ((sblk->status & SD_STATUS_UPDATED) ||
7617 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7618 		tg3_disable_ints(tp);
7619 		return IRQ_RETVAL(1);
7620 	}
7621 	return IRQ_RETVAL(0);
7622 }
7623 
7624 #ifdef CONFIG_NET_POLL_CONTROLLER
7625 static void tg3_poll_controller(struct net_device *dev)
7626 {
7627 	int i;
7628 	struct tg3 *tp = netdev_priv(dev);
7629 
7630 	if (tg3_irq_sync(tp))
7631 		return;
7632 
7633 	for (i = 0; i < tp->irq_cnt; i++)
7634 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7635 }
7636 #endif
7637 
7638 static void tg3_tx_timeout(struct net_device *dev)
7639 {
7640 	struct tg3 *tp = netdev_priv(dev);
7641 
7642 	if (netif_msg_tx_err(tp)) {
7643 		netdev_err(dev, "transmit timed out, resetting\n");
7644 		tg3_dump_state(tp);
7645 	}
7646 
7647 	tg3_reset_task_schedule(tp);
7648 }
7649 
7650 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7651 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7652 {
7653 	u32 base = (u32) mapping & 0xffffffff;
7654 
7655 	return base + len + 8 < base;
7656 }
7657 
7658 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7659  * of any 4GB boundaries: 4G, 8G, etc
7660  */
7661 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7662 					   u32 len, u32 mss)
7663 {
7664 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7665 		u32 base = (u32) mapping & 0xffffffff;
7666 
7667 		return ((base + len + (mss & 0x3fff)) < base);
7668 	}
7669 	return 0;
7670 }
7671 
7672 /* Test for DMA addresses > 40-bit */
7673 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7674 					  int len)
7675 {
7676 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7677 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7678 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7679 	return 0;
7680 #else
7681 	return 0;
7682 #endif
7683 }
7684 
7685 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7686 				 dma_addr_t mapping, u32 len, u32 flags,
7687 				 u32 mss, u32 vlan)
7688 {
7689 	txbd->addr_hi = ((u64) mapping >> 32);
7690 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7691 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7692 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7693 }
7694 
7695 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7696 			    dma_addr_t map, u32 len, u32 flags,
7697 			    u32 mss, u32 vlan)
7698 {
7699 	struct tg3 *tp = tnapi->tp;
7700 	bool hwbug = false;
7701 
7702 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7703 		hwbug = true;
7704 
7705 	if (tg3_4g_overflow_test(map, len))
7706 		hwbug = true;
7707 
7708 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7709 		hwbug = true;
7710 
7711 	if (tg3_40bit_overflow_test(tp, map, len))
7712 		hwbug = true;
7713 
7714 	if (tp->dma_limit) {
7715 		u32 prvidx = *entry;
7716 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7717 		while (len > tp->dma_limit && *budget) {
7718 			u32 frag_len = tp->dma_limit;
7719 			len -= tp->dma_limit;
7720 
7721 			/* Avoid the 8byte DMA problem */
7722 			if (len <= 8) {
7723 				len += tp->dma_limit / 2;
7724 				frag_len = tp->dma_limit / 2;
7725 			}
7726 
7727 			tnapi->tx_buffers[*entry].fragmented = true;
7728 
7729 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7730 				      frag_len, tmp_flag, mss, vlan);
7731 			*budget -= 1;
7732 			prvidx = *entry;
7733 			*entry = NEXT_TX(*entry);
7734 
7735 			map += frag_len;
7736 		}
7737 
7738 		if (len) {
7739 			if (*budget) {
7740 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7741 					      len, flags, mss, vlan);
7742 				*budget -= 1;
7743 				*entry = NEXT_TX(*entry);
7744 			} else {
7745 				hwbug = true;
7746 				tnapi->tx_buffers[prvidx].fragmented = false;
7747 			}
7748 		}
7749 	} else {
7750 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7751 			      len, flags, mss, vlan);
7752 		*entry = NEXT_TX(*entry);
7753 	}
7754 
7755 	return hwbug;
7756 }
7757 
7758 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7759 {
7760 	int i;
7761 	struct sk_buff *skb;
7762 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7763 
7764 	skb = txb->skb;
7765 	txb->skb = NULL;
7766 
7767 	pci_unmap_single(tnapi->tp->pdev,
7768 			 dma_unmap_addr(txb, mapping),
7769 			 skb_headlen(skb),
7770 			 PCI_DMA_TODEVICE);
7771 
7772 	while (txb->fragmented) {
7773 		txb->fragmented = false;
7774 		entry = NEXT_TX(entry);
7775 		txb = &tnapi->tx_buffers[entry];
7776 	}
7777 
7778 	for (i = 0; i <= last; i++) {
7779 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7780 
7781 		entry = NEXT_TX(entry);
7782 		txb = &tnapi->tx_buffers[entry];
7783 
7784 		pci_unmap_page(tnapi->tp->pdev,
7785 			       dma_unmap_addr(txb, mapping),
7786 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7787 
7788 		while (txb->fragmented) {
7789 			txb->fragmented = false;
7790 			entry = NEXT_TX(entry);
7791 			txb = &tnapi->tx_buffers[entry];
7792 		}
7793 	}
7794 }
7795 
7796 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7797 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7798 				       struct sk_buff **pskb,
7799 				       u32 *entry, u32 *budget,
7800 				       u32 base_flags, u32 mss, u32 vlan)
7801 {
7802 	struct tg3 *tp = tnapi->tp;
7803 	struct sk_buff *new_skb, *skb = *pskb;
7804 	dma_addr_t new_addr = 0;
7805 	int ret = 0;
7806 
7807 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7808 		new_skb = skb_copy(skb, GFP_ATOMIC);
7809 	else {
7810 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7811 
7812 		new_skb = skb_copy_expand(skb,
7813 					  skb_headroom(skb) + more_headroom,
7814 					  skb_tailroom(skb), GFP_ATOMIC);
7815 	}
7816 
7817 	if (!new_skb) {
7818 		ret = -1;
7819 	} else {
7820 		/* New SKB is guaranteed to be linear. */
7821 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7822 					  PCI_DMA_TODEVICE);
7823 		/* Make sure the mapping succeeded */
7824 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7825 			dev_kfree_skb_any(new_skb);
7826 			ret = -1;
7827 		} else {
7828 			u32 save_entry = *entry;
7829 
7830 			base_flags |= TXD_FLAG_END;
7831 
7832 			tnapi->tx_buffers[*entry].skb = new_skb;
7833 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7834 					   mapping, new_addr);
7835 
7836 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7837 					    new_skb->len, base_flags,
7838 					    mss, vlan)) {
7839 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7840 				dev_kfree_skb_any(new_skb);
7841 				ret = -1;
7842 			}
7843 		}
7844 	}
7845 
7846 	dev_consume_skb_any(skb);
7847 	*pskb = new_skb;
7848 	return ret;
7849 }
7850 
7851 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7852 {
7853 	/* Check if we will never have enough descriptors,
7854 	 * as gso_segs can be more than current ring size
7855 	 */
7856 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7857 }
7858 
7859 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7860 
7861 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7862  * indicated in tg3_tx_frag_set()
7863  */
7864 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7865 		       struct netdev_queue *txq, struct sk_buff *skb)
7866 {
7867 	struct sk_buff *segs, *nskb;
7868 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7869 
7870 	/* Estimate the number of fragments in the worst case */
7871 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7872 		netif_tx_stop_queue(txq);
7873 
7874 		/* netif_tx_stop_queue() must be done before checking
7875 		 * checking tx index in tg3_tx_avail() below, because in
7876 		 * tg3_tx(), we update tx index before checking for
7877 		 * netif_tx_queue_stopped().
7878 		 */
7879 		smp_mb();
7880 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7881 			return NETDEV_TX_BUSY;
7882 
7883 		netif_tx_wake_queue(txq);
7884 	}
7885 
7886 	segs = skb_gso_segment(skb, tp->dev->features &
7887 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7888 	if (IS_ERR(segs) || !segs)
7889 		goto tg3_tso_bug_end;
7890 
7891 	do {
7892 		nskb = segs;
7893 		segs = segs->next;
7894 		nskb->next = NULL;
7895 		tg3_start_xmit(nskb, tp->dev);
7896 	} while (segs);
7897 
7898 tg3_tso_bug_end:
7899 	dev_consume_skb_any(skb);
7900 
7901 	return NETDEV_TX_OK;
7902 }
7903 
7904 /* hard_start_xmit for all devices */
7905 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7906 {
7907 	struct tg3 *tp = netdev_priv(dev);
7908 	u32 len, entry, base_flags, mss, vlan = 0;
7909 	u32 budget;
7910 	int i = -1, would_hit_hwbug;
7911 	dma_addr_t mapping;
7912 	struct tg3_napi *tnapi;
7913 	struct netdev_queue *txq;
7914 	unsigned int last;
7915 	struct iphdr *iph = NULL;
7916 	struct tcphdr *tcph = NULL;
7917 	__sum16 tcp_csum = 0, ip_csum = 0;
7918 	__be16 ip_tot_len = 0;
7919 
7920 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7921 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7922 	if (tg3_flag(tp, ENABLE_TSS))
7923 		tnapi++;
7924 
7925 	budget = tg3_tx_avail(tnapi);
7926 
7927 	/* We are running in BH disabled context with netif_tx_lock
7928 	 * and TX reclaim runs via tp->napi.poll inside of a software
7929 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7930 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7931 	 */
7932 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7933 		if (!netif_tx_queue_stopped(txq)) {
7934 			netif_tx_stop_queue(txq);
7935 
7936 			/* This is a hard error, log it. */
7937 			netdev_err(dev,
7938 				   "BUG! Tx Ring full when queue awake!\n");
7939 		}
7940 		return NETDEV_TX_BUSY;
7941 	}
7942 
7943 	entry = tnapi->tx_prod;
7944 	base_flags = 0;
7945 
7946 	mss = skb_shinfo(skb)->gso_size;
7947 	if (mss) {
7948 		u32 tcp_opt_len, hdr_len;
7949 
7950 		if (skb_cow_head(skb, 0))
7951 			goto drop;
7952 
7953 		iph = ip_hdr(skb);
7954 		tcp_opt_len = tcp_optlen(skb);
7955 
7956 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7957 
7958 		/* HW/FW can not correctly segment packets that have been
7959 		 * vlan encapsulated.
7960 		 */
7961 		if (skb->protocol == htons(ETH_P_8021Q) ||
7962 		    skb->protocol == htons(ETH_P_8021AD)) {
7963 			if (tg3_tso_bug_gso_check(tnapi, skb))
7964 				return tg3_tso_bug(tp, tnapi, txq, skb);
7965 			goto drop;
7966 		}
7967 
7968 		if (!skb_is_gso_v6(skb)) {
7969 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7970 			    tg3_flag(tp, TSO_BUG)) {
7971 				if (tg3_tso_bug_gso_check(tnapi, skb))
7972 					return tg3_tso_bug(tp, tnapi, txq, skb);
7973 				goto drop;
7974 			}
7975 			ip_csum = iph->check;
7976 			ip_tot_len = iph->tot_len;
7977 			iph->check = 0;
7978 			iph->tot_len = htons(mss + hdr_len);
7979 		}
7980 
7981 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7982 			       TXD_FLAG_CPU_POST_DMA);
7983 
7984 		tcph = tcp_hdr(skb);
7985 		tcp_csum = tcph->check;
7986 
7987 		if (tg3_flag(tp, HW_TSO_1) ||
7988 		    tg3_flag(tp, HW_TSO_2) ||
7989 		    tg3_flag(tp, HW_TSO_3)) {
7990 			tcph->check = 0;
7991 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7992 		} else {
7993 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7994 							 0, IPPROTO_TCP, 0);
7995 		}
7996 
7997 		if (tg3_flag(tp, HW_TSO_3)) {
7998 			mss |= (hdr_len & 0xc) << 12;
7999 			if (hdr_len & 0x10)
8000 				base_flags |= 0x00000010;
8001 			base_flags |= (hdr_len & 0x3e0) << 5;
8002 		} else if (tg3_flag(tp, HW_TSO_2))
8003 			mss |= hdr_len << 9;
8004 		else if (tg3_flag(tp, HW_TSO_1) ||
8005 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8006 			if (tcp_opt_len || iph->ihl > 5) {
8007 				int tsflags;
8008 
8009 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8010 				mss |= (tsflags << 11);
8011 			}
8012 		} else {
8013 			if (tcp_opt_len || iph->ihl > 5) {
8014 				int tsflags;
8015 
8016 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8017 				base_flags |= tsflags << 12;
8018 			}
8019 		}
8020 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8021 		/* HW/FW can not correctly checksum packets that have been
8022 		 * vlan encapsulated.
8023 		 */
8024 		if (skb->protocol == htons(ETH_P_8021Q) ||
8025 		    skb->protocol == htons(ETH_P_8021AD)) {
8026 			if (skb_checksum_help(skb))
8027 				goto drop;
8028 		} else  {
8029 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8030 		}
8031 	}
8032 
8033 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8034 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8035 		base_flags |= TXD_FLAG_JMB_PKT;
8036 
8037 	if (skb_vlan_tag_present(skb)) {
8038 		base_flags |= TXD_FLAG_VLAN;
8039 		vlan = skb_vlan_tag_get(skb);
8040 	}
8041 
8042 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8043 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8044 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8045 		base_flags |= TXD_FLAG_HWTSTAMP;
8046 	}
8047 
8048 	len = skb_headlen(skb);
8049 
8050 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8051 	if (pci_dma_mapping_error(tp->pdev, mapping))
8052 		goto drop;
8053 
8054 
8055 	tnapi->tx_buffers[entry].skb = skb;
8056 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8057 
8058 	would_hit_hwbug = 0;
8059 
8060 	if (tg3_flag(tp, 5701_DMA_BUG))
8061 		would_hit_hwbug = 1;
8062 
8063 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8064 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8065 			    mss, vlan)) {
8066 		would_hit_hwbug = 1;
8067 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8068 		u32 tmp_mss = mss;
8069 
8070 		if (!tg3_flag(tp, HW_TSO_1) &&
8071 		    !tg3_flag(tp, HW_TSO_2) &&
8072 		    !tg3_flag(tp, HW_TSO_3))
8073 			tmp_mss = 0;
8074 
8075 		/* Now loop through additional data
8076 		 * fragments, and queue them.
8077 		 */
8078 		last = skb_shinfo(skb)->nr_frags - 1;
8079 		for (i = 0; i <= last; i++) {
8080 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8081 
8082 			len = skb_frag_size(frag);
8083 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8084 						   len, DMA_TO_DEVICE);
8085 
8086 			tnapi->tx_buffers[entry].skb = NULL;
8087 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8088 					   mapping);
8089 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8090 				goto dma_error;
8091 
8092 			if (!budget ||
8093 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8094 					    len, base_flags |
8095 					    ((i == last) ? TXD_FLAG_END : 0),
8096 					    tmp_mss, vlan)) {
8097 				would_hit_hwbug = 1;
8098 				break;
8099 			}
8100 		}
8101 	}
8102 
8103 	if (would_hit_hwbug) {
8104 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8105 
8106 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8107 			/* If it's a TSO packet, do GSO instead of
8108 			 * allocating and copying to a large linear SKB
8109 			 */
8110 			if (ip_tot_len) {
8111 				iph->check = ip_csum;
8112 				iph->tot_len = ip_tot_len;
8113 			}
8114 			tcph->check = tcp_csum;
8115 			return tg3_tso_bug(tp, tnapi, txq, skb);
8116 		}
8117 
8118 		/* If the workaround fails due to memory/mapping
8119 		 * failure, silently drop this packet.
8120 		 */
8121 		entry = tnapi->tx_prod;
8122 		budget = tg3_tx_avail(tnapi);
8123 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8124 						base_flags, mss, vlan))
8125 			goto drop_nofree;
8126 	}
8127 
8128 	skb_tx_timestamp(skb);
8129 	netdev_tx_sent_queue(txq, skb->len);
8130 
8131 	/* Sync BD data before updating mailbox */
8132 	wmb();
8133 
8134 	tnapi->tx_prod = entry;
8135 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8136 		netif_tx_stop_queue(txq);
8137 
8138 		/* netif_tx_stop_queue() must be done before checking
8139 		 * checking tx index in tg3_tx_avail() below, because in
8140 		 * tg3_tx(), we update tx index before checking for
8141 		 * netif_tx_queue_stopped().
8142 		 */
8143 		smp_mb();
8144 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8145 			netif_tx_wake_queue(txq);
8146 	}
8147 
8148 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8149 		/* Packets are ready, update Tx producer idx on card. */
8150 		tw32_tx_mbox(tnapi->prodmbox, entry);
8151 		mmiowb();
8152 	}
8153 
8154 	return NETDEV_TX_OK;
8155 
8156 dma_error:
8157 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8158 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8159 drop:
8160 	dev_kfree_skb_any(skb);
8161 drop_nofree:
8162 	tp->tx_dropped++;
8163 	return NETDEV_TX_OK;
8164 }
8165 
8166 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8167 {
8168 	if (enable) {
8169 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8170 				  MAC_MODE_PORT_MODE_MASK);
8171 
8172 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8173 
8174 		if (!tg3_flag(tp, 5705_PLUS))
8175 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8176 
8177 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8178 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8179 		else
8180 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8181 	} else {
8182 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8183 
8184 		if (tg3_flag(tp, 5705_PLUS) ||
8185 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8186 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8187 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8188 	}
8189 
8190 	tw32(MAC_MODE, tp->mac_mode);
8191 	udelay(40);
8192 }
8193 
8194 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8195 {
8196 	u32 val, bmcr, mac_mode, ptest = 0;
8197 
8198 	tg3_phy_toggle_apd(tp, false);
8199 	tg3_phy_toggle_automdix(tp, false);
8200 
8201 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8202 		return -EIO;
8203 
8204 	bmcr = BMCR_FULLDPLX;
8205 	switch (speed) {
8206 	case SPEED_10:
8207 		break;
8208 	case SPEED_100:
8209 		bmcr |= BMCR_SPEED100;
8210 		break;
8211 	case SPEED_1000:
8212 	default:
8213 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8214 			speed = SPEED_100;
8215 			bmcr |= BMCR_SPEED100;
8216 		} else {
8217 			speed = SPEED_1000;
8218 			bmcr |= BMCR_SPEED1000;
8219 		}
8220 	}
8221 
8222 	if (extlpbk) {
8223 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8224 			tg3_readphy(tp, MII_CTRL1000, &val);
8225 			val |= CTL1000_AS_MASTER |
8226 			       CTL1000_ENABLE_MASTER;
8227 			tg3_writephy(tp, MII_CTRL1000, val);
8228 		} else {
8229 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8230 				MII_TG3_FET_PTEST_TRIM_2;
8231 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8232 		}
8233 	} else
8234 		bmcr |= BMCR_LOOPBACK;
8235 
8236 	tg3_writephy(tp, MII_BMCR, bmcr);
8237 
8238 	/* The write needs to be flushed for the FETs */
8239 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8240 		tg3_readphy(tp, MII_BMCR, &bmcr);
8241 
8242 	udelay(40);
8243 
8244 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8245 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8246 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8247 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8248 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8249 
8250 		/* The write needs to be flushed for the AC131 */
8251 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8252 	}
8253 
8254 	/* Reset to prevent losing 1st rx packet intermittently */
8255 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8256 	    tg3_flag(tp, 5780_CLASS)) {
8257 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8258 		udelay(10);
8259 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8260 	}
8261 
8262 	mac_mode = tp->mac_mode &
8263 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8264 	if (speed == SPEED_1000)
8265 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8266 	else
8267 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8268 
8269 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8270 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8271 
8272 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8273 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8274 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8275 			mac_mode |= MAC_MODE_LINK_POLARITY;
8276 
8277 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8278 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8279 	}
8280 
8281 	tw32(MAC_MODE, mac_mode);
8282 	udelay(40);
8283 
8284 	return 0;
8285 }
8286 
8287 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8288 {
8289 	struct tg3 *tp = netdev_priv(dev);
8290 
8291 	if (features & NETIF_F_LOOPBACK) {
8292 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8293 			return;
8294 
8295 		spin_lock_bh(&tp->lock);
8296 		tg3_mac_loopback(tp, true);
8297 		netif_carrier_on(tp->dev);
8298 		spin_unlock_bh(&tp->lock);
8299 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8300 	} else {
8301 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8302 			return;
8303 
8304 		spin_lock_bh(&tp->lock);
8305 		tg3_mac_loopback(tp, false);
8306 		/* Force link status check */
8307 		tg3_setup_phy(tp, true);
8308 		spin_unlock_bh(&tp->lock);
8309 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8310 	}
8311 }
8312 
8313 static netdev_features_t tg3_fix_features(struct net_device *dev,
8314 	netdev_features_t features)
8315 {
8316 	struct tg3 *tp = netdev_priv(dev);
8317 
8318 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8319 		features &= ~NETIF_F_ALL_TSO;
8320 
8321 	return features;
8322 }
8323 
8324 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8325 {
8326 	netdev_features_t changed = dev->features ^ features;
8327 
8328 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8329 		tg3_set_loopback(dev, features);
8330 
8331 	return 0;
8332 }
8333 
8334 static void tg3_rx_prodring_free(struct tg3 *tp,
8335 				 struct tg3_rx_prodring_set *tpr)
8336 {
8337 	int i;
8338 
8339 	if (tpr != &tp->napi[0].prodring) {
8340 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8341 		     i = (i + 1) & tp->rx_std_ring_mask)
8342 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8343 					tp->rx_pkt_map_sz);
8344 
8345 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8346 			for (i = tpr->rx_jmb_cons_idx;
8347 			     i != tpr->rx_jmb_prod_idx;
8348 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8349 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8350 						TG3_RX_JMB_MAP_SZ);
8351 			}
8352 		}
8353 
8354 		return;
8355 	}
8356 
8357 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8358 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8359 				tp->rx_pkt_map_sz);
8360 
8361 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8362 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8363 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8364 					TG3_RX_JMB_MAP_SZ);
8365 	}
8366 }
8367 
8368 /* Initialize rx rings for packet processing.
8369  *
8370  * The chip has been shut down and the driver detached from
8371  * the networking, so no interrupts or new tx packets will
8372  * end up in the driver.  tp->{tx,}lock are held and thus
8373  * we may not sleep.
8374  */
8375 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8376 				 struct tg3_rx_prodring_set *tpr)
8377 {
8378 	u32 i, rx_pkt_dma_sz;
8379 
8380 	tpr->rx_std_cons_idx = 0;
8381 	tpr->rx_std_prod_idx = 0;
8382 	tpr->rx_jmb_cons_idx = 0;
8383 	tpr->rx_jmb_prod_idx = 0;
8384 
8385 	if (tpr != &tp->napi[0].prodring) {
8386 		memset(&tpr->rx_std_buffers[0], 0,
8387 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8388 		if (tpr->rx_jmb_buffers)
8389 			memset(&tpr->rx_jmb_buffers[0], 0,
8390 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8391 		goto done;
8392 	}
8393 
8394 	/* Zero out all descriptors. */
8395 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8396 
8397 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8398 	if (tg3_flag(tp, 5780_CLASS) &&
8399 	    tp->dev->mtu > ETH_DATA_LEN)
8400 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8401 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8402 
8403 	/* Initialize invariants of the rings, we only set this
8404 	 * stuff once.  This works because the card does not
8405 	 * write into the rx buffer posting rings.
8406 	 */
8407 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8408 		struct tg3_rx_buffer_desc *rxd;
8409 
8410 		rxd = &tpr->rx_std[i];
8411 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8412 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8413 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8414 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8415 	}
8416 
8417 	/* Now allocate fresh SKBs for each rx ring. */
8418 	for (i = 0; i < tp->rx_pending; i++) {
8419 		unsigned int frag_size;
8420 
8421 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8422 				      &frag_size) < 0) {
8423 			netdev_warn(tp->dev,
8424 				    "Using a smaller RX standard ring. Only "
8425 				    "%d out of %d buffers were allocated "
8426 				    "successfully\n", i, tp->rx_pending);
8427 			if (i == 0)
8428 				goto initfail;
8429 			tp->rx_pending = i;
8430 			break;
8431 		}
8432 	}
8433 
8434 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8435 		goto done;
8436 
8437 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8438 
8439 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8440 		goto done;
8441 
8442 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8443 		struct tg3_rx_buffer_desc *rxd;
8444 
8445 		rxd = &tpr->rx_jmb[i].std;
8446 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8447 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8448 				  RXD_FLAG_JUMBO;
8449 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8450 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8451 	}
8452 
8453 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8454 		unsigned int frag_size;
8455 
8456 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8457 				      &frag_size) < 0) {
8458 			netdev_warn(tp->dev,
8459 				    "Using a smaller RX jumbo ring. Only %d "
8460 				    "out of %d buffers were allocated "
8461 				    "successfully\n", i, tp->rx_jumbo_pending);
8462 			if (i == 0)
8463 				goto initfail;
8464 			tp->rx_jumbo_pending = i;
8465 			break;
8466 		}
8467 	}
8468 
8469 done:
8470 	return 0;
8471 
8472 initfail:
8473 	tg3_rx_prodring_free(tp, tpr);
8474 	return -ENOMEM;
8475 }
8476 
8477 static void tg3_rx_prodring_fini(struct tg3 *tp,
8478 				 struct tg3_rx_prodring_set *tpr)
8479 {
8480 	kfree(tpr->rx_std_buffers);
8481 	tpr->rx_std_buffers = NULL;
8482 	kfree(tpr->rx_jmb_buffers);
8483 	tpr->rx_jmb_buffers = NULL;
8484 	if (tpr->rx_std) {
8485 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8486 				  tpr->rx_std, tpr->rx_std_mapping);
8487 		tpr->rx_std = NULL;
8488 	}
8489 	if (tpr->rx_jmb) {
8490 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8491 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8492 		tpr->rx_jmb = NULL;
8493 	}
8494 }
8495 
8496 static int tg3_rx_prodring_init(struct tg3 *tp,
8497 				struct tg3_rx_prodring_set *tpr)
8498 {
8499 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8500 				      GFP_KERNEL);
8501 	if (!tpr->rx_std_buffers)
8502 		return -ENOMEM;
8503 
8504 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8505 					 TG3_RX_STD_RING_BYTES(tp),
8506 					 &tpr->rx_std_mapping,
8507 					 GFP_KERNEL);
8508 	if (!tpr->rx_std)
8509 		goto err_out;
8510 
8511 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8512 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8513 					      GFP_KERNEL);
8514 		if (!tpr->rx_jmb_buffers)
8515 			goto err_out;
8516 
8517 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8518 						 TG3_RX_JMB_RING_BYTES(tp),
8519 						 &tpr->rx_jmb_mapping,
8520 						 GFP_KERNEL);
8521 		if (!tpr->rx_jmb)
8522 			goto err_out;
8523 	}
8524 
8525 	return 0;
8526 
8527 err_out:
8528 	tg3_rx_prodring_fini(tp, tpr);
8529 	return -ENOMEM;
8530 }
8531 
8532 /* Free up pending packets in all rx/tx rings.
8533  *
8534  * The chip has been shut down and the driver detached from
8535  * the networking, so no interrupts or new tx packets will
8536  * end up in the driver.  tp->{tx,}lock is not held and we are not
8537  * in an interrupt context and thus may sleep.
8538  */
8539 static void tg3_free_rings(struct tg3 *tp)
8540 {
8541 	int i, j;
8542 
8543 	for (j = 0; j < tp->irq_cnt; j++) {
8544 		struct tg3_napi *tnapi = &tp->napi[j];
8545 
8546 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8547 
8548 		if (!tnapi->tx_buffers)
8549 			continue;
8550 
8551 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8552 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8553 
8554 			if (!skb)
8555 				continue;
8556 
8557 			tg3_tx_skb_unmap(tnapi, i,
8558 					 skb_shinfo(skb)->nr_frags - 1);
8559 
8560 			dev_consume_skb_any(skb);
8561 		}
8562 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8563 	}
8564 }
8565 
8566 /* Initialize tx/rx rings for packet processing.
8567  *
8568  * The chip has been shut down and the driver detached from
8569  * the networking, so no interrupts or new tx packets will
8570  * end up in the driver.  tp->{tx,}lock are held and thus
8571  * we may not sleep.
8572  */
8573 static int tg3_init_rings(struct tg3 *tp)
8574 {
8575 	int i;
8576 
8577 	/* Free up all the SKBs. */
8578 	tg3_free_rings(tp);
8579 
8580 	for (i = 0; i < tp->irq_cnt; i++) {
8581 		struct tg3_napi *tnapi = &tp->napi[i];
8582 
8583 		tnapi->last_tag = 0;
8584 		tnapi->last_irq_tag = 0;
8585 		tnapi->hw_status->status = 0;
8586 		tnapi->hw_status->status_tag = 0;
8587 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8588 
8589 		tnapi->tx_prod = 0;
8590 		tnapi->tx_cons = 0;
8591 		if (tnapi->tx_ring)
8592 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8593 
8594 		tnapi->rx_rcb_ptr = 0;
8595 		if (tnapi->rx_rcb)
8596 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8597 
8598 		if (tnapi->prodring.rx_std &&
8599 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8600 			tg3_free_rings(tp);
8601 			return -ENOMEM;
8602 		}
8603 	}
8604 
8605 	return 0;
8606 }
8607 
8608 static void tg3_mem_tx_release(struct tg3 *tp)
8609 {
8610 	int i;
8611 
8612 	for (i = 0; i < tp->irq_max; i++) {
8613 		struct tg3_napi *tnapi = &tp->napi[i];
8614 
8615 		if (tnapi->tx_ring) {
8616 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8617 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8618 			tnapi->tx_ring = NULL;
8619 		}
8620 
8621 		kfree(tnapi->tx_buffers);
8622 		tnapi->tx_buffers = NULL;
8623 	}
8624 }
8625 
8626 static int tg3_mem_tx_acquire(struct tg3 *tp)
8627 {
8628 	int i;
8629 	struct tg3_napi *tnapi = &tp->napi[0];
8630 
8631 	/* If multivector TSS is enabled, vector 0 does not handle
8632 	 * tx interrupts.  Don't allocate any resources for it.
8633 	 */
8634 	if (tg3_flag(tp, ENABLE_TSS))
8635 		tnapi++;
8636 
8637 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8638 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8639 					    sizeof(struct tg3_tx_ring_info),
8640 					    GFP_KERNEL);
8641 		if (!tnapi->tx_buffers)
8642 			goto err_out;
8643 
8644 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8645 						    TG3_TX_RING_BYTES,
8646 						    &tnapi->tx_desc_mapping,
8647 						    GFP_KERNEL);
8648 		if (!tnapi->tx_ring)
8649 			goto err_out;
8650 	}
8651 
8652 	return 0;
8653 
8654 err_out:
8655 	tg3_mem_tx_release(tp);
8656 	return -ENOMEM;
8657 }
8658 
8659 static void tg3_mem_rx_release(struct tg3 *tp)
8660 {
8661 	int i;
8662 
8663 	for (i = 0; i < tp->irq_max; i++) {
8664 		struct tg3_napi *tnapi = &tp->napi[i];
8665 
8666 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8667 
8668 		if (!tnapi->rx_rcb)
8669 			continue;
8670 
8671 		dma_free_coherent(&tp->pdev->dev,
8672 				  TG3_RX_RCB_RING_BYTES(tp),
8673 				  tnapi->rx_rcb,
8674 				  tnapi->rx_rcb_mapping);
8675 		tnapi->rx_rcb = NULL;
8676 	}
8677 }
8678 
8679 static int tg3_mem_rx_acquire(struct tg3 *tp)
8680 {
8681 	unsigned int i, limit;
8682 
8683 	limit = tp->rxq_cnt;
8684 
8685 	/* If RSS is enabled, we need a (dummy) producer ring
8686 	 * set on vector zero.  This is the true hw prodring.
8687 	 */
8688 	if (tg3_flag(tp, ENABLE_RSS))
8689 		limit++;
8690 
8691 	for (i = 0; i < limit; i++) {
8692 		struct tg3_napi *tnapi = &tp->napi[i];
8693 
8694 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8695 			goto err_out;
8696 
8697 		/* If multivector RSS is enabled, vector 0
8698 		 * does not handle rx or tx interrupts.
8699 		 * Don't allocate any resources for it.
8700 		 */
8701 		if (!i && tg3_flag(tp, ENABLE_RSS))
8702 			continue;
8703 
8704 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8705 						    TG3_RX_RCB_RING_BYTES(tp),
8706 						    &tnapi->rx_rcb_mapping,
8707 						    GFP_KERNEL);
8708 		if (!tnapi->rx_rcb)
8709 			goto err_out;
8710 	}
8711 
8712 	return 0;
8713 
8714 err_out:
8715 	tg3_mem_rx_release(tp);
8716 	return -ENOMEM;
8717 }
8718 
8719 /*
8720  * Must not be invoked with interrupt sources disabled and
8721  * the hardware shutdown down.
8722  */
8723 static void tg3_free_consistent(struct tg3 *tp)
8724 {
8725 	int i;
8726 
8727 	for (i = 0; i < tp->irq_cnt; i++) {
8728 		struct tg3_napi *tnapi = &tp->napi[i];
8729 
8730 		if (tnapi->hw_status) {
8731 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8732 					  tnapi->hw_status,
8733 					  tnapi->status_mapping);
8734 			tnapi->hw_status = NULL;
8735 		}
8736 	}
8737 
8738 	tg3_mem_rx_release(tp);
8739 	tg3_mem_tx_release(tp);
8740 
8741 	/* tp->hw_stats can be referenced safely:
8742 	 *     1. under rtnl_lock
8743 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8744 	 */
8745 	if (tp->hw_stats) {
8746 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8747 				  tp->hw_stats, tp->stats_mapping);
8748 		tp->hw_stats = NULL;
8749 	}
8750 }
8751 
8752 /*
8753  * Must not be invoked with interrupt sources disabled and
8754  * the hardware shutdown down.  Can sleep.
8755  */
8756 static int tg3_alloc_consistent(struct tg3 *tp)
8757 {
8758 	int i;
8759 
8760 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8761 					   sizeof(struct tg3_hw_stats),
8762 					   &tp->stats_mapping, GFP_KERNEL);
8763 	if (!tp->hw_stats)
8764 		goto err_out;
8765 
8766 	for (i = 0; i < tp->irq_cnt; i++) {
8767 		struct tg3_napi *tnapi = &tp->napi[i];
8768 		struct tg3_hw_status *sblk;
8769 
8770 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8771 						       TG3_HW_STATUS_SIZE,
8772 						       &tnapi->status_mapping,
8773 						       GFP_KERNEL);
8774 		if (!tnapi->hw_status)
8775 			goto err_out;
8776 
8777 		sblk = tnapi->hw_status;
8778 
8779 		if (tg3_flag(tp, ENABLE_RSS)) {
8780 			u16 *prodptr = NULL;
8781 
8782 			/*
8783 			 * When RSS is enabled, the status block format changes
8784 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8785 			 * and "rx_mini_consumer" members get mapped to the
8786 			 * other three rx return ring producer indexes.
8787 			 */
8788 			switch (i) {
8789 			case 1:
8790 				prodptr = &sblk->idx[0].rx_producer;
8791 				break;
8792 			case 2:
8793 				prodptr = &sblk->rx_jumbo_consumer;
8794 				break;
8795 			case 3:
8796 				prodptr = &sblk->reserved;
8797 				break;
8798 			case 4:
8799 				prodptr = &sblk->rx_mini_consumer;
8800 				break;
8801 			}
8802 			tnapi->rx_rcb_prod_idx = prodptr;
8803 		} else {
8804 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8805 		}
8806 	}
8807 
8808 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8809 		goto err_out;
8810 
8811 	return 0;
8812 
8813 err_out:
8814 	tg3_free_consistent(tp);
8815 	return -ENOMEM;
8816 }
8817 
8818 #define MAX_WAIT_CNT 1000
8819 
8820 /* To stop a block, clear the enable bit and poll till it
8821  * clears.  tp->lock is held.
8822  */
8823 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8824 {
8825 	unsigned int i;
8826 	u32 val;
8827 
8828 	if (tg3_flag(tp, 5705_PLUS)) {
8829 		switch (ofs) {
8830 		case RCVLSC_MODE:
8831 		case DMAC_MODE:
8832 		case MBFREE_MODE:
8833 		case BUFMGR_MODE:
8834 		case MEMARB_MODE:
8835 			/* We can't enable/disable these bits of the
8836 			 * 5705/5750, just say success.
8837 			 */
8838 			return 0;
8839 
8840 		default:
8841 			break;
8842 		}
8843 	}
8844 
8845 	val = tr32(ofs);
8846 	val &= ~enable_bit;
8847 	tw32_f(ofs, val);
8848 
8849 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8850 		if (pci_channel_offline(tp->pdev)) {
8851 			dev_err(&tp->pdev->dev,
8852 				"tg3_stop_block device offline, "
8853 				"ofs=%lx enable_bit=%x\n",
8854 				ofs, enable_bit);
8855 			return -ENODEV;
8856 		}
8857 
8858 		udelay(100);
8859 		val = tr32(ofs);
8860 		if ((val & enable_bit) == 0)
8861 			break;
8862 	}
8863 
8864 	if (i == MAX_WAIT_CNT && !silent) {
8865 		dev_err(&tp->pdev->dev,
8866 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8867 			ofs, enable_bit);
8868 		return -ENODEV;
8869 	}
8870 
8871 	return 0;
8872 }
8873 
8874 /* tp->lock is held. */
8875 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8876 {
8877 	int i, err;
8878 
8879 	tg3_disable_ints(tp);
8880 
8881 	if (pci_channel_offline(tp->pdev)) {
8882 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8883 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8884 		err = -ENODEV;
8885 		goto err_no_dev;
8886 	}
8887 
8888 	tp->rx_mode &= ~RX_MODE_ENABLE;
8889 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8890 	udelay(10);
8891 
8892 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8894 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8895 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8896 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8897 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8898 
8899 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8900 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8901 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8902 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8903 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8904 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8905 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8906 
8907 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8908 	tw32_f(MAC_MODE, tp->mac_mode);
8909 	udelay(40);
8910 
8911 	tp->tx_mode &= ~TX_MODE_ENABLE;
8912 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8913 
8914 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8915 		udelay(100);
8916 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8917 			break;
8918 	}
8919 	if (i >= MAX_WAIT_CNT) {
8920 		dev_err(&tp->pdev->dev,
8921 			"%s timed out, TX_MODE_ENABLE will not clear "
8922 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8923 		err |= -ENODEV;
8924 	}
8925 
8926 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8927 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8928 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8929 
8930 	tw32(FTQ_RESET, 0xffffffff);
8931 	tw32(FTQ_RESET, 0x00000000);
8932 
8933 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8934 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8935 
8936 err_no_dev:
8937 	for (i = 0; i < tp->irq_cnt; i++) {
8938 		struct tg3_napi *tnapi = &tp->napi[i];
8939 		if (tnapi->hw_status)
8940 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8941 	}
8942 
8943 	return err;
8944 }
8945 
8946 /* Save PCI command register before chip reset */
8947 static void tg3_save_pci_state(struct tg3 *tp)
8948 {
8949 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8950 }
8951 
8952 /* Restore PCI state after chip reset */
8953 static void tg3_restore_pci_state(struct tg3 *tp)
8954 {
8955 	u32 val;
8956 
8957 	/* Re-enable indirect register accesses. */
8958 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8959 			       tp->misc_host_ctrl);
8960 
8961 	/* Set MAX PCI retry to zero. */
8962 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8963 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8964 	    tg3_flag(tp, PCIX_MODE))
8965 		val |= PCISTATE_RETRY_SAME_DMA;
8966 	/* Allow reads and writes to the APE register and memory space. */
8967 	if (tg3_flag(tp, ENABLE_APE))
8968 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8969 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8970 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8971 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8972 
8973 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8974 
8975 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8976 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8977 				      tp->pci_cacheline_sz);
8978 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8979 				      tp->pci_lat_timer);
8980 	}
8981 
8982 	/* Make sure PCI-X relaxed ordering bit is clear. */
8983 	if (tg3_flag(tp, PCIX_MODE)) {
8984 		u16 pcix_cmd;
8985 
8986 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8987 				     &pcix_cmd);
8988 		pcix_cmd &= ~PCI_X_CMD_ERO;
8989 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8990 				      pcix_cmd);
8991 	}
8992 
8993 	if (tg3_flag(tp, 5780_CLASS)) {
8994 
8995 		/* Chip reset on 5780 will reset MSI enable bit,
8996 		 * so need to restore it.
8997 		 */
8998 		if (tg3_flag(tp, USING_MSI)) {
8999 			u16 ctrl;
9000 
9001 			pci_read_config_word(tp->pdev,
9002 					     tp->msi_cap + PCI_MSI_FLAGS,
9003 					     &ctrl);
9004 			pci_write_config_word(tp->pdev,
9005 					      tp->msi_cap + PCI_MSI_FLAGS,
9006 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9007 			val = tr32(MSGINT_MODE);
9008 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9009 		}
9010 	}
9011 }
9012 
9013 static void tg3_override_clk(struct tg3 *tp)
9014 {
9015 	u32 val;
9016 
9017 	switch (tg3_asic_rev(tp)) {
9018 	case ASIC_REV_5717:
9019 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9020 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9021 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9022 		break;
9023 
9024 	case ASIC_REV_5719:
9025 	case ASIC_REV_5720:
9026 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9027 		break;
9028 
9029 	default:
9030 		return;
9031 	}
9032 }
9033 
9034 static void tg3_restore_clk(struct tg3 *tp)
9035 {
9036 	u32 val;
9037 
9038 	switch (tg3_asic_rev(tp)) {
9039 	case ASIC_REV_5717:
9040 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9041 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9042 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9043 		break;
9044 
9045 	case ASIC_REV_5719:
9046 	case ASIC_REV_5720:
9047 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9048 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9049 		break;
9050 
9051 	default:
9052 		return;
9053 	}
9054 }
9055 
9056 /* tp->lock is held. */
9057 static int tg3_chip_reset(struct tg3 *tp)
9058 	__releases(tp->lock)
9059 	__acquires(tp->lock)
9060 {
9061 	u32 val;
9062 	void (*write_op)(struct tg3 *, u32, u32);
9063 	int i, err;
9064 
9065 	if (!pci_device_is_present(tp->pdev))
9066 		return -ENODEV;
9067 
9068 	tg3_nvram_lock(tp);
9069 
9070 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9071 
9072 	/* No matching tg3_nvram_unlock() after this because
9073 	 * chip reset below will undo the nvram lock.
9074 	 */
9075 	tp->nvram_lock_cnt = 0;
9076 
9077 	/* GRC_MISC_CFG core clock reset will clear the memory
9078 	 * enable bit in PCI register 4 and the MSI enable bit
9079 	 * on some chips, so we save relevant registers here.
9080 	 */
9081 	tg3_save_pci_state(tp);
9082 
9083 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9084 	    tg3_flag(tp, 5755_PLUS))
9085 		tw32(GRC_FASTBOOT_PC, 0);
9086 
9087 	/*
9088 	 * We must avoid the readl() that normally takes place.
9089 	 * It locks machines, causes machine checks, and other
9090 	 * fun things.  So, temporarily disable the 5701
9091 	 * hardware workaround, while we do the reset.
9092 	 */
9093 	write_op = tp->write32;
9094 	if (write_op == tg3_write_flush_reg32)
9095 		tp->write32 = tg3_write32;
9096 
9097 	/* Prevent the irq handler from reading or writing PCI registers
9098 	 * during chip reset when the memory enable bit in the PCI command
9099 	 * register may be cleared.  The chip does not generate interrupt
9100 	 * at this time, but the irq handler may still be called due to irq
9101 	 * sharing or irqpoll.
9102 	 */
9103 	tg3_flag_set(tp, CHIP_RESETTING);
9104 	for (i = 0; i < tp->irq_cnt; i++) {
9105 		struct tg3_napi *tnapi = &tp->napi[i];
9106 		if (tnapi->hw_status) {
9107 			tnapi->hw_status->status = 0;
9108 			tnapi->hw_status->status_tag = 0;
9109 		}
9110 		tnapi->last_tag = 0;
9111 		tnapi->last_irq_tag = 0;
9112 	}
9113 	smp_mb();
9114 
9115 	tg3_full_unlock(tp);
9116 
9117 	for (i = 0; i < tp->irq_cnt; i++)
9118 		synchronize_irq(tp->napi[i].irq_vec);
9119 
9120 	tg3_full_lock(tp, 0);
9121 
9122 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9123 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9124 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9125 	}
9126 
9127 	/* do the reset */
9128 	val = GRC_MISC_CFG_CORECLK_RESET;
9129 
9130 	if (tg3_flag(tp, PCI_EXPRESS)) {
9131 		/* Force PCIe 1.0a mode */
9132 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9133 		    !tg3_flag(tp, 57765_PLUS) &&
9134 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9135 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9136 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9137 
9138 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9139 			tw32(GRC_MISC_CFG, (1 << 29));
9140 			val |= (1 << 29);
9141 		}
9142 	}
9143 
9144 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9145 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9146 		tw32(GRC_VCPU_EXT_CTRL,
9147 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9148 	}
9149 
9150 	/* Set the clock to the highest frequency to avoid timeouts. With link
9151 	 * aware mode, the clock speed could be slow and bootcode does not
9152 	 * complete within the expected time. Override the clock to allow the
9153 	 * bootcode to finish sooner and then restore it.
9154 	 */
9155 	tg3_override_clk(tp);
9156 
9157 	/* Manage gphy power for all CPMU absent PCIe devices. */
9158 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9159 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9160 
9161 	tw32(GRC_MISC_CFG, val);
9162 
9163 	/* restore 5701 hardware bug workaround write method */
9164 	tp->write32 = write_op;
9165 
9166 	/* Unfortunately, we have to delay before the PCI read back.
9167 	 * Some 575X chips even will not respond to a PCI cfg access
9168 	 * when the reset command is given to the chip.
9169 	 *
9170 	 * How do these hardware designers expect things to work
9171 	 * properly if the PCI write is posted for a long period
9172 	 * of time?  It is always necessary to have some method by
9173 	 * which a register read back can occur to push the write
9174 	 * out which does the reset.
9175 	 *
9176 	 * For most tg3 variants the trick below was working.
9177 	 * Ho hum...
9178 	 */
9179 	udelay(120);
9180 
9181 	/* Flush PCI posted writes.  The normal MMIO registers
9182 	 * are inaccessible at this time so this is the only
9183 	 * way to make this reliably (actually, this is no longer
9184 	 * the case, see above).  I tried to use indirect
9185 	 * register read/write but this upset some 5701 variants.
9186 	 */
9187 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9188 
9189 	udelay(120);
9190 
9191 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9192 		u16 val16;
9193 
9194 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9195 			int j;
9196 			u32 cfg_val;
9197 
9198 			/* Wait for link training to complete.  */
9199 			for (j = 0; j < 5000; j++)
9200 				udelay(100);
9201 
9202 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9203 			pci_write_config_dword(tp->pdev, 0xc4,
9204 					       cfg_val | (1 << 15));
9205 		}
9206 
9207 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9208 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9209 		/*
9210 		 * Older PCIe devices only support the 128 byte
9211 		 * MPS setting.  Enforce the restriction.
9212 		 */
9213 		if (!tg3_flag(tp, CPMU_PRESENT))
9214 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9215 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9216 
9217 		/* Clear error status */
9218 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9219 				      PCI_EXP_DEVSTA_CED |
9220 				      PCI_EXP_DEVSTA_NFED |
9221 				      PCI_EXP_DEVSTA_FED |
9222 				      PCI_EXP_DEVSTA_URD);
9223 	}
9224 
9225 	tg3_restore_pci_state(tp);
9226 
9227 	tg3_flag_clear(tp, CHIP_RESETTING);
9228 	tg3_flag_clear(tp, ERROR_PROCESSED);
9229 
9230 	val = 0;
9231 	if (tg3_flag(tp, 5780_CLASS))
9232 		val = tr32(MEMARB_MODE);
9233 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9234 
9235 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9236 		tg3_stop_fw(tp);
9237 		tw32(0x5000, 0x400);
9238 	}
9239 
9240 	if (tg3_flag(tp, IS_SSB_CORE)) {
9241 		/*
9242 		 * BCM4785: In order to avoid repercussions from using
9243 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9244 		 * which is not required.
9245 		 */
9246 		tg3_stop_fw(tp);
9247 		tg3_halt_cpu(tp, RX_CPU_BASE);
9248 	}
9249 
9250 	err = tg3_poll_fw(tp);
9251 	if (err)
9252 		return err;
9253 
9254 	tw32(GRC_MODE, tp->grc_mode);
9255 
9256 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9257 		val = tr32(0xc4);
9258 
9259 		tw32(0xc4, val | (1 << 15));
9260 	}
9261 
9262 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9263 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9264 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9265 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9266 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9267 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9268 	}
9269 
9270 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9271 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9272 		val = tp->mac_mode;
9273 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9274 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9275 		val = tp->mac_mode;
9276 	} else
9277 		val = 0;
9278 
9279 	tw32_f(MAC_MODE, val);
9280 	udelay(40);
9281 
9282 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9283 
9284 	tg3_mdio_start(tp);
9285 
9286 	if (tg3_flag(tp, PCI_EXPRESS) &&
9287 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9288 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9289 	    !tg3_flag(tp, 57765_PLUS)) {
9290 		val = tr32(0x7c00);
9291 
9292 		tw32(0x7c00, val | (1 << 25));
9293 	}
9294 
9295 	tg3_restore_clk(tp);
9296 
9297 	/* Increase the core clock speed to fix tx timeout issue for 5762
9298 	 * with 100Mbps link speed.
9299 	 */
9300 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9301 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9302 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9303 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9304 	}
9305 
9306 	/* Reprobe ASF enable state.  */
9307 	tg3_flag_clear(tp, ENABLE_ASF);
9308 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9309 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9310 
9311 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9312 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9313 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9314 		u32 nic_cfg;
9315 
9316 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9317 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9318 			tg3_flag_set(tp, ENABLE_ASF);
9319 			tp->last_event_jiffies = jiffies;
9320 			if (tg3_flag(tp, 5750_PLUS))
9321 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9322 
9323 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9324 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9325 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9326 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9327 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9328 		}
9329 	}
9330 
9331 	return 0;
9332 }
9333 
9334 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9335 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9336 static void __tg3_set_rx_mode(struct net_device *);
9337 
9338 /* tp->lock is held. */
9339 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9340 {
9341 	int err;
9342 
9343 	tg3_stop_fw(tp);
9344 
9345 	tg3_write_sig_pre_reset(tp, kind);
9346 
9347 	tg3_abort_hw(tp, silent);
9348 	err = tg3_chip_reset(tp);
9349 
9350 	__tg3_set_mac_addr(tp, false);
9351 
9352 	tg3_write_sig_legacy(tp, kind);
9353 	tg3_write_sig_post_reset(tp, kind);
9354 
9355 	if (tp->hw_stats) {
9356 		/* Save the stats across chip resets... */
9357 		tg3_get_nstats(tp, &tp->net_stats_prev);
9358 		tg3_get_estats(tp, &tp->estats_prev);
9359 
9360 		/* And make sure the next sample is new data */
9361 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9362 	}
9363 
9364 	return err;
9365 }
9366 
9367 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9368 {
9369 	struct tg3 *tp = netdev_priv(dev);
9370 	struct sockaddr *addr = p;
9371 	int err = 0;
9372 	bool skip_mac_1 = false;
9373 
9374 	if (!is_valid_ether_addr(addr->sa_data))
9375 		return -EADDRNOTAVAIL;
9376 
9377 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9378 
9379 	if (!netif_running(dev))
9380 		return 0;
9381 
9382 	if (tg3_flag(tp, ENABLE_ASF)) {
9383 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9384 
9385 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9386 		addr0_low = tr32(MAC_ADDR_0_LOW);
9387 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9388 		addr1_low = tr32(MAC_ADDR_1_LOW);
9389 
9390 		/* Skip MAC addr 1 if ASF is using it. */
9391 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9392 		    !(addr1_high == 0 && addr1_low == 0))
9393 			skip_mac_1 = true;
9394 	}
9395 	spin_lock_bh(&tp->lock);
9396 	__tg3_set_mac_addr(tp, skip_mac_1);
9397 	__tg3_set_rx_mode(dev);
9398 	spin_unlock_bh(&tp->lock);
9399 
9400 	return err;
9401 }
9402 
9403 /* tp->lock is held. */
9404 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9405 			   dma_addr_t mapping, u32 maxlen_flags,
9406 			   u32 nic_addr)
9407 {
9408 	tg3_write_mem(tp,
9409 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9410 		      ((u64) mapping >> 32));
9411 	tg3_write_mem(tp,
9412 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9413 		      ((u64) mapping & 0xffffffff));
9414 	tg3_write_mem(tp,
9415 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9416 		       maxlen_flags);
9417 
9418 	if (!tg3_flag(tp, 5705_PLUS))
9419 		tg3_write_mem(tp,
9420 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9421 			      nic_addr);
9422 }
9423 
9424 
9425 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9426 {
9427 	int i = 0;
9428 
9429 	if (!tg3_flag(tp, ENABLE_TSS)) {
9430 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9431 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9432 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9433 	} else {
9434 		tw32(HOSTCC_TXCOL_TICKS, 0);
9435 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9436 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9437 
9438 		for (; i < tp->txq_cnt; i++) {
9439 			u32 reg;
9440 
9441 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9442 			tw32(reg, ec->tx_coalesce_usecs);
9443 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9444 			tw32(reg, ec->tx_max_coalesced_frames);
9445 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9446 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9447 		}
9448 	}
9449 
9450 	for (; i < tp->irq_max - 1; i++) {
9451 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9452 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9453 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9454 	}
9455 }
9456 
9457 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9458 {
9459 	int i = 0;
9460 	u32 limit = tp->rxq_cnt;
9461 
9462 	if (!tg3_flag(tp, ENABLE_RSS)) {
9463 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9464 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9465 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9466 		limit--;
9467 	} else {
9468 		tw32(HOSTCC_RXCOL_TICKS, 0);
9469 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9470 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9471 	}
9472 
9473 	for (; i < limit; i++) {
9474 		u32 reg;
9475 
9476 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9477 		tw32(reg, ec->rx_coalesce_usecs);
9478 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9479 		tw32(reg, ec->rx_max_coalesced_frames);
9480 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9481 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9482 	}
9483 
9484 	for (; i < tp->irq_max - 1; i++) {
9485 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9486 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9487 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9488 	}
9489 }
9490 
9491 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9492 {
9493 	tg3_coal_tx_init(tp, ec);
9494 	tg3_coal_rx_init(tp, ec);
9495 
9496 	if (!tg3_flag(tp, 5705_PLUS)) {
9497 		u32 val = ec->stats_block_coalesce_usecs;
9498 
9499 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9500 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9501 
9502 		if (!tp->link_up)
9503 			val = 0;
9504 
9505 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9506 	}
9507 }
9508 
9509 /* tp->lock is held. */
9510 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9511 {
9512 	u32 txrcb, limit;
9513 
9514 	/* Disable all transmit rings but the first. */
9515 	if (!tg3_flag(tp, 5705_PLUS))
9516 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9517 	else if (tg3_flag(tp, 5717_PLUS))
9518 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9519 	else if (tg3_flag(tp, 57765_CLASS) ||
9520 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9521 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9522 	else
9523 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9524 
9525 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9526 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9527 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9528 			      BDINFO_FLAGS_DISABLED);
9529 }
9530 
9531 /* tp->lock is held. */
9532 static void tg3_tx_rcbs_init(struct tg3 *tp)
9533 {
9534 	int i = 0;
9535 	u32 txrcb = NIC_SRAM_SEND_RCB;
9536 
9537 	if (tg3_flag(tp, ENABLE_TSS))
9538 		i++;
9539 
9540 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9541 		struct tg3_napi *tnapi = &tp->napi[i];
9542 
9543 		if (!tnapi->tx_ring)
9544 			continue;
9545 
9546 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9547 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9548 			       NIC_SRAM_TX_BUFFER_DESC);
9549 	}
9550 }
9551 
9552 /* tp->lock is held. */
9553 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9554 {
9555 	u32 rxrcb, limit;
9556 
9557 	/* Disable all receive return rings but the first. */
9558 	if (tg3_flag(tp, 5717_PLUS))
9559 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9560 	else if (!tg3_flag(tp, 5705_PLUS))
9561 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9562 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9563 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9564 		 tg3_flag(tp, 57765_CLASS))
9565 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9566 	else
9567 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9568 
9569 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9570 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9571 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9572 			      BDINFO_FLAGS_DISABLED);
9573 }
9574 
9575 /* tp->lock is held. */
9576 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9577 {
9578 	int i = 0;
9579 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9580 
9581 	if (tg3_flag(tp, ENABLE_RSS))
9582 		i++;
9583 
9584 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9585 		struct tg3_napi *tnapi = &tp->napi[i];
9586 
9587 		if (!tnapi->rx_rcb)
9588 			continue;
9589 
9590 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9591 			       (tp->rx_ret_ring_mask + 1) <<
9592 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9593 	}
9594 }
9595 
9596 /* tp->lock is held. */
9597 static void tg3_rings_reset(struct tg3 *tp)
9598 {
9599 	int i;
9600 	u32 stblk;
9601 	struct tg3_napi *tnapi = &tp->napi[0];
9602 
9603 	tg3_tx_rcbs_disable(tp);
9604 
9605 	tg3_rx_ret_rcbs_disable(tp);
9606 
9607 	/* Disable interrupts */
9608 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9609 	tp->napi[0].chk_msi_cnt = 0;
9610 	tp->napi[0].last_rx_cons = 0;
9611 	tp->napi[0].last_tx_cons = 0;
9612 
9613 	/* Zero mailbox registers. */
9614 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9615 		for (i = 1; i < tp->irq_max; i++) {
9616 			tp->napi[i].tx_prod = 0;
9617 			tp->napi[i].tx_cons = 0;
9618 			if (tg3_flag(tp, ENABLE_TSS))
9619 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9620 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9621 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9622 			tp->napi[i].chk_msi_cnt = 0;
9623 			tp->napi[i].last_rx_cons = 0;
9624 			tp->napi[i].last_tx_cons = 0;
9625 		}
9626 		if (!tg3_flag(tp, ENABLE_TSS))
9627 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9628 	} else {
9629 		tp->napi[0].tx_prod = 0;
9630 		tp->napi[0].tx_cons = 0;
9631 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9632 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9633 	}
9634 
9635 	/* Make sure the NIC-based send BD rings are disabled. */
9636 	if (!tg3_flag(tp, 5705_PLUS)) {
9637 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9638 		for (i = 0; i < 16; i++)
9639 			tw32_tx_mbox(mbox + i * 8, 0);
9640 	}
9641 
9642 	/* Clear status block in ram. */
9643 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9644 
9645 	/* Set status block DMA address */
9646 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9647 	     ((u64) tnapi->status_mapping >> 32));
9648 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9649 	     ((u64) tnapi->status_mapping & 0xffffffff));
9650 
9651 	stblk = HOSTCC_STATBLCK_RING1;
9652 
9653 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9654 		u64 mapping = (u64)tnapi->status_mapping;
9655 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9656 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9657 		stblk += 8;
9658 
9659 		/* Clear status block in ram. */
9660 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9661 	}
9662 
9663 	tg3_tx_rcbs_init(tp);
9664 	tg3_rx_ret_rcbs_init(tp);
9665 }
9666 
9667 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9668 {
9669 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9670 
9671 	if (!tg3_flag(tp, 5750_PLUS) ||
9672 	    tg3_flag(tp, 5780_CLASS) ||
9673 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9674 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9675 	    tg3_flag(tp, 57765_PLUS))
9676 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9677 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9678 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9679 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9680 	else
9681 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9682 
9683 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9684 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9685 
9686 	val = min(nic_rep_thresh, host_rep_thresh);
9687 	tw32(RCVBDI_STD_THRESH, val);
9688 
9689 	if (tg3_flag(tp, 57765_PLUS))
9690 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9691 
9692 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9693 		return;
9694 
9695 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9696 
9697 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9698 
9699 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9700 	tw32(RCVBDI_JUMBO_THRESH, val);
9701 
9702 	if (tg3_flag(tp, 57765_PLUS))
9703 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9704 }
9705 
9706 static inline u32 calc_crc(unsigned char *buf, int len)
9707 {
9708 	u32 reg;
9709 	u32 tmp;
9710 	int j, k;
9711 
9712 	reg = 0xffffffff;
9713 
9714 	for (j = 0; j < len; j++) {
9715 		reg ^= buf[j];
9716 
9717 		for (k = 0; k < 8; k++) {
9718 			tmp = reg & 0x01;
9719 
9720 			reg >>= 1;
9721 
9722 			if (tmp)
9723 				reg ^= 0xedb88320;
9724 		}
9725 	}
9726 
9727 	return ~reg;
9728 }
9729 
9730 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9731 {
9732 	/* accept or reject all multicast frames */
9733 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9734 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9735 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9736 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9737 }
9738 
9739 static void __tg3_set_rx_mode(struct net_device *dev)
9740 {
9741 	struct tg3 *tp = netdev_priv(dev);
9742 	u32 rx_mode;
9743 
9744 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9745 				  RX_MODE_KEEP_VLAN_TAG);
9746 
9747 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9748 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9749 	 * flag clear.
9750 	 */
9751 	if (!tg3_flag(tp, ENABLE_ASF))
9752 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9753 #endif
9754 
9755 	if (dev->flags & IFF_PROMISC) {
9756 		/* Promiscuous mode. */
9757 		rx_mode |= RX_MODE_PROMISC;
9758 	} else if (dev->flags & IFF_ALLMULTI) {
9759 		/* Accept all multicast. */
9760 		tg3_set_multi(tp, 1);
9761 	} else if (netdev_mc_empty(dev)) {
9762 		/* Reject all multicast. */
9763 		tg3_set_multi(tp, 0);
9764 	} else {
9765 		/* Accept one or more multicast(s). */
9766 		struct netdev_hw_addr *ha;
9767 		u32 mc_filter[4] = { 0, };
9768 		u32 regidx;
9769 		u32 bit;
9770 		u32 crc;
9771 
9772 		netdev_for_each_mc_addr(ha, dev) {
9773 			crc = calc_crc(ha->addr, ETH_ALEN);
9774 			bit = ~crc & 0x7f;
9775 			regidx = (bit & 0x60) >> 5;
9776 			bit &= 0x1f;
9777 			mc_filter[regidx] |= (1 << bit);
9778 		}
9779 
9780 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9781 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9782 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9783 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9784 	}
9785 
9786 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9787 		rx_mode |= RX_MODE_PROMISC;
9788 	} else if (!(dev->flags & IFF_PROMISC)) {
9789 		/* Add all entries into to the mac addr filter list */
9790 		int i = 0;
9791 		struct netdev_hw_addr *ha;
9792 
9793 		netdev_for_each_uc_addr(ha, dev) {
9794 			__tg3_set_one_mac_addr(tp, ha->addr,
9795 					       i + TG3_UCAST_ADDR_IDX(tp));
9796 			i++;
9797 		}
9798 	}
9799 
9800 	if (rx_mode != tp->rx_mode) {
9801 		tp->rx_mode = rx_mode;
9802 		tw32_f(MAC_RX_MODE, rx_mode);
9803 		udelay(10);
9804 	}
9805 }
9806 
9807 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9808 {
9809 	int i;
9810 
9811 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9812 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9813 }
9814 
9815 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9816 {
9817 	int i;
9818 
9819 	if (!tg3_flag(tp, SUPPORT_MSIX))
9820 		return;
9821 
9822 	if (tp->rxq_cnt == 1) {
9823 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9824 		return;
9825 	}
9826 
9827 	/* Validate table against current IRQ count */
9828 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9829 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9830 			break;
9831 	}
9832 
9833 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9834 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9835 }
9836 
9837 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9838 {
9839 	int i = 0;
9840 	u32 reg = MAC_RSS_INDIR_TBL_0;
9841 
9842 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9843 		u32 val = tp->rss_ind_tbl[i];
9844 		i++;
9845 		for (; i % 8; i++) {
9846 			val <<= 4;
9847 			val |= tp->rss_ind_tbl[i];
9848 		}
9849 		tw32(reg, val);
9850 		reg += 4;
9851 	}
9852 }
9853 
9854 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9855 {
9856 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9857 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9858 	else
9859 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9860 }
9861 
9862 /* tp->lock is held. */
9863 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9864 {
9865 	u32 val, rdmac_mode;
9866 	int i, err, limit;
9867 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9868 
9869 	tg3_disable_ints(tp);
9870 
9871 	tg3_stop_fw(tp);
9872 
9873 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9874 
9875 	if (tg3_flag(tp, INIT_COMPLETE))
9876 		tg3_abort_hw(tp, 1);
9877 
9878 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9879 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9880 		tg3_phy_pull_config(tp);
9881 		tg3_eee_pull_config(tp, NULL);
9882 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9883 	}
9884 
9885 	/* Enable MAC control of LPI */
9886 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9887 		tg3_setup_eee(tp);
9888 
9889 	if (reset_phy)
9890 		tg3_phy_reset(tp);
9891 
9892 	err = tg3_chip_reset(tp);
9893 	if (err)
9894 		return err;
9895 
9896 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9897 
9898 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9899 		val = tr32(TG3_CPMU_CTRL);
9900 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9901 		tw32(TG3_CPMU_CTRL, val);
9902 
9903 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9904 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9905 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9906 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9907 
9908 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9909 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9910 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9911 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9912 
9913 		val = tr32(TG3_CPMU_HST_ACC);
9914 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9915 		val |= CPMU_HST_ACC_MACCLK_6_25;
9916 		tw32(TG3_CPMU_HST_ACC, val);
9917 	}
9918 
9919 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9920 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9921 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9922 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9923 		tw32(PCIE_PWR_MGMT_THRESH, val);
9924 
9925 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9926 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9927 
9928 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9929 
9930 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9931 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9932 	}
9933 
9934 	if (tg3_flag(tp, L1PLLPD_EN)) {
9935 		u32 grc_mode = tr32(GRC_MODE);
9936 
9937 		/* Access the lower 1K of PL PCIE block registers. */
9938 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9939 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9940 
9941 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9942 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9943 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9944 
9945 		tw32(GRC_MODE, grc_mode);
9946 	}
9947 
9948 	if (tg3_flag(tp, 57765_CLASS)) {
9949 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9950 			u32 grc_mode = tr32(GRC_MODE);
9951 
9952 			/* Access the lower 1K of PL PCIE block registers. */
9953 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9954 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9955 
9956 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9957 				   TG3_PCIE_PL_LO_PHYCTL5);
9958 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9959 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9960 
9961 			tw32(GRC_MODE, grc_mode);
9962 		}
9963 
9964 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9965 			u32 grc_mode;
9966 
9967 			/* Fix transmit hangs */
9968 			val = tr32(TG3_CPMU_PADRNG_CTL);
9969 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9970 			tw32(TG3_CPMU_PADRNG_CTL, val);
9971 
9972 			grc_mode = tr32(GRC_MODE);
9973 
9974 			/* Access the lower 1K of DL PCIE block registers. */
9975 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9976 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9977 
9978 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9979 				   TG3_PCIE_DL_LO_FTSMAX);
9980 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9981 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9982 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9983 
9984 			tw32(GRC_MODE, grc_mode);
9985 		}
9986 
9987 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9988 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9989 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9990 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9991 	}
9992 
9993 	/* This works around an issue with Athlon chipsets on
9994 	 * B3 tigon3 silicon.  This bit has no effect on any
9995 	 * other revision.  But do not set this on PCI Express
9996 	 * chips and don't even touch the clocks if the CPMU is present.
9997 	 */
9998 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9999 		if (!tg3_flag(tp, PCI_EXPRESS))
10000 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10001 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10002 	}
10003 
10004 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10005 	    tg3_flag(tp, PCIX_MODE)) {
10006 		val = tr32(TG3PCI_PCISTATE);
10007 		val |= PCISTATE_RETRY_SAME_DMA;
10008 		tw32(TG3PCI_PCISTATE, val);
10009 	}
10010 
10011 	if (tg3_flag(tp, ENABLE_APE)) {
10012 		/* Allow reads and writes to the
10013 		 * APE register and memory space.
10014 		 */
10015 		val = tr32(TG3PCI_PCISTATE);
10016 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10017 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10018 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10019 		tw32(TG3PCI_PCISTATE, val);
10020 	}
10021 
10022 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10023 		/* Enable some hw fixes.  */
10024 		val = tr32(TG3PCI_MSI_DATA);
10025 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10026 		tw32(TG3PCI_MSI_DATA, val);
10027 	}
10028 
10029 	/* Descriptor ring init may make accesses to the
10030 	 * NIC SRAM area to setup the TX descriptors, so we
10031 	 * can only do this after the hardware has been
10032 	 * successfully reset.
10033 	 */
10034 	err = tg3_init_rings(tp);
10035 	if (err)
10036 		return err;
10037 
10038 	if (tg3_flag(tp, 57765_PLUS)) {
10039 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10040 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10041 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10042 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10043 		if (!tg3_flag(tp, 57765_CLASS) &&
10044 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10045 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10046 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10047 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10048 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10049 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10050 		/* This value is determined during the probe time DMA
10051 		 * engine test, tg3_test_dma.
10052 		 */
10053 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10054 	}
10055 
10056 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10057 			  GRC_MODE_4X_NIC_SEND_RINGS |
10058 			  GRC_MODE_NO_TX_PHDR_CSUM |
10059 			  GRC_MODE_NO_RX_PHDR_CSUM);
10060 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10061 
10062 	/* Pseudo-header checksum is done by hardware logic and not
10063 	 * the offload processers, so make the chip do the pseudo-
10064 	 * header checksums on receive.  For transmit it is more
10065 	 * convenient to do the pseudo-header checksum in software
10066 	 * as Linux does that on transmit for us in all cases.
10067 	 */
10068 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10069 
10070 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10071 	if (tp->rxptpctl)
10072 		tw32(TG3_RX_PTP_CTL,
10073 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10074 
10075 	if (tg3_flag(tp, PTP_CAPABLE))
10076 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10077 
10078 	tw32(GRC_MODE, tp->grc_mode | val);
10079 
10080 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10081 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10082 	 * to 2048 instead of default 4096.
10083 	 */
10084 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10085 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10086 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10087 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10088 	}
10089 
10090 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10091 	val = tr32(GRC_MISC_CFG);
10092 	val &= ~0xff;
10093 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10094 	tw32(GRC_MISC_CFG, val);
10095 
10096 	/* Initialize MBUF/DESC pool. */
10097 	if (tg3_flag(tp, 5750_PLUS)) {
10098 		/* Do nothing.  */
10099 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10100 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10101 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10102 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10103 		else
10104 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10105 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10106 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10107 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10108 		int fw_len;
10109 
10110 		fw_len = tp->fw_len;
10111 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10112 		tw32(BUFMGR_MB_POOL_ADDR,
10113 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10114 		tw32(BUFMGR_MB_POOL_SIZE,
10115 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10116 	}
10117 
10118 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10119 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10120 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10121 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10122 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10123 		tw32(BUFMGR_MB_HIGH_WATER,
10124 		     tp->bufmgr_config.mbuf_high_water);
10125 	} else {
10126 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10127 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10128 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10129 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10130 		tw32(BUFMGR_MB_HIGH_WATER,
10131 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10132 	}
10133 	tw32(BUFMGR_DMA_LOW_WATER,
10134 	     tp->bufmgr_config.dma_low_water);
10135 	tw32(BUFMGR_DMA_HIGH_WATER,
10136 	     tp->bufmgr_config.dma_high_water);
10137 
10138 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10139 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10140 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10141 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10142 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10143 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10144 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10145 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10146 	tw32(BUFMGR_MODE, val);
10147 	for (i = 0; i < 2000; i++) {
10148 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10149 			break;
10150 		udelay(10);
10151 	}
10152 	if (i >= 2000) {
10153 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10154 		return -ENODEV;
10155 	}
10156 
10157 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10158 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10159 
10160 	tg3_setup_rxbd_thresholds(tp);
10161 
10162 	/* Initialize TG3_BDINFO's at:
10163 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10164 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10165 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10166 	 *
10167 	 * like so:
10168 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10169 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10170 	 *                              ring attribute flags
10171 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10172 	 *
10173 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10174 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10175 	 *
10176 	 * The size of each ring is fixed in the firmware, but the location is
10177 	 * configurable.
10178 	 */
10179 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10180 	     ((u64) tpr->rx_std_mapping >> 32));
10181 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10182 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10183 	if (!tg3_flag(tp, 5717_PLUS))
10184 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10185 		     NIC_SRAM_RX_BUFFER_DESC);
10186 
10187 	/* Disable the mini ring */
10188 	if (!tg3_flag(tp, 5705_PLUS))
10189 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10190 		     BDINFO_FLAGS_DISABLED);
10191 
10192 	/* Program the jumbo buffer descriptor ring control
10193 	 * blocks on those devices that have them.
10194 	 */
10195 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10196 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10197 
10198 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10199 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10200 			     ((u64) tpr->rx_jmb_mapping >> 32));
10201 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10202 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10203 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10204 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10205 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10206 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10207 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10208 			    tg3_flag(tp, 57765_CLASS) ||
10209 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10210 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10211 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10212 		} else {
10213 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10214 			     BDINFO_FLAGS_DISABLED);
10215 		}
10216 
10217 		if (tg3_flag(tp, 57765_PLUS)) {
10218 			val = TG3_RX_STD_RING_SIZE(tp);
10219 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10220 			val |= (TG3_RX_STD_DMA_SZ << 2);
10221 		} else
10222 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10223 	} else
10224 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10225 
10226 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10227 
10228 	tpr->rx_std_prod_idx = tp->rx_pending;
10229 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10230 
10231 	tpr->rx_jmb_prod_idx =
10232 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10233 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10234 
10235 	tg3_rings_reset(tp);
10236 
10237 	/* Initialize MAC address and backoff seed. */
10238 	__tg3_set_mac_addr(tp, false);
10239 
10240 	/* MTU + ethernet header + FCS + optional VLAN tag */
10241 	tw32(MAC_RX_MTU_SIZE,
10242 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10243 
10244 	/* The slot time is changed by tg3_setup_phy if we
10245 	 * run at gigabit with half duplex.
10246 	 */
10247 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10248 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10249 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10250 
10251 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10252 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10253 		val |= tr32(MAC_TX_LENGTHS) &
10254 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10255 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10256 
10257 	tw32(MAC_TX_LENGTHS, val);
10258 
10259 	/* Receive rules. */
10260 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10261 	tw32(RCVLPC_CONFIG, 0x0181);
10262 
10263 	/* Calculate RDMAC_MODE setting early, we need it to determine
10264 	 * the RCVLPC_STATE_ENABLE mask.
10265 	 */
10266 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10267 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10268 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10269 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10270 		      RDMAC_MODE_LNGREAD_ENAB);
10271 
10272 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10273 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10274 
10275 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10276 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10277 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10278 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10279 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10280 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10281 
10282 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10283 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10284 		if (tg3_flag(tp, TSO_CAPABLE) &&
10285 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10286 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10287 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10288 			   !tg3_flag(tp, IS_5788)) {
10289 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10290 		}
10291 	}
10292 
10293 	if (tg3_flag(tp, PCI_EXPRESS))
10294 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10295 
10296 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10297 		tp->dma_limit = 0;
10298 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10299 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10300 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10301 		}
10302 	}
10303 
10304 	if (tg3_flag(tp, HW_TSO_1) ||
10305 	    tg3_flag(tp, HW_TSO_2) ||
10306 	    tg3_flag(tp, HW_TSO_3))
10307 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10308 
10309 	if (tg3_flag(tp, 57765_PLUS) ||
10310 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10311 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10312 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10313 
10314 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10315 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10316 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10317 
10318 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10319 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10320 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10321 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10322 	    tg3_flag(tp, 57765_PLUS)) {
10323 		u32 tgtreg;
10324 
10325 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10326 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10327 		else
10328 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10329 
10330 		val = tr32(tgtreg);
10331 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10332 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10333 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10334 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10335 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10336 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10337 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10338 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10339 		}
10340 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10341 	}
10342 
10343 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10344 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10345 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10346 		u32 tgtreg;
10347 
10348 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10349 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10350 		else
10351 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10352 
10353 		val = tr32(tgtreg);
10354 		tw32(tgtreg, val |
10355 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10356 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10357 	}
10358 
10359 	/* Receive/send statistics. */
10360 	if (tg3_flag(tp, 5750_PLUS)) {
10361 		val = tr32(RCVLPC_STATS_ENABLE);
10362 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10363 		tw32(RCVLPC_STATS_ENABLE, val);
10364 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10365 		   tg3_flag(tp, TSO_CAPABLE)) {
10366 		val = tr32(RCVLPC_STATS_ENABLE);
10367 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10368 		tw32(RCVLPC_STATS_ENABLE, val);
10369 	} else {
10370 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10371 	}
10372 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10373 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10374 	tw32(SNDDATAI_STATSCTRL,
10375 	     (SNDDATAI_SCTRL_ENABLE |
10376 	      SNDDATAI_SCTRL_FASTUPD));
10377 
10378 	/* Setup host coalescing engine. */
10379 	tw32(HOSTCC_MODE, 0);
10380 	for (i = 0; i < 2000; i++) {
10381 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10382 			break;
10383 		udelay(10);
10384 	}
10385 
10386 	__tg3_set_coalesce(tp, &tp->coal);
10387 
10388 	if (!tg3_flag(tp, 5705_PLUS)) {
10389 		/* Status/statistics block address.  See tg3_timer,
10390 		 * the tg3_periodic_fetch_stats call there, and
10391 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10392 		 */
10393 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10394 		     ((u64) tp->stats_mapping >> 32));
10395 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10396 		     ((u64) tp->stats_mapping & 0xffffffff));
10397 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10398 
10399 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10400 
10401 		/* Clear statistics and status block memory areas */
10402 		for (i = NIC_SRAM_STATS_BLK;
10403 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10404 		     i += sizeof(u32)) {
10405 			tg3_write_mem(tp, i, 0);
10406 			udelay(40);
10407 		}
10408 	}
10409 
10410 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10411 
10412 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10413 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10414 	if (!tg3_flag(tp, 5705_PLUS))
10415 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10416 
10417 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10418 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10419 		/* reset to prevent losing 1st rx packet intermittently */
10420 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10421 		udelay(10);
10422 	}
10423 
10424 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10425 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10426 			MAC_MODE_FHDE_ENABLE;
10427 	if (tg3_flag(tp, ENABLE_APE))
10428 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10429 	if (!tg3_flag(tp, 5705_PLUS) &&
10430 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10431 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10432 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10433 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10434 	udelay(40);
10435 
10436 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10437 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10438 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10439 	 * whether used as inputs or outputs, are set by boot code after
10440 	 * reset.
10441 	 */
10442 	if (!tg3_flag(tp, IS_NIC)) {
10443 		u32 gpio_mask;
10444 
10445 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10446 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10447 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10448 
10449 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10450 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10451 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10452 
10453 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10454 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10455 
10456 		tp->grc_local_ctrl &= ~gpio_mask;
10457 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10458 
10459 		/* GPIO1 must be driven high for eeprom write protect */
10460 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10461 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10462 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10463 	}
10464 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10465 	udelay(100);
10466 
10467 	if (tg3_flag(tp, USING_MSIX)) {
10468 		val = tr32(MSGINT_MODE);
10469 		val |= MSGINT_MODE_ENABLE;
10470 		if (tp->irq_cnt > 1)
10471 			val |= MSGINT_MODE_MULTIVEC_EN;
10472 		if (!tg3_flag(tp, 1SHOT_MSI))
10473 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10474 		tw32(MSGINT_MODE, val);
10475 	}
10476 
10477 	if (!tg3_flag(tp, 5705_PLUS)) {
10478 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10479 		udelay(40);
10480 	}
10481 
10482 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10483 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10484 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10485 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10486 	       WDMAC_MODE_LNGREAD_ENAB);
10487 
10488 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10489 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10490 		if (tg3_flag(tp, TSO_CAPABLE) &&
10491 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10492 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10493 			/* nothing */
10494 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10495 			   !tg3_flag(tp, IS_5788)) {
10496 			val |= WDMAC_MODE_RX_ACCEL;
10497 		}
10498 	}
10499 
10500 	/* Enable host coalescing bug fix */
10501 	if (tg3_flag(tp, 5755_PLUS))
10502 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10503 
10504 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10505 		val |= WDMAC_MODE_BURST_ALL_DATA;
10506 
10507 	tw32_f(WDMAC_MODE, val);
10508 	udelay(40);
10509 
10510 	if (tg3_flag(tp, PCIX_MODE)) {
10511 		u16 pcix_cmd;
10512 
10513 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10514 				     &pcix_cmd);
10515 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10516 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10517 			pcix_cmd |= PCI_X_CMD_READ_2K;
10518 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10519 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10520 			pcix_cmd |= PCI_X_CMD_READ_2K;
10521 		}
10522 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10523 				      pcix_cmd);
10524 	}
10525 
10526 	tw32_f(RDMAC_MODE, rdmac_mode);
10527 	udelay(40);
10528 
10529 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10530 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10531 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10532 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10533 				break;
10534 		}
10535 		if (i < TG3_NUM_RDMA_CHANNELS) {
10536 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10537 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10538 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10539 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10540 		}
10541 	}
10542 
10543 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10544 	if (!tg3_flag(tp, 5705_PLUS))
10545 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10546 
10547 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10548 		tw32(SNDDATAC_MODE,
10549 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10550 	else
10551 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10552 
10553 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10554 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10555 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10556 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10557 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10558 	tw32(RCVDBDI_MODE, val);
10559 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10560 	if (tg3_flag(tp, HW_TSO_1) ||
10561 	    tg3_flag(tp, HW_TSO_2) ||
10562 	    tg3_flag(tp, HW_TSO_3))
10563 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10564 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10565 	if (tg3_flag(tp, ENABLE_TSS))
10566 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10567 	tw32(SNDBDI_MODE, val);
10568 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10569 
10570 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10571 		err = tg3_load_5701_a0_firmware_fix(tp);
10572 		if (err)
10573 			return err;
10574 	}
10575 
10576 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10577 		/* Ignore any errors for the firmware download. If download
10578 		 * fails, the device will operate with EEE disabled
10579 		 */
10580 		tg3_load_57766_firmware(tp);
10581 	}
10582 
10583 	if (tg3_flag(tp, TSO_CAPABLE)) {
10584 		err = tg3_load_tso_firmware(tp);
10585 		if (err)
10586 			return err;
10587 	}
10588 
10589 	tp->tx_mode = TX_MODE_ENABLE;
10590 
10591 	if (tg3_flag(tp, 5755_PLUS) ||
10592 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10593 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10594 
10595 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10596 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10597 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10598 		tp->tx_mode &= ~val;
10599 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10600 	}
10601 
10602 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10603 	udelay(100);
10604 
10605 	if (tg3_flag(tp, ENABLE_RSS)) {
10606 		u32 rss_key[10];
10607 
10608 		tg3_rss_write_indir_tbl(tp);
10609 
10610 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10611 
10612 		for (i = 0; i < 10 ; i++)
10613 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10614 	}
10615 
10616 	tp->rx_mode = RX_MODE_ENABLE;
10617 	if (tg3_flag(tp, 5755_PLUS))
10618 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10619 
10620 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10621 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10622 
10623 	if (tg3_flag(tp, ENABLE_RSS))
10624 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10625 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10626 			       RX_MODE_RSS_IPV6_HASH_EN |
10627 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10628 			       RX_MODE_RSS_IPV4_HASH_EN |
10629 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10630 
10631 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10632 	udelay(10);
10633 
10634 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10635 
10636 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10637 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10638 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10639 		udelay(10);
10640 	}
10641 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10642 	udelay(10);
10643 
10644 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10645 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10646 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10647 			/* Set drive transmission level to 1.2V  */
10648 			/* only if the signal pre-emphasis bit is not set  */
10649 			val = tr32(MAC_SERDES_CFG);
10650 			val &= 0xfffff000;
10651 			val |= 0x880;
10652 			tw32(MAC_SERDES_CFG, val);
10653 		}
10654 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10655 			tw32(MAC_SERDES_CFG, 0x616000);
10656 	}
10657 
10658 	/* Prevent chip from dropping frames when flow control
10659 	 * is enabled.
10660 	 */
10661 	if (tg3_flag(tp, 57765_CLASS))
10662 		val = 1;
10663 	else
10664 		val = 2;
10665 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10666 
10667 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10668 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10669 		/* Use hardware link auto-negotiation */
10670 		tg3_flag_set(tp, HW_AUTONEG);
10671 	}
10672 
10673 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10674 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10675 		u32 tmp;
10676 
10677 		tmp = tr32(SERDES_RX_CTRL);
10678 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10679 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10680 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10681 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10682 	}
10683 
10684 	if (!tg3_flag(tp, USE_PHYLIB)) {
10685 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10686 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10687 
10688 		err = tg3_setup_phy(tp, false);
10689 		if (err)
10690 			return err;
10691 
10692 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10693 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10694 			u32 tmp;
10695 
10696 			/* Clear CRC stats. */
10697 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10698 				tg3_writephy(tp, MII_TG3_TEST1,
10699 					     tmp | MII_TG3_TEST1_CRC_EN);
10700 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10701 			}
10702 		}
10703 	}
10704 
10705 	__tg3_set_rx_mode(tp->dev);
10706 
10707 	/* Initialize receive rules. */
10708 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10709 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10710 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10711 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10712 
10713 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10714 		limit = 8;
10715 	else
10716 		limit = 16;
10717 	if (tg3_flag(tp, ENABLE_ASF))
10718 		limit -= 4;
10719 	switch (limit) {
10720 	case 16:
10721 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10722 	case 15:
10723 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10724 	case 14:
10725 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10726 	case 13:
10727 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10728 	case 12:
10729 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10730 	case 11:
10731 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10732 	case 10:
10733 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10734 	case 9:
10735 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10736 	case 8:
10737 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10738 	case 7:
10739 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10740 	case 6:
10741 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10742 	case 5:
10743 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10744 	case 4:
10745 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10746 	case 3:
10747 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10748 	case 2:
10749 	case 1:
10750 
10751 	default:
10752 		break;
10753 	}
10754 
10755 	if (tg3_flag(tp, ENABLE_APE))
10756 		/* Write our heartbeat update interval to APE. */
10757 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10758 				APE_HOST_HEARTBEAT_INT_5SEC);
10759 
10760 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10761 
10762 	return 0;
10763 }
10764 
10765 /* Called at device open time to get the chip ready for
10766  * packet processing.  Invoked with tp->lock held.
10767  */
10768 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10769 {
10770 	/* Chip may have been just powered on. If so, the boot code may still
10771 	 * be running initialization. Wait for it to finish to avoid races in
10772 	 * accessing the hardware.
10773 	 */
10774 	tg3_enable_register_access(tp);
10775 	tg3_poll_fw(tp);
10776 
10777 	tg3_switch_clocks(tp);
10778 
10779 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10780 
10781 	return tg3_reset_hw(tp, reset_phy);
10782 }
10783 
10784 #ifdef CONFIG_TIGON3_HWMON
10785 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10786 {
10787 	int i;
10788 
10789 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10790 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10791 
10792 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10793 		off += len;
10794 
10795 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10796 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10797 			memset(ocir, 0, TG3_OCIR_LEN);
10798 	}
10799 }
10800 
10801 /* sysfs attributes for hwmon */
10802 static ssize_t tg3_show_temp(struct device *dev,
10803 			     struct device_attribute *devattr, char *buf)
10804 {
10805 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10806 	struct tg3 *tp = dev_get_drvdata(dev);
10807 	u32 temperature;
10808 
10809 	spin_lock_bh(&tp->lock);
10810 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10811 				sizeof(temperature));
10812 	spin_unlock_bh(&tp->lock);
10813 	return sprintf(buf, "%u\n", temperature * 1000);
10814 }
10815 
10816 
10817 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10818 			  TG3_TEMP_SENSOR_OFFSET);
10819 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10820 			  TG3_TEMP_CAUTION_OFFSET);
10821 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10822 			  TG3_TEMP_MAX_OFFSET);
10823 
10824 static struct attribute *tg3_attrs[] = {
10825 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10826 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10827 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10828 	NULL
10829 };
10830 ATTRIBUTE_GROUPS(tg3);
10831 
10832 static void tg3_hwmon_close(struct tg3 *tp)
10833 {
10834 	if (tp->hwmon_dev) {
10835 		hwmon_device_unregister(tp->hwmon_dev);
10836 		tp->hwmon_dev = NULL;
10837 	}
10838 }
10839 
10840 static void tg3_hwmon_open(struct tg3 *tp)
10841 {
10842 	int i;
10843 	u32 size = 0;
10844 	struct pci_dev *pdev = tp->pdev;
10845 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10846 
10847 	tg3_sd_scan_scratchpad(tp, ocirs);
10848 
10849 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10850 		if (!ocirs[i].src_data_length)
10851 			continue;
10852 
10853 		size += ocirs[i].src_hdr_length;
10854 		size += ocirs[i].src_data_length;
10855 	}
10856 
10857 	if (!size)
10858 		return;
10859 
10860 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10861 							  tp, tg3_groups);
10862 	if (IS_ERR(tp->hwmon_dev)) {
10863 		tp->hwmon_dev = NULL;
10864 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10865 	}
10866 }
10867 #else
10868 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10869 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10870 #endif /* CONFIG_TIGON3_HWMON */
10871 
10872 
10873 #define TG3_STAT_ADD32(PSTAT, REG) \
10874 do {	u32 __val = tr32(REG); \
10875 	(PSTAT)->low += __val; \
10876 	if ((PSTAT)->low < __val) \
10877 		(PSTAT)->high += 1; \
10878 } while (0)
10879 
10880 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10881 {
10882 	struct tg3_hw_stats *sp = tp->hw_stats;
10883 
10884 	if (!tp->link_up)
10885 		return;
10886 
10887 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10888 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10889 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10890 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10891 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10892 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10893 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10894 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10895 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10896 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10897 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10898 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10899 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10900 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10901 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10902 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10903 		u32 val;
10904 
10905 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10906 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10907 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10908 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10909 	}
10910 
10911 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10912 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10913 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10914 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10915 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10916 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10917 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10918 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10919 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10920 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10921 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10922 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10923 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10924 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10925 
10926 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10927 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10928 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10929 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10930 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10931 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10932 	} else {
10933 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10934 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10935 		if (val) {
10936 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10937 			sp->rx_discards.low += val;
10938 			if (sp->rx_discards.low < val)
10939 				sp->rx_discards.high += 1;
10940 		}
10941 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10942 	}
10943 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10944 }
10945 
10946 static void tg3_chk_missed_msi(struct tg3 *tp)
10947 {
10948 	u32 i;
10949 
10950 	for (i = 0; i < tp->irq_cnt; i++) {
10951 		struct tg3_napi *tnapi = &tp->napi[i];
10952 
10953 		if (tg3_has_work(tnapi)) {
10954 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10955 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10956 				if (tnapi->chk_msi_cnt < 1) {
10957 					tnapi->chk_msi_cnt++;
10958 					return;
10959 				}
10960 				tg3_msi(0, tnapi);
10961 			}
10962 		}
10963 		tnapi->chk_msi_cnt = 0;
10964 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10965 		tnapi->last_tx_cons = tnapi->tx_cons;
10966 	}
10967 }
10968 
10969 static void tg3_timer(struct timer_list *t)
10970 {
10971 	struct tg3 *tp = from_timer(tp, t, timer);
10972 
10973 	spin_lock(&tp->lock);
10974 
10975 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10976 		spin_unlock(&tp->lock);
10977 		goto restart_timer;
10978 	}
10979 
10980 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10981 	    tg3_flag(tp, 57765_CLASS))
10982 		tg3_chk_missed_msi(tp);
10983 
10984 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10985 		/* BCM4785: Flush posted writes from GbE to host memory. */
10986 		tr32(HOSTCC_MODE);
10987 	}
10988 
10989 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10990 		/* All of this garbage is because when using non-tagged
10991 		 * IRQ status the mailbox/status_block protocol the chip
10992 		 * uses with the cpu is race prone.
10993 		 */
10994 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10995 			tw32(GRC_LOCAL_CTRL,
10996 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10997 		} else {
10998 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10999 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11000 		}
11001 
11002 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11003 			spin_unlock(&tp->lock);
11004 			tg3_reset_task_schedule(tp);
11005 			goto restart_timer;
11006 		}
11007 	}
11008 
11009 	/* This part only runs once per second. */
11010 	if (!--tp->timer_counter) {
11011 		if (tg3_flag(tp, 5705_PLUS))
11012 			tg3_periodic_fetch_stats(tp);
11013 
11014 		if (tp->setlpicnt && !--tp->setlpicnt)
11015 			tg3_phy_eee_enable(tp);
11016 
11017 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11018 			u32 mac_stat;
11019 			int phy_event;
11020 
11021 			mac_stat = tr32(MAC_STATUS);
11022 
11023 			phy_event = 0;
11024 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11025 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11026 					phy_event = 1;
11027 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11028 				phy_event = 1;
11029 
11030 			if (phy_event)
11031 				tg3_setup_phy(tp, false);
11032 		} else if (tg3_flag(tp, POLL_SERDES)) {
11033 			u32 mac_stat = tr32(MAC_STATUS);
11034 			int need_setup = 0;
11035 
11036 			if (tp->link_up &&
11037 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11038 				need_setup = 1;
11039 			}
11040 			if (!tp->link_up &&
11041 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11042 					 MAC_STATUS_SIGNAL_DET))) {
11043 				need_setup = 1;
11044 			}
11045 			if (need_setup) {
11046 				if (!tp->serdes_counter) {
11047 					tw32_f(MAC_MODE,
11048 					     (tp->mac_mode &
11049 					      ~MAC_MODE_PORT_MODE_MASK));
11050 					udelay(40);
11051 					tw32_f(MAC_MODE, tp->mac_mode);
11052 					udelay(40);
11053 				}
11054 				tg3_setup_phy(tp, false);
11055 			}
11056 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11057 			   tg3_flag(tp, 5780_CLASS)) {
11058 			tg3_serdes_parallel_detect(tp);
11059 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11060 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11061 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11062 					 TG3_CPMU_STATUS_LINK_MASK);
11063 
11064 			if (link_up != tp->link_up)
11065 				tg3_setup_phy(tp, false);
11066 		}
11067 
11068 		tp->timer_counter = tp->timer_multiplier;
11069 	}
11070 
11071 	/* Heartbeat is only sent once every 2 seconds.
11072 	 *
11073 	 * The heartbeat is to tell the ASF firmware that the host
11074 	 * driver is still alive.  In the event that the OS crashes,
11075 	 * ASF needs to reset the hardware to free up the FIFO space
11076 	 * that may be filled with rx packets destined for the host.
11077 	 * If the FIFO is full, ASF will no longer function properly.
11078 	 *
11079 	 * Unintended resets have been reported on real time kernels
11080 	 * where the timer doesn't run on time.  Netpoll will also have
11081 	 * same problem.
11082 	 *
11083 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11084 	 * to check the ring condition when the heartbeat is expiring
11085 	 * before doing the reset.  This will prevent most unintended
11086 	 * resets.
11087 	 */
11088 	if (!--tp->asf_counter) {
11089 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11090 			tg3_wait_for_event_ack(tp);
11091 
11092 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11093 				      FWCMD_NICDRV_ALIVE3);
11094 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11095 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11096 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11097 
11098 			tg3_generate_fw_event(tp);
11099 		}
11100 		tp->asf_counter = tp->asf_multiplier;
11101 	}
11102 
11103 	/* Update the APE heartbeat every 5 seconds.*/
11104 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11105 
11106 	spin_unlock(&tp->lock);
11107 
11108 restart_timer:
11109 	tp->timer.expires = jiffies + tp->timer_offset;
11110 	add_timer(&tp->timer);
11111 }
11112 
11113 static void tg3_timer_init(struct tg3 *tp)
11114 {
11115 	if (tg3_flag(tp, TAGGED_STATUS) &&
11116 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11117 	    !tg3_flag(tp, 57765_CLASS))
11118 		tp->timer_offset = HZ;
11119 	else
11120 		tp->timer_offset = HZ / 10;
11121 
11122 	BUG_ON(tp->timer_offset > HZ);
11123 
11124 	tp->timer_multiplier = (HZ / tp->timer_offset);
11125 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11126 			     TG3_FW_UPDATE_FREQ_SEC;
11127 
11128 	timer_setup(&tp->timer, tg3_timer, 0);
11129 }
11130 
11131 static void tg3_timer_start(struct tg3 *tp)
11132 {
11133 	tp->asf_counter   = tp->asf_multiplier;
11134 	tp->timer_counter = tp->timer_multiplier;
11135 
11136 	tp->timer.expires = jiffies + tp->timer_offset;
11137 	add_timer(&tp->timer);
11138 }
11139 
11140 static void tg3_timer_stop(struct tg3 *tp)
11141 {
11142 	del_timer_sync(&tp->timer);
11143 }
11144 
11145 /* Restart hardware after configuration changes, self-test, etc.
11146  * Invoked with tp->lock held.
11147  */
11148 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11149 	__releases(tp->lock)
11150 	__acquires(tp->lock)
11151 {
11152 	int err;
11153 
11154 	err = tg3_init_hw(tp, reset_phy);
11155 	if (err) {
11156 		netdev_err(tp->dev,
11157 			   "Failed to re-initialize device, aborting\n");
11158 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11159 		tg3_full_unlock(tp);
11160 		tg3_timer_stop(tp);
11161 		tp->irq_sync = 0;
11162 		tg3_napi_enable(tp);
11163 		dev_close(tp->dev);
11164 		tg3_full_lock(tp, 0);
11165 	}
11166 	return err;
11167 }
11168 
11169 static void tg3_reset_task(struct work_struct *work)
11170 {
11171 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11172 	int err;
11173 
11174 	rtnl_lock();
11175 	tg3_full_lock(tp, 0);
11176 
11177 	if (!netif_running(tp->dev)) {
11178 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11179 		tg3_full_unlock(tp);
11180 		rtnl_unlock();
11181 		return;
11182 	}
11183 
11184 	tg3_full_unlock(tp);
11185 
11186 	tg3_phy_stop(tp);
11187 
11188 	tg3_netif_stop(tp);
11189 
11190 	tg3_full_lock(tp, 1);
11191 
11192 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11193 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11194 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11195 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11196 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11197 	}
11198 
11199 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11200 	err = tg3_init_hw(tp, true);
11201 	if (err)
11202 		goto out;
11203 
11204 	tg3_netif_start(tp);
11205 
11206 out:
11207 	tg3_full_unlock(tp);
11208 
11209 	if (!err)
11210 		tg3_phy_start(tp);
11211 
11212 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11213 	rtnl_unlock();
11214 }
11215 
11216 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11217 {
11218 	irq_handler_t fn;
11219 	unsigned long flags;
11220 	char *name;
11221 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11222 
11223 	if (tp->irq_cnt == 1)
11224 		name = tp->dev->name;
11225 	else {
11226 		name = &tnapi->irq_lbl[0];
11227 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11228 			snprintf(name, IFNAMSIZ,
11229 				 "%s-txrx-%d", tp->dev->name, irq_num);
11230 		else if (tnapi->tx_buffers)
11231 			snprintf(name, IFNAMSIZ,
11232 				 "%s-tx-%d", tp->dev->name, irq_num);
11233 		else if (tnapi->rx_rcb)
11234 			snprintf(name, IFNAMSIZ,
11235 				 "%s-rx-%d", tp->dev->name, irq_num);
11236 		else
11237 			snprintf(name, IFNAMSIZ,
11238 				 "%s-%d", tp->dev->name, irq_num);
11239 		name[IFNAMSIZ-1] = 0;
11240 	}
11241 
11242 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11243 		fn = tg3_msi;
11244 		if (tg3_flag(tp, 1SHOT_MSI))
11245 			fn = tg3_msi_1shot;
11246 		flags = 0;
11247 	} else {
11248 		fn = tg3_interrupt;
11249 		if (tg3_flag(tp, TAGGED_STATUS))
11250 			fn = tg3_interrupt_tagged;
11251 		flags = IRQF_SHARED;
11252 	}
11253 
11254 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11255 }
11256 
11257 static int tg3_test_interrupt(struct tg3 *tp)
11258 {
11259 	struct tg3_napi *tnapi = &tp->napi[0];
11260 	struct net_device *dev = tp->dev;
11261 	int err, i, intr_ok = 0;
11262 	u32 val;
11263 
11264 	if (!netif_running(dev))
11265 		return -ENODEV;
11266 
11267 	tg3_disable_ints(tp);
11268 
11269 	free_irq(tnapi->irq_vec, tnapi);
11270 
11271 	/*
11272 	 * Turn off MSI one shot mode.  Otherwise this test has no
11273 	 * observable way to know whether the interrupt was delivered.
11274 	 */
11275 	if (tg3_flag(tp, 57765_PLUS)) {
11276 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11277 		tw32(MSGINT_MODE, val);
11278 	}
11279 
11280 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11281 			  IRQF_SHARED, dev->name, tnapi);
11282 	if (err)
11283 		return err;
11284 
11285 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11286 	tg3_enable_ints(tp);
11287 
11288 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11289 	       tnapi->coal_now);
11290 
11291 	for (i = 0; i < 5; i++) {
11292 		u32 int_mbox, misc_host_ctrl;
11293 
11294 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11295 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11296 
11297 		if ((int_mbox != 0) ||
11298 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11299 			intr_ok = 1;
11300 			break;
11301 		}
11302 
11303 		if (tg3_flag(tp, 57765_PLUS) &&
11304 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11305 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11306 
11307 		msleep(10);
11308 	}
11309 
11310 	tg3_disable_ints(tp);
11311 
11312 	free_irq(tnapi->irq_vec, tnapi);
11313 
11314 	err = tg3_request_irq(tp, 0);
11315 
11316 	if (err)
11317 		return err;
11318 
11319 	if (intr_ok) {
11320 		/* Reenable MSI one shot mode. */
11321 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11322 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11323 			tw32(MSGINT_MODE, val);
11324 		}
11325 		return 0;
11326 	}
11327 
11328 	return -EIO;
11329 }
11330 
11331 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11332  * successfully restored
11333  */
11334 static int tg3_test_msi(struct tg3 *tp)
11335 {
11336 	int err;
11337 	u16 pci_cmd;
11338 
11339 	if (!tg3_flag(tp, USING_MSI))
11340 		return 0;
11341 
11342 	/* Turn off SERR reporting in case MSI terminates with Master
11343 	 * Abort.
11344 	 */
11345 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11346 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11347 			      pci_cmd & ~PCI_COMMAND_SERR);
11348 
11349 	err = tg3_test_interrupt(tp);
11350 
11351 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11352 
11353 	if (!err)
11354 		return 0;
11355 
11356 	/* other failures */
11357 	if (err != -EIO)
11358 		return err;
11359 
11360 	/* MSI test failed, go back to INTx mode */
11361 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11362 		    "to INTx mode. Please report this failure to the PCI "
11363 		    "maintainer and include system chipset information\n");
11364 
11365 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11366 
11367 	pci_disable_msi(tp->pdev);
11368 
11369 	tg3_flag_clear(tp, USING_MSI);
11370 	tp->napi[0].irq_vec = tp->pdev->irq;
11371 
11372 	err = tg3_request_irq(tp, 0);
11373 	if (err)
11374 		return err;
11375 
11376 	/* Need to reset the chip because the MSI cycle may have terminated
11377 	 * with Master Abort.
11378 	 */
11379 	tg3_full_lock(tp, 1);
11380 
11381 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11382 	err = tg3_init_hw(tp, true);
11383 
11384 	tg3_full_unlock(tp);
11385 
11386 	if (err)
11387 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11388 
11389 	return err;
11390 }
11391 
11392 static int tg3_request_firmware(struct tg3 *tp)
11393 {
11394 	const struct tg3_firmware_hdr *fw_hdr;
11395 
11396 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11397 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11398 			   tp->fw_needed);
11399 		return -ENOENT;
11400 	}
11401 
11402 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11403 
11404 	/* Firmware blob starts with version numbers, followed by
11405 	 * start address and _full_ length including BSS sections
11406 	 * (which must be longer than the actual data, of course
11407 	 */
11408 
11409 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11410 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11411 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11412 			   tp->fw_len, tp->fw_needed);
11413 		release_firmware(tp->fw);
11414 		tp->fw = NULL;
11415 		return -EINVAL;
11416 	}
11417 
11418 	/* We no longer need firmware; we have it. */
11419 	tp->fw_needed = NULL;
11420 	return 0;
11421 }
11422 
11423 static u32 tg3_irq_count(struct tg3 *tp)
11424 {
11425 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11426 
11427 	if (irq_cnt > 1) {
11428 		/* We want as many rx rings enabled as there are cpus.
11429 		 * In multiqueue MSI-X mode, the first MSI-X vector
11430 		 * only deals with link interrupts, etc, so we add
11431 		 * one to the number of vectors we are requesting.
11432 		 */
11433 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11434 	}
11435 
11436 	return irq_cnt;
11437 }
11438 
11439 static bool tg3_enable_msix(struct tg3 *tp)
11440 {
11441 	int i, rc;
11442 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11443 
11444 	tp->txq_cnt = tp->txq_req;
11445 	tp->rxq_cnt = tp->rxq_req;
11446 	if (!tp->rxq_cnt)
11447 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11448 	if (tp->rxq_cnt > tp->rxq_max)
11449 		tp->rxq_cnt = tp->rxq_max;
11450 
11451 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11452 	 * scheduling of the TX rings can cause starvation of rings with
11453 	 * small packets when other rings have TSO or jumbo packets.
11454 	 */
11455 	if (!tp->txq_req)
11456 		tp->txq_cnt = 1;
11457 
11458 	tp->irq_cnt = tg3_irq_count(tp);
11459 
11460 	for (i = 0; i < tp->irq_max; i++) {
11461 		msix_ent[i].entry  = i;
11462 		msix_ent[i].vector = 0;
11463 	}
11464 
11465 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11466 	if (rc < 0) {
11467 		return false;
11468 	} else if (rc < tp->irq_cnt) {
11469 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11470 			      tp->irq_cnt, rc);
11471 		tp->irq_cnt = rc;
11472 		tp->rxq_cnt = max(rc - 1, 1);
11473 		if (tp->txq_cnt)
11474 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11475 	}
11476 
11477 	for (i = 0; i < tp->irq_max; i++)
11478 		tp->napi[i].irq_vec = msix_ent[i].vector;
11479 
11480 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11481 		pci_disable_msix(tp->pdev);
11482 		return false;
11483 	}
11484 
11485 	if (tp->irq_cnt == 1)
11486 		return true;
11487 
11488 	tg3_flag_set(tp, ENABLE_RSS);
11489 
11490 	if (tp->txq_cnt > 1)
11491 		tg3_flag_set(tp, ENABLE_TSS);
11492 
11493 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11494 
11495 	return true;
11496 }
11497 
11498 static void tg3_ints_init(struct tg3 *tp)
11499 {
11500 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11501 	    !tg3_flag(tp, TAGGED_STATUS)) {
11502 		/* All MSI supporting chips should support tagged
11503 		 * status.  Assert that this is the case.
11504 		 */
11505 		netdev_warn(tp->dev,
11506 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11507 		goto defcfg;
11508 	}
11509 
11510 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11511 		tg3_flag_set(tp, USING_MSIX);
11512 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11513 		tg3_flag_set(tp, USING_MSI);
11514 
11515 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11516 		u32 msi_mode = tr32(MSGINT_MODE);
11517 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11518 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11519 		if (!tg3_flag(tp, 1SHOT_MSI))
11520 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11521 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11522 	}
11523 defcfg:
11524 	if (!tg3_flag(tp, USING_MSIX)) {
11525 		tp->irq_cnt = 1;
11526 		tp->napi[0].irq_vec = tp->pdev->irq;
11527 	}
11528 
11529 	if (tp->irq_cnt == 1) {
11530 		tp->txq_cnt = 1;
11531 		tp->rxq_cnt = 1;
11532 		netif_set_real_num_tx_queues(tp->dev, 1);
11533 		netif_set_real_num_rx_queues(tp->dev, 1);
11534 	}
11535 }
11536 
11537 static void tg3_ints_fini(struct tg3 *tp)
11538 {
11539 	if (tg3_flag(tp, USING_MSIX))
11540 		pci_disable_msix(tp->pdev);
11541 	else if (tg3_flag(tp, USING_MSI))
11542 		pci_disable_msi(tp->pdev);
11543 	tg3_flag_clear(tp, USING_MSI);
11544 	tg3_flag_clear(tp, USING_MSIX);
11545 	tg3_flag_clear(tp, ENABLE_RSS);
11546 	tg3_flag_clear(tp, ENABLE_TSS);
11547 }
11548 
11549 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11550 		     bool init)
11551 {
11552 	struct net_device *dev = tp->dev;
11553 	int i, err;
11554 
11555 	/*
11556 	 * Setup interrupts first so we know how
11557 	 * many NAPI resources to allocate
11558 	 */
11559 	tg3_ints_init(tp);
11560 
11561 	tg3_rss_check_indir_tbl(tp);
11562 
11563 	/* The placement of this call is tied
11564 	 * to the setup and use of Host TX descriptors.
11565 	 */
11566 	err = tg3_alloc_consistent(tp);
11567 	if (err)
11568 		goto out_ints_fini;
11569 
11570 	tg3_napi_init(tp);
11571 
11572 	tg3_napi_enable(tp);
11573 
11574 	for (i = 0; i < tp->irq_cnt; i++) {
11575 		err = tg3_request_irq(tp, i);
11576 		if (err) {
11577 			for (i--; i >= 0; i--) {
11578 				struct tg3_napi *tnapi = &tp->napi[i];
11579 
11580 				free_irq(tnapi->irq_vec, tnapi);
11581 			}
11582 			goto out_napi_fini;
11583 		}
11584 	}
11585 
11586 	tg3_full_lock(tp, 0);
11587 
11588 	if (init)
11589 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11590 
11591 	err = tg3_init_hw(tp, reset_phy);
11592 	if (err) {
11593 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11594 		tg3_free_rings(tp);
11595 	}
11596 
11597 	tg3_full_unlock(tp);
11598 
11599 	if (err)
11600 		goto out_free_irq;
11601 
11602 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11603 		err = tg3_test_msi(tp);
11604 
11605 		if (err) {
11606 			tg3_full_lock(tp, 0);
11607 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11608 			tg3_free_rings(tp);
11609 			tg3_full_unlock(tp);
11610 
11611 			goto out_napi_fini;
11612 		}
11613 
11614 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11615 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11616 
11617 			tw32(PCIE_TRANSACTION_CFG,
11618 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11619 		}
11620 	}
11621 
11622 	tg3_phy_start(tp);
11623 
11624 	tg3_hwmon_open(tp);
11625 
11626 	tg3_full_lock(tp, 0);
11627 
11628 	tg3_timer_start(tp);
11629 	tg3_flag_set(tp, INIT_COMPLETE);
11630 	tg3_enable_ints(tp);
11631 
11632 	tg3_ptp_resume(tp);
11633 
11634 	tg3_full_unlock(tp);
11635 
11636 	netif_tx_start_all_queues(dev);
11637 
11638 	/*
11639 	 * Reset loopback feature if it was turned on while the device was down
11640 	 * make sure that it's installed properly now.
11641 	 */
11642 	if (dev->features & NETIF_F_LOOPBACK)
11643 		tg3_set_loopback(dev, dev->features);
11644 
11645 	return 0;
11646 
11647 out_free_irq:
11648 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11649 		struct tg3_napi *tnapi = &tp->napi[i];
11650 		free_irq(tnapi->irq_vec, tnapi);
11651 	}
11652 
11653 out_napi_fini:
11654 	tg3_napi_disable(tp);
11655 	tg3_napi_fini(tp);
11656 	tg3_free_consistent(tp);
11657 
11658 out_ints_fini:
11659 	tg3_ints_fini(tp);
11660 
11661 	return err;
11662 }
11663 
11664 static void tg3_stop(struct tg3 *tp)
11665 {
11666 	int i;
11667 
11668 	tg3_reset_task_cancel(tp);
11669 	tg3_netif_stop(tp);
11670 
11671 	tg3_timer_stop(tp);
11672 
11673 	tg3_hwmon_close(tp);
11674 
11675 	tg3_phy_stop(tp);
11676 
11677 	tg3_full_lock(tp, 1);
11678 
11679 	tg3_disable_ints(tp);
11680 
11681 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11682 	tg3_free_rings(tp);
11683 	tg3_flag_clear(tp, INIT_COMPLETE);
11684 
11685 	tg3_full_unlock(tp);
11686 
11687 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11688 		struct tg3_napi *tnapi = &tp->napi[i];
11689 		free_irq(tnapi->irq_vec, tnapi);
11690 	}
11691 
11692 	tg3_ints_fini(tp);
11693 
11694 	tg3_napi_fini(tp);
11695 
11696 	tg3_free_consistent(tp);
11697 }
11698 
11699 static int tg3_open(struct net_device *dev)
11700 {
11701 	struct tg3 *tp = netdev_priv(dev);
11702 	int err;
11703 
11704 	if (tp->pcierr_recovery) {
11705 		netdev_err(dev, "Failed to open device. PCI error recovery "
11706 			   "in progress\n");
11707 		return -EAGAIN;
11708 	}
11709 
11710 	if (tp->fw_needed) {
11711 		err = tg3_request_firmware(tp);
11712 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11713 			if (err) {
11714 				netdev_warn(tp->dev, "EEE capability disabled\n");
11715 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11716 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11717 				netdev_warn(tp->dev, "EEE capability restored\n");
11718 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11719 			}
11720 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11721 			if (err)
11722 				return err;
11723 		} else if (err) {
11724 			netdev_warn(tp->dev, "TSO capability disabled\n");
11725 			tg3_flag_clear(tp, TSO_CAPABLE);
11726 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11727 			netdev_notice(tp->dev, "TSO capability restored\n");
11728 			tg3_flag_set(tp, TSO_CAPABLE);
11729 		}
11730 	}
11731 
11732 	tg3_carrier_off(tp);
11733 
11734 	err = tg3_power_up(tp);
11735 	if (err)
11736 		return err;
11737 
11738 	tg3_full_lock(tp, 0);
11739 
11740 	tg3_disable_ints(tp);
11741 	tg3_flag_clear(tp, INIT_COMPLETE);
11742 
11743 	tg3_full_unlock(tp);
11744 
11745 	err = tg3_start(tp,
11746 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11747 			true, true);
11748 	if (err) {
11749 		tg3_frob_aux_power(tp, false);
11750 		pci_set_power_state(tp->pdev, PCI_D3hot);
11751 	}
11752 
11753 	return err;
11754 }
11755 
11756 static int tg3_close(struct net_device *dev)
11757 {
11758 	struct tg3 *tp = netdev_priv(dev);
11759 
11760 	if (tp->pcierr_recovery) {
11761 		netdev_err(dev, "Failed to close device. PCI error recovery "
11762 			   "in progress\n");
11763 		return -EAGAIN;
11764 	}
11765 
11766 	tg3_stop(tp);
11767 
11768 	if (pci_device_is_present(tp->pdev)) {
11769 		tg3_power_down_prepare(tp);
11770 
11771 		tg3_carrier_off(tp);
11772 	}
11773 	return 0;
11774 }
11775 
11776 static inline u64 get_stat64(tg3_stat64_t *val)
11777 {
11778        return ((u64)val->high << 32) | ((u64)val->low);
11779 }
11780 
11781 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11782 {
11783 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11784 
11785 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11786 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11787 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11788 		u32 val;
11789 
11790 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11791 			tg3_writephy(tp, MII_TG3_TEST1,
11792 				     val | MII_TG3_TEST1_CRC_EN);
11793 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11794 		} else
11795 			val = 0;
11796 
11797 		tp->phy_crc_errors += val;
11798 
11799 		return tp->phy_crc_errors;
11800 	}
11801 
11802 	return get_stat64(&hw_stats->rx_fcs_errors);
11803 }
11804 
11805 #define ESTAT_ADD(member) \
11806 	estats->member =	old_estats->member + \
11807 				get_stat64(&hw_stats->member)
11808 
11809 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11810 {
11811 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11812 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11813 
11814 	ESTAT_ADD(rx_octets);
11815 	ESTAT_ADD(rx_fragments);
11816 	ESTAT_ADD(rx_ucast_packets);
11817 	ESTAT_ADD(rx_mcast_packets);
11818 	ESTAT_ADD(rx_bcast_packets);
11819 	ESTAT_ADD(rx_fcs_errors);
11820 	ESTAT_ADD(rx_align_errors);
11821 	ESTAT_ADD(rx_xon_pause_rcvd);
11822 	ESTAT_ADD(rx_xoff_pause_rcvd);
11823 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11824 	ESTAT_ADD(rx_xoff_entered);
11825 	ESTAT_ADD(rx_frame_too_long_errors);
11826 	ESTAT_ADD(rx_jabbers);
11827 	ESTAT_ADD(rx_undersize_packets);
11828 	ESTAT_ADD(rx_in_length_errors);
11829 	ESTAT_ADD(rx_out_length_errors);
11830 	ESTAT_ADD(rx_64_or_less_octet_packets);
11831 	ESTAT_ADD(rx_65_to_127_octet_packets);
11832 	ESTAT_ADD(rx_128_to_255_octet_packets);
11833 	ESTAT_ADD(rx_256_to_511_octet_packets);
11834 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11835 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11836 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11837 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11838 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11839 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11840 
11841 	ESTAT_ADD(tx_octets);
11842 	ESTAT_ADD(tx_collisions);
11843 	ESTAT_ADD(tx_xon_sent);
11844 	ESTAT_ADD(tx_xoff_sent);
11845 	ESTAT_ADD(tx_flow_control);
11846 	ESTAT_ADD(tx_mac_errors);
11847 	ESTAT_ADD(tx_single_collisions);
11848 	ESTAT_ADD(tx_mult_collisions);
11849 	ESTAT_ADD(tx_deferred);
11850 	ESTAT_ADD(tx_excessive_collisions);
11851 	ESTAT_ADD(tx_late_collisions);
11852 	ESTAT_ADD(tx_collide_2times);
11853 	ESTAT_ADD(tx_collide_3times);
11854 	ESTAT_ADD(tx_collide_4times);
11855 	ESTAT_ADD(tx_collide_5times);
11856 	ESTAT_ADD(tx_collide_6times);
11857 	ESTAT_ADD(tx_collide_7times);
11858 	ESTAT_ADD(tx_collide_8times);
11859 	ESTAT_ADD(tx_collide_9times);
11860 	ESTAT_ADD(tx_collide_10times);
11861 	ESTAT_ADD(tx_collide_11times);
11862 	ESTAT_ADD(tx_collide_12times);
11863 	ESTAT_ADD(tx_collide_13times);
11864 	ESTAT_ADD(tx_collide_14times);
11865 	ESTAT_ADD(tx_collide_15times);
11866 	ESTAT_ADD(tx_ucast_packets);
11867 	ESTAT_ADD(tx_mcast_packets);
11868 	ESTAT_ADD(tx_bcast_packets);
11869 	ESTAT_ADD(tx_carrier_sense_errors);
11870 	ESTAT_ADD(tx_discards);
11871 	ESTAT_ADD(tx_errors);
11872 
11873 	ESTAT_ADD(dma_writeq_full);
11874 	ESTAT_ADD(dma_write_prioq_full);
11875 	ESTAT_ADD(rxbds_empty);
11876 	ESTAT_ADD(rx_discards);
11877 	ESTAT_ADD(rx_errors);
11878 	ESTAT_ADD(rx_threshold_hit);
11879 
11880 	ESTAT_ADD(dma_readq_full);
11881 	ESTAT_ADD(dma_read_prioq_full);
11882 	ESTAT_ADD(tx_comp_queue_full);
11883 
11884 	ESTAT_ADD(ring_set_send_prod_index);
11885 	ESTAT_ADD(ring_status_update);
11886 	ESTAT_ADD(nic_irqs);
11887 	ESTAT_ADD(nic_avoided_irqs);
11888 	ESTAT_ADD(nic_tx_threshold_hit);
11889 
11890 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11891 }
11892 
11893 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11894 {
11895 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11896 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11897 
11898 	stats->rx_packets = old_stats->rx_packets +
11899 		get_stat64(&hw_stats->rx_ucast_packets) +
11900 		get_stat64(&hw_stats->rx_mcast_packets) +
11901 		get_stat64(&hw_stats->rx_bcast_packets);
11902 
11903 	stats->tx_packets = old_stats->tx_packets +
11904 		get_stat64(&hw_stats->tx_ucast_packets) +
11905 		get_stat64(&hw_stats->tx_mcast_packets) +
11906 		get_stat64(&hw_stats->tx_bcast_packets);
11907 
11908 	stats->rx_bytes = old_stats->rx_bytes +
11909 		get_stat64(&hw_stats->rx_octets);
11910 	stats->tx_bytes = old_stats->tx_bytes +
11911 		get_stat64(&hw_stats->tx_octets);
11912 
11913 	stats->rx_errors = old_stats->rx_errors +
11914 		get_stat64(&hw_stats->rx_errors);
11915 	stats->tx_errors = old_stats->tx_errors +
11916 		get_stat64(&hw_stats->tx_errors) +
11917 		get_stat64(&hw_stats->tx_mac_errors) +
11918 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11919 		get_stat64(&hw_stats->tx_discards);
11920 
11921 	stats->multicast = old_stats->multicast +
11922 		get_stat64(&hw_stats->rx_mcast_packets);
11923 	stats->collisions = old_stats->collisions +
11924 		get_stat64(&hw_stats->tx_collisions);
11925 
11926 	stats->rx_length_errors = old_stats->rx_length_errors +
11927 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11928 		get_stat64(&hw_stats->rx_undersize_packets);
11929 
11930 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11931 		get_stat64(&hw_stats->rx_align_errors);
11932 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11933 		get_stat64(&hw_stats->tx_discards);
11934 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11935 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11936 
11937 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11938 		tg3_calc_crc_errors(tp);
11939 
11940 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11941 		get_stat64(&hw_stats->rx_discards);
11942 
11943 	stats->rx_dropped = tp->rx_dropped;
11944 	stats->tx_dropped = tp->tx_dropped;
11945 }
11946 
11947 static int tg3_get_regs_len(struct net_device *dev)
11948 {
11949 	return TG3_REG_BLK_SIZE;
11950 }
11951 
11952 static void tg3_get_regs(struct net_device *dev,
11953 		struct ethtool_regs *regs, void *_p)
11954 {
11955 	struct tg3 *tp = netdev_priv(dev);
11956 
11957 	regs->version = 0;
11958 
11959 	memset(_p, 0, TG3_REG_BLK_SIZE);
11960 
11961 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11962 		return;
11963 
11964 	tg3_full_lock(tp, 0);
11965 
11966 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11967 
11968 	tg3_full_unlock(tp);
11969 }
11970 
11971 static int tg3_get_eeprom_len(struct net_device *dev)
11972 {
11973 	struct tg3 *tp = netdev_priv(dev);
11974 
11975 	return tp->nvram_size;
11976 }
11977 
11978 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11979 {
11980 	struct tg3 *tp = netdev_priv(dev);
11981 	int ret, cpmu_restore = 0;
11982 	u8  *pd;
11983 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11984 	__be32 val;
11985 
11986 	if (tg3_flag(tp, NO_NVRAM))
11987 		return -EINVAL;
11988 
11989 	offset = eeprom->offset;
11990 	len = eeprom->len;
11991 	eeprom->len = 0;
11992 
11993 	eeprom->magic = TG3_EEPROM_MAGIC;
11994 
11995 	/* Override clock, link aware and link idle modes */
11996 	if (tg3_flag(tp, CPMU_PRESENT)) {
11997 		cpmu_val = tr32(TG3_CPMU_CTRL);
11998 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11999 				CPMU_CTRL_LINK_IDLE_MODE)) {
12000 			tw32(TG3_CPMU_CTRL, cpmu_val &
12001 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12002 					     CPMU_CTRL_LINK_IDLE_MODE));
12003 			cpmu_restore = 1;
12004 		}
12005 	}
12006 	tg3_override_clk(tp);
12007 
12008 	if (offset & 3) {
12009 		/* adjustments to start on required 4 byte boundary */
12010 		b_offset = offset & 3;
12011 		b_count = 4 - b_offset;
12012 		if (b_count > len) {
12013 			/* i.e. offset=1 len=2 */
12014 			b_count = len;
12015 		}
12016 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12017 		if (ret)
12018 			goto eeprom_done;
12019 		memcpy(data, ((char *)&val) + b_offset, b_count);
12020 		len -= b_count;
12021 		offset += b_count;
12022 		eeprom->len += b_count;
12023 	}
12024 
12025 	/* read bytes up to the last 4 byte boundary */
12026 	pd = &data[eeprom->len];
12027 	for (i = 0; i < (len - (len & 3)); i += 4) {
12028 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12029 		if (ret) {
12030 			if (i)
12031 				i -= 4;
12032 			eeprom->len += i;
12033 			goto eeprom_done;
12034 		}
12035 		memcpy(pd + i, &val, 4);
12036 		if (need_resched()) {
12037 			if (signal_pending(current)) {
12038 				eeprom->len += i;
12039 				ret = -EINTR;
12040 				goto eeprom_done;
12041 			}
12042 			cond_resched();
12043 		}
12044 	}
12045 	eeprom->len += i;
12046 
12047 	if (len & 3) {
12048 		/* read last bytes not ending on 4 byte boundary */
12049 		pd = &data[eeprom->len];
12050 		b_count = len & 3;
12051 		b_offset = offset + len - b_count;
12052 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12053 		if (ret)
12054 			goto eeprom_done;
12055 		memcpy(pd, &val, b_count);
12056 		eeprom->len += b_count;
12057 	}
12058 	ret = 0;
12059 
12060 eeprom_done:
12061 	/* Restore clock, link aware and link idle modes */
12062 	tg3_restore_clk(tp);
12063 	if (cpmu_restore)
12064 		tw32(TG3_CPMU_CTRL, cpmu_val);
12065 
12066 	return ret;
12067 }
12068 
12069 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12070 {
12071 	struct tg3 *tp = netdev_priv(dev);
12072 	int ret;
12073 	u32 offset, len, b_offset, odd_len;
12074 	u8 *buf;
12075 	__be32 start = 0, end;
12076 
12077 	if (tg3_flag(tp, NO_NVRAM) ||
12078 	    eeprom->magic != TG3_EEPROM_MAGIC)
12079 		return -EINVAL;
12080 
12081 	offset = eeprom->offset;
12082 	len = eeprom->len;
12083 
12084 	if ((b_offset = (offset & 3))) {
12085 		/* adjustments to start on required 4 byte boundary */
12086 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12087 		if (ret)
12088 			return ret;
12089 		len += b_offset;
12090 		offset &= ~3;
12091 		if (len < 4)
12092 			len = 4;
12093 	}
12094 
12095 	odd_len = 0;
12096 	if (len & 3) {
12097 		/* adjustments to end on required 4 byte boundary */
12098 		odd_len = 1;
12099 		len = (len + 3) & ~3;
12100 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12101 		if (ret)
12102 			return ret;
12103 	}
12104 
12105 	buf = data;
12106 	if (b_offset || odd_len) {
12107 		buf = kmalloc(len, GFP_KERNEL);
12108 		if (!buf)
12109 			return -ENOMEM;
12110 		if (b_offset)
12111 			memcpy(buf, &start, 4);
12112 		if (odd_len)
12113 			memcpy(buf+len-4, &end, 4);
12114 		memcpy(buf + b_offset, data, eeprom->len);
12115 	}
12116 
12117 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12118 
12119 	if (buf != data)
12120 		kfree(buf);
12121 
12122 	return ret;
12123 }
12124 
12125 static int tg3_get_link_ksettings(struct net_device *dev,
12126 				  struct ethtool_link_ksettings *cmd)
12127 {
12128 	struct tg3 *tp = netdev_priv(dev);
12129 	u32 supported, advertising;
12130 
12131 	if (tg3_flag(tp, USE_PHYLIB)) {
12132 		struct phy_device *phydev;
12133 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12134 			return -EAGAIN;
12135 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12136 		phy_ethtool_ksettings_get(phydev, cmd);
12137 
12138 		return 0;
12139 	}
12140 
12141 	supported = (SUPPORTED_Autoneg);
12142 
12143 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12144 		supported |= (SUPPORTED_1000baseT_Half |
12145 			      SUPPORTED_1000baseT_Full);
12146 
12147 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12148 		supported |= (SUPPORTED_100baseT_Half |
12149 			      SUPPORTED_100baseT_Full |
12150 			      SUPPORTED_10baseT_Half |
12151 			      SUPPORTED_10baseT_Full |
12152 			      SUPPORTED_TP);
12153 		cmd->base.port = PORT_TP;
12154 	} else {
12155 		supported |= SUPPORTED_FIBRE;
12156 		cmd->base.port = PORT_FIBRE;
12157 	}
12158 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12159 						supported);
12160 
12161 	advertising = tp->link_config.advertising;
12162 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12163 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12164 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12165 				advertising |= ADVERTISED_Pause;
12166 			} else {
12167 				advertising |= ADVERTISED_Pause |
12168 					ADVERTISED_Asym_Pause;
12169 			}
12170 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12171 			advertising |= ADVERTISED_Asym_Pause;
12172 		}
12173 	}
12174 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12175 						advertising);
12176 
12177 	if (netif_running(dev) && tp->link_up) {
12178 		cmd->base.speed = tp->link_config.active_speed;
12179 		cmd->base.duplex = tp->link_config.active_duplex;
12180 		ethtool_convert_legacy_u32_to_link_mode(
12181 			cmd->link_modes.lp_advertising,
12182 			tp->link_config.rmt_adv);
12183 
12184 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12185 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12186 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12187 			else
12188 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12189 		}
12190 	} else {
12191 		cmd->base.speed = SPEED_UNKNOWN;
12192 		cmd->base.duplex = DUPLEX_UNKNOWN;
12193 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12194 	}
12195 	cmd->base.phy_address = tp->phy_addr;
12196 	cmd->base.autoneg = tp->link_config.autoneg;
12197 	return 0;
12198 }
12199 
12200 static int tg3_set_link_ksettings(struct net_device *dev,
12201 				  const struct ethtool_link_ksettings *cmd)
12202 {
12203 	struct tg3 *tp = netdev_priv(dev);
12204 	u32 speed = cmd->base.speed;
12205 	u32 advertising;
12206 
12207 	if (tg3_flag(tp, USE_PHYLIB)) {
12208 		struct phy_device *phydev;
12209 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12210 			return -EAGAIN;
12211 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12212 		return phy_ethtool_ksettings_set(phydev, cmd);
12213 	}
12214 
12215 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12216 	    cmd->base.autoneg != AUTONEG_DISABLE)
12217 		return -EINVAL;
12218 
12219 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12220 	    cmd->base.duplex != DUPLEX_FULL &&
12221 	    cmd->base.duplex != DUPLEX_HALF)
12222 		return -EINVAL;
12223 
12224 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12225 						cmd->link_modes.advertising);
12226 
12227 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12228 		u32 mask = ADVERTISED_Autoneg |
12229 			   ADVERTISED_Pause |
12230 			   ADVERTISED_Asym_Pause;
12231 
12232 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12233 			mask |= ADVERTISED_1000baseT_Half |
12234 				ADVERTISED_1000baseT_Full;
12235 
12236 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12237 			mask |= ADVERTISED_100baseT_Half |
12238 				ADVERTISED_100baseT_Full |
12239 				ADVERTISED_10baseT_Half |
12240 				ADVERTISED_10baseT_Full |
12241 				ADVERTISED_TP;
12242 		else
12243 			mask |= ADVERTISED_FIBRE;
12244 
12245 		if (advertising & ~mask)
12246 			return -EINVAL;
12247 
12248 		mask &= (ADVERTISED_1000baseT_Half |
12249 			 ADVERTISED_1000baseT_Full |
12250 			 ADVERTISED_100baseT_Half |
12251 			 ADVERTISED_100baseT_Full |
12252 			 ADVERTISED_10baseT_Half |
12253 			 ADVERTISED_10baseT_Full);
12254 
12255 		advertising &= mask;
12256 	} else {
12257 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12258 			if (speed != SPEED_1000)
12259 				return -EINVAL;
12260 
12261 			if (cmd->base.duplex != DUPLEX_FULL)
12262 				return -EINVAL;
12263 		} else {
12264 			if (speed != SPEED_100 &&
12265 			    speed != SPEED_10)
12266 				return -EINVAL;
12267 		}
12268 	}
12269 
12270 	tg3_full_lock(tp, 0);
12271 
12272 	tp->link_config.autoneg = cmd->base.autoneg;
12273 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12274 		tp->link_config.advertising = (advertising |
12275 					      ADVERTISED_Autoneg);
12276 		tp->link_config.speed = SPEED_UNKNOWN;
12277 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12278 	} else {
12279 		tp->link_config.advertising = 0;
12280 		tp->link_config.speed = speed;
12281 		tp->link_config.duplex = cmd->base.duplex;
12282 	}
12283 
12284 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12285 
12286 	tg3_warn_mgmt_link_flap(tp);
12287 
12288 	if (netif_running(dev))
12289 		tg3_setup_phy(tp, true);
12290 
12291 	tg3_full_unlock(tp);
12292 
12293 	return 0;
12294 }
12295 
12296 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12297 {
12298 	struct tg3 *tp = netdev_priv(dev);
12299 
12300 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12301 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12302 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12303 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12304 }
12305 
12306 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12307 {
12308 	struct tg3 *tp = netdev_priv(dev);
12309 
12310 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12311 		wol->supported = WAKE_MAGIC;
12312 	else
12313 		wol->supported = 0;
12314 	wol->wolopts = 0;
12315 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12316 		wol->wolopts = WAKE_MAGIC;
12317 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12318 }
12319 
12320 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12321 {
12322 	struct tg3 *tp = netdev_priv(dev);
12323 	struct device *dp = &tp->pdev->dev;
12324 
12325 	if (wol->wolopts & ~WAKE_MAGIC)
12326 		return -EINVAL;
12327 	if ((wol->wolopts & WAKE_MAGIC) &&
12328 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12329 		return -EINVAL;
12330 
12331 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12332 
12333 	if (device_may_wakeup(dp))
12334 		tg3_flag_set(tp, WOL_ENABLE);
12335 	else
12336 		tg3_flag_clear(tp, WOL_ENABLE);
12337 
12338 	return 0;
12339 }
12340 
12341 static u32 tg3_get_msglevel(struct net_device *dev)
12342 {
12343 	struct tg3 *tp = netdev_priv(dev);
12344 	return tp->msg_enable;
12345 }
12346 
12347 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12348 {
12349 	struct tg3 *tp = netdev_priv(dev);
12350 	tp->msg_enable = value;
12351 }
12352 
12353 static int tg3_nway_reset(struct net_device *dev)
12354 {
12355 	struct tg3 *tp = netdev_priv(dev);
12356 	int r;
12357 
12358 	if (!netif_running(dev))
12359 		return -EAGAIN;
12360 
12361 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12362 		return -EINVAL;
12363 
12364 	tg3_warn_mgmt_link_flap(tp);
12365 
12366 	if (tg3_flag(tp, USE_PHYLIB)) {
12367 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12368 			return -EAGAIN;
12369 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12370 	} else {
12371 		u32 bmcr;
12372 
12373 		spin_lock_bh(&tp->lock);
12374 		r = -EINVAL;
12375 		tg3_readphy(tp, MII_BMCR, &bmcr);
12376 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12377 		    ((bmcr & BMCR_ANENABLE) ||
12378 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12379 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12380 						   BMCR_ANENABLE);
12381 			r = 0;
12382 		}
12383 		spin_unlock_bh(&tp->lock);
12384 	}
12385 
12386 	return r;
12387 }
12388 
12389 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12390 {
12391 	struct tg3 *tp = netdev_priv(dev);
12392 
12393 	ering->rx_max_pending = tp->rx_std_ring_mask;
12394 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12395 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12396 	else
12397 		ering->rx_jumbo_max_pending = 0;
12398 
12399 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12400 
12401 	ering->rx_pending = tp->rx_pending;
12402 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12403 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12404 	else
12405 		ering->rx_jumbo_pending = 0;
12406 
12407 	ering->tx_pending = tp->napi[0].tx_pending;
12408 }
12409 
12410 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12411 {
12412 	struct tg3 *tp = netdev_priv(dev);
12413 	int i, irq_sync = 0, err = 0;
12414 
12415 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12416 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12417 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12418 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12419 	    (tg3_flag(tp, TSO_BUG) &&
12420 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12421 		return -EINVAL;
12422 
12423 	if (netif_running(dev)) {
12424 		tg3_phy_stop(tp);
12425 		tg3_netif_stop(tp);
12426 		irq_sync = 1;
12427 	}
12428 
12429 	tg3_full_lock(tp, irq_sync);
12430 
12431 	tp->rx_pending = ering->rx_pending;
12432 
12433 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12434 	    tp->rx_pending > 63)
12435 		tp->rx_pending = 63;
12436 
12437 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12438 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12439 
12440 	for (i = 0; i < tp->irq_max; i++)
12441 		tp->napi[i].tx_pending = ering->tx_pending;
12442 
12443 	if (netif_running(dev)) {
12444 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12445 		err = tg3_restart_hw(tp, false);
12446 		if (!err)
12447 			tg3_netif_start(tp);
12448 	}
12449 
12450 	tg3_full_unlock(tp);
12451 
12452 	if (irq_sync && !err)
12453 		tg3_phy_start(tp);
12454 
12455 	return err;
12456 }
12457 
12458 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12459 {
12460 	struct tg3 *tp = netdev_priv(dev);
12461 
12462 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12463 
12464 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12465 		epause->rx_pause = 1;
12466 	else
12467 		epause->rx_pause = 0;
12468 
12469 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12470 		epause->tx_pause = 1;
12471 	else
12472 		epause->tx_pause = 0;
12473 }
12474 
12475 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12476 {
12477 	struct tg3 *tp = netdev_priv(dev);
12478 	int err = 0;
12479 
12480 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12481 		tg3_warn_mgmt_link_flap(tp);
12482 
12483 	if (tg3_flag(tp, USE_PHYLIB)) {
12484 		u32 newadv;
12485 		struct phy_device *phydev;
12486 
12487 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12488 
12489 		if (!(phydev->supported & SUPPORTED_Pause) ||
12490 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12491 		     (epause->rx_pause != epause->tx_pause)))
12492 			return -EINVAL;
12493 
12494 		tp->link_config.flowctrl = 0;
12495 		if (epause->rx_pause) {
12496 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12497 
12498 			if (epause->tx_pause) {
12499 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12500 				newadv = ADVERTISED_Pause;
12501 			} else
12502 				newadv = ADVERTISED_Pause |
12503 					 ADVERTISED_Asym_Pause;
12504 		} else if (epause->tx_pause) {
12505 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12506 			newadv = ADVERTISED_Asym_Pause;
12507 		} else
12508 			newadv = 0;
12509 
12510 		if (epause->autoneg)
12511 			tg3_flag_set(tp, PAUSE_AUTONEG);
12512 		else
12513 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12514 
12515 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12516 			u32 oldadv = phydev->advertising &
12517 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12518 			if (oldadv != newadv) {
12519 				phydev->advertising &=
12520 					~(ADVERTISED_Pause |
12521 					  ADVERTISED_Asym_Pause);
12522 				phydev->advertising |= newadv;
12523 				if (phydev->autoneg) {
12524 					/*
12525 					 * Always renegotiate the link to
12526 					 * inform our link partner of our
12527 					 * flow control settings, even if the
12528 					 * flow control is forced.  Let
12529 					 * tg3_adjust_link() do the final
12530 					 * flow control setup.
12531 					 */
12532 					return phy_start_aneg(phydev);
12533 				}
12534 			}
12535 
12536 			if (!epause->autoneg)
12537 				tg3_setup_flow_control(tp, 0, 0);
12538 		} else {
12539 			tp->link_config.advertising &=
12540 					~(ADVERTISED_Pause |
12541 					  ADVERTISED_Asym_Pause);
12542 			tp->link_config.advertising |= newadv;
12543 		}
12544 	} else {
12545 		int irq_sync = 0;
12546 
12547 		if (netif_running(dev)) {
12548 			tg3_netif_stop(tp);
12549 			irq_sync = 1;
12550 		}
12551 
12552 		tg3_full_lock(tp, irq_sync);
12553 
12554 		if (epause->autoneg)
12555 			tg3_flag_set(tp, PAUSE_AUTONEG);
12556 		else
12557 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12558 		if (epause->rx_pause)
12559 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12560 		else
12561 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12562 		if (epause->tx_pause)
12563 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12564 		else
12565 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12566 
12567 		if (netif_running(dev)) {
12568 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12569 			err = tg3_restart_hw(tp, false);
12570 			if (!err)
12571 				tg3_netif_start(tp);
12572 		}
12573 
12574 		tg3_full_unlock(tp);
12575 	}
12576 
12577 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12578 
12579 	return err;
12580 }
12581 
12582 static int tg3_get_sset_count(struct net_device *dev, int sset)
12583 {
12584 	switch (sset) {
12585 	case ETH_SS_TEST:
12586 		return TG3_NUM_TEST;
12587 	case ETH_SS_STATS:
12588 		return TG3_NUM_STATS;
12589 	default:
12590 		return -EOPNOTSUPP;
12591 	}
12592 }
12593 
12594 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12595 			 u32 *rules __always_unused)
12596 {
12597 	struct tg3 *tp = netdev_priv(dev);
12598 
12599 	if (!tg3_flag(tp, SUPPORT_MSIX))
12600 		return -EOPNOTSUPP;
12601 
12602 	switch (info->cmd) {
12603 	case ETHTOOL_GRXRINGS:
12604 		if (netif_running(tp->dev))
12605 			info->data = tp->rxq_cnt;
12606 		else {
12607 			info->data = num_online_cpus();
12608 			if (info->data > TG3_RSS_MAX_NUM_QS)
12609 				info->data = TG3_RSS_MAX_NUM_QS;
12610 		}
12611 
12612 		return 0;
12613 
12614 	default:
12615 		return -EOPNOTSUPP;
12616 	}
12617 }
12618 
12619 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12620 {
12621 	u32 size = 0;
12622 	struct tg3 *tp = netdev_priv(dev);
12623 
12624 	if (tg3_flag(tp, SUPPORT_MSIX))
12625 		size = TG3_RSS_INDIR_TBL_SIZE;
12626 
12627 	return size;
12628 }
12629 
12630 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12631 {
12632 	struct tg3 *tp = netdev_priv(dev);
12633 	int i;
12634 
12635 	if (hfunc)
12636 		*hfunc = ETH_RSS_HASH_TOP;
12637 	if (!indir)
12638 		return 0;
12639 
12640 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12641 		indir[i] = tp->rss_ind_tbl[i];
12642 
12643 	return 0;
12644 }
12645 
12646 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12647 			const u8 hfunc)
12648 {
12649 	struct tg3 *tp = netdev_priv(dev);
12650 	size_t i;
12651 
12652 	/* We require at least one supported parameter to be changed and no
12653 	 * change in any of the unsupported parameters
12654 	 */
12655 	if (key ||
12656 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12657 		return -EOPNOTSUPP;
12658 
12659 	if (!indir)
12660 		return 0;
12661 
12662 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12663 		tp->rss_ind_tbl[i] = indir[i];
12664 
12665 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12666 		return 0;
12667 
12668 	/* It is legal to write the indirection
12669 	 * table while the device is running.
12670 	 */
12671 	tg3_full_lock(tp, 0);
12672 	tg3_rss_write_indir_tbl(tp);
12673 	tg3_full_unlock(tp);
12674 
12675 	return 0;
12676 }
12677 
12678 static void tg3_get_channels(struct net_device *dev,
12679 			     struct ethtool_channels *channel)
12680 {
12681 	struct tg3 *tp = netdev_priv(dev);
12682 	u32 deflt_qs = netif_get_num_default_rss_queues();
12683 
12684 	channel->max_rx = tp->rxq_max;
12685 	channel->max_tx = tp->txq_max;
12686 
12687 	if (netif_running(dev)) {
12688 		channel->rx_count = tp->rxq_cnt;
12689 		channel->tx_count = tp->txq_cnt;
12690 	} else {
12691 		if (tp->rxq_req)
12692 			channel->rx_count = tp->rxq_req;
12693 		else
12694 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12695 
12696 		if (tp->txq_req)
12697 			channel->tx_count = tp->txq_req;
12698 		else
12699 			channel->tx_count = min(deflt_qs, tp->txq_max);
12700 	}
12701 }
12702 
12703 static int tg3_set_channels(struct net_device *dev,
12704 			    struct ethtool_channels *channel)
12705 {
12706 	struct tg3 *tp = netdev_priv(dev);
12707 
12708 	if (!tg3_flag(tp, SUPPORT_MSIX))
12709 		return -EOPNOTSUPP;
12710 
12711 	if (channel->rx_count > tp->rxq_max ||
12712 	    channel->tx_count > tp->txq_max)
12713 		return -EINVAL;
12714 
12715 	tp->rxq_req = channel->rx_count;
12716 	tp->txq_req = channel->tx_count;
12717 
12718 	if (!netif_running(dev))
12719 		return 0;
12720 
12721 	tg3_stop(tp);
12722 
12723 	tg3_carrier_off(tp);
12724 
12725 	tg3_start(tp, true, false, false);
12726 
12727 	return 0;
12728 }
12729 
12730 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12731 {
12732 	switch (stringset) {
12733 	case ETH_SS_STATS:
12734 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12735 		break;
12736 	case ETH_SS_TEST:
12737 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12738 		break;
12739 	default:
12740 		WARN_ON(1);	/* we need a WARN() */
12741 		break;
12742 	}
12743 }
12744 
12745 static int tg3_set_phys_id(struct net_device *dev,
12746 			    enum ethtool_phys_id_state state)
12747 {
12748 	struct tg3 *tp = netdev_priv(dev);
12749 
12750 	if (!netif_running(tp->dev))
12751 		return -EAGAIN;
12752 
12753 	switch (state) {
12754 	case ETHTOOL_ID_ACTIVE:
12755 		return 1;	/* cycle on/off once per second */
12756 
12757 	case ETHTOOL_ID_ON:
12758 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759 		     LED_CTRL_1000MBPS_ON |
12760 		     LED_CTRL_100MBPS_ON |
12761 		     LED_CTRL_10MBPS_ON |
12762 		     LED_CTRL_TRAFFIC_OVERRIDE |
12763 		     LED_CTRL_TRAFFIC_BLINK |
12764 		     LED_CTRL_TRAFFIC_LED);
12765 		break;
12766 
12767 	case ETHTOOL_ID_OFF:
12768 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769 		     LED_CTRL_TRAFFIC_OVERRIDE);
12770 		break;
12771 
12772 	case ETHTOOL_ID_INACTIVE:
12773 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12774 		break;
12775 	}
12776 
12777 	return 0;
12778 }
12779 
12780 static void tg3_get_ethtool_stats(struct net_device *dev,
12781 				   struct ethtool_stats *estats, u64 *tmp_stats)
12782 {
12783 	struct tg3 *tp = netdev_priv(dev);
12784 
12785 	if (tp->hw_stats)
12786 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12787 	else
12788 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12789 }
12790 
12791 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12792 {
12793 	int i;
12794 	__be32 *buf;
12795 	u32 offset = 0, len = 0;
12796 	u32 magic, val;
12797 
12798 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12799 		return NULL;
12800 
12801 	if (magic == TG3_EEPROM_MAGIC) {
12802 		for (offset = TG3_NVM_DIR_START;
12803 		     offset < TG3_NVM_DIR_END;
12804 		     offset += TG3_NVM_DIRENT_SIZE) {
12805 			if (tg3_nvram_read(tp, offset, &val))
12806 				return NULL;
12807 
12808 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12809 			    TG3_NVM_DIRTYPE_EXTVPD)
12810 				break;
12811 		}
12812 
12813 		if (offset != TG3_NVM_DIR_END) {
12814 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12815 			if (tg3_nvram_read(tp, offset + 4, &offset))
12816 				return NULL;
12817 
12818 			offset = tg3_nvram_logical_addr(tp, offset);
12819 		}
12820 	}
12821 
12822 	if (!offset || !len) {
12823 		offset = TG3_NVM_VPD_OFF;
12824 		len = TG3_NVM_VPD_LEN;
12825 	}
12826 
12827 	buf = kmalloc(len, GFP_KERNEL);
12828 	if (buf == NULL)
12829 		return NULL;
12830 
12831 	if (magic == TG3_EEPROM_MAGIC) {
12832 		for (i = 0; i < len; i += 4) {
12833 			/* The data is in little-endian format in NVRAM.
12834 			 * Use the big-endian read routines to preserve
12835 			 * the byte order as it exists in NVRAM.
12836 			 */
12837 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12838 				goto error;
12839 		}
12840 	} else {
12841 		u8 *ptr;
12842 		ssize_t cnt;
12843 		unsigned int pos = 0;
12844 
12845 		ptr = (u8 *)&buf[0];
12846 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12847 			cnt = pci_read_vpd(tp->pdev, pos,
12848 					   len - pos, ptr);
12849 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12850 				cnt = 0;
12851 			else if (cnt < 0)
12852 				goto error;
12853 		}
12854 		if (pos != len)
12855 			goto error;
12856 	}
12857 
12858 	*vpdlen = len;
12859 
12860 	return buf;
12861 
12862 error:
12863 	kfree(buf);
12864 	return NULL;
12865 }
12866 
12867 #define NVRAM_TEST_SIZE 0x100
12868 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12869 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12870 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12871 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12872 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12873 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12874 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12875 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12876 
12877 static int tg3_test_nvram(struct tg3 *tp)
12878 {
12879 	u32 csum, magic, len;
12880 	__be32 *buf;
12881 	int i, j, k, err = 0, size;
12882 
12883 	if (tg3_flag(tp, NO_NVRAM))
12884 		return 0;
12885 
12886 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12887 		return -EIO;
12888 
12889 	if (magic == TG3_EEPROM_MAGIC)
12890 		size = NVRAM_TEST_SIZE;
12891 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12892 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12893 		    TG3_EEPROM_SB_FORMAT_1) {
12894 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12895 			case TG3_EEPROM_SB_REVISION_0:
12896 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12897 				break;
12898 			case TG3_EEPROM_SB_REVISION_2:
12899 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12900 				break;
12901 			case TG3_EEPROM_SB_REVISION_3:
12902 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12903 				break;
12904 			case TG3_EEPROM_SB_REVISION_4:
12905 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12906 				break;
12907 			case TG3_EEPROM_SB_REVISION_5:
12908 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12909 				break;
12910 			case TG3_EEPROM_SB_REVISION_6:
12911 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12912 				break;
12913 			default:
12914 				return -EIO;
12915 			}
12916 		} else
12917 			return 0;
12918 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12919 		size = NVRAM_SELFBOOT_HW_SIZE;
12920 	else
12921 		return -EIO;
12922 
12923 	buf = kmalloc(size, GFP_KERNEL);
12924 	if (buf == NULL)
12925 		return -ENOMEM;
12926 
12927 	err = -EIO;
12928 	for (i = 0, j = 0; i < size; i += 4, j++) {
12929 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12930 		if (err)
12931 			break;
12932 	}
12933 	if (i < size)
12934 		goto out;
12935 
12936 	/* Selfboot format */
12937 	magic = be32_to_cpu(buf[0]);
12938 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12939 	    TG3_EEPROM_MAGIC_FW) {
12940 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12941 
12942 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12943 		    TG3_EEPROM_SB_REVISION_2) {
12944 			/* For rev 2, the csum doesn't include the MBA. */
12945 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12946 				csum8 += buf8[i];
12947 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12948 				csum8 += buf8[i];
12949 		} else {
12950 			for (i = 0; i < size; i++)
12951 				csum8 += buf8[i];
12952 		}
12953 
12954 		if (csum8 == 0) {
12955 			err = 0;
12956 			goto out;
12957 		}
12958 
12959 		err = -EIO;
12960 		goto out;
12961 	}
12962 
12963 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12964 	    TG3_EEPROM_MAGIC_HW) {
12965 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12966 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12967 		u8 *buf8 = (u8 *) buf;
12968 
12969 		/* Separate the parity bits and the data bytes.  */
12970 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12971 			if ((i == 0) || (i == 8)) {
12972 				int l;
12973 				u8 msk;
12974 
12975 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12976 					parity[k++] = buf8[i] & msk;
12977 				i++;
12978 			} else if (i == 16) {
12979 				int l;
12980 				u8 msk;
12981 
12982 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12983 					parity[k++] = buf8[i] & msk;
12984 				i++;
12985 
12986 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12987 					parity[k++] = buf8[i] & msk;
12988 				i++;
12989 			}
12990 			data[j++] = buf8[i];
12991 		}
12992 
12993 		err = -EIO;
12994 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12995 			u8 hw8 = hweight8(data[i]);
12996 
12997 			if ((hw8 & 0x1) && parity[i])
12998 				goto out;
12999 			else if (!(hw8 & 0x1) && !parity[i])
13000 				goto out;
13001 		}
13002 		err = 0;
13003 		goto out;
13004 	}
13005 
13006 	err = -EIO;
13007 
13008 	/* Bootstrap checksum at offset 0x10 */
13009 	csum = calc_crc((unsigned char *) buf, 0x10);
13010 	if (csum != le32_to_cpu(buf[0x10/4]))
13011 		goto out;
13012 
13013 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13014 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13015 	if (csum != le32_to_cpu(buf[0xfc/4]))
13016 		goto out;
13017 
13018 	kfree(buf);
13019 
13020 	buf = tg3_vpd_readblock(tp, &len);
13021 	if (!buf)
13022 		return -ENOMEM;
13023 
13024 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13025 	if (i > 0) {
13026 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13027 		if (j < 0)
13028 			goto out;
13029 
13030 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13031 			goto out;
13032 
13033 		i += PCI_VPD_LRDT_TAG_SIZE;
13034 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13035 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13036 		if (j > 0) {
13037 			u8 csum8 = 0;
13038 
13039 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13040 
13041 			for (i = 0; i <= j; i++)
13042 				csum8 += ((u8 *)buf)[i];
13043 
13044 			if (csum8)
13045 				goto out;
13046 		}
13047 	}
13048 
13049 	err = 0;
13050 
13051 out:
13052 	kfree(buf);
13053 	return err;
13054 }
13055 
13056 #define TG3_SERDES_TIMEOUT_SEC	2
13057 #define TG3_COPPER_TIMEOUT_SEC	6
13058 
13059 static int tg3_test_link(struct tg3 *tp)
13060 {
13061 	int i, max;
13062 
13063 	if (!netif_running(tp->dev))
13064 		return -ENODEV;
13065 
13066 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13067 		max = TG3_SERDES_TIMEOUT_SEC;
13068 	else
13069 		max = TG3_COPPER_TIMEOUT_SEC;
13070 
13071 	for (i = 0; i < max; i++) {
13072 		if (tp->link_up)
13073 			return 0;
13074 
13075 		if (msleep_interruptible(1000))
13076 			break;
13077 	}
13078 
13079 	return -EIO;
13080 }
13081 
13082 /* Only test the commonly used registers */
13083 static int tg3_test_registers(struct tg3 *tp)
13084 {
13085 	int i, is_5705, is_5750;
13086 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13087 	static struct {
13088 		u16 offset;
13089 		u16 flags;
13090 #define TG3_FL_5705	0x1
13091 #define TG3_FL_NOT_5705	0x2
13092 #define TG3_FL_NOT_5788	0x4
13093 #define TG3_FL_NOT_5750	0x8
13094 		u32 read_mask;
13095 		u32 write_mask;
13096 	} reg_tbl[] = {
13097 		/* MAC Control Registers */
13098 		{ MAC_MODE, TG3_FL_NOT_5705,
13099 			0x00000000, 0x00ef6f8c },
13100 		{ MAC_MODE, TG3_FL_5705,
13101 			0x00000000, 0x01ef6b8c },
13102 		{ MAC_STATUS, TG3_FL_NOT_5705,
13103 			0x03800107, 0x00000000 },
13104 		{ MAC_STATUS, TG3_FL_5705,
13105 			0x03800100, 0x00000000 },
13106 		{ MAC_ADDR_0_HIGH, 0x0000,
13107 			0x00000000, 0x0000ffff },
13108 		{ MAC_ADDR_0_LOW, 0x0000,
13109 			0x00000000, 0xffffffff },
13110 		{ MAC_RX_MTU_SIZE, 0x0000,
13111 			0x00000000, 0x0000ffff },
13112 		{ MAC_TX_MODE, 0x0000,
13113 			0x00000000, 0x00000070 },
13114 		{ MAC_TX_LENGTHS, 0x0000,
13115 			0x00000000, 0x00003fff },
13116 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13117 			0x00000000, 0x000007fc },
13118 		{ MAC_RX_MODE, TG3_FL_5705,
13119 			0x00000000, 0x000007dc },
13120 		{ MAC_HASH_REG_0, 0x0000,
13121 			0x00000000, 0xffffffff },
13122 		{ MAC_HASH_REG_1, 0x0000,
13123 			0x00000000, 0xffffffff },
13124 		{ MAC_HASH_REG_2, 0x0000,
13125 			0x00000000, 0xffffffff },
13126 		{ MAC_HASH_REG_3, 0x0000,
13127 			0x00000000, 0xffffffff },
13128 
13129 		/* Receive Data and Receive BD Initiator Control Registers. */
13130 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13131 			0x00000000, 0xffffffff },
13132 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13133 			0x00000000, 0xffffffff },
13134 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13135 			0x00000000, 0x00000003 },
13136 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13137 			0x00000000, 0xffffffff },
13138 		{ RCVDBDI_STD_BD+0, 0x0000,
13139 			0x00000000, 0xffffffff },
13140 		{ RCVDBDI_STD_BD+4, 0x0000,
13141 			0x00000000, 0xffffffff },
13142 		{ RCVDBDI_STD_BD+8, 0x0000,
13143 			0x00000000, 0xffff0002 },
13144 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13145 			0x00000000, 0xffffffff },
13146 
13147 		/* Receive BD Initiator Control Registers. */
13148 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13149 			0x00000000, 0xffffffff },
13150 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13151 			0x00000000, 0x000003ff },
13152 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13153 			0x00000000, 0xffffffff },
13154 
13155 		/* Host Coalescing Control Registers. */
13156 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13157 			0x00000000, 0x00000004 },
13158 		{ HOSTCC_MODE, TG3_FL_5705,
13159 			0x00000000, 0x000000f6 },
13160 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13161 			0x00000000, 0xffffffff },
13162 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13163 			0x00000000, 0x000003ff },
13164 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13165 			0x00000000, 0xffffffff },
13166 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13167 			0x00000000, 0x000003ff },
13168 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13169 			0x00000000, 0xffffffff },
13170 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13171 			0x00000000, 0x000000ff },
13172 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13173 			0x00000000, 0xffffffff },
13174 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13175 			0x00000000, 0x000000ff },
13176 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13177 			0x00000000, 0xffffffff },
13178 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13179 			0x00000000, 0xffffffff },
13180 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13181 			0x00000000, 0xffffffff },
13182 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13183 			0x00000000, 0x000000ff },
13184 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13185 			0x00000000, 0xffffffff },
13186 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13187 			0x00000000, 0x000000ff },
13188 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13189 			0x00000000, 0xffffffff },
13190 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13191 			0x00000000, 0xffffffff },
13192 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13193 			0x00000000, 0xffffffff },
13194 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13195 			0x00000000, 0xffffffff },
13196 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13197 			0x00000000, 0xffffffff },
13198 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13199 			0xffffffff, 0x00000000 },
13200 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13201 			0xffffffff, 0x00000000 },
13202 
13203 		/* Buffer Manager Control Registers. */
13204 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13205 			0x00000000, 0x007fff80 },
13206 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13207 			0x00000000, 0x007fffff },
13208 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13209 			0x00000000, 0x0000003f },
13210 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13211 			0x00000000, 0x000001ff },
13212 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13213 			0x00000000, 0x000001ff },
13214 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13215 			0xffffffff, 0x00000000 },
13216 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13217 			0xffffffff, 0x00000000 },
13218 
13219 		/* Mailbox Registers */
13220 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13221 			0x00000000, 0x000001ff },
13222 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13223 			0x00000000, 0x000001ff },
13224 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13225 			0x00000000, 0x000007ff },
13226 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13227 			0x00000000, 0x000001ff },
13228 
13229 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13230 	};
13231 
13232 	is_5705 = is_5750 = 0;
13233 	if (tg3_flag(tp, 5705_PLUS)) {
13234 		is_5705 = 1;
13235 		if (tg3_flag(tp, 5750_PLUS))
13236 			is_5750 = 1;
13237 	}
13238 
13239 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13240 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13241 			continue;
13242 
13243 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13244 			continue;
13245 
13246 		if (tg3_flag(tp, IS_5788) &&
13247 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13248 			continue;
13249 
13250 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13251 			continue;
13252 
13253 		offset = (u32) reg_tbl[i].offset;
13254 		read_mask = reg_tbl[i].read_mask;
13255 		write_mask = reg_tbl[i].write_mask;
13256 
13257 		/* Save the original register content */
13258 		save_val = tr32(offset);
13259 
13260 		/* Determine the read-only value. */
13261 		read_val = save_val & read_mask;
13262 
13263 		/* Write zero to the register, then make sure the read-only bits
13264 		 * are not changed and the read/write bits are all zeros.
13265 		 */
13266 		tw32(offset, 0);
13267 
13268 		val = tr32(offset);
13269 
13270 		/* Test the read-only and read/write bits. */
13271 		if (((val & read_mask) != read_val) || (val & write_mask))
13272 			goto out;
13273 
13274 		/* Write ones to all the bits defined by RdMask and WrMask, then
13275 		 * make sure the read-only bits are not changed and the
13276 		 * read/write bits are all ones.
13277 		 */
13278 		tw32(offset, read_mask | write_mask);
13279 
13280 		val = tr32(offset);
13281 
13282 		/* Test the read-only bits. */
13283 		if ((val & read_mask) != read_val)
13284 			goto out;
13285 
13286 		/* Test the read/write bits. */
13287 		if ((val & write_mask) != write_mask)
13288 			goto out;
13289 
13290 		tw32(offset, save_val);
13291 	}
13292 
13293 	return 0;
13294 
13295 out:
13296 	if (netif_msg_hw(tp))
13297 		netdev_err(tp->dev,
13298 			   "Register test failed at offset %x\n", offset);
13299 	tw32(offset, save_val);
13300 	return -EIO;
13301 }
13302 
13303 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13304 {
13305 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13306 	int i;
13307 	u32 j;
13308 
13309 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13310 		for (j = 0; j < len; j += 4) {
13311 			u32 val;
13312 
13313 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13314 			tg3_read_mem(tp, offset + j, &val);
13315 			if (val != test_pattern[i])
13316 				return -EIO;
13317 		}
13318 	}
13319 	return 0;
13320 }
13321 
13322 static int tg3_test_memory(struct tg3 *tp)
13323 {
13324 	static struct mem_entry {
13325 		u32 offset;
13326 		u32 len;
13327 	} mem_tbl_570x[] = {
13328 		{ 0x00000000, 0x00b50},
13329 		{ 0x00002000, 0x1c000},
13330 		{ 0xffffffff, 0x00000}
13331 	}, mem_tbl_5705[] = {
13332 		{ 0x00000100, 0x0000c},
13333 		{ 0x00000200, 0x00008},
13334 		{ 0x00004000, 0x00800},
13335 		{ 0x00006000, 0x01000},
13336 		{ 0x00008000, 0x02000},
13337 		{ 0x00010000, 0x0e000},
13338 		{ 0xffffffff, 0x00000}
13339 	}, mem_tbl_5755[] = {
13340 		{ 0x00000200, 0x00008},
13341 		{ 0x00004000, 0x00800},
13342 		{ 0x00006000, 0x00800},
13343 		{ 0x00008000, 0x02000},
13344 		{ 0x00010000, 0x0c000},
13345 		{ 0xffffffff, 0x00000}
13346 	}, mem_tbl_5906[] = {
13347 		{ 0x00000200, 0x00008},
13348 		{ 0x00004000, 0x00400},
13349 		{ 0x00006000, 0x00400},
13350 		{ 0x00008000, 0x01000},
13351 		{ 0x00010000, 0x01000},
13352 		{ 0xffffffff, 0x00000}
13353 	}, mem_tbl_5717[] = {
13354 		{ 0x00000200, 0x00008},
13355 		{ 0x00010000, 0x0a000},
13356 		{ 0x00020000, 0x13c00},
13357 		{ 0xffffffff, 0x00000}
13358 	}, mem_tbl_57765[] = {
13359 		{ 0x00000200, 0x00008},
13360 		{ 0x00004000, 0x00800},
13361 		{ 0x00006000, 0x09800},
13362 		{ 0x00010000, 0x0a000},
13363 		{ 0xffffffff, 0x00000}
13364 	};
13365 	struct mem_entry *mem_tbl;
13366 	int err = 0;
13367 	int i;
13368 
13369 	if (tg3_flag(tp, 5717_PLUS))
13370 		mem_tbl = mem_tbl_5717;
13371 	else if (tg3_flag(tp, 57765_CLASS) ||
13372 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13373 		mem_tbl = mem_tbl_57765;
13374 	else if (tg3_flag(tp, 5755_PLUS))
13375 		mem_tbl = mem_tbl_5755;
13376 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13377 		mem_tbl = mem_tbl_5906;
13378 	else if (tg3_flag(tp, 5705_PLUS))
13379 		mem_tbl = mem_tbl_5705;
13380 	else
13381 		mem_tbl = mem_tbl_570x;
13382 
13383 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13384 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13385 		if (err)
13386 			break;
13387 	}
13388 
13389 	return err;
13390 }
13391 
13392 #define TG3_TSO_MSS		500
13393 
13394 #define TG3_TSO_IP_HDR_LEN	20
13395 #define TG3_TSO_TCP_HDR_LEN	20
13396 #define TG3_TSO_TCP_OPT_LEN	12
13397 
13398 static const u8 tg3_tso_header[] = {
13399 0x08, 0x00,
13400 0x45, 0x00, 0x00, 0x00,
13401 0x00, 0x00, 0x40, 0x00,
13402 0x40, 0x06, 0x00, 0x00,
13403 0x0a, 0x00, 0x00, 0x01,
13404 0x0a, 0x00, 0x00, 0x02,
13405 0x0d, 0x00, 0xe0, 0x00,
13406 0x00, 0x00, 0x01, 0x00,
13407 0x00, 0x00, 0x02, 0x00,
13408 0x80, 0x10, 0x10, 0x00,
13409 0x14, 0x09, 0x00, 0x00,
13410 0x01, 0x01, 0x08, 0x0a,
13411 0x11, 0x11, 0x11, 0x11,
13412 0x11, 0x11, 0x11, 0x11,
13413 };
13414 
13415 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13416 {
13417 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13418 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13419 	u32 budget;
13420 	struct sk_buff *skb;
13421 	u8 *tx_data, *rx_data;
13422 	dma_addr_t map;
13423 	int num_pkts, tx_len, rx_len, i, err;
13424 	struct tg3_rx_buffer_desc *desc;
13425 	struct tg3_napi *tnapi, *rnapi;
13426 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13427 
13428 	tnapi = &tp->napi[0];
13429 	rnapi = &tp->napi[0];
13430 	if (tp->irq_cnt > 1) {
13431 		if (tg3_flag(tp, ENABLE_RSS))
13432 			rnapi = &tp->napi[1];
13433 		if (tg3_flag(tp, ENABLE_TSS))
13434 			tnapi = &tp->napi[1];
13435 	}
13436 	coal_now = tnapi->coal_now | rnapi->coal_now;
13437 
13438 	err = -EIO;
13439 
13440 	tx_len = pktsz;
13441 	skb = netdev_alloc_skb(tp->dev, tx_len);
13442 	if (!skb)
13443 		return -ENOMEM;
13444 
13445 	tx_data = skb_put(skb, tx_len);
13446 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13447 	memset(tx_data + ETH_ALEN, 0x0, 8);
13448 
13449 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13450 
13451 	if (tso_loopback) {
13452 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13453 
13454 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13455 			      TG3_TSO_TCP_OPT_LEN;
13456 
13457 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13458 		       sizeof(tg3_tso_header));
13459 		mss = TG3_TSO_MSS;
13460 
13461 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13462 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13463 
13464 		/* Set the total length field in the IP header */
13465 		iph->tot_len = htons((u16)(mss + hdr_len));
13466 
13467 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13468 			      TXD_FLAG_CPU_POST_DMA);
13469 
13470 		if (tg3_flag(tp, HW_TSO_1) ||
13471 		    tg3_flag(tp, HW_TSO_2) ||
13472 		    tg3_flag(tp, HW_TSO_3)) {
13473 			struct tcphdr *th;
13474 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13475 			th = (struct tcphdr *)&tx_data[val];
13476 			th->check = 0;
13477 		} else
13478 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13479 
13480 		if (tg3_flag(tp, HW_TSO_3)) {
13481 			mss |= (hdr_len & 0xc) << 12;
13482 			if (hdr_len & 0x10)
13483 				base_flags |= 0x00000010;
13484 			base_flags |= (hdr_len & 0x3e0) << 5;
13485 		} else if (tg3_flag(tp, HW_TSO_2))
13486 			mss |= hdr_len << 9;
13487 		else if (tg3_flag(tp, HW_TSO_1) ||
13488 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13489 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13490 		} else {
13491 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13492 		}
13493 
13494 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13495 	} else {
13496 		num_pkts = 1;
13497 		data_off = ETH_HLEN;
13498 
13499 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13500 		    tx_len > VLAN_ETH_FRAME_LEN)
13501 			base_flags |= TXD_FLAG_JMB_PKT;
13502 	}
13503 
13504 	for (i = data_off; i < tx_len; i++)
13505 		tx_data[i] = (u8) (i & 0xff);
13506 
13507 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13508 	if (pci_dma_mapping_error(tp->pdev, map)) {
13509 		dev_kfree_skb(skb);
13510 		return -EIO;
13511 	}
13512 
13513 	val = tnapi->tx_prod;
13514 	tnapi->tx_buffers[val].skb = skb;
13515 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13516 
13517 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13518 	       rnapi->coal_now);
13519 
13520 	udelay(10);
13521 
13522 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13523 
13524 	budget = tg3_tx_avail(tnapi);
13525 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13526 			    base_flags | TXD_FLAG_END, mss, 0)) {
13527 		tnapi->tx_buffers[val].skb = NULL;
13528 		dev_kfree_skb(skb);
13529 		return -EIO;
13530 	}
13531 
13532 	tnapi->tx_prod++;
13533 
13534 	/* Sync BD data before updating mailbox */
13535 	wmb();
13536 
13537 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13538 	tr32_mailbox(tnapi->prodmbox);
13539 
13540 	udelay(10);
13541 
13542 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13543 	for (i = 0; i < 35; i++) {
13544 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13545 		       coal_now);
13546 
13547 		udelay(10);
13548 
13549 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13550 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13551 		if ((tx_idx == tnapi->tx_prod) &&
13552 		    (rx_idx == (rx_start_idx + num_pkts)))
13553 			break;
13554 	}
13555 
13556 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13557 	dev_kfree_skb(skb);
13558 
13559 	if (tx_idx != tnapi->tx_prod)
13560 		goto out;
13561 
13562 	if (rx_idx != rx_start_idx + num_pkts)
13563 		goto out;
13564 
13565 	val = data_off;
13566 	while (rx_idx != rx_start_idx) {
13567 		desc = &rnapi->rx_rcb[rx_start_idx++];
13568 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13569 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13570 
13571 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13572 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13573 			goto out;
13574 
13575 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13576 			 - ETH_FCS_LEN;
13577 
13578 		if (!tso_loopback) {
13579 			if (rx_len != tx_len)
13580 				goto out;
13581 
13582 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13583 				if (opaque_key != RXD_OPAQUE_RING_STD)
13584 					goto out;
13585 			} else {
13586 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13587 					goto out;
13588 			}
13589 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13590 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13591 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13592 			goto out;
13593 		}
13594 
13595 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13596 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13597 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13598 					     mapping);
13599 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13600 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13601 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13602 					     mapping);
13603 		} else
13604 			goto out;
13605 
13606 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13607 					    PCI_DMA_FROMDEVICE);
13608 
13609 		rx_data += TG3_RX_OFFSET(tp);
13610 		for (i = data_off; i < rx_len; i++, val++) {
13611 			if (*(rx_data + i) != (u8) (val & 0xff))
13612 				goto out;
13613 		}
13614 	}
13615 
13616 	err = 0;
13617 
13618 	/* tg3_free_rings will unmap and free the rx_data */
13619 out:
13620 	return err;
13621 }
13622 
13623 #define TG3_STD_LOOPBACK_FAILED		1
13624 #define TG3_JMB_LOOPBACK_FAILED		2
13625 #define TG3_TSO_LOOPBACK_FAILED		4
13626 #define TG3_LOOPBACK_FAILED \
13627 	(TG3_STD_LOOPBACK_FAILED | \
13628 	 TG3_JMB_LOOPBACK_FAILED | \
13629 	 TG3_TSO_LOOPBACK_FAILED)
13630 
13631 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13632 {
13633 	int err = -EIO;
13634 	u32 eee_cap;
13635 	u32 jmb_pkt_sz = 9000;
13636 
13637 	if (tp->dma_limit)
13638 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13639 
13640 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13641 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13642 
13643 	if (!netif_running(tp->dev)) {
13644 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13645 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646 		if (do_extlpbk)
13647 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13648 		goto done;
13649 	}
13650 
13651 	err = tg3_reset_hw(tp, true);
13652 	if (err) {
13653 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13654 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13655 		if (do_extlpbk)
13656 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13657 		goto done;
13658 	}
13659 
13660 	if (tg3_flag(tp, ENABLE_RSS)) {
13661 		int i;
13662 
13663 		/* Reroute all rx packets to the 1st queue */
13664 		for (i = MAC_RSS_INDIR_TBL_0;
13665 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13666 			tw32(i, 0x0);
13667 	}
13668 
13669 	/* HW errata - mac loopback fails in some cases on 5780.
13670 	 * Normal traffic and PHY loopback are not affected by
13671 	 * errata.  Also, the MAC loopback test is deprecated for
13672 	 * all newer ASIC revisions.
13673 	 */
13674 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13675 	    !tg3_flag(tp, CPMU_PRESENT)) {
13676 		tg3_mac_loopback(tp, true);
13677 
13678 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13679 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13680 
13681 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13682 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13683 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13684 
13685 		tg3_mac_loopback(tp, false);
13686 	}
13687 
13688 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13689 	    !tg3_flag(tp, USE_PHYLIB)) {
13690 		int i;
13691 
13692 		tg3_phy_lpbk_set(tp, 0, false);
13693 
13694 		/* Wait for link */
13695 		for (i = 0; i < 100; i++) {
13696 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13697 				break;
13698 			mdelay(1);
13699 		}
13700 
13701 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13702 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13703 		if (tg3_flag(tp, TSO_CAPABLE) &&
13704 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13705 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13706 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13707 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13708 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13709 
13710 		if (do_extlpbk) {
13711 			tg3_phy_lpbk_set(tp, 0, true);
13712 
13713 			/* All link indications report up, but the hardware
13714 			 * isn't really ready for about 20 msec.  Double it
13715 			 * to be sure.
13716 			 */
13717 			mdelay(40);
13718 
13719 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13720 				data[TG3_EXT_LOOPB_TEST] |=
13721 							TG3_STD_LOOPBACK_FAILED;
13722 			if (tg3_flag(tp, TSO_CAPABLE) &&
13723 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13724 				data[TG3_EXT_LOOPB_TEST] |=
13725 							TG3_TSO_LOOPBACK_FAILED;
13726 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13727 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13728 				data[TG3_EXT_LOOPB_TEST] |=
13729 							TG3_JMB_LOOPBACK_FAILED;
13730 		}
13731 
13732 		/* Re-enable gphy autopowerdown. */
13733 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13734 			tg3_phy_toggle_apd(tp, true);
13735 	}
13736 
13737 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13738 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13739 
13740 done:
13741 	tp->phy_flags |= eee_cap;
13742 
13743 	return err;
13744 }
13745 
13746 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13747 			  u64 *data)
13748 {
13749 	struct tg3 *tp = netdev_priv(dev);
13750 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13751 
13752 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13753 		if (tg3_power_up(tp)) {
13754 			etest->flags |= ETH_TEST_FL_FAILED;
13755 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13756 			return;
13757 		}
13758 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13759 	}
13760 
13761 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13762 
13763 	if (tg3_test_nvram(tp) != 0) {
13764 		etest->flags |= ETH_TEST_FL_FAILED;
13765 		data[TG3_NVRAM_TEST] = 1;
13766 	}
13767 	if (!doextlpbk && tg3_test_link(tp)) {
13768 		etest->flags |= ETH_TEST_FL_FAILED;
13769 		data[TG3_LINK_TEST] = 1;
13770 	}
13771 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13772 		int err, err2 = 0, irq_sync = 0;
13773 
13774 		if (netif_running(dev)) {
13775 			tg3_phy_stop(tp);
13776 			tg3_netif_stop(tp);
13777 			irq_sync = 1;
13778 		}
13779 
13780 		tg3_full_lock(tp, irq_sync);
13781 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13782 		err = tg3_nvram_lock(tp);
13783 		tg3_halt_cpu(tp, RX_CPU_BASE);
13784 		if (!tg3_flag(tp, 5705_PLUS))
13785 			tg3_halt_cpu(tp, TX_CPU_BASE);
13786 		if (!err)
13787 			tg3_nvram_unlock(tp);
13788 
13789 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13790 			tg3_phy_reset(tp);
13791 
13792 		if (tg3_test_registers(tp) != 0) {
13793 			etest->flags |= ETH_TEST_FL_FAILED;
13794 			data[TG3_REGISTER_TEST] = 1;
13795 		}
13796 
13797 		if (tg3_test_memory(tp) != 0) {
13798 			etest->flags |= ETH_TEST_FL_FAILED;
13799 			data[TG3_MEMORY_TEST] = 1;
13800 		}
13801 
13802 		if (doextlpbk)
13803 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13804 
13805 		if (tg3_test_loopback(tp, data, doextlpbk))
13806 			etest->flags |= ETH_TEST_FL_FAILED;
13807 
13808 		tg3_full_unlock(tp);
13809 
13810 		if (tg3_test_interrupt(tp) != 0) {
13811 			etest->flags |= ETH_TEST_FL_FAILED;
13812 			data[TG3_INTERRUPT_TEST] = 1;
13813 		}
13814 
13815 		tg3_full_lock(tp, 0);
13816 
13817 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13818 		if (netif_running(dev)) {
13819 			tg3_flag_set(tp, INIT_COMPLETE);
13820 			err2 = tg3_restart_hw(tp, true);
13821 			if (!err2)
13822 				tg3_netif_start(tp);
13823 		}
13824 
13825 		tg3_full_unlock(tp);
13826 
13827 		if (irq_sync && !err2)
13828 			tg3_phy_start(tp);
13829 	}
13830 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13831 		tg3_power_down_prepare(tp);
13832 
13833 }
13834 
13835 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13836 {
13837 	struct tg3 *tp = netdev_priv(dev);
13838 	struct hwtstamp_config stmpconf;
13839 
13840 	if (!tg3_flag(tp, PTP_CAPABLE))
13841 		return -EOPNOTSUPP;
13842 
13843 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13844 		return -EFAULT;
13845 
13846 	if (stmpconf.flags)
13847 		return -EINVAL;
13848 
13849 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13850 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13851 		return -ERANGE;
13852 
13853 	switch (stmpconf.rx_filter) {
13854 	case HWTSTAMP_FILTER_NONE:
13855 		tp->rxptpctl = 0;
13856 		break;
13857 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13858 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13859 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13860 		break;
13861 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13862 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13863 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13864 		break;
13865 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13866 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13867 			       TG3_RX_PTP_CTL_DELAY_REQ;
13868 		break;
13869 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13870 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13871 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13872 		break;
13873 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13874 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13875 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13876 		break;
13877 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13878 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13879 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13880 		break;
13881 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13882 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13883 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13884 		break;
13885 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13886 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13887 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13888 		break;
13889 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13890 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13891 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13892 		break;
13893 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13894 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13895 			       TG3_RX_PTP_CTL_DELAY_REQ;
13896 		break;
13897 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13898 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13899 			       TG3_RX_PTP_CTL_DELAY_REQ;
13900 		break;
13901 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13902 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13903 			       TG3_RX_PTP_CTL_DELAY_REQ;
13904 		break;
13905 	default:
13906 		return -ERANGE;
13907 	}
13908 
13909 	if (netif_running(dev) && tp->rxptpctl)
13910 		tw32(TG3_RX_PTP_CTL,
13911 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13912 
13913 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13914 		tg3_flag_set(tp, TX_TSTAMP_EN);
13915 	else
13916 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13917 
13918 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13919 		-EFAULT : 0;
13920 }
13921 
13922 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13923 {
13924 	struct tg3 *tp = netdev_priv(dev);
13925 	struct hwtstamp_config stmpconf;
13926 
13927 	if (!tg3_flag(tp, PTP_CAPABLE))
13928 		return -EOPNOTSUPP;
13929 
13930 	stmpconf.flags = 0;
13931 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13932 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13933 
13934 	switch (tp->rxptpctl) {
13935 	case 0:
13936 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13937 		break;
13938 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13939 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13940 		break;
13941 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13942 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13943 		break;
13944 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13945 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13946 		break;
13947 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13948 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13949 		break;
13950 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13951 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13952 		break;
13953 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13954 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13955 		break;
13956 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13957 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13958 		break;
13959 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13960 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13961 		break;
13962 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13963 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13964 		break;
13965 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13966 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13967 		break;
13968 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13969 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13970 		break;
13971 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13972 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13973 		break;
13974 	default:
13975 		WARN_ON_ONCE(1);
13976 		return -ERANGE;
13977 	}
13978 
13979 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13980 		-EFAULT : 0;
13981 }
13982 
13983 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13984 {
13985 	struct mii_ioctl_data *data = if_mii(ifr);
13986 	struct tg3 *tp = netdev_priv(dev);
13987 	int err;
13988 
13989 	if (tg3_flag(tp, USE_PHYLIB)) {
13990 		struct phy_device *phydev;
13991 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13992 			return -EAGAIN;
13993 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13994 		return phy_mii_ioctl(phydev, ifr, cmd);
13995 	}
13996 
13997 	switch (cmd) {
13998 	case SIOCGMIIPHY:
13999 		data->phy_id = tp->phy_addr;
14000 
14001 		/* fallthru */
14002 	case SIOCGMIIREG: {
14003 		u32 mii_regval;
14004 
14005 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14006 			break;			/* We have no PHY */
14007 
14008 		if (!netif_running(dev))
14009 			return -EAGAIN;
14010 
14011 		spin_lock_bh(&tp->lock);
14012 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14013 				    data->reg_num & 0x1f, &mii_regval);
14014 		spin_unlock_bh(&tp->lock);
14015 
14016 		data->val_out = mii_regval;
14017 
14018 		return err;
14019 	}
14020 
14021 	case SIOCSMIIREG:
14022 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14023 			break;			/* We have no PHY */
14024 
14025 		if (!netif_running(dev))
14026 			return -EAGAIN;
14027 
14028 		spin_lock_bh(&tp->lock);
14029 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14030 				     data->reg_num & 0x1f, data->val_in);
14031 		spin_unlock_bh(&tp->lock);
14032 
14033 		return err;
14034 
14035 	case SIOCSHWTSTAMP:
14036 		return tg3_hwtstamp_set(dev, ifr);
14037 
14038 	case SIOCGHWTSTAMP:
14039 		return tg3_hwtstamp_get(dev, ifr);
14040 
14041 	default:
14042 		/* do nothing */
14043 		break;
14044 	}
14045 	return -EOPNOTSUPP;
14046 }
14047 
14048 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14049 {
14050 	struct tg3 *tp = netdev_priv(dev);
14051 
14052 	memcpy(ec, &tp->coal, sizeof(*ec));
14053 	return 0;
14054 }
14055 
14056 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14057 {
14058 	struct tg3 *tp = netdev_priv(dev);
14059 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14060 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14061 
14062 	if (!tg3_flag(tp, 5705_PLUS)) {
14063 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14064 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14065 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14066 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14067 	}
14068 
14069 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14070 	    (!ec->rx_coalesce_usecs) ||
14071 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14072 	    (!ec->tx_coalesce_usecs) ||
14073 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14074 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14075 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14076 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14077 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14078 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14079 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14080 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14081 		return -EINVAL;
14082 
14083 	/* Only copy relevant parameters, ignore all others. */
14084 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14085 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14086 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14087 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14088 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14089 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14090 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14091 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14092 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14093 
14094 	if (netif_running(dev)) {
14095 		tg3_full_lock(tp, 0);
14096 		__tg3_set_coalesce(tp, &tp->coal);
14097 		tg3_full_unlock(tp);
14098 	}
14099 	return 0;
14100 }
14101 
14102 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14103 {
14104 	struct tg3 *tp = netdev_priv(dev);
14105 
14106 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14107 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14108 		return -EOPNOTSUPP;
14109 	}
14110 
14111 	if (edata->advertised != tp->eee.advertised) {
14112 		netdev_warn(tp->dev,
14113 			    "Direct manipulation of EEE advertisement is not supported\n");
14114 		return -EINVAL;
14115 	}
14116 
14117 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14118 		netdev_warn(tp->dev,
14119 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14120 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14121 		return -EINVAL;
14122 	}
14123 
14124 	tp->eee = *edata;
14125 
14126 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14127 	tg3_warn_mgmt_link_flap(tp);
14128 
14129 	if (netif_running(tp->dev)) {
14130 		tg3_full_lock(tp, 0);
14131 		tg3_setup_eee(tp);
14132 		tg3_phy_reset(tp);
14133 		tg3_full_unlock(tp);
14134 	}
14135 
14136 	return 0;
14137 }
14138 
14139 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14140 {
14141 	struct tg3 *tp = netdev_priv(dev);
14142 
14143 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14144 		netdev_warn(tp->dev,
14145 			    "Board does not support EEE!\n");
14146 		return -EOPNOTSUPP;
14147 	}
14148 
14149 	*edata = tp->eee;
14150 	return 0;
14151 }
14152 
14153 static const struct ethtool_ops tg3_ethtool_ops = {
14154 	.get_drvinfo		= tg3_get_drvinfo,
14155 	.get_regs_len		= tg3_get_regs_len,
14156 	.get_regs		= tg3_get_regs,
14157 	.get_wol		= tg3_get_wol,
14158 	.set_wol		= tg3_set_wol,
14159 	.get_msglevel		= tg3_get_msglevel,
14160 	.set_msglevel		= tg3_set_msglevel,
14161 	.nway_reset		= tg3_nway_reset,
14162 	.get_link		= ethtool_op_get_link,
14163 	.get_eeprom_len		= tg3_get_eeprom_len,
14164 	.get_eeprom		= tg3_get_eeprom,
14165 	.set_eeprom		= tg3_set_eeprom,
14166 	.get_ringparam		= tg3_get_ringparam,
14167 	.set_ringparam		= tg3_set_ringparam,
14168 	.get_pauseparam		= tg3_get_pauseparam,
14169 	.set_pauseparam		= tg3_set_pauseparam,
14170 	.self_test		= tg3_self_test,
14171 	.get_strings		= tg3_get_strings,
14172 	.set_phys_id		= tg3_set_phys_id,
14173 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14174 	.get_coalesce		= tg3_get_coalesce,
14175 	.set_coalesce		= tg3_set_coalesce,
14176 	.get_sset_count		= tg3_get_sset_count,
14177 	.get_rxnfc		= tg3_get_rxnfc,
14178 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14179 	.get_rxfh		= tg3_get_rxfh,
14180 	.set_rxfh		= tg3_set_rxfh,
14181 	.get_channels		= tg3_get_channels,
14182 	.set_channels		= tg3_set_channels,
14183 	.get_ts_info		= tg3_get_ts_info,
14184 	.get_eee		= tg3_get_eee,
14185 	.set_eee		= tg3_set_eee,
14186 	.get_link_ksettings	= tg3_get_link_ksettings,
14187 	.set_link_ksettings	= tg3_set_link_ksettings,
14188 };
14189 
14190 static void tg3_get_stats64(struct net_device *dev,
14191 			    struct rtnl_link_stats64 *stats)
14192 {
14193 	struct tg3 *tp = netdev_priv(dev);
14194 
14195 	spin_lock_bh(&tp->lock);
14196 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14197 		*stats = tp->net_stats_prev;
14198 		spin_unlock_bh(&tp->lock);
14199 		return;
14200 	}
14201 
14202 	tg3_get_nstats(tp, stats);
14203 	spin_unlock_bh(&tp->lock);
14204 }
14205 
14206 static void tg3_set_rx_mode(struct net_device *dev)
14207 {
14208 	struct tg3 *tp = netdev_priv(dev);
14209 
14210 	if (!netif_running(dev))
14211 		return;
14212 
14213 	tg3_full_lock(tp, 0);
14214 	__tg3_set_rx_mode(dev);
14215 	tg3_full_unlock(tp);
14216 }
14217 
14218 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14219 			       int new_mtu)
14220 {
14221 	dev->mtu = new_mtu;
14222 
14223 	if (new_mtu > ETH_DATA_LEN) {
14224 		if (tg3_flag(tp, 5780_CLASS)) {
14225 			netdev_update_features(dev);
14226 			tg3_flag_clear(tp, TSO_CAPABLE);
14227 		} else {
14228 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14229 		}
14230 	} else {
14231 		if (tg3_flag(tp, 5780_CLASS)) {
14232 			tg3_flag_set(tp, TSO_CAPABLE);
14233 			netdev_update_features(dev);
14234 		}
14235 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14236 	}
14237 }
14238 
14239 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14240 {
14241 	struct tg3 *tp = netdev_priv(dev);
14242 	int err;
14243 	bool reset_phy = false;
14244 
14245 	if (!netif_running(dev)) {
14246 		/* We'll just catch it later when the
14247 		 * device is up'd.
14248 		 */
14249 		tg3_set_mtu(dev, tp, new_mtu);
14250 		return 0;
14251 	}
14252 
14253 	tg3_phy_stop(tp);
14254 
14255 	tg3_netif_stop(tp);
14256 
14257 	tg3_set_mtu(dev, tp, new_mtu);
14258 
14259 	tg3_full_lock(tp, 1);
14260 
14261 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14262 
14263 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14264 	 * breaks all requests to 256 bytes.
14265 	 */
14266 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14267 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14268 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14269 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14270 		reset_phy = true;
14271 
14272 	err = tg3_restart_hw(tp, reset_phy);
14273 
14274 	if (!err)
14275 		tg3_netif_start(tp);
14276 
14277 	tg3_full_unlock(tp);
14278 
14279 	if (!err)
14280 		tg3_phy_start(tp);
14281 
14282 	return err;
14283 }
14284 
14285 static const struct net_device_ops tg3_netdev_ops = {
14286 	.ndo_open		= tg3_open,
14287 	.ndo_stop		= tg3_close,
14288 	.ndo_start_xmit		= tg3_start_xmit,
14289 	.ndo_get_stats64	= tg3_get_stats64,
14290 	.ndo_validate_addr	= eth_validate_addr,
14291 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14292 	.ndo_set_mac_address	= tg3_set_mac_addr,
14293 	.ndo_do_ioctl		= tg3_ioctl,
14294 	.ndo_tx_timeout		= tg3_tx_timeout,
14295 	.ndo_change_mtu		= tg3_change_mtu,
14296 	.ndo_fix_features	= tg3_fix_features,
14297 	.ndo_set_features	= tg3_set_features,
14298 #ifdef CONFIG_NET_POLL_CONTROLLER
14299 	.ndo_poll_controller	= tg3_poll_controller,
14300 #endif
14301 };
14302 
14303 static void tg3_get_eeprom_size(struct tg3 *tp)
14304 {
14305 	u32 cursize, val, magic;
14306 
14307 	tp->nvram_size = EEPROM_CHIP_SIZE;
14308 
14309 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14310 		return;
14311 
14312 	if ((magic != TG3_EEPROM_MAGIC) &&
14313 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14314 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14315 		return;
14316 
14317 	/*
14318 	 * Size the chip by reading offsets at increasing powers of two.
14319 	 * When we encounter our validation signature, we know the addressing
14320 	 * has wrapped around, and thus have our chip size.
14321 	 */
14322 	cursize = 0x10;
14323 
14324 	while (cursize < tp->nvram_size) {
14325 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14326 			return;
14327 
14328 		if (val == magic)
14329 			break;
14330 
14331 		cursize <<= 1;
14332 	}
14333 
14334 	tp->nvram_size = cursize;
14335 }
14336 
14337 static void tg3_get_nvram_size(struct tg3 *tp)
14338 {
14339 	u32 val;
14340 
14341 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14342 		return;
14343 
14344 	/* Selfboot format */
14345 	if (val != TG3_EEPROM_MAGIC) {
14346 		tg3_get_eeprom_size(tp);
14347 		return;
14348 	}
14349 
14350 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14351 		if (val != 0) {
14352 			/* This is confusing.  We want to operate on the
14353 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14354 			 * call will read from NVRAM and byteswap the data
14355 			 * according to the byteswapping settings for all
14356 			 * other register accesses.  This ensures the data we
14357 			 * want will always reside in the lower 16-bits.
14358 			 * However, the data in NVRAM is in LE format, which
14359 			 * means the data from the NVRAM read will always be
14360 			 * opposite the endianness of the CPU.  The 16-bit
14361 			 * byteswap then brings the data to CPU endianness.
14362 			 */
14363 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14364 			return;
14365 		}
14366 	}
14367 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14368 }
14369 
14370 static void tg3_get_nvram_info(struct tg3 *tp)
14371 {
14372 	u32 nvcfg1;
14373 
14374 	nvcfg1 = tr32(NVRAM_CFG1);
14375 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14376 		tg3_flag_set(tp, FLASH);
14377 	} else {
14378 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14379 		tw32(NVRAM_CFG1, nvcfg1);
14380 	}
14381 
14382 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14383 	    tg3_flag(tp, 5780_CLASS)) {
14384 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14385 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14386 			tp->nvram_jedecnum = JEDEC_ATMEL;
14387 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14388 			tg3_flag_set(tp, NVRAM_BUFFERED);
14389 			break;
14390 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14391 			tp->nvram_jedecnum = JEDEC_ATMEL;
14392 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14393 			break;
14394 		case FLASH_VENDOR_ATMEL_EEPROM:
14395 			tp->nvram_jedecnum = JEDEC_ATMEL;
14396 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14397 			tg3_flag_set(tp, NVRAM_BUFFERED);
14398 			break;
14399 		case FLASH_VENDOR_ST:
14400 			tp->nvram_jedecnum = JEDEC_ST;
14401 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14402 			tg3_flag_set(tp, NVRAM_BUFFERED);
14403 			break;
14404 		case FLASH_VENDOR_SAIFUN:
14405 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14406 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14407 			break;
14408 		case FLASH_VENDOR_SST_SMALL:
14409 		case FLASH_VENDOR_SST_LARGE:
14410 			tp->nvram_jedecnum = JEDEC_SST;
14411 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14412 			break;
14413 		}
14414 	} else {
14415 		tp->nvram_jedecnum = JEDEC_ATMEL;
14416 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14417 		tg3_flag_set(tp, NVRAM_BUFFERED);
14418 	}
14419 }
14420 
14421 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14422 {
14423 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14424 	case FLASH_5752PAGE_SIZE_256:
14425 		tp->nvram_pagesize = 256;
14426 		break;
14427 	case FLASH_5752PAGE_SIZE_512:
14428 		tp->nvram_pagesize = 512;
14429 		break;
14430 	case FLASH_5752PAGE_SIZE_1K:
14431 		tp->nvram_pagesize = 1024;
14432 		break;
14433 	case FLASH_5752PAGE_SIZE_2K:
14434 		tp->nvram_pagesize = 2048;
14435 		break;
14436 	case FLASH_5752PAGE_SIZE_4K:
14437 		tp->nvram_pagesize = 4096;
14438 		break;
14439 	case FLASH_5752PAGE_SIZE_264:
14440 		tp->nvram_pagesize = 264;
14441 		break;
14442 	case FLASH_5752PAGE_SIZE_528:
14443 		tp->nvram_pagesize = 528;
14444 		break;
14445 	}
14446 }
14447 
14448 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14449 {
14450 	u32 nvcfg1;
14451 
14452 	nvcfg1 = tr32(NVRAM_CFG1);
14453 
14454 	/* NVRAM protection for TPM */
14455 	if (nvcfg1 & (1 << 27))
14456 		tg3_flag_set(tp, PROTECTED_NVRAM);
14457 
14458 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14459 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14460 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14461 		tp->nvram_jedecnum = JEDEC_ATMEL;
14462 		tg3_flag_set(tp, NVRAM_BUFFERED);
14463 		break;
14464 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14465 		tp->nvram_jedecnum = JEDEC_ATMEL;
14466 		tg3_flag_set(tp, NVRAM_BUFFERED);
14467 		tg3_flag_set(tp, FLASH);
14468 		break;
14469 	case FLASH_5752VENDOR_ST_M45PE10:
14470 	case FLASH_5752VENDOR_ST_M45PE20:
14471 	case FLASH_5752VENDOR_ST_M45PE40:
14472 		tp->nvram_jedecnum = JEDEC_ST;
14473 		tg3_flag_set(tp, NVRAM_BUFFERED);
14474 		tg3_flag_set(tp, FLASH);
14475 		break;
14476 	}
14477 
14478 	if (tg3_flag(tp, FLASH)) {
14479 		tg3_nvram_get_pagesize(tp, nvcfg1);
14480 	} else {
14481 		/* For eeprom, set pagesize to maximum eeprom size */
14482 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14483 
14484 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14485 		tw32(NVRAM_CFG1, nvcfg1);
14486 	}
14487 }
14488 
14489 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14490 {
14491 	u32 nvcfg1, protect = 0;
14492 
14493 	nvcfg1 = tr32(NVRAM_CFG1);
14494 
14495 	/* NVRAM protection for TPM */
14496 	if (nvcfg1 & (1 << 27)) {
14497 		tg3_flag_set(tp, PROTECTED_NVRAM);
14498 		protect = 1;
14499 	}
14500 
14501 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14502 	switch (nvcfg1) {
14503 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14504 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14505 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14506 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14507 		tp->nvram_jedecnum = JEDEC_ATMEL;
14508 		tg3_flag_set(tp, NVRAM_BUFFERED);
14509 		tg3_flag_set(tp, FLASH);
14510 		tp->nvram_pagesize = 264;
14511 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14512 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14513 			tp->nvram_size = (protect ? 0x3e200 :
14514 					  TG3_NVRAM_SIZE_512KB);
14515 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14516 			tp->nvram_size = (protect ? 0x1f200 :
14517 					  TG3_NVRAM_SIZE_256KB);
14518 		else
14519 			tp->nvram_size = (protect ? 0x1f200 :
14520 					  TG3_NVRAM_SIZE_128KB);
14521 		break;
14522 	case FLASH_5752VENDOR_ST_M45PE10:
14523 	case FLASH_5752VENDOR_ST_M45PE20:
14524 	case FLASH_5752VENDOR_ST_M45PE40:
14525 		tp->nvram_jedecnum = JEDEC_ST;
14526 		tg3_flag_set(tp, NVRAM_BUFFERED);
14527 		tg3_flag_set(tp, FLASH);
14528 		tp->nvram_pagesize = 256;
14529 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14530 			tp->nvram_size = (protect ?
14531 					  TG3_NVRAM_SIZE_64KB :
14532 					  TG3_NVRAM_SIZE_128KB);
14533 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14534 			tp->nvram_size = (protect ?
14535 					  TG3_NVRAM_SIZE_64KB :
14536 					  TG3_NVRAM_SIZE_256KB);
14537 		else
14538 			tp->nvram_size = (protect ?
14539 					  TG3_NVRAM_SIZE_128KB :
14540 					  TG3_NVRAM_SIZE_512KB);
14541 		break;
14542 	}
14543 }
14544 
14545 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14546 {
14547 	u32 nvcfg1;
14548 
14549 	nvcfg1 = tr32(NVRAM_CFG1);
14550 
14551 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14552 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14553 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14554 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14555 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14556 		tp->nvram_jedecnum = JEDEC_ATMEL;
14557 		tg3_flag_set(tp, NVRAM_BUFFERED);
14558 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14559 
14560 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14561 		tw32(NVRAM_CFG1, nvcfg1);
14562 		break;
14563 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14564 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14565 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14566 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14567 		tp->nvram_jedecnum = JEDEC_ATMEL;
14568 		tg3_flag_set(tp, NVRAM_BUFFERED);
14569 		tg3_flag_set(tp, FLASH);
14570 		tp->nvram_pagesize = 264;
14571 		break;
14572 	case FLASH_5752VENDOR_ST_M45PE10:
14573 	case FLASH_5752VENDOR_ST_M45PE20:
14574 	case FLASH_5752VENDOR_ST_M45PE40:
14575 		tp->nvram_jedecnum = JEDEC_ST;
14576 		tg3_flag_set(tp, NVRAM_BUFFERED);
14577 		tg3_flag_set(tp, FLASH);
14578 		tp->nvram_pagesize = 256;
14579 		break;
14580 	}
14581 }
14582 
14583 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14584 {
14585 	u32 nvcfg1, protect = 0;
14586 
14587 	nvcfg1 = tr32(NVRAM_CFG1);
14588 
14589 	/* NVRAM protection for TPM */
14590 	if (nvcfg1 & (1 << 27)) {
14591 		tg3_flag_set(tp, PROTECTED_NVRAM);
14592 		protect = 1;
14593 	}
14594 
14595 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14596 	switch (nvcfg1) {
14597 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14598 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14599 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14600 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14601 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14602 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14603 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14604 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14605 		tp->nvram_jedecnum = JEDEC_ATMEL;
14606 		tg3_flag_set(tp, NVRAM_BUFFERED);
14607 		tg3_flag_set(tp, FLASH);
14608 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14609 		tp->nvram_pagesize = 256;
14610 		break;
14611 	case FLASH_5761VENDOR_ST_A_M45PE20:
14612 	case FLASH_5761VENDOR_ST_A_M45PE40:
14613 	case FLASH_5761VENDOR_ST_A_M45PE80:
14614 	case FLASH_5761VENDOR_ST_A_M45PE16:
14615 	case FLASH_5761VENDOR_ST_M_M45PE20:
14616 	case FLASH_5761VENDOR_ST_M_M45PE40:
14617 	case FLASH_5761VENDOR_ST_M_M45PE80:
14618 	case FLASH_5761VENDOR_ST_M_M45PE16:
14619 		tp->nvram_jedecnum = JEDEC_ST;
14620 		tg3_flag_set(tp, NVRAM_BUFFERED);
14621 		tg3_flag_set(tp, FLASH);
14622 		tp->nvram_pagesize = 256;
14623 		break;
14624 	}
14625 
14626 	if (protect) {
14627 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14628 	} else {
14629 		switch (nvcfg1) {
14630 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14631 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14632 		case FLASH_5761VENDOR_ST_A_M45PE16:
14633 		case FLASH_5761VENDOR_ST_M_M45PE16:
14634 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14635 			break;
14636 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14637 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14638 		case FLASH_5761VENDOR_ST_A_M45PE80:
14639 		case FLASH_5761VENDOR_ST_M_M45PE80:
14640 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14641 			break;
14642 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14643 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14644 		case FLASH_5761VENDOR_ST_A_M45PE40:
14645 		case FLASH_5761VENDOR_ST_M_M45PE40:
14646 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14647 			break;
14648 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14649 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14650 		case FLASH_5761VENDOR_ST_A_M45PE20:
14651 		case FLASH_5761VENDOR_ST_M_M45PE20:
14652 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14653 			break;
14654 		}
14655 	}
14656 }
14657 
14658 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14659 {
14660 	tp->nvram_jedecnum = JEDEC_ATMEL;
14661 	tg3_flag_set(tp, NVRAM_BUFFERED);
14662 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14663 }
14664 
14665 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14666 {
14667 	u32 nvcfg1;
14668 
14669 	nvcfg1 = tr32(NVRAM_CFG1);
14670 
14671 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14672 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14673 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14674 		tp->nvram_jedecnum = JEDEC_ATMEL;
14675 		tg3_flag_set(tp, NVRAM_BUFFERED);
14676 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14677 
14678 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14679 		tw32(NVRAM_CFG1, nvcfg1);
14680 		return;
14681 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14682 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14683 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14684 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14685 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14686 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14687 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14688 		tp->nvram_jedecnum = JEDEC_ATMEL;
14689 		tg3_flag_set(tp, NVRAM_BUFFERED);
14690 		tg3_flag_set(tp, FLASH);
14691 
14692 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14693 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14694 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14695 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14696 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14697 			break;
14698 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14699 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14700 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14701 			break;
14702 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14703 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14704 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14705 			break;
14706 		}
14707 		break;
14708 	case FLASH_5752VENDOR_ST_M45PE10:
14709 	case FLASH_5752VENDOR_ST_M45PE20:
14710 	case FLASH_5752VENDOR_ST_M45PE40:
14711 		tp->nvram_jedecnum = JEDEC_ST;
14712 		tg3_flag_set(tp, NVRAM_BUFFERED);
14713 		tg3_flag_set(tp, FLASH);
14714 
14715 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14716 		case FLASH_5752VENDOR_ST_M45PE10:
14717 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14718 			break;
14719 		case FLASH_5752VENDOR_ST_M45PE20:
14720 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14721 			break;
14722 		case FLASH_5752VENDOR_ST_M45PE40:
14723 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14724 			break;
14725 		}
14726 		break;
14727 	default:
14728 		tg3_flag_set(tp, NO_NVRAM);
14729 		return;
14730 	}
14731 
14732 	tg3_nvram_get_pagesize(tp, nvcfg1);
14733 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14734 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14735 }
14736 
14737 
14738 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14739 {
14740 	u32 nvcfg1;
14741 
14742 	nvcfg1 = tr32(NVRAM_CFG1);
14743 
14744 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14745 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14746 	case FLASH_5717VENDOR_MICRO_EEPROM:
14747 		tp->nvram_jedecnum = JEDEC_ATMEL;
14748 		tg3_flag_set(tp, NVRAM_BUFFERED);
14749 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14750 
14751 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14752 		tw32(NVRAM_CFG1, nvcfg1);
14753 		return;
14754 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14755 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14756 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14757 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14758 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14759 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14760 	case FLASH_5717VENDOR_ATMEL_45USPT:
14761 		tp->nvram_jedecnum = JEDEC_ATMEL;
14762 		tg3_flag_set(tp, NVRAM_BUFFERED);
14763 		tg3_flag_set(tp, FLASH);
14764 
14765 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14766 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14767 			/* Detect size with tg3_nvram_get_size() */
14768 			break;
14769 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14770 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14771 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14772 			break;
14773 		default:
14774 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14775 			break;
14776 		}
14777 		break;
14778 	case FLASH_5717VENDOR_ST_M_M25PE10:
14779 	case FLASH_5717VENDOR_ST_A_M25PE10:
14780 	case FLASH_5717VENDOR_ST_M_M45PE10:
14781 	case FLASH_5717VENDOR_ST_A_M45PE10:
14782 	case FLASH_5717VENDOR_ST_M_M25PE20:
14783 	case FLASH_5717VENDOR_ST_A_M25PE20:
14784 	case FLASH_5717VENDOR_ST_M_M45PE20:
14785 	case FLASH_5717VENDOR_ST_A_M45PE20:
14786 	case FLASH_5717VENDOR_ST_25USPT:
14787 	case FLASH_5717VENDOR_ST_45USPT:
14788 		tp->nvram_jedecnum = JEDEC_ST;
14789 		tg3_flag_set(tp, NVRAM_BUFFERED);
14790 		tg3_flag_set(tp, FLASH);
14791 
14792 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14793 		case FLASH_5717VENDOR_ST_M_M25PE20:
14794 		case FLASH_5717VENDOR_ST_M_M45PE20:
14795 			/* Detect size with tg3_nvram_get_size() */
14796 			break;
14797 		case FLASH_5717VENDOR_ST_A_M25PE20:
14798 		case FLASH_5717VENDOR_ST_A_M45PE20:
14799 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14800 			break;
14801 		default:
14802 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14803 			break;
14804 		}
14805 		break;
14806 	default:
14807 		tg3_flag_set(tp, NO_NVRAM);
14808 		return;
14809 	}
14810 
14811 	tg3_nvram_get_pagesize(tp, nvcfg1);
14812 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14813 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14814 }
14815 
14816 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14817 {
14818 	u32 nvcfg1, nvmpinstrp, nv_status;
14819 
14820 	nvcfg1 = tr32(NVRAM_CFG1);
14821 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14822 
14823 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14824 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14825 			tg3_flag_set(tp, NO_NVRAM);
14826 			return;
14827 		}
14828 
14829 		switch (nvmpinstrp) {
14830 		case FLASH_5762_MX25L_100:
14831 		case FLASH_5762_MX25L_200:
14832 		case FLASH_5762_MX25L_400:
14833 		case FLASH_5762_MX25L_800:
14834 		case FLASH_5762_MX25L_160_320:
14835 			tp->nvram_pagesize = 4096;
14836 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14837 			tg3_flag_set(tp, NVRAM_BUFFERED);
14838 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14839 			tg3_flag_set(tp, FLASH);
14840 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14841 			tp->nvram_size =
14842 				(1 << (nv_status >> AUTOSENSE_DEVID &
14843 						AUTOSENSE_DEVID_MASK)
14844 					<< AUTOSENSE_SIZE_IN_MB);
14845 			return;
14846 
14847 		case FLASH_5762_EEPROM_HD:
14848 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14849 			break;
14850 		case FLASH_5762_EEPROM_LD:
14851 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14852 			break;
14853 		case FLASH_5720VENDOR_M_ST_M45PE20:
14854 			/* This pinstrap supports multiple sizes, so force it
14855 			 * to read the actual size from location 0xf0.
14856 			 */
14857 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14858 			break;
14859 		}
14860 	}
14861 
14862 	switch (nvmpinstrp) {
14863 	case FLASH_5720_EEPROM_HD:
14864 	case FLASH_5720_EEPROM_LD:
14865 		tp->nvram_jedecnum = JEDEC_ATMEL;
14866 		tg3_flag_set(tp, NVRAM_BUFFERED);
14867 
14868 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14869 		tw32(NVRAM_CFG1, nvcfg1);
14870 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14871 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14872 		else
14873 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14874 		return;
14875 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14876 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14877 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14878 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14879 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14880 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14881 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14882 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14883 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14884 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14885 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14886 	case FLASH_5720VENDOR_ATMEL_45USPT:
14887 		tp->nvram_jedecnum = JEDEC_ATMEL;
14888 		tg3_flag_set(tp, NVRAM_BUFFERED);
14889 		tg3_flag_set(tp, FLASH);
14890 
14891 		switch (nvmpinstrp) {
14892 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14893 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14894 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14895 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14896 			break;
14897 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14898 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14899 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14900 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14901 			break;
14902 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14903 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14904 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14905 			break;
14906 		default:
14907 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14908 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14909 			break;
14910 		}
14911 		break;
14912 	case FLASH_5720VENDOR_M_ST_M25PE10:
14913 	case FLASH_5720VENDOR_M_ST_M45PE10:
14914 	case FLASH_5720VENDOR_A_ST_M25PE10:
14915 	case FLASH_5720VENDOR_A_ST_M45PE10:
14916 	case FLASH_5720VENDOR_M_ST_M25PE20:
14917 	case FLASH_5720VENDOR_M_ST_M45PE20:
14918 	case FLASH_5720VENDOR_A_ST_M25PE20:
14919 	case FLASH_5720VENDOR_A_ST_M45PE20:
14920 	case FLASH_5720VENDOR_M_ST_M25PE40:
14921 	case FLASH_5720VENDOR_M_ST_M45PE40:
14922 	case FLASH_5720VENDOR_A_ST_M25PE40:
14923 	case FLASH_5720VENDOR_A_ST_M45PE40:
14924 	case FLASH_5720VENDOR_M_ST_M25PE80:
14925 	case FLASH_5720VENDOR_M_ST_M45PE80:
14926 	case FLASH_5720VENDOR_A_ST_M25PE80:
14927 	case FLASH_5720VENDOR_A_ST_M45PE80:
14928 	case FLASH_5720VENDOR_ST_25USPT:
14929 	case FLASH_5720VENDOR_ST_45USPT:
14930 		tp->nvram_jedecnum = JEDEC_ST;
14931 		tg3_flag_set(tp, NVRAM_BUFFERED);
14932 		tg3_flag_set(tp, FLASH);
14933 
14934 		switch (nvmpinstrp) {
14935 		case FLASH_5720VENDOR_M_ST_M25PE20:
14936 		case FLASH_5720VENDOR_M_ST_M45PE20:
14937 		case FLASH_5720VENDOR_A_ST_M25PE20:
14938 		case FLASH_5720VENDOR_A_ST_M45PE20:
14939 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14940 			break;
14941 		case FLASH_5720VENDOR_M_ST_M25PE40:
14942 		case FLASH_5720VENDOR_M_ST_M45PE40:
14943 		case FLASH_5720VENDOR_A_ST_M25PE40:
14944 		case FLASH_5720VENDOR_A_ST_M45PE40:
14945 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14946 			break;
14947 		case FLASH_5720VENDOR_M_ST_M25PE80:
14948 		case FLASH_5720VENDOR_M_ST_M45PE80:
14949 		case FLASH_5720VENDOR_A_ST_M25PE80:
14950 		case FLASH_5720VENDOR_A_ST_M45PE80:
14951 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14952 			break;
14953 		default:
14954 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14955 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14956 			break;
14957 		}
14958 		break;
14959 	default:
14960 		tg3_flag_set(tp, NO_NVRAM);
14961 		return;
14962 	}
14963 
14964 	tg3_nvram_get_pagesize(tp, nvcfg1);
14965 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14966 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14967 
14968 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14969 		u32 val;
14970 
14971 		if (tg3_nvram_read(tp, 0, &val))
14972 			return;
14973 
14974 		if (val != TG3_EEPROM_MAGIC &&
14975 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14976 			tg3_flag_set(tp, NO_NVRAM);
14977 	}
14978 }
14979 
14980 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14981 static void tg3_nvram_init(struct tg3 *tp)
14982 {
14983 	if (tg3_flag(tp, IS_SSB_CORE)) {
14984 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14985 		tg3_flag_clear(tp, NVRAM);
14986 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14987 		tg3_flag_set(tp, NO_NVRAM);
14988 		return;
14989 	}
14990 
14991 	tw32_f(GRC_EEPROM_ADDR,
14992 	     (EEPROM_ADDR_FSM_RESET |
14993 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14994 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14995 
14996 	msleep(1);
14997 
14998 	/* Enable seeprom accesses. */
14999 	tw32_f(GRC_LOCAL_CTRL,
15000 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15001 	udelay(100);
15002 
15003 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15004 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15005 		tg3_flag_set(tp, NVRAM);
15006 
15007 		if (tg3_nvram_lock(tp)) {
15008 			netdev_warn(tp->dev,
15009 				    "Cannot get nvram lock, %s failed\n",
15010 				    __func__);
15011 			return;
15012 		}
15013 		tg3_enable_nvram_access(tp);
15014 
15015 		tp->nvram_size = 0;
15016 
15017 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15018 			tg3_get_5752_nvram_info(tp);
15019 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15020 			tg3_get_5755_nvram_info(tp);
15021 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15022 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15023 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15024 			tg3_get_5787_nvram_info(tp);
15025 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15026 			tg3_get_5761_nvram_info(tp);
15027 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15028 			tg3_get_5906_nvram_info(tp);
15029 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15030 			 tg3_flag(tp, 57765_CLASS))
15031 			tg3_get_57780_nvram_info(tp);
15032 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15033 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15034 			tg3_get_5717_nvram_info(tp);
15035 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15036 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15037 			tg3_get_5720_nvram_info(tp);
15038 		else
15039 			tg3_get_nvram_info(tp);
15040 
15041 		if (tp->nvram_size == 0)
15042 			tg3_get_nvram_size(tp);
15043 
15044 		tg3_disable_nvram_access(tp);
15045 		tg3_nvram_unlock(tp);
15046 
15047 	} else {
15048 		tg3_flag_clear(tp, NVRAM);
15049 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15050 
15051 		tg3_get_eeprom_size(tp);
15052 	}
15053 }
15054 
15055 struct subsys_tbl_ent {
15056 	u16 subsys_vendor, subsys_devid;
15057 	u32 phy_id;
15058 };
15059 
15060 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15061 	/* Broadcom boards. */
15062 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15063 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15064 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15065 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15066 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15067 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15068 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15069 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15070 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15071 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15072 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15073 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15074 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15075 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15076 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15077 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15078 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15079 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15080 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15081 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15082 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15083 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15084 
15085 	/* 3com boards. */
15086 	{ TG3PCI_SUBVENDOR_ID_3COM,
15087 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15088 	{ TG3PCI_SUBVENDOR_ID_3COM,
15089 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15090 	{ TG3PCI_SUBVENDOR_ID_3COM,
15091 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15092 	{ TG3PCI_SUBVENDOR_ID_3COM,
15093 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15094 	{ TG3PCI_SUBVENDOR_ID_3COM,
15095 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15096 
15097 	/* DELL boards. */
15098 	{ TG3PCI_SUBVENDOR_ID_DELL,
15099 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15100 	{ TG3PCI_SUBVENDOR_ID_DELL,
15101 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15102 	{ TG3PCI_SUBVENDOR_ID_DELL,
15103 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15104 	{ TG3PCI_SUBVENDOR_ID_DELL,
15105 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15106 
15107 	/* Compaq boards. */
15108 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15109 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15110 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15111 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15112 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15113 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15114 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15115 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15116 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15117 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15118 
15119 	/* IBM boards. */
15120 	{ TG3PCI_SUBVENDOR_ID_IBM,
15121 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15122 };
15123 
15124 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15125 {
15126 	int i;
15127 
15128 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15129 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15130 		     tp->pdev->subsystem_vendor) &&
15131 		    (subsys_id_to_phy_id[i].subsys_devid ==
15132 		     tp->pdev->subsystem_device))
15133 			return &subsys_id_to_phy_id[i];
15134 	}
15135 	return NULL;
15136 }
15137 
15138 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15139 {
15140 	u32 val;
15141 
15142 	tp->phy_id = TG3_PHY_ID_INVALID;
15143 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15144 
15145 	/* Assume an onboard device and WOL capable by default.  */
15146 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15147 	tg3_flag_set(tp, WOL_CAP);
15148 
15149 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15150 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15151 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15152 			tg3_flag_set(tp, IS_NIC);
15153 		}
15154 		val = tr32(VCPU_CFGSHDW);
15155 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15156 			tg3_flag_set(tp, ASPM_WORKAROUND);
15157 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15158 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15159 			tg3_flag_set(tp, WOL_ENABLE);
15160 			device_set_wakeup_enable(&tp->pdev->dev, true);
15161 		}
15162 		goto done;
15163 	}
15164 
15165 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15166 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15167 		u32 nic_cfg, led_cfg;
15168 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15169 		u32 nic_phy_id, ver, eeprom_phy_id;
15170 		int eeprom_phy_serdes = 0;
15171 
15172 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15173 		tp->nic_sram_data_cfg = nic_cfg;
15174 
15175 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15176 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15177 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15178 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15179 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15180 		    (ver > 0) && (ver < 0x100))
15181 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15182 
15183 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15184 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15185 
15186 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15187 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15188 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15189 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15190 
15191 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15192 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15193 			eeprom_phy_serdes = 1;
15194 
15195 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15196 		if (nic_phy_id != 0) {
15197 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15198 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15199 
15200 			eeprom_phy_id  = (id1 >> 16) << 10;
15201 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15202 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15203 		} else
15204 			eeprom_phy_id = 0;
15205 
15206 		tp->phy_id = eeprom_phy_id;
15207 		if (eeprom_phy_serdes) {
15208 			if (!tg3_flag(tp, 5705_PLUS))
15209 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15210 			else
15211 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15212 		}
15213 
15214 		if (tg3_flag(tp, 5750_PLUS))
15215 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15216 				    SHASTA_EXT_LED_MODE_MASK);
15217 		else
15218 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15219 
15220 		switch (led_cfg) {
15221 		default:
15222 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15223 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15224 			break;
15225 
15226 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15227 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15228 			break;
15229 
15230 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15231 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15232 
15233 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15234 			 * read on some older 5700/5701 bootcode.
15235 			 */
15236 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15237 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15238 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15239 
15240 			break;
15241 
15242 		case SHASTA_EXT_LED_SHARED:
15243 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15244 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15245 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15246 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15247 						 LED_CTRL_MODE_PHY_2);
15248 
15249 			if (tg3_flag(tp, 5717_PLUS) ||
15250 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15251 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15252 						LED_CTRL_BLINK_RATE_MASK;
15253 
15254 			break;
15255 
15256 		case SHASTA_EXT_LED_MAC:
15257 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15258 			break;
15259 
15260 		case SHASTA_EXT_LED_COMBO:
15261 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15262 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15263 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15264 						 LED_CTRL_MODE_PHY_2);
15265 			break;
15266 
15267 		}
15268 
15269 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15270 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15271 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15272 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15273 
15274 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15275 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15276 
15277 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15278 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15279 			if ((tp->pdev->subsystem_vendor ==
15280 			     PCI_VENDOR_ID_ARIMA) &&
15281 			    (tp->pdev->subsystem_device == 0x205a ||
15282 			     tp->pdev->subsystem_device == 0x2063))
15283 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15284 		} else {
15285 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15286 			tg3_flag_set(tp, IS_NIC);
15287 		}
15288 
15289 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15290 			tg3_flag_set(tp, ENABLE_ASF);
15291 			if (tg3_flag(tp, 5750_PLUS))
15292 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15293 		}
15294 
15295 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15296 		    tg3_flag(tp, 5750_PLUS))
15297 			tg3_flag_set(tp, ENABLE_APE);
15298 
15299 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15300 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15301 			tg3_flag_clear(tp, WOL_CAP);
15302 
15303 		if (tg3_flag(tp, WOL_CAP) &&
15304 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15305 			tg3_flag_set(tp, WOL_ENABLE);
15306 			device_set_wakeup_enable(&tp->pdev->dev, true);
15307 		}
15308 
15309 		if (cfg2 & (1 << 17))
15310 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15311 
15312 		/* serdes signal pre-emphasis in register 0x590 set by */
15313 		/* bootcode if bit 18 is set */
15314 		if (cfg2 & (1 << 18))
15315 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15316 
15317 		if ((tg3_flag(tp, 57765_PLUS) ||
15318 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15319 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15320 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15321 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15322 
15323 		if (tg3_flag(tp, PCI_EXPRESS)) {
15324 			u32 cfg3;
15325 
15326 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15327 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15328 			    !tg3_flag(tp, 57765_PLUS) &&
15329 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15330 				tg3_flag_set(tp, ASPM_WORKAROUND);
15331 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15332 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15333 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15334 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15335 		}
15336 
15337 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15338 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15339 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15340 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15341 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15342 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15343 
15344 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15345 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15346 	}
15347 done:
15348 	if (tg3_flag(tp, WOL_CAP))
15349 		device_set_wakeup_enable(&tp->pdev->dev,
15350 					 tg3_flag(tp, WOL_ENABLE));
15351 	else
15352 		device_set_wakeup_capable(&tp->pdev->dev, false);
15353 }
15354 
15355 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15356 {
15357 	int i, err;
15358 	u32 val2, off = offset * 8;
15359 
15360 	err = tg3_nvram_lock(tp);
15361 	if (err)
15362 		return err;
15363 
15364 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15365 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15366 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15367 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15368 	udelay(10);
15369 
15370 	for (i = 0; i < 100; i++) {
15371 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15372 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15373 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15374 			break;
15375 		}
15376 		udelay(10);
15377 	}
15378 
15379 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15380 
15381 	tg3_nvram_unlock(tp);
15382 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15383 		return 0;
15384 
15385 	return -EBUSY;
15386 }
15387 
15388 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15389 {
15390 	int i;
15391 	u32 val;
15392 
15393 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15394 	tw32(OTP_CTRL, cmd);
15395 
15396 	/* Wait for up to 1 ms for command to execute. */
15397 	for (i = 0; i < 100; i++) {
15398 		val = tr32(OTP_STATUS);
15399 		if (val & OTP_STATUS_CMD_DONE)
15400 			break;
15401 		udelay(10);
15402 	}
15403 
15404 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15405 }
15406 
15407 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15408  * configuration is a 32-bit value that straddles the alignment boundary.
15409  * We do two 32-bit reads and then shift and merge the results.
15410  */
15411 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15412 {
15413 	u32 bhalf_otp, thalf_otp;
15414 
15415 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15416 
15417 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15418 		return 0;
15419 
15420 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15421 
15422 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15423 		return 0;
15424 
15425 	thalf_otp = tr32(OTP_READ_DATA);
15426 
15427 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15428 
15429 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15430 		return 0;
15431 
15432 	bhalf_otp = tr32(OTP_READ_DATA);
15433 
15434 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15435 }
15436 
15437 static void tg3_phy_init_link_config(struct tg3 *tp)
15438 {
15439 	u32 adv = ADVERTISED_Autoneg;
15440 
15441 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15442 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15443 			adv |= ADVERTISED_1000baseT_Half;
15444 		adv |= ADVERTISED_1000baseT_Full;
15445 	}
15446 
15447 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15448 		adv |= ADVERTISED_100baseT_Half |
15449 		       ADVERTISED_100baseT_Full |
15450 		       ADVERTISED_10baseT_Half |
15451 		       ADVERTISED_10baseT_Full |
15452 		       ADVERTISED_TP;
15453 	else
15454 		adv |= ADVERTISED_FIBRE;
15455 
15456 	tp->link_config.advertising = adv;
15457 	tp->link_config.speed = SPEED_UNKNOWN;
15458 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15459 	tp->link_config.autoneg = AUTONEG_ENABLE;
15460 	tp->link_config.active_speed = SPEED_UNKNOWN;
15461 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15462 
15463 	tp->old_link = -1;
15464 }
15465 
15466 static int tg3_phy_probe(struct tg3 *tp)
15467 {
15468 	u32 hw_phy_id_1, hw_phy_id_2;
15469 	u32 hw_phy_id, hw_phy_id_masked;
15470 	int err;
15471 
15472 	/* flow control autonegotiation is default behavior */
15473 	tg3_flag_set(tp, PAUSE_AUTONEG);
15474 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15475 
15476 	if (tg3_flag(tp, ENABLE_APE)) {
15477 		switch (tp->pci_fn) {
15478 		case 0:
15479 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15480 			break;
15481 		case 1:
15482 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15483 			break;
15484 		case 2:
15485 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15486 			break;
15487 		case 3:
15488 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15489 			break;
15490 		}
15491 	}
15492 
15493 	if (!tg3_flag(tp, ENABLE_ASF) &&
15494 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15495 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15496 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15497 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15498 
15499 	if (tg3_flag(tp, USE_PHYLIB))
15500 		return tg3_phy_init(tp);
15501 
15502 	/* Reading the PHY ID register can conflict with ASF
15503 	 * firmware access to the PHY hardware.
15504 	 */
15505 	err = 0;
15506 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15507 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15508 	} else {
15509 		/* Now read the physical PHY_ID from the chip and verify
15510 		 * that it is sane.  If it doesn't look good, we fall back
15511 		 * to either the hard-coded table based PHY_ID and failing
15512 		 * that the value found in the eeprom area.
15513 		 */
15514 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15515 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15516 
15517 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15518 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15519 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15520 
15521 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15522 	}
15523 
15524 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15525 		tp->phy_id = hw_phy_id;
15526 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15527 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15528 		else
15529 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15530 	} else {
15531 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15532 			/* Do nothing, phy ID already set up in
15533 			 * tg3_get_eeprom_hw_cfg().
15534 			 */
15535 		} else {
15536 			struct subsys_tbl_ent *p;
15537 
15538 			/* No eeprom signature?  Try the hardcoded
15539 			 * subsys device table.
15540 			 */
15541 			p = tg3_lookup_by_subsys(tp);
15542 			if (p) {
15543 				tp->phy_id = p->phy_id;
15544 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15545 				/* For now we saw the IDs 0xbc050cd0,
15546 				 * 0xbc050f80 and 0xbc050c30 on devices
15547 				 * connected to an BCM4785 and there are
15548 				 * probably more. Just assume that the phy is
15549 				 * supported when it is connected to a SSB core
15550 				 * for now.
15551 				 */
15552 				return -ENODEV;
15553 			}
15554 
15555 			if (!tp->phy_id ||
15556 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15557 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15558 		}
15559 	}
15560 
15561 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15562 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15563 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15564 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15565 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15566 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15567 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15568 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15569 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15570 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15571 
15572 		tp->eee.supported = SUPPORTED_100baseT_Full |
15573 				    SUPPORTED_1000baseT_Full;
15574 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15575 				     ADVERTISED_1000baseT_Full;
15576 		tp->eee.eee_enabled = 1;
15577 		tp->eee.tx_lpi_enabled = 1;
15578 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15579 	}
15580 
15581 	tg3_phy_init_link_config(tp);
15582 
15583 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15584 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15585 	    !tg3_flag(tp, ENABLE_APE) &&
15586 	    !tg3_flag(tp, ENABLE_ASF)) {
15587 		u32 bmsr, dummy;
15588 
15589 		tg3_readphy(tp, MII_BMSR, &bmsr);
15590 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15591 		    (bmsr & BMSR_LSTATUS))
15592 			goto skip_phy_reset;
15593 
15594 		err = tg3_phy_reset(tp);
15595 		if (err)
15596 			return err;
15597 
15598 		tg3_phy_set_wirespeed(tp);
15599 
15600 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15601 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15602 					    tp->link_config.flowctrl);
15603 
15604 			tg3_writephy(tp, MII_BMCR,
15605 				     BMCR_ANENABLE | BMCR_ANRESTART);
15606 		}
15607 	}
15608 
15609 skip_phy_reset:
15610 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15611 		err = tg3_init_5401phy_dsp(tp);
15612 		if (err)
15613 			return err;
15614 
15615 		err = tg3_init_5401phy_dsp(tp);
15616 	}
15617 
15618 	return err;
15619 }
15620 
15621 static void tg3_read_vpd(struct tg3 *tp)
15622 {
15623 	u8 *vpd_data;
15624 	unsigned int block_end, rosize, len;
15625 	u32 vpdlen;
15626 	int j, i = 0;
15627 
15628 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15629 	if (!vpd_data)
15630 		goto out_no_vpd;
15631 
15632 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15633 	if (i < 0)
15634 		goto out_not_found;
15635 
15636 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15637 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15638 	i += PCI_VPD_LRDT_TAG_SIZE;
15639 
15640 	if (block_end > vpdlen)
15641 		goto out_not_found;
15642 
15643 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15644 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15645 	if (j > 0) {
15646 		len = pci_vpd_info_field_size(&vpd_data[j]);
15647 
15648 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15649 		if (j + len > block_end || len != 4 ||
15650 		    memcmp(&vpd_data[j], "1028", 4))
15651 			goto partno;
15652 
15653 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15654 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15655 		if (j < 0)
15656 			goto partno;
15657 
15658 		len = pci_vpd_info_field_size(&vpd_data[j]);
15659 
15660 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15661 		if (j + len > block_end)
15662 			goto partno;
15663 
15664 		if (len >= sizeof(tp->fw_ver))
15665 			len = sizeof(tp->fw_ver) - 1;
15666 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15667 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15668 			 &vpd_data[j]);
15669 	}
15670 
15671 partno:
15672 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15673 				      PCI_VPD_RO_KEYWORD_PARTNO);
15674 	if (i < 0)
15675 		goto out_not_found;
15676 
15677 	len = pci_vpd_info_field_size(&vpd_data[i]);
15678 
15679 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15680 	if (len > TG3_BPN_SIZE ||
15681 	    (len + i) > vpdlen)
15682 		goto out_not_found;
15683 
15684 	memcpy(tp->board_part_number, &vpd_data[i], len);
15685 
15686 out_not_found:
15687 	kfree(vpd_data);
15688 	if (tp->board_part_number[0])
15689 		return;
15690 
15691 out_no_vpd:
15692 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15693 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15694 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15695 			strcpy(tp->board_part_number, "BCM5717");
15696 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15697 			strcpy(tp->board_part_number, "BCM5718");
15698 		else
15699 			goto nomatch;
15700 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15701 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15702 			strcpy(tp->board_part_number, "BCM57780");
15703 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15704 			strcpy(tp->board_part_number, "BCM57760");
15705 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15706 			strcpy(tp->board_part_number, "BCM57790");
15707 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15708 			strcpy(tp->board_part_number, "BCM57788");
15709 		else
15710 			goto nomatch;
15711 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15712 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15713 			strcpy(tp->board_part_number, "BCM57761");
15714 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15715 			strcpy(tp->board_part_number, "BCM57765");
15716 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15717 			strcpy(tp->board_part_number, "BCM57781");
15718 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15719 			strcpy(tp->board_part_number, "BCM57785");
15720 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15721 			strcpy(tp->board_part_number, "BCM57791");
15722 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15723 			strcpy(tp->board_part_number, "BCM57795");
15724 		else
15725 			goto nomatch;
15726 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15727 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15728 			strcpy(tp->board_part_number, "BCM57762");
15729 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15730 			strcpy(tp->board_part_number, "BCM57766");
15731 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15732 			strcpy(tp->board_part_number, "BCM57782");
15733 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15734 			strcpy(tp->board_part_number, "BCM57786");
15735 		else
15736 			goto nomatch;
15737 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15738 		strcpy(tp->board_part_number, "BCM95906");
15739 	} else {
15740 nomatch:
15741 		strcpy(tp->board_part_number, "none");
15742 	}
15743 }
15744 
15745 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15746 {
15747 	u32 val;
15748 
15749 	if (tg3_nvram_read(tp, offset, &val) ||
15750 	    (val & 0xfc000000) != 0x0c000000 ||
15751 	    tg3_nvram_read(tp, offset + 4, &val) ||
15752 	    val != 0)
15753 		return 0;
15754 
15755 	return 1;
15756 }
15757 
15758 static void tg3_read_bc_ver(struct tg3 *tp)
15759 {
15760 	u32 val, offset, start, ver_offset;
15761 	int i, dst_off;
15762 	bool newver = false;
15763 
15764 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15765 	    tg3_nvram_read(tp, 0x4, &start))
15766 		return;
15767 
15768 	offset = tg3_nvram_logical_addr(tp, offset);
15769 
15770 	if (tg3_nvram_read(tp, offset, &val))
15771 		return;
15772 
15773 	if ((val & 0xfc000000) == 0x0c000000) {
15774 		if (tg3_nvram_read(tp, offset + 4, &val))
15775 			return;
15776 
15777 		if (val == 0)
15778 			newver = true;
15779 	}
15780 
15781 	dst_off = strlen(tp->fw_ver);
15782 
15783 	if (newver) {
15784 		if (TG3_VER_SIZE - dst_off < 16 ||
15785 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15786 			return;
15787 
15788 		offset = offset + ver_offset - start;
15789 		for (i = 0; i < 16; i += 4) {
15790 			__be32 v;
15791 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15792 				return;
15793 
15794 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15795 		}
15796 	} else {
15797 		u32 major, minor;
15798 
15799 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15800 			return;
15801 
15802 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15803 			TG3_NVM_BCVER_MAJSFT;
15804 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15805 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15806 			 "v%d.%02d", major, minor);
15807 	}
15808 }
15809 
15810 static void tg3_read_hwsb_ver(struct tg3 *tp)
15811 {
15812 	u32 val, major, minor;
15813 
15814 	/* Use native endian representation */
15815 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15816 		return;
15817 
15818 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15819 		TG3_NVM_HWSB_CFG1_MAJSFT;
15820 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15821 		TG3_NVM_HWSB_CFG1_MINSFT;
15822 
15823 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15824 }
15825 
15826 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15827 {
15828 	u32 offset, major, minor, build;
15829 
15830 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15831 
15832 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15833 		return;
15834 
15835 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15836 	case TG3_EEPROM_SB_REVISION_0:
15837 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15838 		break;
15839 	case TG3_EEPROM_SB_REVISION_2:
15840 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15841 		break;
15842 	case TG3_EEPROM_SB_REVISION_3:
15843 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15844 		break;
15845 	case TG3_EEPROM_SB_REVISION_4:
15846 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15847 		break;
15848 	case TG3_EEPROM_SB_REVISION_5:
15849 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15850 		break;
15851 	case TG3_EEPROM_SB_REVISION_6:
15852 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15853 		break;
15854 	default:
15855 		return;
15856 	}
15857 
15858 	if (tg3_nvram_read(tp, offset, &val))
15859 		return;
15860 
15861 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15862 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15863 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15864 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15865 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15866 
15867 	if (minor > 99 || build > 26)
15868 		return;
15869 
15870 	offset = strlen(tp->fw_ver);
15871 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15872 		 " v%d.%02d", major, minor);
15873 
15874 	if (build > 0) {
15875 		offset = strlen(tp->fw_ver);
15876 		if (offset < TG3_VER_SIZE - 1)
15877 			tp->fw_ver[offset] = 'a' + build - 1;
15878 	}
15879 }
15880 
15881 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15882 {
15883 	u32 val, offset, start;
15884 	int i, vlen;
15885 
15886 	for (offset = TG3_NVM_DIR_START;
15887 	     offset < TG3_NVM_DIR_END;
15888 	     offset += TG3_NVM_DIRENT_SIZE) {
15889 		if (tg3_nvram_read(tp, offset, &val))
15890 			return;
15891 
15892 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15893 			break;
15894 	}
15895 
15896 	if (offset == TG3_NVM_DIR_END)
15897 		return;
15898 
15899 	if (!tg3_flag(tp, 5705_PLUS))
15900 		start = 0x08000000;
15901 	else if (tg3_nvram_read(tp, offset - 4, &start))
15902 		return;
15903 
15904 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15905 	    !tg3_fw_img_is_valid(tp, offset) ||
15906 	    tg3_nvram_read(tp, offset + 8, &val))
15907 		return;
15908 
15909 	offset += val - start;
15910 
15911 	vlen = strlen(tp->fw_ver);
15912 
15913 	tp->fw_ver[vlen++] = ',';
15914 	tp->fw_ver[vlen++] = ' ';
15915 
15916 	for (i = 0; i < 4; i++) {
15917 		__be32 v;
15918 		if (tg3_nvram_read_be32(tp, offset, &v))
15919 			return;
15920 
15921 		offset += sizeof(v);
15922 
15923 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15924 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15925 			break;
15926 		}
15927 
15928 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15929 		vlen += sizeof(v);
15930 	}
15931 }
15932 
15933 static void tg3_probe_ncsi(struct tg3 *tp)
15934 {
15935 	u32 apedata;
15936 
15937 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15938 	if (apedata != APE_SEG_SIG_MAGIC)
15939 		return;
15940 
15941 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15942 	if (!(apedata & APE_FW_STATUS_READY))
15943 		return;
15944 
15945 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15946 		tg3_flag_set(tp, APE_HAS_NCSI);
15947 }
15948 
15949 static void tg3_read_dash_ver(struct tg3 *tp)
15950 {
15951 	int vlen;
15952 	u32 apedata;
15953 	char *fwtype;
15954 
15955 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15956 
15957 	if (tg3_flag(tp, APE_HAS_NCSI))
15958 		fwtype = "NCSI";
15959 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15960 		fwtype = "SMASH";
15961 	else
15962 		fwtype = "DASH";
15963 
15964 	vlen = strlen(tp->fw_ver);
15965 
15966 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15967 		 fwtype,
15968 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15969 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15970 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15971 		 (apedata & APE_FW_VERSION_BLDMSK));
15972 }
15973 
15974 static void tg3_read_otp_ver(struct tg3 *tp)
15975 {
15976 	u32 val, val2;
15977 
15978 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15979 		return;
15980 
15981 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15982 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15983 	    TG3_OTP_MAGIC0_VALID(val)) {
15984 		u64 val64 = (u64) val << 32 | val2;
15985 		u32 ver = 0;
15986 		int i, vlen;
15987 
15988 		for (i = 0; i < 7; i++) {
15989 			if ((val64 & 0xff) == 0)
15990 				break;
15991 			ver = val64 & 0xff;
15992 			val64 >>= 8;
15993 		}
15994 		vlen = strlen(tp->fw_ver);
15995 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15996 	}
15997 }
15998 
15999 static void tg3_read_fw_ver(struct tg3 *tp)
16000 {
16001 	u32 val;
16002 	bool vpd_vers = false;
16003 
16004 	if (tp->fw_ver[0] != 0)
16005 		vpd_vers = true;
16006 
16007 	if (tg3_flag(tp, NO_NVRAM)) {
16008 		strcat(tp->fw_ver, "sb");
16009 		tg3_read_otp_ver(tp);
16010 		return;
16011 	}
16012 
16013 	if (tg3_nvram_read(tp, 0, &val))
16014 		return;
16015 
16016 	if (val == TG3_EEPROM_MAGIC)
16017 		tg3_read_bc_ver(tp);
16018 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16019 		tg3_read_sb_ver(tp, val);
16020 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16021 		tg3_read_hwsb_ver(tp);
16022 
16023 	if (tg3_flag(tp, ENABLE_ASF)) {
16024 		if (tg3_flag(tp, ENABLE_APE)) {
16025 			tg3_probe_ncsi(tp);
16026 			if (!vpd_vers)
16027 				tg3_read_dash_ver(tp);
16028 		} else if (!vpd_vers) {
16029 			tg3_read_mgmtfw_ver(tp);
16030 		}
16031 	}
16032 
16033 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16034 }
16035 
16036 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16037 {
16038 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16039 		return TG3_RX_RET_MAX_SIZE_5717;
16040 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16041 		return TG3_RX_RET_MAX_SIZE_5700;
16042 	else
16043 		return TG3_RX_RET_MAX_SIZE_5705;
16044 }
16045 
16046 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16047 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16048 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16049 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16050 	{ },
16051 };
16052 
16053 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16054 {
16055 	struct pci_dev *peer;
16056 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16057 
16058 	for (func = 0; func < 8; func++) {
16059 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16060 		if (peer && peer != tp->pdev)
16061 			break;
16062 		pci_dev_put(peer);
16063 	}
16064 	/* 5704 can be configured in single-port mode, set peer to
16065 	 * tp->pdev in that case.
16066 	 */
16067 	if (!peer) {
16068 		peer = tp->pdev;
16069 		return peer;
16070 	}
16071 
16072 	/*
16073 	 * We don't need to keep the refcount elevated; there's no way
16074 	 * to remove one half of this device without removing the other
16075 	 */
16076 	pci_dev_put(peer);
16077 
16078 	return peer;
16079 }
16080 
16081 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16082 {
16083 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16084 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16085 		u32 reg;
16086 
16087 		/* All devices that use the alternate
16088 		 * ASIC REV location have a CPMU.
16089 		 */
16090 		tg3_flag_set(tp, CPMU_PRESENT);
16091 
16092 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16093 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16094 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16095 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16096 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16097 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16098 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16099 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16100 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16101 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16102 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16103 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16104 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16105 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16106 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16107 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16108 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16109 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16110 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16111 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16112 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16113 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16114 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16115 		else
16116 			reg = TG3PCI_PRODID_ASICREV;
16117 
16118 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16119 	}
16120 
16121 	/* Wrong chip ID in 5752 A0. This code can be removed later
16122 	 * as A0 is not in production.
16123 	 */
16124 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16125 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16126 
16127 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16128 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16129 
16130 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16131 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16132 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16133 		tg3_flag_set(tp, 5717_PLUS);
16134 
16135 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16136 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16137 		tg3_flag_set(tp, 57765_CLASS);
16138 
16139 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16140 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16141 		tg3_flag_set(tp, 57765_PLUS);
16142 
16143 	/* Intentionally exclude ASIC_REV_5906 */
16144 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16145 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16146 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16147 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16148 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16149 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16150 	    tg3_flag(tp, 57765_PLUS))
16151 		tg3_flag_set(tp, 5755_PLUS);
16152 
16153 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16154 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16155 		tg3_flag_set(tp, 5780_CLASS);
16156 
16157 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16158 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16159 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16160 	    tg3_flag(tp, 5755_PLUS) ||
16161 	    tg3_flag(tp, 5780_CLASS))
16162 		tg3_flag_set(tp, 5750_PLUS);
16163 
16164 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16165 	    tg3_flag(tp, 5750_PLUS))
16166 		tg3_flag_set(tp, 5705_PLUS);
16167 }
16168 
16169 static bool tg3_10_100_only_device(struct tg3 *tp,
16170 				   const struct pci_device_id *ent)
16171 {
16172 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16173 
16174 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16175 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16176 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16177 		return true;
16178 
16179 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16180 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16181 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16182 				return true;
16183 		} else {
16184 			return true;
16185 		}
16186 	}
16187 
16188 	return false;
16189 }
16190 
16191 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16192 {
16193 	u32 misc_ctrl_reg;
16194 	u32 pci_state_reg, grc_misc_cfg;
16195 	u32 val;
16196 	u16 pci_cmd;
16197 	int err;
16198 
16199 	/* Force memory write invalidate off.  If we leave it on,
16200 	 * then on 5700_BX chips we have to enable a workaround.
16201 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16202 	 * to match the cacheline size.  The Broadcom driver have this
16203 	 * workaround but turns MWI off all the times so never uses
16204 	 * it.  This seems to suggest that the workaround is insufficient.
16205 	 */
16206 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16207 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16208 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16209 
16210 	/* Important! -- Make sure register accesses are byteswapped
16211 	 * correctly.  Also, for those chips that require it, make
16212 	 * sure that indirect register accesses are enabled before
16213 	 * the first operation.
16214 	 */
16215 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16216 			      &misc_ctrl_reg);
16217 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16218 			       MISC_HOST_CTRL_CHIPREV);
16219 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16220 			       tp->misc_host_ctrl);
16221 
16222 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16223 
16224 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16225 	 * we need to disable memory and use config. cycles
16226 	 * only to access all registers. The 5702/03 chips
16227 	 * can mistakenly decode the special cycles from the
16228 	 * ICH chipsets as memory write cycles, causing corruption
16229 	 * of register and memory space. Only certain ICH bridges
16230 	 * will drive special cycles with non-zero data during the
16231 	 * address phase which can fall within the 5703's address
16232 	 * range. This is not an ICH bug as the PCI spec allows
16233 	 * non-zero address during special cycles. However, only
16234 	 * these ICH bridges are known to drive non-zero addresses
16235 	 * during special cycles.
16236 	 *
16237 	 * Since special cycles do not cross PCI bridges, we only
16238 	 * enable this workaround if the 5703 is on the secondary
16239 	 * bus of these ICH bridges.
16240 	 */
16241 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16242 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16243 		static struct tg3_dev_id {
16244 			u32	vendor;
16245 			u32	device;
16246 			u32	rev;
16247 		} ich_chipsets[] = {
16248 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16249 			  PCI_ANY_ID },
16250 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16251 			  PCI_ANY_ID },
16252 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16253 			  0xa },
16254 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16255 			  PCI_ANY_ID },
16256 			{ },
16257 		};
16258 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16259 		struct pci_dev *bridge = NULL;
16260 
16261 		while (pci_id->vendor != 0) {
16262 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16263 						bridge);
16264 			if (!bridge) {
16265 				pci_id++;
16266 				continue;
16267 			}
16268 			if (pci_id->rev != PCI_ANY_ID) {
16269 				if (bridge->revision > pci_id->rev)
16270 					continue;
16271 			}
16272 			if (bridge->subordinate &&
16273 			    (bridge->subordinate->number ==
16274 			     tp->pdev->bus->number)) {
16275 				tg3_flag_set(tp, ICH_WORKAROUND);
16276 				pci_dev_put(bridge);
16277 				break;
16278 			}
16279 		}
16280 	}
16281 
16282 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16283 		static struct tg3_dev_id {
16284 			u32	vendor;
16285 			u32	device;
16286 		} bridge_chipsets[] = {
16287 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16288 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16289 			{ },
16290 		};
16291 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16292 		struct pci_dev *bridge = NULL;
16293 
16294 		while (pci_id->vendor != 0) {
16295 			bridge = pci_get_device(pci_id->vendor,
16296 						pci_id->device,
16297 						bridge);
16298 			if (!bridge) {
16299 				pci_id++;
16300 				continue;
16301 			}
16302 			if (bridge->subordinate &&
16303 			    (bridge->subordinate->number <=
16304 			     tp->pdev->bus->number) &&
16305 			    (bridge->subordinate->busn_res.end >=
16306 			     tp->pdev->bus->number)) {
16307 				tg3_flag_set(tp, 5701_DMA_BUG);
16308 				pci_dev_put(bridge);
16309 				break;
16310 			}
16311 		}
16312 	}
16313 
16314 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16315 	 * DMA addresses > 40-bit. This bridge may have other additional
16316 	 * 57xx devices behind it in some 4-port NIC designs for example.
16317 	 * Any tg3 device found behind the bridge will also need the 40-bit
16318 	 * DMA workaround.
16319 	 */
16320 	if (tg3_flag(tp, 5780_CLASS)) {
16321 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16322 		tp->msi_cap = tp->pdev->msi_cap;
16323 	} else {
16324 		struct pci_dev *bridge = NULL;
16325 
16326 		do {
16327 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16328 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16329 						bridge);
16330 			if (bridge && bridge->subordinate &&
16331 			    (bridge->subordinate->number <=
16332 			     tp->pdev->bus->number) &&
16333 			    (bridge->subordinate->busn_res.end >=
16334 			     tp->pdev->bus->number)) {
16335 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16336 				pci_dev_put(bridge);
16337 				break;
16338 			}
16339 		} while (bridge);
16340 	}
16341 
16342 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16343 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16344 		tp->pdev_peer = tg3_find_peer(tp);
16345 
16346 	/* Determine TSO capabilities */
16347 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16348 		; /* Do nothing. HW bug. */
16349 	else if (tg3_flag(tp, 57765_PLUS))
16350 		tg3_flag_set(tp, HW_TSO_3);
16351 	else if (tg3_flag(tp, 5755_PLUS) ||
16352 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16353 		tg3_flag_set(tp, HW_TSO_2);
16354 	else if (tg3_flag(tp, 5750_PLUS)) {
16355 		tg3_flag_set(tp, HW_TSO_1);
16356 		tg3_flag_set(tp, TSO_BUG);
16357 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16358 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16359 			tg3_flag_clear(tp, TSO_BUG);
16360 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16361 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16362 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16363 		tg3_flag_set(tp, FW_TSO);
16364 		tg3_flag_set(tp, TSO_BUG);
16365 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16366 			tp->fw_needed = FIRMWARE_TG3TSO5;
16367 		else
16368 			tp->fw_needed = FIRMWARE_TG3TSO;
16369 	}
16370 
16371 	/* Selectively allow TSO based on operating conditions */
16372 	if (tg3_flag(tp, HW_TSO_1) ||
16373 	    tg3_flag(tp, HW_TSO_2) ||
16374 	    tg3_flag(tp, HW_TSO_3) ||
16375 	    tg3_flag(tp, FW_TSO)) {
16376 		/* For firmware TSO, assume ASF is disabled.
16377 		 * We'll disable TSO later if we discover ASF
16378 		 * is enabled in tg3_get_eeprom_hw_cfg().
16379 		 */
16380 		tg3_flag_set(tp, TSO_CAPABLE);
16381 	} else {
16382 		tg3_flag_clear(tp, TSO_CAPABLE);
16383 		tg3_flag_clear(tp, TSO_BUG);
16384 		tp->fw_needed = NULL;
16385 	}
16386 
16387 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16388 		tp->fw_needed = FIRMWARE_TG3;
16389 
16390 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16391 		tp->fw_needed = FIRMWARE_TG357766;
16392 
16393 	tp->irq_max = 1;
16394 
16395 	if (tg3_flag(tp, 5750_PLUS)) {
16396 		tg3_flag_set(tp, SUPPORT_MSI);
16397 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16398 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16399 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16400 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16401 		     tp->pdev_peer == tp->pdev))
16402 			tg3_flag_clear(tp, SUPPORT_MSI);
16403 
16404 		if (tg3_flag(tp, 5755_PLUS) ||
16405 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16406 			tg3_flag_set(tp, 1SHOT_MSI);
16407 		}
16408 
16409 		if (tg3_flag(tp, 57765_PLUS)) {
16410 			tg3_flag_set(tp, SUPPORT_MSIX);
16411 			tp->irq_max = TG3_IRQ_MAX_VECS;
16412 		}
16413 	}
16414 
16415 	tp->txq_max = 1;
16416 	tp->rxq_max = 1;
16417 	if (tp->irq_max > 1) {
16418 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16419 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16420 
16421 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16422 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16423 			tp->txq_max = tp->irq_max - 1;
16424 	}
16425 
16426 	if (tg3_flag(tp, 5755_PLUS) ||
16427 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16428 		tg3_flag_set(tp, SHORT_DMA_BUG);
16429 
16430 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16431 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16432 
16433 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16434 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16435 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16436 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16437 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16438 
16439 	if (tg3_flag(tp, 57765_PLUS) &&
16440 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16441 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16442 
16443 	if (!tg3_flag(tp, 5705_PLUS) ||
16444 	    tg3_flag(tp, 5780_CLASS) ||
16445 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16446 		tg3_flag_set(tp, JUMBO_CAPABLE);
16447 
16448 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16449 			      &pci_state_reg);
16450 
16451 	if (pci_is_pcie(tp->pdev)) {
16452 		u16 lnkctl;
16453 
16454 		tg3_flag_set(tp, PCI_EXPRESS);
16455 
16456 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16457 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16458 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16459 				tg3_flag_clear(tp, HW_TSO_2);
16460 				tg3_flag_clear(tp, TSO_CAPABLE);
16461 			}
16462 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16463 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16464 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16465 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16466 				tg3_flag_set(tp, CLKREQ_BUG);
16467 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16468 			tg3_flag_set(tp, L1PLLPD_EN);
16469 		}
16470 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16471 		/* BCM5785 devices are effectively PCIe devices, and should
16472 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16473 		 * section.
16474 		 */
16475 		tg3_flag_set(tp, PCI_EXPRESS);
16476 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16477 		   tg3_flag(tp, 5780_CLASS)) {
16478 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16479 		if (!tp->pcix_cap) {
16480 			dev_err(&tp->pdev->dev,
16481 				"Cannot find PCI-X capability, aborting\n");
16482 			return -EIO;
16483 		}
16484 
16485 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16486 			tg3_flag_set(tp, PCIX_MODE);
16487 	}
16488 
16489 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16490 	 * reordering to the mailbox registers done by the host
16491 	 * controller can cause major troubles.  We read back from
16492 	 * every mailbox register write to force the writes to be
16493 	 * posted to the chip in order.
16494 	 */
16495 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16496 	    !tg3_flag(tp, PCI_EXPRESS))
16497 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16498 
16499 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16500 			     &tp->pci_cacheline_sz);
16501 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16502 			     &tp->pci_lat_timer);
16503 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16504 	    tp->pci_lat_timer < 64) {
16505 		tp->pci_lat_timer = 64;
16506 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16507 				      tp->pci_lat_timer);
16508 	}
16509 
16510 	/* Important! -- It is critical that the PCI-X hw workaround
16511 	 * situation is decided before the first MMIO register access.
16512 	 */
16513 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16514 		/* 5700 BX chips need to have their TX producer index
16515 		 * mailboxes written twice to workaround a bug.
16516 		 */
16517 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16518 
16519 		/* If we are in PCI-X mode, enable register write workaround.
16520 		 *
16521 		 * The workaround is to use indirect register accesses
16522 		 * for all chip writes not to mailbox registers.
16523 		 */
16524 		if (tg3_flag(tp, PCIX_MODE)) {
16525 			u32 pm_reg;
16526 
16527 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16528 
16529 			/* The chip can have it's power management PCI config
16530 			 * space registers clobbered due to this bug.
16531 			 * So explicitly force the chip into D0 here.
16532 			 */
16533 			pci_read_config_dword(tp->pdev,
16534 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16535 					      &pm_reg);
16536 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16537 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16538 			pci_write_config_dword(tp->pdev,
16539 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16540 					       pm_reg);
16541 
16542 			/* Also, force SERR#/PERR# in PCI command. */
16543 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16544 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16545 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16546 		}
16547 	}
16548 
16549 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16550 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16551 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16552 		tg3_flag_set(tp, PCI_32BIT);
16553 
16554 	/* Chip-specific fixup from Broadcom driver */
16555 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16556 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16557 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16558 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16559 	}
16560 
16561 	/* Default fast path register access methods */
16562 	tp->read32 = tg3_read32;
16563 	tp->write32 = tg3_write32;
16564 	tp->read32_mbox = tg3_read32;
16565 	tp->write32_mbox = tg3_write32;
16566 	tp->write32_tx_mbox = tg3_write32;
16567 	tp->write32_rx_mbox = tg3_write32;
16568 
16569 	/* Various workaround register access methods */
16570 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16571 		tp->write32 = tg3_write_indirect_reg32;
16572 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16573 		 (tg3_flag(tp, PCI_EXPRESS) &&
16574 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16575 		/*
16576 		 * Back to back register writes can cause problems on these
16577 		 * chips, the workaround is to read back all reg writes
16578 		 * except those to mailbox regs.
16579 		 *
16580 		 * See tg3_write_indirect_reg32().
16581 		 */
16582 		tp->write32 = tg3_write_flush_reg32;
16583 	}
16584 
16585 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16586 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16587 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16588 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16589 	}
16590 
16591 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16592 		tp->read32 = tg3_read_indirect_reg32;
16593 		tp->write32 = tg3_write_indirect_reg32;
16594 		tp->read32_mbox = tg3_read_indirect_mbox;
16595 		tp->write32_mbox = tg3_write_indirect_mbox;
16596 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16597 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16598 
16599 		iounmap(tp->regs);
16600 		tp->regs = NULL;
16601 
16602 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16603 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16604 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16605 	}
16606 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16607 		tp->read32_mbox = tg3_read32_mbox_5906;
16608 		tp->write32_mbox = tg3_write32_mbox_5906;
16609 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16610 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16611 	}
16612 
16613 	if (tp->write32 == tg3_write_indirect_reg32 ||
16614 	    (tg3_flag(tp, PCIX_MODE) &&
16615 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16616 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16617 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16618 
16619 	/* The memory arbiter has to be enabled in order for SRAM accesses
16620 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16621 	 * sure it is enabled, but other entities such as system netboot
16622 	 * code might disable it.
16623 	 */
16624 	val = tr32(MEMARB_MODE);
16625 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16626 
16627 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16628 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16629 	    tg3_flag(tp, 5780_CLASS)) {
16630 		if (tg3_flag(tp, PCIX_MODE)) {
16631 			pci_read_config_dword(tp->pdev,
16632 					      tp->pcix_cap + PCI_X_STATUS,
16633 					      &val);
16634 			tp->pci_fn = val & 0x7;
16635 		}
16636 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16637 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16638 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16639 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16640 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16641 			val = tr32(TG3_CPMU_STATUS);
16642 
16643 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16644 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16645 		else
16646 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16647 				     TG3_CPMU_STATUS_FSHFT_5719;
16648 	}
16649 
16650 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16651 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16652 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16653 	}
16654 
16655 	/* Get eeprom hw config before calling tg3_set_power_state().
16656 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16657 	 * determined before calling tg3_set_power_state() so that
16658 	 * we know whether or not to switch out of Vaux power.
16659 	 * When the flag is set, it means that GPIO1 is used for eeprom
16660 	 * write protect and also implies that it is a LOM where GPIOs
16661 	 * are not used to switch power.
16662 	 */
16663 	tg3_get_eeprom_hw_cfg(tp);
16664 
16665 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16666 		tg3_flag_clear(tp, TSO_CAPABLE);
16667 		tg3_flag_clear(tp, TSO_BUG);
16668 		tp->fw_needed = NULL;
16669 	}
16670 
16671 	if (tg3_flag(tp, ENABLE_APE)) {
16672 		/* Allow reads and writes to the
16673 		 * APE register and memory space.
16674 		 */
16675 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16676 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16677 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16678 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16679 				       pci_state_reg);
16680 
16681 		tg3_ape_lock_init(tp);
16682 		tp->ape_hb_interval =
16683 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16684 	}
16685 
16686 	/* Set up tp->grc_local_ctrl before calling
16687 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16688 	 * will bring 5700's external PHY out of reset.
16689 	 * It is also used as eeprom write protect on LOMs.
16690 	 */
16691 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16692 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16693 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16694 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16695 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16696 	/* Unused GPIO3 must be driven as output on 5752 because there
16697 	 * are no pull-up resistors on unused GPIO pins.
16698 	 */
16699 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16700 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16701 
16702 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16703 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16704 	    tg3_flag(tp, 57765_CLASS))
16705 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16706 
16707 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16708 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16709 		/* Turn off the debug UART. */
16710 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16711 		if (tg3_flag(tp, IS_NIC))
16712 			/* Keep VMain power. */
16713 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16714 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16715 	}
16716 
16717 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16718 		tp->grc_local_ctrl |=
16719 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16720 
16721 	/* Switch out of Vaux if it is a NIC */
16722 	tg3_pwrsrc_switch_to_vmain(tp);
16723 
16724 	/* Derive initial jumbo mode from MTU assigned in
16725 	 * ether_setup() via the alloc_etherdev() call
16726 	 */
16727 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16728 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16729 
16730 	/* Determine WakeOnLan speed to use. */
16731 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16732 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16733 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16734 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16735 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16736 	} else {
16737 		tg3_flag_set(tp, WOL_SPEED_100MB);
16738 	}
16739 
16740 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16741 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16742 
16743 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16744 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16745 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16746 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16747 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16748 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16749 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16750 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16751 
16752 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16753 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16754 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16755 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16756 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16757 
16758 	if (tg3_flag(tp, 5705_PLUS) &&
16759 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16760 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16761 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16762 	    !tg3_flag(tp, 57765_PLUS)) {
16763 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16764 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16765 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16766 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16767 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16768 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16769 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16770 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16771 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16772 		} else
16773 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16774 	}
16775 
16776 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16777 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16778 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16779 		if (tp->phy_otp == 0)
16780 			tp->phy_otp = TG3_OTP_DEFAULT;
16781 	}
16782 
16783 	if (tg3_flag(tp, CPMU_PRESENT))
16784 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16785 	else
16786 		tp->mi_mode = MAC_MI_MODE_BASE;
16787 
16788 	tp->coalesce_mode = 0;
16789 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16790 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16791 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16792 
16793 	/* Set these bits to enable statistics workaround. */
16794 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16795 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16796 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16797 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16798 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16799 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16800 	}
16801 
16802 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16803 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16804 		tg3_flag_set(tp, USE_PHYLIB);
16805 
16806 	err = tg3_mdio_init(tp);
16807 	if (err)
16808 		return err;
16809 
16810 	/* Initialize data/descriptor byte/word swapping. */
16811 	val = tr32(GRC_MODE);
16812 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16813 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16814 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16815 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16816 			GRC_MODE_B2HRX_ENABLE |
16817 			GRC_MODE_HTX2B_ENABLE |
16818 			GRC_MODE_HOST_STACKUP);
16819 	else
16820 		val &= GRC_MODE_HOST_STACKUP;
16821 
16822 	tw32(GRC_MODE, val | tp->grc_mode);
16823 
16824 	tg3_switch_clocks(tp);
16825 
16826 	/* Clear this out for sanity. */
16827 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16828 
16829 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16830 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16831 
16832 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16833 			      &pci_state_reg);
16834 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16835 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16836 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16837 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16838 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16839 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16840 			void __iomem *sram_base;
16841 
16842 			/* Write some dummy words into the SRAM status block
16843 			 * area, see if it reads back correctly.  If the return
16844 			 * value is bad, force enable the PCIX workaround.
16845 			 */
16846 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16847 
16848 			writel(0x00000000, sram_base);
16849 			writel(0x00000000, sram_base + 4);
16850 			writel(0xffffffff, sram_base + 4);
16851 			if (readl(sram_base) != 0x00000000)
16852 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16853 		}
16854 	}
16855 
16856 	udelay(50);
16857 	tg3_nvram_init(tp);
16858 
16859 	/* If the device has an NVRAM, no need to load patch firmware */
16860 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16861 	    !tg3_flag(tp, NO_NVRAM))
16862 		tp->fw_needed = NULL;
16863 
16864 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16865 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16866 
16867 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16868 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16869 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16870 		tg3_flag_set(tp, IS_5788);
16871 
16872 	if (!tg3_flag(tp, IS_5788) &&
16873 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16874 		tg3_flag_set(tp, TAGGED_STATUS);
16875 	if (tg3_flag(tp, TAGGED_STATUS)) {
16876 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16877 				      HOSTCC_MODE_CLRTICK_TXBD);
16878 
16879 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16880 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16881 				       tp->misc_host_ctrl);
16882 	}
16883 
16884 	/* Preserve the APE MAC_MODE bits */
16885 	if (tg3_flag(tp, ENABLE_APE))
16886 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16887 	else
16888 		tp->mac_mode = 0;
16889 
16890 	if (tg3_10_100_only_device(tp, ent))
16891 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16892 
16893 	err = tg3_phy_probe(tp);
16894 	if (err) {
16895 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16896 		/* ... but do not return immediately ... */
16897 		tg3_mdio_fini(tp);
16898 	}
16899 
16900 	tg3_read_vpd(tp);
16901 	tg3_read_fw_ver(tp);
16902 
16903 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16904 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16905 	} else {
16906 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16907 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16908 		else
16909 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16910 	}
16911 
16912 	/* 5700 {AX,BX} chips have a broken status block link
16913 	 * change bit implementation, so we must use the
16914 	 * status register in those cases.
16915 	 */
16916 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16917 		tg3_flag_set(tp, USE_LINKCHG_REG);
16918 	else
16919 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16920 
16921 	/* The led_ctrl is set during tg3_phy_probe, here we might
16922 	 * have to force the link status polling mechanism based
16923 	 * upon subsystem IDs.
16924 	 */
16925 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16926 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16927 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16928 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16929 		tg3_flag_set(tp, USE_LINKCHG_REG);
16930 	}
16931 
16932 	/* For all SERDES we poll the MAC status register. */
16933 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16934 		tg3_flag_set(tp, POLL_SERDES);
16935 	else
16936 		tg3_flag_clear(tp, POLL_SERDES);
16937 
16938 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16939 		tg3_flag_set(tp, POLL_CPMU_LINK);
16940 
16941 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16942 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16943 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16944 	    tg3_flag(tp, PCIX_MODE)) {
16945 		tp->rx_offset = NET_SKB_PAD;
16946 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16947 		tp->rx_copy_thresh = ~(u16)0;
16948 #endif
16949 	}
16950 
16951 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16952 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16953 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16954 
16955 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16956 
16957 	/* Increment the rx prod index on the rx std ring by at most
16958 	 * 8 for these chips to workaround hw errata.
16959 	 */
16960 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16961 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16962 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16963 		tp->rx_std_max_post = 8;
16964 
16965 	if (tg3_flag(tp, ASPM_WORKAROUND))
16966 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16967 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16968 
16969 	return err;
16970 }
16971 
16972 #ifdef CONFIG_SPARC
16973 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16974 {
16975 	struct net_device *dev = tp->dev;
16976 	struct pci_dev *pdev = tp->pdev;
16977 	struct device_node *dp = pci_device_to_OF_node(pdev);
16978 	const unsigned char *addr;
16979 	int len;
16980 
16981 	addr = of_get_property(dp, "local-mac-address", &len);
16982 	if (addr && len == ETH_ALEN) {
16983 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16984 		return 0;
16985 	}
16986 	return -ENODEV;
16987 }
16988 
16989 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16990 {
16991 	struct net_device *dev = tp->dev;
16992 
16993 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16994 	return 0;
16995 }
16996 #endif
16997 
16998 static int tg3_get_device_address(struct tg3 *tp)
16999 {
17000 	struct net_device *dev = tp->dev;
17001 	u32 hi, lo, mac_offset;
17002 	int addr_ok = 0;
17003 	int err;
17004 
17005 #ifdef CONFIG_SPARC
17006 	if (!tg3_get_macaddr_sparc(tp))
17007 		return 0;
17008 #endif
17009 
17010 	if (tg3_flag(tp, IS_SSB_CORE)) {
17011 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17012 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17013 			return 0;
17014 	}
17015 
17016 	mac_offset = 0x7c;
17017 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17018 	    tg3_flag(tp, 5780_CLASS)) {
17019 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17020 			mac_offset = 0xcc;
17021 		if (tg3_nvram_lock(tp))
17022 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17023 		else
17024 			tg3_nvram_unlock(tp);
17025 	} else if (tg3_flag(tp, 5717_PLUS)) {
17026 		if (tp->pci_fn & 1)
17027 			mac_offset = 0xcc;
17028 		if (tp->pci_fn > 1)
17029 			mac_offset += 0x18c;
17030 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17031 		mac_offset = 0x10;
17032 
17033 	/* First try to get it from MAC address mailbox. */
17034 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17035 	if ((hi >> 16) == 0x484b) {
17036 		dev->dev_addr[0] = (hi >>  8) & 0xff;
17037 		dev->dev_addr[1] = (hi >>  0) & 0xff;
17038 
17039 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17040 		dev->dev_addr[2] = (lo >> 24) & 0xff;
17041 		dev->dev_addr[3] = (lo >> 16) & 0xff;
17042 		dev->dev_addr[4] = (lo >>  8) & 0xff;
17043 		dev->dev_addr[5] = (lo >>  0) & 0xff;
17044 
17045 		/* Some old bootcode may report a 0 MAC address in SRAM */
17046 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17047 	}
17048 	if (!addr_ok) {
17049 		/* Next, try NVRAM. */
17050 		if (!tg3_flag(tp, NO_NVRAM) &&
17051 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17052 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17053 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17054 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17055 		}
17056 		/* Finally just fetch it out of the MAC control regs. */
17057 		else {
17058 			hi = tr32(MAC_ADDR_0_HIGH);
17059 			lo = tr32(MAC_ADDR_0_LOW);
17060 
17061 			dev->dev_addr[5] = lo & 0xff;
17062 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17063 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17064 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17065 			dev->dev_addr[1] = hi & 0xff;
17066 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17067 		}
17068 	}
17069 
17070 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17071 #ifdef CONFIG_SPARC
17072 		if (!tg3_get_default_macaddr_sparc(tp))
17073 			return 0;
17074 #endif
17075 		return -EINVAL;
17076 	}
17077 	return 0;
17078 }
17079 
17080 #define BOUNDARY_SINGLE_CACHELINE	1
17081 #define BOUNDARY_MULTI_CACHELINE	2
17082 
17083 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17084 {
17085 	int cacheline_size;
17086 	u8 byte;
17087 	int goal;
17088 
17089 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17090 	if (byte == 0)
17091 		cacheline_size = 1024;
17092 	else
17093 		cacheline_size = (int) byte * 4;
17094 
17095 	/* On 5703 and later chips, the boundary bits have no
17096 	 * effect.
17097 	 */
17098 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17099 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17100 	    !tg3_flag(tp, PCI_EXPRESS))
17101 		goto out;
17102 
17103 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17104 	goal = BOUNDARY_MULTI_CACHELINE;
17105 #else
17106 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17107 	goal = BOUNDARY_SINGLE_CACHELINE;
17108 #else
17109 	goal = 0;
17110 #endif
17111 #endif
17112 
17113 	if (tg3_flag(tp, 57765_PLUS)) {
17114 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17115 		goto out;
17116 	}
17117 
17118 	if (!goal)
17119 		goto out;
17120 
17121 	/* PCI controllers on most RISC systems tend to disconnect
17122 	 * when a device tries to burst across a cache-line boundary.
17123 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17124 	 *
17125 	 * Unfortunately, for PCI-E there are only limited
17126 	 * write-side controls for this, and thus for reads
17127 	 * we will still get the disconnects.  We'll also waste
17128 	 * these PCI cycles for both read and write for chips
17129 	 * other than 5700 and 5701 which do not implement the
17130 	 * boundary bits.
17131 	 */
17132 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17133 		switch (cacheline_size) {
17134 		case 16:
17135 		case 32:
17136 		case 64:
17137 		case 128:
17138 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17139 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17140 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17141 			} else {
17142 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17143 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17144 			}
17145 			break;
17146 
17147 		case 256:
17148 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17149 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17150 			break;
17151 
17152 		default:
17153 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17154 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17155 			break;
17156 		}
17157 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17158 		switch (cacheline_size) {
17159 		case 16:
17160 		case 32:
17161 		case 64:
17162 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17163 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17164 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17165 				break;
17166 			}
17167 			/* fallthrough */
17168 		case 128:
17169 		default:
17170 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17171 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17172 			break;
17173 		}
17174 	} else {
17175 		switch (cacheline_size) {
17176 		case 16:
17177 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17178 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17179 					DMA_RWCTRL_WRITE_BNDRY_16);
17180 				break;
17181 			}
17182 			/* fallthrough */
17183 		case 32:
17184 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17185 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17186 					DMA_RWCTRL_WRITE_BNDRY_32);
17187 				break;
17188 			}
17189 			/* fallthrough */
17190 		case 64:
17191 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17192 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17193 					DMA_RWCTRL_WRITE_BNDRY_64);
17194 				break;
17195 			}
17196 			/* fallthrough */
17197 		case 128:
17198 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17199 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17200 					DMA_RWCTRL_WRITE_BNDRY_128);
17201 				break;
17202 			}
17203 			/* fallthrough */
17204 		case 256:
17205 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17206 				DMA_RWCTRL_WRITE_BNDRY_256);
17207 			break;
17208 		case 512:
17209 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17210 				DMA_RWCTRL_WRITE_BNDRY_512);
17211 			break;
17212 		case 1024:
17213 		default:
17214 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17215 				DMA_RWCTRL_WRITE_BNDRY_1024);
17216 			break;
17217 		}
17218 	}
17219 
17220 out:
17221 	return val;
17222 }
17223 
17224 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17225 			   int size, bool to_device)
17226 {
17227 	struct tg3_internal_buffer_desc test_desc;
17228 	u32 sram_dma_descs;
17229 	int i, ret;
17230 
17231 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17232 
17233 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17234 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17235 	tw32(RDMAC_STATUS, 0);
17236 	tw32(WDMAC_STATUS, 0);
17237 
17238 	tw32(BUFMGR_MODE, 0);
17239 	tw32(FTQ_RESET, 0);
17240 
17241 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17242 	test_desc.addr_lo = buf_dma & 0xffffffff;
17243 	test_desc.nic_mbuf = 0x00002100;
17244 	test_desc.len = size;
17245 
17246 	/*
17247 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17248 	 * the *second* time the tg3 driver was getting loaded after an
17249 	 * initial scan.
17250 	 *
17251 	 * Broadcom tells me:
17252 	 *   ...the DMA engine is connected to the GRC block and a DMA
17253 	 *   reset may affect the GRC block in some unpredictable way...
17254 	 *   The behavior of resets to individual blocks has not been tested.
17255 	 *
17256 	 * Broadcom noted the GRC reset will also reset all sub-components.
17257 	 */
17258 	if (to_device) {
17259 		test_desc.cqid_sqid = (13 << 8) | 2;
17260 
17261 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17262 		udelay(40);
17263 	} else {
17264 		test_desc.cqid_sqid = (16 << 8) | 7;
17265 
17266 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17267 		udelay(40);
17268 	}
17269 	test_desc.flags = 0x00000005;
17270 
17271 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17272 		u32 val;
17273 
17274 		val = *(((u32 *)&test_desc) + i);
17275 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17276 				       sram_dma_descs + (i * sizeof(u32)));
17277 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17278 	}
17279 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17280 
17281 	if (to_device)
17282 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17283 	else
17284 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17285 
17286 	ret = -ENODEV;
17287 	for (i = 0; i < 40; i++) {
17288 		u32 val;
17289 
17290 		if (to_device)
17291 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17292 		else
17293 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17294 		if ((val & 0xffff) == sram_dma_descs) {
17295 			ret = 0;
17296 			break;
17297 		}
17298 
17299 		udelay(100);
17300 	}
17301 
17302 	return ret;
17303 }
17304 
17305 #define TEST_BUFFER_SIZE	0x2000
17306 
17307 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17308 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17309 	{ },
17310 };
17311 
17312 static int tg3_test_dma(struct tg3 *tp)
17313 {
17314 	dma_addr_t buf_dma;
17315 	u32 *buf, saved_dma_rwctrl;
17316 	int ret = 0;
17317 
17318 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17319 				 &buf_dma, GFP_KERNEL);
17320 	if (!buf) {
17321 		ret = -ENOMEM;
17322 		goto out_nofree;
17323 	}
17324 
17325 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17326 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17327 
17328 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17329 
17330 	if (tg3_flag(tp, 57765_PLUS))
17331 		goto out;
17332 
17333 	if (tg3_flag(tp, PCI_EXPRESS)) {
17334 		/* DMA read watermark not used on PCIE */
17335 		tp->dma_rwctrl |= 0x00180000;
17336 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17337 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17338 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17339 			tp->dma_rwctrl |= 0x003f0000;
17340 		else
17341 			tp->dma_rwctrl |= 0x003f000f;
17342 	} else {
17343 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17344 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17345 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17346 			u32 read_water = 0x7;
17347 
17348 			/* If the 5704 is behind the EPB bridge, we can
17349 			 * do the less restrictive ONE_DMA workaround for
17350 			 * better performance.
17351 			 */
17352 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17353 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17354 				tp->dma_rwctrl |= 0x8000;
17355 			else if (ccval == 0x6 || ccval == 0x7)
17356 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17357 
17358 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17359 				read_water = 4;
17360 			/* Set bit 23 to enable PCIX hw bug fix */
17361 			tp->dma_rwctrl |=
17362 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17363 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17364 				(1 << 23);
17365 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17366 			/* 5780 always in PCIX mode */
17367 			tp->dma_rwctrl |= 0x00144000;
17368 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17369 			/* 5714 always in PCIX mode */
17370 			tp->dma_rwctrl |= 0x00148000;
17371 		} else {
17372 			tp->dma_rwctrl |= 0x001b000f;
17373 		}
17374 	}
17375 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17376 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17377 
17378 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17379 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17380 		tp->dma_rwctrl &= 0xfffffff0;
17381 
17382 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17383 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17384 		/* Remove this if it causes problems for some boards. */
17385 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17386 
17387 		/* On 5700/5701 chips, we need to set this bit.
17388 		 * Otherwise the chip will issue cacheline transactions
17389 		 * to streamable DMA memory with not all the byte
17390 		 * enables turned on.  This is an error on several
17391 		 * RISC PCI controllers, in particular sparc64.
17392 		 *
17393 		 * On 5703/5704 chips, this bit has been reassigned
17394 		 * a different meaning.  In particular, it is used
17395 		 * on those chips to enable a PCI-X workaround.
17396 		 */
17397 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17398 	}
17399 
17400 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17401 
17402 
17403 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17404 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17405 		goto out;
17406 
17407 	/* It is best to perform DMA test with maximum write burst size
17408 	 * to expose the 5700/5701 write DMA bug.
17409 	 */
17410 	saved_dma_rwctrl = tp->dma_rwctrl;
17411 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17412 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17413 
17414 	while (1) {
17415 		u32 *p = buf, i;
17416 
17417 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17418 			p[i] = i;
17419 
17420 		/* Send the buffer to the chip. */
17421 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17422 		if (ret) {
17423 			dev_err(&tp->pdev->dev,
17424 				"%s: Buffer write failed. err = %d\n",
17425 				__func__, ret);
17426 			break;
17427 		}
17428 
17429 		/* Now read it back. */
17430 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17431 		if (ret) {
17432 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17433 				"err = %d\n", __func__, ret);
17434 			break;
17435 		}
17436 
17437 		/* Verify it. */
17438 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17439 			if (p[i] == i)
17440 				continue;
17441 
17442 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17443 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17444 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17445 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17446 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17447 				break;
17448 			} else {
17449 				dev_err(&tp->pdev->dev,
17450 					"%s: Buffer corrupted on read back! "
17451 					"(%d != %d)\n", __func__, p[i], i);
17452 				ret = -ENODEV;
17453 				goto out;
17454 			}
17455 		}
17456 
17457 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17458 			/* Success. */
17459 			ret = 0;
17460 			break;
17461 		}
17462 	}
17463 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17464 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17465 		/* DMA test passed without adjusting DMA boundary,
17466 		 * now look for chipsets that are known to expose the
17467 		 * DMA bug without failing the test.
17468 		 */
17469 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17470 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17471 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17472 		} else {
17473 			/* Safe to use the calculated DMA boundary. */
17474 			tp->dma_rwctrl = saved_dma_rwctrl;
17475 		}
17476 
17477 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17478 	}
17479 
17480 out:
17481 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17482 out_nofree:
17483 	return ret;
17484 }
17485 
17486 static void tg3_init_bufmgr_config(struct tg3 *tp)
17487 {
17488 	if (tg3_flag(tp, 57765_PLUS)) {
17489 		tp->bufmgr_config.mbuf_read_dma_low_water =
17490 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17491 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17492 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17493 		tp->bufmgr_config.mbuf_high_water =
17494 			DEFAULT_MB_HIGH_WATER_57765;
17495 
17496 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17497 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17498 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17499 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17500 		tp->bufmgr_config.mbuf_high_water_jumbo =
17501 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17502 	} else if (tg3_flag(tp, 5705_PLUS)) {
17503 		tp->bufmgr_config.mbuf_read_dma_low_water =
17504 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17505 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17506 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17507 		tp->bufmgr_config.mbuf_high_water =
17508 			DEFAULT_MB_HIGH_WATER_5705;
17509 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17510 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17511 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17512 			tp->bufmgr_config.mbuf_high_water =
17513 				DEFAULT_MB_HIGH_WATER_5906;
17514 		}
17515 
17516 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17517 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17518 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17519 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17520 		tp->bufmgr_config.mbuf_high_water_jumbo =
17521 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17522 	} else {
17523 		tp->bufmgr_config.mbuf_read_dma_low_water =
17524 			DEFAULT_MB_RDMA_LOW_WATER;
17525 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17526 			DEFAULT_MB_MACRX_LOW_WATER;
17527 		tp->bufmgr_config.mbuf_high_water =
17528 			DEFAULT_MB_HIGH_WATER;
17529 
17530 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17531 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17532 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17533 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17534 		tp->bufmgr_config.mbuf_high_water_jumbo =
17535 			DEFAULT_MB_HIGH_WATER_JUMBO;
17536 	}
17537 
17538 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17539 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17540 }
17541 
17542 static char *tg3_phy_string(struct tg3 *tp)
17543 {
17544 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17545 	case TG3_PHY_ID_BCM5400:	return "5400";
17546 	case TG3_PHY_ID_BCM5401:	return "5401";
17547 	case TG3_PHY_ID_BCM5411:	return "5411";
17548 	case TG3_PHY_ID_BCM5701:	return "5701";
17549 	case TG3_PHY_ID_BCM5703:	return "5703";
17550 	case TG3_PHY_ID_BCM5704:	return "5704";
17551 	case TG3_PHY_ID_BCM5705:	return "5705";
17552 	case TG3_PHY_ID_BCM5750:	return "5750";
17553 	case TG3_PHY_ID_BCM5752:	return "5752";
17554 	case TG3_PHY_ID_BCM5714:	return "5714";
17555 	case TG3_PHY_ID_BCM5780:	return "5780";
17556 	case TG3_PHY_ID_BCM5755:	return "5755";
17557 	case TG3_PHY_ID_BCM5787:	return "5787";
17558 	case TG3_PHY_ID_BCM5784:	return "5784";
17559 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17560 	case TG3_PHY_ID_BCM5906:	return "5906";
17561 	case TG3_PHY_ID_BCM5761:	return "5761";
17562 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17563 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17564 	case TG3_PHY_ID_BCM57765:	return "57765";
17565 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17566 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17567 	case TG3_PHY_ID_BCM5762:	return "5762C";
17568 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17569 	case 0:			return "serdes";
17570 	default:		return "unknown";
17571 	}
17572 }
17573 
17574 static char *tg3_bus_string(struct tg3 *tp, char *str)
17575 {
17576 	if (tg3_flag(tp, PCI_EXPRESS)) {
17577 		strcpy(str, "PCI Express");
17578 		return str;
17579 	} else if (tg3_flag(tp, PCIX_MODE)) {
17580 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17581 
17582 		strcpy(str, "PCIX:");
17583 
17584 		if ((clock_ctrl == 7) ||
17585 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17586 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17587 			strcat(str, "133MHz");
17588 		else if (clock_ctrl == 0)
17589 			strcat(str, "33MHz");
17590 		else if (clock_ctrl == 2)
17591 			strcat(str, "50MHz");
17592 		else if (clock_ctrl == 4)
17593 			strcat(str, "66MHz");
17594 		else if (clock_ctrl == 6)
17595 			strcat(str, "100MHz");
17596 	} else {
17597 		strcpy(str, "PCI:");
17598 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17599 			strcat(str, "66MHz");
17600 		else
17601 			strcat(str, "33MHz");
17602 	}
17603 	if (tg3_flag(tp, PCI_32BIT))
17604 		strcat(str, ":32-bit");
17605 	else
17606 		strcat(str, ":64-bit");
17607 	return str;
17608 }
17609 
17610 static void tg3_init_coal(struct tg3 *tp)
17611 {
17612 	struct ethtool_coalesce *ec = &tp->coal;
17613 
17614 	memset(ec, 0, sizeof(*ec));
17615 	ec->cmd = ETHTOOL_GCOALESCE;
17616 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17617 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17618 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17619 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17620 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17621 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17622 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17623 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17624 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17625 
17626 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17627 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17628 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17629 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17630 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17631 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17632 	}
17633 
17634 	if (tg3_flag(tp, 5705_PLUS)) {
17635 		ec->rx_coalesce_usecs_irq = 0;
17636 		ec->tx_coalesce_usecs_irq = 0;
17637 		ec->stats_block_coalesce_usecs = 0;
17638 	}
17639 }
17640 
17641 static int tg3_init_one(struct pci_dev *pdev,
17642 				  const struct pci_device_id *ent)
17643 {
17644 	struct net_device *dev;
17645 	struct tg3 *tp;
17646 	int i, err;
17647 	u32 sndmbx, rcvmbx, intmbx;
17648 	char str[40];
17649 	u64 dma_mask, persist_dma_mask;
17650 	netdev_features_t features = 0;
17651 
17652 	printk_once(KERN_INFO "%s\n", version);
17653 
17654 	err = pci_enable_device(pdev);
17655 	if (err) {
17656 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17657 		return err;
17658 	}
17659 
17660 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17661 	if (err) {
17662 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17663 		goto err_out_disable_pdev;
17664 	}
17665 
17666 	pci_set_master(pdev);
17667 
17668 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17669 	if (!dev) {
17670 		err = -ENOMEM;
17671 		goto err_out_free_res;
17672 	}
17673 
17674 	SET_NETDEV_DEV(dev, &pdev->dev);
17675 
17676 	tp = netdev_priv(dev);
17677 	tp->pdev = pdev;
17678 	tp->dev = dev;
17679 	tp->rx_mode = TG3_DEF_RX_MODE;
17680 	tp->tx_mode = TG3_DEF_TX_MODE;
17681 	tp->irq_sync = 1;
17682 	tp->pcierr_recovery = false;
17683 
17684 	if (tg3_debug > 0)
17685 		tp->msg_enable = tg3_debug;
17686 	else
17687 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17688 
17689 	if (pdev_is_ssb_gige_core(pdev)) {
17690 		tg3_flag_set(tp, IS_SSB_CORE);
17691 		if (ssb_gige_must_flush_posted_writes(pdev))
17692 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17693 		if (ssb_gige_one_dma_at_once(pdev))
17694 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17695 		if (ssb_gige_have_roboswitch(pdev)) {
17696 			tg3_flag_set(tp, USE_PHYLIB);
17697 			tg3_flag_set(tp, ROBOSWITCH);
17698 		}
17699 		if (ssb_gige_is_rgmii(pdev))
17700 			tg3_flag_set(tp, RGMII_MODE);
17701 	}
17702 
17703 	/* The word/byte swap controls here control register access byte
17704 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17705 	 * setting below.
17706 	 */
17707 	tp->misc_host_ctrl =
17708 		MISC_HOST_CTRL_MASK_PCI_INT |
17709 		MISC_HOST_CTRL_WORD_SWAP |
17710 		MISC_HOST_CTRL_INDIR_ACCESS |
17711 		MISC_HOST_CTRL_PCISTATE_RW;
17712 
17713 	/* The NONFRM (non-frame) byte/word swap controls take effect
17714 	 * on descriptor entries, anything which isn't packet data.
17715 	 *
17716 	 * The StrongARM chips on the board (one for tx, one for rx)
17717 	 * are running in big-endian mode.
17718 	 */
17719 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17720 			GRC_MODE_WSWAP_NONFRM_DATA);
17721 #ifdef __BIG_ENDIAN
17722 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17723 #endif
17724 	spin_lock_init(&tp->lock);
17725 	spin_lock_init(&tp->indirect_lock);
17726 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17727 
17728 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17729 	if (!tp->regs) {
17730 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17731 		err = -ENOMEM;
17732 		goto err_out_free_dev;
17733 	}
17734 
17735 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17736 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17737 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17738 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17739 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17740 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17741 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17742 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17743 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17744 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17745 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17746 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17747 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17748 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17749 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17750 		tg3_flag_set(tp, ENABLE_APE);
17751 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17752 		if (!tp->aperegs) {
17753 			dev_err(&pdev->dev,
17754 				"Cannot map APE registers, aborting\n");
17755 			err = -ENOMEM;
17756 			goto err_out_iounmap;
17757 		}
17758 	}
17759 
17760 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17761 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17762 
17763 	dev->ethtool_ops = &tg3_ethtool_ops;
17764 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17765 	dev->netdev_ops = &tg3_netdev_ops;
17766 	dev->irq = pdev->irq;
17767 
17768 	err = tg3_get_invariants(tp, ent);
17769 	if (err) {
17770 		dev_err(&pdev->dev,
17771 			"Problem fetching invariants of chip, aborting\n");
17772 		goto err_out_apeunmap;
17773 	}
17774 
17775 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17776 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17777 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17778 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17779 	 * do DMA address check in tg3_start_xmit().
17780 	 */
17781 	if (tg3_flag(tp, IS_5788))
17782 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17783 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17784 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17785 #ifdef CONFIG_HIGHMEM
17786 		dma_mask = DMA_BIT_MASK(64);
17787 #endif
17788 	} else
17789 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17790 
17791 	/* Configure DMA attributes. */
17792 	if (dma_mask > DMA_BIT_MASK(32)) {
17793 		err = pci_set_dma_mask(pdev, dma_mask);
17794 		if (!err) {
17795 			features |= NETIF_F_HIGHDMA;
17796 			err = pci_set_consistent_dma_mask(pdev,
17797 							  persist_dma_mask);
17798 			if (err < 0) {
17799 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17800 					"DMA for consistent allocations\n");
17801 				goto err_out_apeunmap;
17802 			}
17803 		}
17804 	}
17805 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17806 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17807 		if (err) {
17808 			dev_err(&pdev->dev,
17809 				"No usable DMA configuration, aborting\n");
17810 			goto err_out_apeunmap;
17811 		}
17812 	}
17813 
17814 	tg3_init_bufmgr_config(tp);
17815 
17816 	/* 5700 B0 chips do not support checksumming correctly due
17817 	 * to hardware bugs.
17818 	 */
17819 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17820 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17821 
17822 		if (tg3_flag(tp, 5755_PLUS))
17823 			features |= NETIF_F_IPV6_CSUM;
17824 	}
17825 
17826 	/* TSO is on by default on chips that support hardware TSO.
17827 	 * Firmware TSO on older chips gives lower performance, so it
17828 	 * is off by default, but can be enabled using ethtool.
17829 	 */
17830 	if ((tg3_flag(tp, HW_TSO_1) ||
17831 	     tg3_flag(tp, HW_TSO_2) ||
17832 	     tg3_flag(tp, HW_TSO_3)) &&
17833 	    (features & NETIF_F_IP_CSUM))
17834 		features |= NETIF_F_TSO;
17835 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17836 		if (features & NETIF_F_IPV6_CSUM)
17837 			features |= NETIF_F_TSO6;
17838 		if (tg3_flag(tp, HW_TSO_3) ||
17839 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17840 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17841 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17842 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17843 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17844 			features |= NETIF_F_TSO_ECN;
17845 	}
17846 
17847 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17848 			 NETIF_F_HW_VLAN_CTAG_RX;
17849 	dev->vlan_features |= features;
17850 
17851 	/*
17852 	 * Add loopback capability only for a subset of devices that support
17853 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17854 	 * loopback for the remaining devices.
17855 	 */
17856 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17857 	    !tg3_flag(tp, CPMU_PRESENT))
17858 		/* Add the loopback capability */
17859 		features |= NETIF_F_LOOPBACK;
17860 
17861 	dev->hw_features |= features;
17862 	dev->priv_flags |= IFF_UNICAST_FLT;
17863 
17864 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17865 	dev->min_mtu = TG3_MIN_MTU;
17866 	dev->max_mtu = TG3_MAX_MTU(tp);
17867 
17868 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17869 	    !tg3_flag(tp, TSO_CAPABLE) &&
17870 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17871 		tg3_flag_set(tp, MAX_RXPEND_64);
17872 		tp->rx_pending = 63;
17873 	}
17874 
17875 	err = tg3_get_device_address(tp);
17876 	if (err) {
17877 		dev_err(&pdev->dev,
17878 			"Could not obtain valid ethernet address, aborting\n");
17879 		goto err_out_apeunmap;
17880 	}
17881 
17882 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17883 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17884 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17885 	for (i = 0; i < tp->irq_max; i++) {
17886 		struct tg3_napi *tnapi = &tp->napi[i];
17887 
17888 		tnapi->tp = tp;
17889 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17890 
17891 		tnapi->int_mbox = intmbx;
17892 		if (i <= 4)
17893 			intmbx += 0x8;
17894 		else
17895 			intmbx += 0x4;
17896 
17897 		tnapi->consmbox = rcvmbx;
17898 		tnapi->prodmbox = sndmbx;
17899 
17900 		if (i)
17901 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17902 		else
17903 			tnapi->coal_now = HOSTCC_MODE_NOW;
17904 
17905 		if (!tg3_flag(tp, SUPPORT_MSIX))
17906 			break;
17907 
17908 		/*
17909 		 * If we support MSIX, we'll be using RSS.  If we're using
17910 		 * RSS, the first vector only handles link interrupts and the
17911 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17912 		 * mailbox values for the next iteration.  The values we setup
17913 		 * above are still useful for the single vectored mode.
17914 		 */
17915 		if (!i)
17916 			continue;
17917 
17918 		rcvmbx += 0x8;
17919 
17920 		if (sndmbx & 0x4)
17921 			sndmbx -= 0x4;
17922 		else
17923 			sndmbx += 0xc;
17924 	}
17925 
17926 	/*
17927 	 * Reset chip in case UNDI or EFI driver did not shutdown
17928 	 * DMA self test will enable WDMAC and we'll see (spurious)
17929 	 * pending DMA on the PCI bus at that point.
17930 	 */
17931 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17932 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17933 		tg3_full_lock(tp, 0);
17934 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17935 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17936 		tg3_full_unlock(tp);
17937 	}
17938 
17939 	err = tg3_test_dma(tp);
17940 	if (err) {
17941 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17942 		goto err_out_apeunmap;
17943 	}
17944 
17945 	tg3_init_coal(tp);
17946 
17947 	pci_set_drvdata(pdev, dev);
17948 
17949 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17950 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17951 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17952 		tg3_flag_set(tp, PTP_CAPABLE);
17953 
17954 	tg3_timer_init(tp);
17955 
17956 	tg3_carrier_off(tp);
17957 
17958 	err = register_netdev(dev);
17959 	if (err) {
17960 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17961 		goto err_out_apeunmap;
17962 	}
17963 
17964 	if (tg3_flag(tp, PTP_CAPABLE)) {
17965 		tg3_ptp_init(tp);
17966 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17967 						   &tp->pdev->dev);
17968 		if (IS_ERR(tp->ptp_clock))
17969 			tp->ptp_clock = NULL;
17970 	}
17971 
17972 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17973 		    tp->board_part_number,
17974 		    tg3_chip_rev_id(tp),
17975 		    tg3_bus_string(tp, str),
17976 		    dev->dev_addr);
17977 
17978 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17979 		char *ethtype;
17980 
17981 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17982 			ethtype = "10/100Base-TX";
17983 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17984 			ethtype = "1000Base-SX";
17985 		else
17986 			ethtype = "10/100/1000Base-T";
17987 
17988 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17989 			    "(WireSpeed[%d], EEE[%d])\n",
17990 			    tg3_phy_string(tp), ethtype,
17991 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17992 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17993 	}
17994 
17995 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17996 		    (dev->features & NETIF_F_RXCSUM) != 0,
17997 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17998 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17999 		    tg3_flag(tp, ENABLE_ASF) != 0,
18000 		    tg3_flag(tp, TSO_CAPABLE) != 0);
18001 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18002 		    tp->dma_rwctrl,
18003 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18004 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18005 
18006 	pci_save_state(pdev);
18007 
18008 	return 0;
18009 
18010 err_out_apeunmap:
18011 	if (tp->aperegs) {
18012 		iounmap(tp->aperegs);
18013 		tp->aperegs = NULL;
18014 	}
18015 
18016 err_out_iounmap:
18017 	if (tp->regs) {
18018 		iounmap(tp->regs);
18019 		tp->regs = NULL;
18020 	}
18021 
18022 err_out_free_dev:
18023 	free_netdev(dev);
18024 
18025 err_out_free_res:
18026 	pci_release_regions(pdev);
18027 
18028 err_out_disable_pdev:
18029 	if (pci_is_enabled(pdev))
18030 		pci_disable_device(pdev);
18031 	return err;
18032 }
18033 
18034 static void tg3_remove_one(struct pci_dev *pdev)
18035 {
18036 	struct net_device *dev = pci_get_drvdata(pdev);
18037 
18038 	if (dev) {
18039 		struct tg3 *tp = netdev_priv(dev);
18040 
18041 		tg3_ptp_fini(tp);
18042 
18043 		release_firmware(tp->fw);
18044 
18045 		tg3_reset_task_cancel(tp);
18046 
18047 		if (tg3_flag(tp, USE_PHYLIB)) {
18048 			tg3_phy_fini(tp);
18049 			tg3_mdio_fini(tp);
18050 		}
18051 
18052 		unregister_netdev(dev);
18053 		if (tp->aperegs) {
18054 			iounmap(tp->aperegs);
18055 			tp->aperegs = NULL;
18056 		}
18057 		if (tp->regs) {
18058 			iounmap(tp->regs);
18059 			tp->regs = NULL;
18060 		}
18061 		free_netdev(dev);
18062 		pci_release_regions(pdev);
18063 		pci_disable_device(pdev);
18064 	}
18065 }
18066 
18067 #ifdef CONFIG_PM_SLEEP
18068 static int tg3_suspend(struct device *device)
18069 {
18070 	struct pci_dev *pdev = to_pci_dev(device);
18071 	struct net_device *dev = pci_get_drvdata(pdev);
18072 	struct tg3 *tp = netdev_priv(dev);
18073 	int err = 0;
18074 
18075 	rtnl_lock();
18076 
18077 	if (!netif_running(dev))
18078 		goto unlock;
18079 
18080 	tg3_reset_task_cancel(tp);
18081 	tg3_phy_stop(tp);
18082 	tg3_netif_stop(tp);
18083 
18084 	tg3_timer_stop(tp);
18085 
18086 	tg3_full_lock(tp, 1);
18087 	tg3_disable_ints(tp);
18088 	tg3_full_unlock(tp);
18089 
18090 	netif_device_detach(dev);
18091 
18092 	tg3_full_lock(tp, 0);
18093 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18094 	tg3_flag_clear(tp, INIT_COMPLETE);
18095 	tg3_full_unlock(tp);
18096 
18097 	err = tg3_power_down_prepare(tp);
18098 	if (err) {
18099 		int err2;
18100 
18101 		tg3_full_lock(tp, 0);
18102 
18103 		tg3_flag_set(tp, INIT_COMPLETE);
18104 		err2 = tg3_restart_hw(tp, true);
18105 		if (err2)
18106 			goto out;
18107 
18108 		tg3_timer_start(tp);
18109 
18110 		netif_device_attach(dev);
18111 		tg3_netif_start(tp);
18112 
18113 out:
18114 		tg3_full_unlock(tp);
18115 
18116 		if (!err2)
18117 			tg3_phy_start(tp);
18118 	}
18119 
18120 unlock:
18121 	rtnl_unlock();
18122 	return err;
18123 }
18124 
18125 static int tg3_resume(struct device *device)
18126 {
18127 	struct pci_dev *pdev = to_pci_dev(device);
18128 	struct net_device *dev = pci_get_drvdata(pdev);
18129 	struct tg3 *tp = netdev_priv(dev);
18130 	int err = 0;
18131 
18132 	rtnl_lock();
18133 
18134 	if (!netif_running(dev))
18135 		goto unlock;
18136 
18137 	netif_device_attach(dev);
18138 
18139 	tg3_full_lock(tp, 0);
18140 
18141 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18142 
18143 	tg3_flag_set(tp, INIT_COMPLETE);
18144 	err = tg3_restart_hw(tp,
18145 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18146 	if (err)
18147 		goto out;
18148 
18149 	tg3_timer_start(tp);
18150 
18151 	tg3_netif_start(tp);
18152 
18153 out:
18154 	tg3_full_unlock(tp);
18155 
18156 	if (!err)
18157 		tg3_phy_start(tp);
18158 
18159 unlock:
18160 	rtnl_unlock();
18161 	return err;
18162 }
18163 #endif /* CONFIG_PM_SLEEP */
18164 
18165 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18166 
18167 static void tg3_shutdown(struct pci_dev *pdev)
18168 {
18169 	struct net_device *dev = pci_get_drvdata(pdev);
18170 	struct tg3 *tp = netdev_priv(dev);
18171 
18172 	rtnl_lock();
18173 	netif_device_detach(dev);
18174 
18175 	if (netif_running(dev))
18176 		dev_close(dev);
18177 
18178 	if (system_state == SYSTEM_POWER_OFF)
18179 		tg3_power_down(tp);
18180 
18181 	rtnl_unlock();
18182 }
18183 
18184 /**
18185  * tg3_io_error_detected - called when PCI error is detected
18186  * @pdev: Pointer to PCI device
18187  * @state: The current pci connection state
18188  *
18189  * This function is called after a PCI bus error affecting
18190  * this device has been detected.
18191  */
18192 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18193 					      pci_channel_state_t state)
18194 {
18195 	struct net_device *netdev = pci_get_drvdata(pdev);
18196 	struct tg3 *tp = netdev_priv(netdev);
18197 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18198 
18199 	netdev_info(netdev, "PCI I/O error detected\n");
18200 
18201 	rtnl_lock();
18202 
18203 	/* We probably don't have netdev yet */
18204 	if (!netdev || !netif_running(netdev))
18205 		goto done;
18206 
18207 	/* We needn't recover from permanent error */
18208 	if (state == pci_channel_io_frozen)
18209 		tp->pcierr_recovery = true;
18210 
18211 	tg3_phy_stop(tp);
18212 
18213 	tg3_netif_stop(tp);
18214 
18215 	tg3_timer_stop(tp);
18216 
18217 	/* Want to make sure that the reset task doesn't run */
18218 	tg3_reset_task_cancel(tp);
18219 
18220 	netif_device_detach(netdev);
18221 
18222 	/* Clean up software state, even if MMIO is blocked */
18223 	tg3_full_lock(tp, 0);
18224 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18225 	tg3_full_unlock(tp);
18226 
18227 done:
18228 	if (state == pci_channel_io_perm_failure) {
18229 		if (netdev) {
18230 			tg3_napi_enable(tp);
18231 			dev_close(netdev);
18232 		}
18233 		err = PCI_ERS_RESULT_DISCONNECT;
18234 	} else {
18235 		pci_disable_device(pdev);
18236 	}
18237 
18238 	rtnl_unlock();
18239 
18240 	return err;
18241 }
18242 
18243 /**
18244  * tg3_io_slot_reset - called after the pci bus has been reset.
18245  * @pdev: Pointer to PCI device
18246  *
18247  * Restart the card from scratch, as if from a cold-boot.
18248  * At this point, the card has exprienced a hard reset,
18249  * followed by fixups by BIOS, and has its config space
18250  * set up identically to what it was at cold boot.
18251  */
18252 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18253 {
18254 	struct net_device *netdev = pci_get_drvdata(pdev);
18255 	struct tg3 *tp = netdev_priv(netdev);
18256 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18257 	int err;
18258 
18259 	rtnl_lock();
18260 
18261 	if (pci_enable_device(pdev)) {
18262 		dev_err(&pdev->dev,
18263 			"Cannot re-enable PCI device after reset.\n");
18264 		goto done;
18265 	}
18266 
18267 	pci_set_master(pdev);
18268 	pci_restore_state(pdev);
18269 	pci_save_state(pdev);
18270 
18271 	if (!netdev || !netif_running(netdev)) {
18272 		rc = PCI_ERS_RESULT_RECOVERED;
18273 		goto done;
18274 	}
18275 
18276 	err = tg3_power_up(tp);
18277 	if (err)
18278 		goto done;
18279 
18280 	rc = PCI_ERS_RESULT_RECOVERED;
18281 
18282 done:
18283 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18284 		tg3_napi_enable(tp);
18285 		dev_close(netdev);
18286 	}
18287 	rtnl_unlock();
18288 
18289 	return rc;
18290 }
18291 
18292 /**
18293  * tg3_io_resume - called when traffic can start flowing again.
18294  * @pdev: Pointer to PCI device
18295  *
18296  * This callback is called when the error recovery driver tells
18297  * us that its OK to resume normal operation.
18298  */
18299 static void tg3_io_resume(struct pci_dev *pdev)
18300 {
18301 	struct net_device *netdev = pci_get_drvdata(pdev);
18302 	struct tg3 *tp = netdev_priv(netdev);
18303 	int err;
18304 
18305 	rtnl_lock();
18306 
18307 	if (!netdev || !netif_running(netdev))
18308 		goto done;
18309 
18310 	tg3_full_lock(tp, 0);
18311 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18312 	tg3_flag_set(tp, INIT_COMPLETE);
18313 	err = tg3_restart_hw(tp, true);
18314 	if (err) {
18315 		tg3_full_unlock(tp);
18316 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18317 		goto done;
18318 	}
18319 
18320 	netif_device_attach(netdev);
18321 
18322 	tg3_timer_start(tp);
18323 
18324 	tg3_netif_start(tp);
18325 
18326 	tg3_full_unlock(tp);
18327 
18328 	tg3_phy_start(tp);
18329 
18330 done:
18331 	tp->pcierr_recovery = false;
18332 	rtnl_unlock();
18333 }
18334 
18335 static const struct pci_error_handlers tg3_err_handler = {
18336 	.error_detected	= tg3_io_error_detected,
18337 	.slot_reset	= tg3_io_slot_reset,
18338 	.resume		= tg3_io_resume
18339 };
18340 
18341 static struct pci_driver tg3_driver = {
18342 	.name		= DRV_MODULE_NAME,
18343 	.id_table	= tg3_pci_tbl,
18344 	.probe		= tg3_init_one,
18345 	.remove		= tg3_remove_one,
18346 	.err_handler	= &tg3_err_handler,
18347 	.driver.pm	= &tg3_pm_ops,
18348 	.shutdown	= tg3_shutdown,
18349 };
18350 
18351 module_pci_driver(tg3_driver);
18352