xref: /linux/drivers/net/ethernet/broadcom/tg3.c (revision 02000b55850deeadffe433e4b4930a8831f477de)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  *
10  * Firmware is:
11  *	Derived from proprietary unpublished source code,
12  *	Copyright (C) 2000-2016 Broadcom Corporation.
13  *	Copyright (C) 2016-2017 Broadcom Ltd.
14  *
15  *	Permission is hereby granted for the distribution of this firmware
16  *	data in hexadecimal or equivalent format, provided this copyright
17  *	notice is accompanying it.
18  */
19 
20 
21 #include <linux/module.h>
22 #include <linux/moduleparam.h>
23 #include <linux/stringify.h>
24 #include <linux/kernel.h>
25 #include <linux/sched/signal.h>
26 #include <linux/types.h>
27 #include <linux/compiler.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/in.h>
31 #include <linux/interrupt.h>
32 #include <linux/ioport.h>
33 #include <linux/pci.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/mdio.h>
39 #include <linux/mii.h>
40 #include <linux/phy.h>
41 #include <linux/brcmphy.h>
42 #include <linux/if.h>
43 #include <linux/if_vlan.h>
44 #include <linux/ip.h>
45 #include <linux/tcp.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/dma-mapping.h>
49 #include <linux/firmware.h>
50 #include <linux/ssb/ssb_driver_gige.h>
51 #include <linux/hwmon.h>
52 #include <linux/hwmon-sysfs.h>
53 
54 #include <net/checksum.h>
55 #include <net/ip.h>
56 
57 #include <linux/io.h>
58 #include <asm/byteorder.h>
59 #include <linux/uaccess.h>
60 
61 #include <uapi/linux/net_tstamp.h>
62 #include <linux/ptp_clock_kernel.h>
63 
64 #ifdef CONFIG_SPARC
65 #include <asm/idprom.h>
66 #include <asm/prom.h>
67 #endif
68 
69 #define BAR_0	0
70 #define BAR_2	2
71 
72 #include "tg3.h"
73 
74 /* Functions & macros to verify TG3_FLAGS types */
75 
76 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
77 {
78 	return test_bit(flag, bits);
79 }
80 
81 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
82 {
83 	set_bit(flag, bits);
84 }
85 
86 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
87 {
88 	clear_bit(flag, bits);
89 }
90 
91 #define tg3_flag(tp, flag)				\
92 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define tg3_flag_set(tp, flag)				\
94 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_clear(tp, flag)			\
96 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
97 
98 #define DRV_MODULE_NAME		"tg3"
99 #define TG3_MAJ_NUM			3
100 #define TG3_MIN_NUM			137
101 #define DRV_MODULE_VERSION	\
102 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
103 #define DRV_MODULE_RELDATE	"May 11, 2014"
104 
105 #define RESET_KIND_SHUTDOWN	0
106 #define RESET_KIND_INIT		1
107 #define RESET_KIND_SUSPEND	2
108 
109 #define TG3_DEF_RX_MODE		0
110 #define TG3_DEF_TX_MODE		0
111 #define TG3_DEF_MSG_ENABLE	  \
112 	(NETIF_MSG_DRV		| \
113 	 NETIF_MSG_PROBE	| \
114 	 NETIF_MSG_LINK		| \
115 	 NETIF_MSG_TIMER	| \
116 	 NETIF_MSG_IFDOWN	| \
117 	 NETIF_MSG_IFUP		| \
118 	 NETIF_MSG_RX_ERR	| \
119 	 NETIF_MSG_TX_ERR)
120 
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
122 
123 /* length of time before we decide the hardware is borked,
124  * and dev->tx_timeout() should be called to fix the problem
125  */
126 
127 #define TG3_TX_TIMEOUT			(5 * HZ)
128 
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU			ETH_ZLEN
131 #define TG3_MAX_MTU(tp)	\
132 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
133 
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135  * You can't change the ring sizes, but you can change where you place
136  * them in the NIC onboard memory.
137  */
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING		200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
146 
147 /* Do not place this n-ring entries value into the tp struct itself,
148  * we really want to expose these constants to GCC so that modulo et
149  * al.  operations are done with shifts and masks instead of with
150  * hw multiply/modulo instructions.  Another solution would be to
151  * replace things like '% foo' with '& (foo - 1)'.
152  */
153 
154 #define TG3_TX_RING_SIZE		512
155 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
156 
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
164 				 TG3_TX_RING_SIZE)
165 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
166 
167 #define TG3_DMA_BYTE_ENAB		64
168 
169 #define TG3_RX_STD_DMA_SZ		1536
170 #define TG3_RX_JMB_DMA_SZ		9046
171 
172 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
173 
174 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
176 
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
179 
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
182 
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184  * that are at least dword aligned when used in PCIX mode.  The driver
185  * works around this bug by double copying the packet.  This workaround
186  * is built into the normal double copy length check for efficiency.
187  *
188  * However, the double copy is only necessary on those architectures
189  * where unaligned memory accesses are inefficient.  For those architectures
190  * where unaligned memory accesses incur little penalty, we can reintegrate
191  * the 5701 in the normal rx path.  Doing so saves a device structure
192  * dereference by hardcoding the double copy threshold in place.
193  */
194 #define TG3_RX_COPY_THRESHOLD		256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
197 #else
198 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
199 #endif
200 
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
203 #else
204 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
205 #endif
206 
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K		2048
210 #define TG3_TX_BD_DMA_MAX_4K		4096
211 
212 #define TG3_RAW_IP_ALIGN 2
213 
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
216 
217 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
218 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
219 
220 #define FIRMWARE_TG3		"tigon/tg3.bin"
221 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
224 
225 static char version[] =
226 	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
227 
228 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
229 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
230 MODULE_LICENSE("GPL");
231 MODULE_VERSION(DRV_MODULE_VERSION);
232 MODULE_FIRMWARE(FIRMWARE_TG3);
233 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
234 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
235 
236 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
237 module_param(tg3_debug, int, 0);
238 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
239 
240 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
241 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
242 
243 static const struct pci_device_id tg3_pci_tbl[] = {
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
260 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
263 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 			TG3_DRV_DATA_FLAG_5705_10_100},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
266 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 			TG3_DRV_DATA_FLAG_5705_10_100},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
270 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
271 			TG3_DRV_DATA_FLAG_5705_10_100},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
278 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
284 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
289 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
290 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
291 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
292 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
293 			PCI_VENDOR_ID_LENOVO,
294 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
295 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
298 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
314 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
315 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
316 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
317 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
319 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
321 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
322 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
326 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
336 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
338 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
355 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
356 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
357 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
358 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
359 	{}
360 };
361 
362 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
363 
364 static const struct {
365 	const char string[ETH_GSTRING_LEN];
366 } ethtool_stats_keys[] = {
367 	{ "rx_octets" },
368 	{ "rx_fragments" },
369 	{ "rx_ucast_packets" },
370 	{ "rx_mcast_packets" },
371 	{ "rx_bcast_packets" },
372 	{ "rx_fcs_errors" },
373 	{ "rx_align_errors" },
374 	{ "rx_xon_pause_rcvd" },
375 	{ "rx_xoff_pause_rcvd" },
376 	{ "rx_mac_ctrl_rcvd" },
377 	{ "rx_xoff_entered" },
378 	{ "rx_frame_too_long_errors" },
379 	{ "rx_jabbers" },
380 	{ "rx_undersize_packets" },
381 	{ "rx_in_length_errors" },
382 	{ "rx_out_length_errors" },
383 	{ "rx_64_or_less_octet_packets" },
384 	{ "rx_65_to_127_octet_packets" },
385 	{ "rx_128_to_255_octet_packets" },
386 	{ "rx_256_to_511_octet_packets" },
387 	{ "rx_512_to_1023_octet_packets" },
388 	{ "rx_1024_to_1522_octet_packets" },
389 	{ "rx_1523_to_2047_octet_packets" },
390 	{ "rx_2048_to_4095_octet_packets" },
391 	{ "rx_4096_to_8191_octet_packets" },
392 	{ "rx_8192_to_9022_octet_packets" },
393 
394 	{ "tx_octets" },
395 	{ "tx_collisions" },
396 
397 	{ "tx_xon_sent" },
398 	{ "tx_xoff_sent" },
399 	{ "tx_flow_control" },
400 	{ "tx_mac_errors" },
401 	{ "tx_single_collisions" },
402 	{ "tx_mult_collisions" },
403 	{ "tx_deferred" },
404 	{ "tx_excessive_collisions" },
405 	{ "tx_late_collisions" },
406 	{ "tx_collide_2times" },
407 	{ "tx_collide_3times" },
408 	{ "tx_collide_4times" },
409 	{ "tx_collide_5times" },
410 	{ "tx_collide_6times" },
411 	{ "tx_collide_7times" },
412 	{ "tx_collide_8times" },
413 	{ "tx_collide_9times" },
414 	{ "tx_collide_10times" },
415 	{ "tx_collide_11times" },
416 	{ "tx_collide_12times" },
417 	{ "tx_collide_13times" },
418 	{ "tx_collide_14times" },
419 	{ "tx_collide_15times" },
420 	{ "tx_ucast_packets" },
421 	{ "tx_mcast_packets" },
422 	{ "tx_bcast_packets" },
423 	{ "tx_carrier_sense_errors" },
424 	{ "tx_discards" },
425 	{ "tx_errors" },
426 
427 	{ "dma_writeq_full" },
428 	{ "dma_write_prioq_full" },
429 	{ "rxbds_empty" },
430 	{ "rx_discards" },
431 	{ "rx_errors" },
432 	{ "rx_threshold_hit" },
433 
434 	{ "dma_readq_full" },
435 	{ "dma_read_prioq_full" },
436 	{ "tx_comp_queue_full" },
437 
438 	{ "ring_set_send_prod_index" },
439 	{ "ring_status_update" },
440 	{ "nic_irqs" },
441 	{ "nic_avoided_irqs" },
442 	{ "nic_tx_threshold_hit" },
443 
444 	{ "mbuf_lwm_thresh_hit" },
445 };
446 
447 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
448 #define TG3_NVRAM_TEST		0
449 #define TG3_LINK_TEST		1
450 #define TG3_REGISTER_TEST	2
451 #define TG3_MEMORY_TEST		3
452 #define TG3_MAC_LOOPB_TEST	4
453 #define TG3_PHY_LOOPB_TEST	5
454 #define TG3_EXT_LOOPB_TEST	6
455 #define TG3_INTERRUPT_TEST	7
456 
457 
458 static const struct {
459 	const char string[ETH_GSTRING_LEN];
460 } ethtool_test_keys[] = {
461 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
462 	[TG3_LINK_TEST]		= { "link test         (online) " },
463 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
464 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
465 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
466 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
467 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
468 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
469 };
470 
471 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
472 
473 
474 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
475 {
476 	writel(val, tp->regs + off);
477 }
478 
479 static u32 tg3_read32(struct tg3 *tp, u32 off)
480 {
481 	return readl(tp->regs + off);
482 }
483 
484 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
485 {
486 	writel(val, tp->aperegs + off);
487 }
488 
489 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
490 {
491 	return readl(tp->aperegs + off);
492 }
493 
494 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
495 {
496 	unsigned long flags;
497 
498 	spin_lock_irqsave(&tp->indirect_lock, flags);
499 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
500 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
501 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
502 }
503 
504 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
505 {
506 	writel(val, tp->regs + off);
507 	readl(tp->regs + off);
508 }
509 
510 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
511 {
512 	unsigned long flags;
513 	u32 val;
514 
515 	spin_lock_irqsave(&tp->indirect_lock, flags);
516 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
517 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
518 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
519 	return val;
520 }
521 
522 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
523 {
524 	unsigned long flags;
525 
526 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
527 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
528 				       TG3_64BIT_REG_LOW, val);
529 		return;
530 	}
531 	if (off == TG3_RX_STD_PROD_IDX_REG) {
532 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
533 				       TG3_64BIT_REG_LOW, val);
534 		return;
535 	}
536 
537 	spin_lock_irqsave(&tp->indirect_lock, flags);
538 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
539 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
540 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
541 
542 	/* In indirect mode when disabling interrupts, we also need
543 	 * to clear the interrupt bit in the GRC local ctrl register.
544 	 */
545 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
546 	    (val == 0x1)) {
547 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
548 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
549 	}
550 }
551 
552 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
553 {
554 	unsigned long flags;
555 	u32 val;
556 
557 	spin_lock_irqsave(&tp->indirect_lock, flags);
558 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
559 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
560 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
561 	return val;
562 }
563 
564 /* usec_wait specifies the wait time in usec when writing to certain registers
565  * where it is unsafe to read back the register without some delay.
566  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
567  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
568  */
569 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
570 {
571 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
572 		/* Non-posted methods */
573 		tp->write32(tp, off, val);
574 	else {
575 		/* Posted method */
576 		tg3_write32(tp, off, val);
577 		if (usec_wait)
578 			udelay(usec_wait);
579 		tp->read32(tp, off);
580 	}
581 	/* Wait again after the read for the posted method to guarantee that
582 	 * the wait time is met.
583 	 */
584 	if (usec_wait)
585 		udelay(usec_wait);
586 }
587 
588 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
589 {
590 	tp->write32_mbox(tp, off, val);
591 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
592 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
593 	     !tg3_flag(tp, ICH_WORKAROUND)))
594 		tp->read32_mbox(tp, off);
595 }
596 
597 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
598 {
599 	void __iomem *mbox = tp->regs + off;
600 	writel(val, mbox);
601 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
602 		writel(val, mbox);
603 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
604 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
605 		readl(mbox);
606 }
607 
608 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
609 {
610 	return readl(tp->regs + off + GRCMBOX_BASE);
611 }
612 
613 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
614 {
615 	writel(val, tp->regs + off + GRCMBOX_BASE);
616 }
617 
618 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
619 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
620 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
621 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
622 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
623 
624 #define tw32(reg, val)			tp->write32(tp, reg, val)
625 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
626 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
627 #define tr32(reg)			tp->read32(tp, reg)
628 
629 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
630 {
631 	unsigned long flags;
632 
633 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
634 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
635 		return;
636 
637 	spin_lock_irqsave(&tp->indirect_lock, flags);
638 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
639 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
640 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
641 
642 		/* Always leave this as zero. */
643 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
644 	} else {
645 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
646 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
647 
648 		/* Always leave this as zero. */
649 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
650 	}
651 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
652 }
653 
654 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
655 {
656 	unsigned long flags;
657 
658 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
659 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
660 		*val = 0;
661 		return;
662 	}
663 
664 	spin_lock_irqsave(&tp->indirect_lock, flags);
665 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
666 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
667 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
668 
669 		/* Always leave this as zero. */
670 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
671 	} else {
672 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
673 		*val = tr32(TG3PCI_MEM_WIN_DATA);
674 
675 		/* Always leave this as zero. */
676 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
677 	}
678 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
679 }
680 
681 static void tg3_ape_lock_init(struct tg3 *tp)
682 {
683 	int i;
684 	u32 regbase, bit;
685 
686 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
687 		regbase = TG3_APE_LOCK_GRANT;
688 	else
689 		regbase = TG3_APE_PER_LOCK_GRANT;
690 
691 	/* Make sure the driver hasn't any stale locks. */
692 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
693 		switch (i) {
694 		case TG3_APE_LOCK_PHY0:
695 		case TG3_APE_LOCK_PHY1:
696 		case TG3_APE_LOCK_PHY2:
697 		case TG3_APE_LOCK_PHY3:
698 			bit = APE_LOCK_GRANT_DRIVER;
699 			break;
700 		default:
701 			if (!tp->pci_fn)
702 				bit = APE_LOCK_GRANT_DRIVER;
703 			else
704 				bit = 1 << tp->pci_fn;
705 		}
706 		tg3_ape_write32(tp, regbase + 4 * i, bit);
707 	}
708 
709 }
710 
711 static int tg3_ape_lock(struct tg3 *tp, int locknum)
712 {
713 	int i, off;
714 	int ret = 0;
715 	u32 status, req, gnt, bit;
716 
717 	if (!tg3_flag(tp, ENABLE_APE))
718 		return 0;
719 
720 	switch (locknum) {
721 	case TG3_APE_LOCK_GPIO:
722 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
723 			return 0;
724 		/* else: fall through */
725 	case TG3_APE_LOCK_GRC:
726 	case TG3_APE_LOCK_MEM:
727 		if (!tp->pci_fn)
728 			bit = APE_LOCK_REQ_DRIVER;
729 		else
730 			bit = 1 << tp->pci_fn;
731 		break;
732 	case TG3_APE_LOCK_PHY0:
733 	case TG3_APE_LOCK_PHY1:
734 	case TG3_APE_LOCK_PHY2:
735 	case TG3_APE_LOCK_PHY3:
736 		bit = APE_LOCK_REQ_DRIVER;
737 		break;
738 	default:
739 		return -EINVAL;
740 	}
741 
742 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
743 		req = TG3_APE_LOCK_REQ;
744 		gnt = TG3_APE_LOCK_GRANT;
745 	} else {
746 		req = TG3_APE_PER_LOCK_REQ;
747 		gnt = TG3_APE_PER_LOCK_GRANT;
748 	}
749 
750 	off = 4 * locknum;
751 
752 	tg3_ape_write32(tp, req + off, bit);
753 
754 	/* Wait for up to 1 millisecond to acquire lock. */
755 	for (i = 0; i < 100; i++) {
756 		status = tg3_ape_read32(tp, gnt + off);
757 		if (status == bit)
758 			break;
759 		if (pci_channel_offline(tp->pdev))
760 			break;
761 
762 		udelay(10);
763 	}
764 
765 	if (status != bit) {
766 		/* Revoke the lock request. */
767 		tg3_ape_write32(tp, gnt + off, bit);
768 		ret = -EBUSY;
769 	}
770 
771 	return ret;
772 }
773 
774 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
775 {
776 	u32 gnt, bit;
777 
778 	if (!tg3_flag(tp, ENABLE_APE))
779 		return;
780 
781 	switch (locknum) {
782 	case TG3_APE_LOCK_GPIO:
783 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
784 			return;
785 		/* else: fall through */
786 	case TG3_APE_LOCK_GRC:
787 	case TG3_APE_LOCK_MEM:
788 		if (!tp->pci_fn)
789 			bit = APE_LOCK_GRANT_DRIVER;
790 		else
791 			bit = 1 << tp->pci_fn;
792 		break;
793 	case TG3_APE_LOCK_PHY0:
794 	case TG3_APE_LOCK_PHY1:
795 	case TG3_APE_LOCK_PHY2:
796 	case TG3_APE_LOCK_PHY3:
797 		bit = APE_LOCK_GRANT_DRIVER;
798 		break;
799 	default:
800 		return;
801 	}
802 
803 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
804 		gnt = TG3_APE_LOCK_GRANT;
805 	else
806 		gnt = TG3_APE_PER_LOCK_GRANT;
807 
808 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
809 }
810 
811 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
812 {
813 	u32 apedata;
814 
815 	while (timeout_us) {
816 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
817 			return -EBUSY;
818 
819 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
820 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
821 			break;
822 
823 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
824 
825 		udelay(10);
826 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
827 	}
828 
829 	return timeout_us ? 0 : -EBUSY;
830 }
831 
832 #ifdef CONFIG_TIGON3_HWMON
833 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
834 {
835 	u32 i, apedata;
836 
837 	for (i = 0; i < timeout_us / 10; i++) {
838 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
839 
840 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
841 			break;
842 
843 		udelay(10);
844 	}
845 
846 	return i == timeout_us / 10;
847 }
848 
849 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
850 				   u32 len)
851 {
852 	int err;
853 	u32 i, bufoff, msgoff, maxlen, apedata;
854 
855 	if (!tg3_flag(tp, APE_HAS_NCSI))
856 		return 0;
857 
858 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
859 	if (apedata != APE_SEG_SIG_MAGIC)
860 		return -ENODEV;
861 
862 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
863 	if (!(apedata & APE_FW_STATUS_READY))
864 		return -EAGAIN;
865 
866 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
867 		 TG3_APE_SHMEM_BASE;
868 	msgoff = bufoff + 2 * sizeof(u32);
869 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
870 
871 	while (len) {
872 		u32 length;
873 
874 		/* Cap xfer sizes to scratchpad limits. */
875 		length = (len > maxlen) ? maxlen : len;
876 		len -= length;
877 
878 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
879 		if (!(apedata & APE_FW_STATUS_READY))
880 			return -EAGAIN;
881 
882 		/* Wait for up to 1 msec for APE to service previous event. */
883 		err = tg3_ape_event_lock(tp, 1000);
884 		if (err)
885 			return err;
886 
887 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
888 			  APE_EVENT_STATUS_SCRTCHPD_READ |
889 			  APE_EVENT_STATUS_EVENT_PENDING;
890 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
891 
892 		tg3_ape_write32(tp, bufoff, base_off);
893 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
894 
895 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
896 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
897 
898 		base_off += length;
899 
900 		if (tg3_ape_wait_for_event(tp, 30000))
901 			return -EAGAIN;
902 
903 		for (i = 0; length; i += 4, length -= 4) {
904 			u32 val = tg3_ape_read32(tp, msgoff + i);
905 			memcpy(data, &val, sizeof(u32));
906 			data++;
907 		}
908 	}
909 
910 	return 0;
911 }
912 #endif
913 
914 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
915 {
916 	int err;
917 	u32 apedata;
918 
919 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
920 	if (apedata != APE_SEG_SIG_MAGIC)
921 		return -EAGAIN;
922 
923 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
924 	if (!(apedata & APE_FW_STATUS_READY))
925 		return -EAGAIN;
926 
927 	/* Wait for up to 20 millisecond for APE to service previous event. */
928 	err = tg3_ape_event_lock(tp, 20000);
929 	if (err)
930 		return err;
931 
932 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
933 			event | APE_EVENT_STATUS_EVENT_PENDING);
934 
935 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
936 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
937 
938 	return 0;
939 }
940 
941 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
942 {
943 	u32 event;
944 	u32 apedata;
945 
946 	if (!tg3_flag(tp, ENABLE_APE))
947 		return;
948 
949 	switch (kind) {
950 	case RESET_KIND_INIT:
951 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
952 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
953 				APE_HOST_SEG_SIG_MAGIC);
954 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
955 				APE_HOST_SEG_LEN_MAGIC);
956 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
957 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
958 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
959 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
960 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
961 				APE_HOST_BEHAV_NO_PHYLOCK);
962 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
963 				    TG3_APE_HOST_DRVR_STATE_START);
964 
965 		event = APE_EVENT_STATUS_STATE_START;
966 		break;
967 	case RESET_KIND_SHUTDOWN:
968 		if (device_may_wakeup(&tp->pdev->dev) &&
969 		    tg3_flag(tp, WOL_ENABLE)) {
970 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
971 					    TG3_APE_HOST_WOL_SPEED_AUTO);
972 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
973 		} else
974 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
975 
976 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
977 
978 		event = APE_EVENT_STATUS_STATE_UNLOAD;
979 		break;
980 	default:
981 		return;
982 	}
983 
984 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
985 
986 	tg3_ape_send_event(tp, event);
987 }
988 
989 static void tg3_send_ape_heartbeat(struct tg3 *tp,
990 				   unsigned long interval)
991 {
992 	/* Check if hb interval has exceeded */
993 	if (!tg3_flag(tp, ENABLE_APE) ||
994 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
995 		return;
996 
997 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
998 	tp->ape_hb_jiffies = jiffies;
999 }
1000 
1001 static void tg3_disable_ints(struct tg3 *tp)
1002 {
1003 	int i;
1004 
1005 	tw32(TG3PCI_MISC_HOST_CTRL,
1006 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1007 	for (i = 0; i < tp->irq_max; i++)
1008 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1009 }
1010 
1011 static void tg3_enable_ints(struct tg3 *tp)
1012 {
1013 	int i;
1014 
1015 	tp->irq_sync = 0;
1016 	wmb();
1017 
1018 	tw32(TG3PCI_MISC_HOST_CTRL,
1019 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1020 
1021 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1022 	for (i = 0; i < tp->irq_cnt; i++) {
1023 		struct tg3_napi *tnapi = &tp->napi[i];
1024 
1025 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1026 		if (tg3_flag(tp, 1SHOT_MSI))
1027 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1028 
1029 		tp->coal_now |= tnapi->coal_now;
1030 	}
1031 
1032 	/* Force an initial interrupt */
1033 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1034 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1035 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1036 	else
1037 		tw32(HOSTCC_MODE, tp->coal_now);
1038 
1039 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1040 }
1041 
1042 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1043 {
1044 	struct tg3 *tp = tnapi->tp;
1045 	struct tg3_hw_status *sblk = tnapi->hw_status;
1046 	unsigned int work_exists = 0;
1047 
1048 	/* check for phy events */
1049 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1050 		if (sblk->status & SD_STATUS_LINK_CHG)
1051 			work_exists = 1;
1052 	}
1053 
1054 	/* check for TX work to do */
1055 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1056 		work_exists = 1;
1057 
1058 	/* check for RX work to do */
1059 	if (tnapi->rx_rcb_prod_idx &&
1060 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1061 		work_exists = 1;
1062 
1063 	return work_exists;
1064 }
1065 
1066 /* tg3_int_reenable
1067  *  similar to tg3_enable_ints, but it accurately determines whether there
1068  *  is new work pending and can return without flushing the PIO write
1069  *  which reenables interrupts
1070  */
1071 static void tg3_int_reenable(struct tg3_napi *tnapi)
1072 {
1073 	struct tg3 *tp = tnapi->tp;
1074 
1075 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1076 	mmiowb();
1077 
1078 	/* When doing tagged status, this work check is unnecessary.
1079 	 * The last_tag we write above tells the chip which piece of
1080 	 * work we've completed.
1081 	 */
1082 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1083 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1084 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1085 }
1086 
1087 static void tg3_switch_clocks(struct tg3 *tp)
1088 {
1089 	u32 clock_ctrl;
1090 	u32 orig_clock_ctrl;
1091 
1092 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1093 		return;
1094 
1095 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1096 
1097 	orig_clock_ctrl = clock_ctrl;
1098 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1099 		       CLOCK_CTRL_CLKRUN_OENABLE |
1100 		       0x1f);
1101 	tp->pci_clock_ctrl = clock_ctrl;
1102 
1103 	if (tg3_flag(tp, 5705_PLUS)) {
1104 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1105 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1106 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1107 		}
1108 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1109 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1110 			    clock_ctrl |
1111 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1112 			    40);
1113 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1114 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1115 			    40);
1116 	}
1117 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1118 }
1119 
1120 #define PHY_BUSY_LOOPS	5000
1121 
1122 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1123 			 u32 *val)
1124 {
1125 	u32 frame_val;
1126 	unsigned int loops;
1127 	int ret;
1128 
1129 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1130 		tw32_f(MAC_MI_MODE,
1131 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1132 		udelay(80);
1133 	}
1134 
1135 	tg3_ape_lock(tp, tp->phy_ape_lock);
1136 
1137 	*val = 0x0;
1138 
1139 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1140 		      MI_COM_PHY_ADDR_MASK);
1141 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1142 		      MI_COM_REG_ADDR_MASK);
1143 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1144 
1145 	tw32_f(MAC_MI_COM, frame_val);
1146 
1147 	loops = PHY_BUSY_LOOPS;
1148 	while (loops != 0) {
1149 		udelay(10);
1150 		frame_val = tr32(MAC_MI_COM);
1151 
1152 		if ((frame_val & MI_COM_BUSY) == 0) {
1153 			udelay(5);
1154 			frame_val = tr32(MAC_MI_COM);
1155 			break;
1156 		}
1157 		loops -= 1;
1158 	}
1159 
1160 	ret = -EBUSY;
1161 	if (loops != 0) {
1162 		*val = frame_val & MI_COM_DATA_MASK;
1163 		ret = 0;
1164 	}
1165 
1166 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1167 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1168 		udelay(80);
1169 	}
1170 
1171 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1172 
1173 	return ret;
1174 }
1175 
1176 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1177 {
1178 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1179 }
1180 
1181 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1182 			  u32 val)
1183 {
1184 	u32 frame_val;
1185 	unsigned int loops;
1186 	int ret;
1187 
1188 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1189 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1190 		return 0;
1191 
1192 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1193 		tw32_f(MAC_MI_MODE,
1194 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1195 		udelay(80);
1196 	}
1197 
1198 	tg3_ape_lock(tp, tp->phy_ape_lock);
1199 
1200 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1201 		      MI_COM_PHY_ADDR_MASK);
1202 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1203 		      MI_COM_REG_ADDR_MASK);
1204 	frame_val |= (val & MI_COM_DATA_MASK);
1205 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1206 
1207 	tw32_f(MAC_MI_COM, frame_val);
1208 
1209 	loops = PHY_BUSY_LOOPS;
1210 	while (loops != 0) {
1211 		udelay(10);
1212 		frame_val = tr32(MAC_MI_COM);
1213 		if ((frame_val & MI_COM_BUSY) == 0) {
1214 			udelay(5);
1215 			frame_val = tr32(MAC_MI_COM);
1216 			break;
1217 		}
1218 		loops -= 1;
1219 	}
1220 
1221 	ret = -EBUSY;
1222 	if (loops != 0)
1223 		ret = 0;
1224 
1225 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1226 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1227 		udelay(80);
1228 	}
1229 
1230 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1231 
1232 	return ret;
1233 }
1234 
1235 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1236 {
1237 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1238 }
1239 
1240 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1241 {
1242 	int err;
1243 
1244 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1245 	if (err)
1246 		goto done;
1247 
1248 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1249 	if (err)
1250 		goto done;
1251 
1252 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1253 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1254 	if (err)
1255 		goto done;
1256 
1257 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1258 
1259 done:
1260 	return err;
1261 }
1262 
1263 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1264 {
1265 	int err;
1266 
1267 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1268 	if (err)
1269 		goto done;
1270 
1271 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1272 	if (err)
1273 		goto done;
1274 
1275 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1276 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1277 	if (err)
1278 		goto done;
1279 
1280 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1281 
1282 done:
1283 	return err;
1284 }
1285 
1286 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1287 {
1288 	int err;
1289 
1290 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1291 	if (!err)
1292 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1293 
1294 	return err;
1295 }
1296 
1297 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1298 {
1299 	int err;
1300 
1301 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1302 	if (!err)
1303 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1304 
1305 	return err;
1306 }
1307 
1308 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1309 {
1310 	int err;
1311 
1312 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1313 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1314 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1315 	if (!err)
1316 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1317 
1318 	return err;
1319 }
1320 
1321 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1322 {
1323 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1324 		set |= MII_TG3_AUXCTL_MISC_WREN;
1325 
1326 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1327 }
1328 
1329 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1330 {
1331 	u32 val;
1332 	int err;
1333 
1334 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1335 
1336 	if (err)
1337 		return err;
1338 
1339 	if (enable)
1340 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1341 	else
1342 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1343 
1344 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1345 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1346 
1347 	return err;
1348 }
1349 
1350 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1351 {
1352 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1353 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1354 }
1355 
1356 static int tg3_bmcr_reset(struct tg3 *tp)
1357 {
1358 	u32 phy_control;
1359 	int limit, err;
1360 
1361 	/* OK, reset it, and poll the BMCR_RESET bit until it
1362 	 * clears or we time out.
1363 	 */
1364 	phy_control = BMCR_RESET;
1365 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1366 	if (err != 0)
1367 		return -EBUSY;
1368 
1369 	limit = 5000;
1370 	while (limit--) {
1371 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1372 		if (err != 0)
1373 			return -EBUSY;
1374 
1375 		if ((phy_control & BMCR_RESET) == 0) {
1376 			udelay(40);
1377 			break;
1378 		}
1379 		udelay(10);
1380 	}
1381 	if (limit < 0)
1382 		return -EBUSY;
1383 
1384 	return 0;
1385 }
1386 
1387 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1388 {
1389 	struct tg3 *tp = bp->priv;
1390 	u32 val;
1391 
1392 	spin_lock_bh(&tp->lock);
1393 
1394 	if (__tg3_readphy(tp, mii_id, reg, &val))
1395 		val = -EIO;
1396 
1397 	spin_unlock_bh(&tp->lock);
1398 
1399 	return val;
1400 }
1401 
1402 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1403 {
1404 	struct tg3 *tp = bp->priv;
1405 	u32 ret = 0;
1406 
1407 	spin_lock_bh(&tp->lock);
1408 
1409 	if (__tg3_writephy(tp, mii_id, reg, val))
1410 		ret = -EIO;
1411 
1412 	spin_unlock_bh(&tp->lock);
1413 
1414 	return ret;
1415 }
1416 
1417 static void tg3_mdio_config_5785(struct tg3 *tp)
1418 {
1419 	u32 val;
1420 	struct phy_device *phydev;
1421 
1422 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1423 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1424 	case PHY_ID_BCM50610:
1425 	case PHY_ID_BCM50610M:
1426 		val = MAC_PHYCFG2_50610_LED_MODES;
1427 		break;
1428 	case PHY_ID_BCMAC131:
1429 		val = MAC_PHYCFG2_AC131_LED_MODES;
1430 		break;
1431 	case PHY_ID_RTL8211C:
1432 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1433 		break;
1434 	case PHY_ID_RTL8201E:
1435 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1436 		break;
1437 	default:
1438 		return;
1439 	}
1440 
1441 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1442 		tw32(MAC_PHYCFG2, val);
1443 
1444 		val = tr32(MAC_PHYCFG1);
1445 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1446 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1447 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1448 		tw32(MAC_PHYCFG1, val);
1449 
1450 		return;
1451 	}
1452 
1453 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1454 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1455 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1456 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1457 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1458 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1459 		       MAC_PHYCFG2_INBAND_ENABLE;
1460 
1461 	tw32(MAC_PHYCFG2, val);
1462 
1463 	val = tr32(MAC_PHYCFG1);
1464 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1465 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1466 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1467 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1468 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1469 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1470 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1471 	}
1472 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1473 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1474 	tw32(MAC_PHYCFG1, val);
1475 
1476 	val = tr32(MAC_EXT_RGMII_MODE);
1477 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1478 		 MAC_RGMII_MODE_RX_QUALITY |
1479 		 MAC_RGMII_MODE_RX_ACTIVITY |
1480 		 MAC_RGMII_MODE_RX_ENG_DET |
1481 		 MAC_RGMII_MODE_TX_ENABLE |
1482 		 MAC_RGMII_MODE_TX_LOWPWR |
1483 		 MAC_RGMII_MODE_TX_RESET);
1484 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1485 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1486 			val |= MAC_RGMII_MODE_RX_INT_B |
1487 			       MAC_RGMII_MODE_RX_QUALITY |
1488 			       MAC_RGMII_MODE_RX_ACTIVITY |
1489 			       MAC_RGMII_MODE_RX_ENG_DET;
1490 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1491 			val |= MAC_RGMII_MODE_TX_ENABLE |
1492 			       MAC_RGMII_MODE_TX_LOWPWR |
1493 			       MAC_RGMII_MODE_TX_RESET;
1494 	}
1495 	tw32(MAC_EXT_RGMII_MODE, val);
1496 }
1497 
1498 static void tg3_mdio_start(struct tg3 *tp)
1499 {
1500 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1501 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1502 	udelay(80);
1503 
1504 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1505 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1506 		tg3_mdio_config_5785(tp);
1507 }
1508 
1509 static int tg3_mdio_init(struct tg3 *tp)
1510 {
1511 	int i;
1512 	u32 reg;
1513 	struct phy_device *phydev;
1514 
1515 	if (tg3_flag(tp, 5717_PLUS)) {
1516 		u32 is_serdes;
1517 
1518 		tp->phy_addr = tp->pci_fn + 1;
1519 
1520 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1521 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1522 		else
1523 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1524 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1525 		if (is_serdes)
1526 			tp->phy_addr += 7;
1527 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1528 		int addr;
1529 
1530 		addr = ssb_gige_get_phyaddr(tp->pdev);
1531 		if (addr < 0)
1532 			return addr;
1533 		tp->phy_addr = addr;
1534 	} else
1535 		tp->phy_addr = TG3_PHY_MII_ADDR;
1536 
1537 	tg3_mdio_start(tp);
1538 
1539 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1540 		return 0;
1541 
1542 	tp->mdio_bus = mdiobus_alloc();
1543 	if (tp->mdio_bus == NULL)
1544 		return -ENOMEM;
1545 
1546 	tp->mdio_bus->name     = "tg3 mdio bus";
1547 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1548 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1549 	tp->mdio_bus->priv     = tp;
1550 	tp->mdio_bus->parent   = &tp->pdev->dev;
1551 	tp->mdio_bus->read     = &tg3_mdio_read;
1552 	tp->mdio_bus->write    = &tg3_mdio_write;
1553 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1554 
1555 	/* The bus registration will look for all the PHYs on the mdio bus.
1556 	 * Unfortunately, it does not ensure the PHY is powered up before
1557 	 * accessing the PHY ID registers.  A chip reset is the
1558 	 * quickest way to bring the device back to an operational state..
1559 	 */
1560 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1561 		tg3_bmcr_reset(tp);
1562 
1563 	i = mdiobus_register(tp->mdio_bus);
1564 	if (i) {
1565 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1566 		mdiobus_free(tp->mdio_bus);
1567 		return i;
1568 	}
1569 
1570 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1571 
1572 	if (!phydev || !phydev->drv) {
1573 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1574 		mdiobus_unregister(tp->mdio_bus);
1575 		mdiobus_free(tp->mdio_bus);
1576 		return -ENODEV;
1577 	}
1578 
1579 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1580 	case PHY_ID_BCM57780:
1581 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1582 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 		break;
1584 	case PHY_ID_BCM50610:
1585 	case PHY_ID_BCM50610M:
1586 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1587 				     PHY_BRCM_RX_REFCLK_UNUSED |
1588 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1589 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1590 		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1591 			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1592 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1593 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1594 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1595 			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1596 		/* fallthru */
1597 	case PHY_ID_RTL8211C:
1598 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1599 		break;
1600 	case PHY_ID_RTL8201E:
1601 	case PHY_ID_BCMAC131:
1602 		phydev->interface = PHY_INTERFACE_MODE_MII;
1603 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1604 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1605 		break;
1606 	}
1607 
1608 	tg3_flag_set(tp, MDIOBUS_INITED);
1609 
1610 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1611 		tg3_mdio_config_5785(tp);
1612 
1613 	return 0;
1614 }
1615 
1616 static void tg3_mdio_fini(struct tg3 *tp)
1617 {
1618 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1619 		tg3_flag_clear(tp, MDIOBUS_INITED);
1620 		mdiobus_unregister(tp->mdio_bus);
1621 		mdiobus_free(tp->mdio_bus);
1622 	}
1623 }
1624 
1625 /* tp->lock is held. */
1626 static inline void tg3_generate_fw_event(struct tg3 *tp)
1627 {
1628 	u32 val;
1629 
1630 	val = tr32(GRC_RX_CPU_EVENT);
1631 	val |= GRC_RX_CPU_DRIVER_EVENT;
1632 	tw32_f(GRC_RX_CPU_EVENT, val);
1633 
1634 	tp->last_event_jiffies = jiffies;
1635 }
1636 
1637 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1638 
1639 /* tp->lock is held. */
1640 static void tg3_wait_for_event_ack(struct tg3 *tp)
1641 {
1642 	int i;
1643 	unsigned int delay_cnt;
1644 	long time_remain;
1645 
1646 	/* If enough time has passed, no wait is necessary. */
1647 	time_remain = (long)(tp->last_event_jiffies + 1 +
1648 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1649 		      (long)jiffies;
1650 	if (time_remain < 0)
1651 		return;
1652 
1653 	/* Check if we can shorten the wait time. */
1654 	delay_cnt = jiffies_to_usecs(time_remain);
1655 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1656 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1657 	delay_cnt = (delay_cnt >> 3) + 1;
1658 
1659 	for (i = 0; i < delay_cnt; i++) {
1660 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1661 			break;
1662 		if (pci_channel_offline(tp->pdev))
1663 			break;
1664 
1665 		udelay(8);
1666 	}
1667 }
1668 
1669 /* tp->lock is held. */
1670 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1671 {
1672 	u32 reg, val;
1673 
1674 	val = 0;
1675 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1676 		val = reg << 16;
1677 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1678 		val |= (reg & 0xffff);
1679 	*data++ = val;
1680 
1681 	val = 0;
1682 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1683 		val = reg << 16;
1684 	if (!tg3_readphy(tp, MII_LPA, &reg))
1685 		val |= (reg & 0xffff);
1686 	*data++ = val;
1687 
1688 	val = 0;
1689 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1690 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1691 			val = reg << 16;
1692 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1693 			val |= (reg & 0xffff);
1694 	}
1695 	*data++ = val;
1696 
1697 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1698 		val = reg << 16;
1699 	else
1700 		val = 0;
1701 	*data++ = val;
1702 }
1703 
1704 /* tp->lock is held. */
1705 static void tg3_ump_link_report(struct tg3 *tp)
1706 {
1707 	u32 data[4];
1708 
1709 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1710 		return;
1711 
1712 	tg3_phy_gather_ump_data(tp, data);
1713 
1714 	tg3_wait_for_event_ack(tp);
1715 
1716 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1717 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1718 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1719 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1720 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1721 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1722 
1723 	tg3_generate_fw_event(tp);
1724 }
1725 
1726 /* tp->lock is held. */
1727 static void tg3_stop_fw(struct tg3 *tp)
1728 {
1729 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1730 		/* Wait for RX cpu to ACK the previous event. */
1731 		tg3_wait_for_event_ack(tp);
1732 
1733 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1734 
1735 		tg3_generate_fw_event(tp);
1736 
1737 		/* Wait for RX cpu to ACK this event. */
1738 		tg3_wait_for_event_ack(tp);
1739 	}
1740 }
1741 
1742 /* tp->lock is held. */
1743 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1744 {
1745 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1746 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1747 
1748 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1749 		switch (kind) {
1750 		case RESET_KIND_INIT:
1751 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752 				      DRV_STATE_START);
1753 			break;
1754 
1755 		case RESET_KIND_SHUTDOWN:
1756 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1757 				      DRV_STATE_UNLOAD);
1758 			break;
1759 
1760 		case RESET_KIND_SUSPEND:
1761 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1762 				      DRV_STATE_SUSPEND);
1763 			break;
1764 
1765 		default:
1766 			break;
1767 		}
1768 	}
1769 }
1770 
1771 /* tp->lock is held. */
1772 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1773 {
1774 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1775 		switch (kind) {
1776 		case RESET_KIND_INIT:
1777 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1778 				      DRV_STATE_START_DONE);
1779 			break;
1780 
1781 		case RESET_KIND_SHUTDOWN:
1782 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1783 				      DRV_STATE_UNLOAD_DONE);
1784 			break;
1785 
1786 		default:
1787 			break;
1788 		}
1789 	}
1790 }
1791 
1792 /* tp->lock is held. */
1793 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1794 {
1795 	if (tg3_flag(tp, ENABLE_ASF)) {
1796 		switch (kind) {
1797 		case RESET_KIND_INIT:
1798 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799 				      DRV_STATE_START);
1800 			break;
1801 
1802 		case RESET_KIND_SHUTDOWN:
1803 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804 				      DRV_STATE_UNLOAD);
1805 			break;
1806 
1807 		case RESET_KIND_SUSPEND:
1808 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1809 				      DRV_STATE_SUSPEND);
1810 			break;
1811 
1812 		default:
1813 			break;
1814 		}
1815 	}
1816 }
1817 
1818 static int tg3_poll_fw(struct tg3 *tp)
1819 {
1820 	int i;
1821 	u32 val;
1822 
1823 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1824 		return 0;
1825 
1826 	if (tg3_flag(tp, IS_SSB_CORE)) {
1827 		/* We don't use firmware. */
1828 		return 0;
1829 	}
1830 
1831 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1832 		/* Wait up to 20ms for init done. */
1833 		for (i = 0; i < 200; i++) {
1834 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1835 				return 0;
1836 			if (pci_channel_offline(tp->pdev))
1837 				return -ENODEV;
1838 
1839 			udelay(100);
1840 		}
1841 		return -ENODEV;
1842 	}
1843 
1844 	/* Wait for firmware initialization to complete. */
1845 	for (i = 0; i < 100000; i++) {
1846 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1847 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1848 			break;
1849 		if (pci_channel_offline(tp->pdev)) {
1850 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1851 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1852 				netdev_info(tp->dev, "No firmware running\n");
1853 			}
1854 
1855 			break;
1856 		}
1857 
1858 		udelay(10);
1859 	}
1860 
1861 	/* Chip might not be fitted with firmware.  Some Sun onboard
1862 	 * parts are configured like that.  So don't signal the timeout
1863 	 * of the above loop as an error, but do report the lack of
1864 	 * running firmware once.
1865 	 */
1866 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1867 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1868 
1869 		netdev_info(tp->dev, "No firmware running\n");
1870 	}
1871 
1872 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1873 		/* The 57765 A0 needs a little more
1874 		 * time to do some important work.
1875 		 */
1876 		mdelay(10);
1877 	}
1878 
1879 	return 0;
1880 }
1881 
1882 static void tg3_link_report(struct tg3 *tp)
1883 {
1884 	if (!netif_carrier_ok(tp->dev)) {
1885 		netif_info(tp, link, tp->dev, "Link is down\n");
1886 		tg3_ump_link_report(tp);
1887 	} else if (netif_msg_link(tp)) {
1888 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1889 			    (tp->link_config.active_speed == SPEED_1000 ?
1890 			     1000 :
1891 			     (tp->link_config.active_speed == SPEED_100 ?
1892 			      100 : 10)),
1893 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1894 			     "full" : "half"));
1895 
1896 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1897 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1898 			    "on" : "off",
1899 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1900 			    "on" : "off");
1901 
1902 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1903 			netdev_info(tp->dev, "EEE is %s\n",
1904 				    tp->setlpicnt ? "enabled" : "disabled");
1905 
1906 		tg3_ump_link_report(tp);
1907 	}
1908 
1909 	tp->link_up = netif_carrier_ok(tp->dev);
1910 }
1911 
1912 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1913 {
1914 	u32 flowctrl = 0;
1915 
1916 	if (adv & ADVERTISE_PAUSE_CAP) {
1917 		flowctrl |= FLOW_CTRL_RX;
1918 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1919 			flowctrl |= FLOW_CTRL_TX;
1920 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1921 		flowctrl |= FLOW_CTRL_TX;
1922 
1923 	return flowctrl;
1924 }
1925 
1926 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1927 {
1928 	u16 miireg;
1929 
1930 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1931 		miireg = ADVERTISE_1000XPAUSE;
1932 	else if (flow_ctrl & FLOW_CTRL_TX)
1933 		miireg = ADVERTISE_1000XPSE_ASYM;
1934 	else if (flow_ctrl & FLOW_CTRL_RX)
1935 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1936 	else
1937 		miireg = 0;
1938 
1939 	return miireg;
1940 }
1941 
1942 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1943 {
1944 	u32 flowctrl = 0;
1945 
1946 	if (adv & ADVERTISE_1000XPAUSE) {
1947 		flowctrl |= FLOW_CTRL_RX;
1948 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1949 			flowctrl |= FLOW_CTRL_TX;
1950 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1951 		flowctrl |= FLOW_CTRL_TX;
1952 
1953 	return flowctrl;
1954 }
1955 
1956 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1957 {
1958 	u8 cap = 0;
1959 
1960 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1961 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1962 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1963 		if (lcladv & ADVERTISE_1000XPAUSE)
1964 			cap = FLOW_CTRL_RX;
1965 		if (rmtadv & ADVERTISE_1000XPAUSE)
1966 			cap = FLOW_CTRL_TX;
1967 	}
1968 
1969 	return cap;
1970 }
1971 
1972 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1973 {
1974 	u8 autoneg;
1975 	u8 flowctrl = 0;
1976 	u32 old_rx_mode = tp->rx_mode;
1977 	u32 old_tx_mode = tp->tx_mode;
1978 
1979 	if (tg3_flag(tp, USE_PHYLIB))
1980 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1981 	else
1982 		autoneg = tp->link_config.autoneg;
1983 
1984 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1985 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1986 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1987 		else
1988 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1989 	} else
1990 		flowctrl = tp->link_config.flowctrl;
1991 
1992 	tp->link_config.active_flowctrl = flowctrl;
1993 
1994 	if (flowctrl & FLOW_CTRL_RX)
1995 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1996 	else
1997 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1998 
1999 	if (old_rx_mode != tp->rx_mode)
2000 		tw32_f(MAC_RX_MODE, tp->rx_mode);
2001 
2002 	if (flowctrl & FLOW_CTRL_TX)
2003 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
2004 	else
2005 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2006 
2007 	if (old_tx_mode != tp->tx_mode)
2008 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2009 }
2010 
2011 static void tg3_adjust_link(struct net_device *dev)
2012 {
2013 	u8 oldflowctrl, linkmesg = 0;
2014 	u32 mac_mode, lcl_adv, rmt_adv;
2015 	struct tg3 *tp = netdev_priv(dev);
2016 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2017 
2018 	spin_lock_bh(&tp->lock);
2019 
2020 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2021 				    MAC_MODE_HALF_DUPLEX);
2022 
2023 	oldflowctrl = tp->link_config.active_flowctrl;
2024 
2025 	if (phydev->link) {
2026 		lcl_adv = 0;
2027 		rmt_adv = 0;
2028 
2029 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2030 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2031 		else if (phydev->speed == SPEED_1000 ||
2032 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2033 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2034 		else
2035 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2036 
2037 		if (phydev->duplex == DUPLEX_HALF)
2038 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2039 		else {
2040 			lcl_adv = mii_advertise_flowctrl(
2041 				  tp->link_config.flowctrl);
2042 
2043 			if (phydev->pause)
2044 				rmt_adv = LPA_PAUSE_CAP;
2045 			if (phydev->asym_pause)
2046 				rmt_adv |= LPA_PAUSE_ASYM;
2047 		}
2048 
2049 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2050 	} else
2051 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2052 
2053 	if (mac_mode != tp->mac_mode) {
2054 		tp->mac_mode = mac_mode;
2055 		tw32_f(MAC_MODE, tp->mac_mode);
2056 		udelay(40);
2057 	}
2058 
2059 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2060 		if (phydev->speed == SPEED_10)
2061 			tw32(MAC_MI_STAT,
2062 			     MAC_MI_STAT_10MBPS_MODE |
2063 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2064 		else
2065 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2066 	}
2067 
2068 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2069 		tw32(MAC_TX_LENGTHS,
2070 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2071 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2072 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2073 	else
2074 		tw32(MAC_TX_LENGTHS,
2075 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2076 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2077 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2078 
2079 	if (phydev->link != tp->old_link ||
2080 	    phydev->speed != tp->link_config.active_speed ||
2081 	    phydev->duplex != tp->link_config.active_duplex ||
2082 	    oldflowctrl != tp->link_config.active_flowctrl)
2083 		linkmesg = 1;
2084 
2085 	tp->old_link = phydev->link;
2086 	tp->link_config.active_speed = phydev->speed;
2087 	tp->link_config.active_duplex = phydev->duplex;
2088 
2089 	spin_unlock_bh(&tp->lock);
2090 
2091 	if (linkmesg)
2092 		tg3_link_report(tp);
2093 }
2094 
2095 static int tg3_phy_init(struct tg3 *tp)
2096 {
2097 	struct phy_device *phydev;
2098 
2099 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2100 		return 0;
2101 
2102 	/* Bring the PHY back to a known state. */
2103 	tg3_bmcr_reset(tp);
2104 
2105 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2106 
2107 	/* Attach the MAC to the PHY. */
2108 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2109 			     tg3_adjust_link, phydev->interface);
2110 	if (IS_ERR(phydev)) {
2111 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2112 		return PTR_ERR(phydev);
2113 	}
2114 
2115 	/* Mask with MAC supported features. */
2116 	switch (phydev->interface) {
2117 	case PHY_INTERFACE_MODE_GMII:
2118 	case PHY_INTERFACE_MODE_RGMII:
2119 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2120 			phydev->supported &= (PHY_GBIT_FEATURES |
2121 					      SUPPORTED_Pause |
2122 					      SUPPORTED_Asym_Pause);
2123 			break;
2124 		}
2125 		/* fallthru */
2126 	case PHY_INTERFACE_MODE_MII:
2127 		phydev->supported &= (PHY_BASIC_FEATURES |
2128 				      SUPPORTED_Pause |
2129 				      SUPPORTED_Asym_Pause);
2130 		break;
2131 	default:
2132 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2133 		return -EINVAL;
2134 	}
2135 
2136 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2137 
2138 	phydev->advertising = phydev->supported;
2139 
2140 	phy_attached_info(phydev);
2141 
2142 	return 0;
2143 }
2144 
2145 static void tg3_phy_start(struct tg3 *tp)
2146 {
2147 	struct phy_device *phydev;
2148 
2149 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2150 		return;
2151 
2152 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2153 
2154 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2155 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2156 		phydev->speed = tp->link_config.speed;
2157 		phydev->duplex = tp->link_config.duplex;
2158 		phydev->autoneg = tp->link_config.autoneg;
2159 		phydev->advertising = tp->link_config.advertising;
2160 	}
2161 
2162 	phy_start(phydev);
2163 
2164 	phy_start_aneg(phydev);
2165 }
2166 
2167 static void tg3_phy_stop(struct tg3 *tp)
2168 {
2169 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2170 		return;
2171 
2172 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2173 }
2174 
2175 static void tg3_phy_fini(struct tg3 *tp)
2176 {
2177 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2178 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2179 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2180 	}
2181 }
2182 
2183 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2184 {
2185 	int err;
2186 	u32 val;
2187 
2188 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2189 		return 0;
2190 
2191 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2192 		/* Cannot do read-modify-write on 5401 */
2193 		err = tg3_phy_auxctl_write(tp,
2194 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2195 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2196 					   0x4c20);
2197 		goto done;
2198 	}
2199 
2200 	err = tg3_phy_auxctl_read(tp,
2201 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2202 	if (err)
2203 		return err;
2204 
2205 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2206 	err = tg3_phy_auxctl_write(tp,
2207 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2208 
2209 done:
2210 	return err;
2211 }
2212 
2213 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2214 {
2215 	u32 phytest;
2216 
2217 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2218 		u32 phy;
2219 
2220 		tg3_writephy(tp, MII_TG3_FET_TEST,
2221 			     phytest | MII_TG3_FET_SHADOW_EN);
2222 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2223 			if (enable)
2224 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2225 			else
2226 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2227 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2228 		}
2229 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2230 	}
2231 }
2232 
2233 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2234 {
2235 	u32 reg;
2236 
2237 	if (!tg3_flag(tp, 5705_PLUS) ||
2238 	    (tg3_flag(tp, 5717_PLUS) &&
2239 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2240 		return;
2241 
2242 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2243 		tg3_phy_fet_toggle_apd(tp, enable);
2244 		return;
2245 	}
2246 
2247 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2248 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2249 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2250 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2251 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2252 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2253 
2254 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2255 
2256 
2257 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2258 	if (enable)
2259 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2260 
2261 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2262 }
2263 
2264 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2265 {
2266 	u32 phy;
2267 
2268 	if (!tg3_flag(tp, 5705_PLUS) ||
2269 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2270 		return;
2271 
2272 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2273 		u32 ephy;
2274 
2275 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2276 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2277 
2278 			tg3_writephy(tp, MII_TG3_FET_TEST,
2279 				     ephy | MII_TG3_FET_SHADOW_EN);
2280 			if (!tg3_readphy(tp, reg, &phy)) {
2281 				if (enable)
2282 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2283 				else
2284 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2285 				tg3_writephy(tp, reg, phy);
2286 			}
2287 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2288 		}
2289 	} else {
2290 		int ret;
2291 
2292 		ret = tg3_phy_auxctl_read(tp,
2293 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2294 		if (!ret) {
2295 			if (enable)
2296 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2297 			else
2298 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2299 			tg3_phy_auxctl_write(tp,
2300 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2301 		}
2302 	}
2303 }
2304 
2305 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2306 {
2307 	int ret;
2308 	u32 val;
2309 
2310 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2311 		return;
2312 
2313 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2314 	if (!ret)
2315 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2316 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2317 }
2318 
2319 static void tg3_phy_apply_otp(struct tg3 *tp)
2320 {
2321 	u32 otp, phy;
2322 
2323 	if (!tp->phy_otp)
2324 		return;
2325 
2326 	otp = tp->phy_otp;
2327 
2328 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2329 		return;
2330 
2331 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2332 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2334 
2335 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2336 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2338 
2339 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2340 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2341 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2342 
2343 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2344 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2345 
2346 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2347 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2348 
2349 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2350 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2351 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2352 
2353 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2354 }
2355 
2356 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2357 {
2358 	u32 val;
2359 	struct ethtool_eee *dest = &tp->eee;
2360 
2361 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2362 		return;
2363 
2364 	if (eee)
2365 		dest = eee;
2366 
2367 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2368 		return;
2369 
2370 	/* Pull eee_active */
2371 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2372 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2373 		dest->eee_active = 1;
2374 	} else
2375 		dest->eee_active = 0;
2376 
2377 	/* Pull lp advertised settings */
2378 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2379 		return;
2380 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2381 
2382 	/* Pull advertised and eee_enabled settings */
2383 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2384 		return;
2385 	dest->eee_enabled = !!val;
2386 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2387 
2388 	/* Pull tx_lpi_enabled */
2389 	val = tr32(TG3_CPMU_EEE_MODE);
2390 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2391 
2392 	/* Pull lpi timer value */
2393 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2394 }
2395 
2396 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2397 {
2398 	u32 val;
2399 
2400 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2401 		return;
2402 
2403 	tp->setlpicnt = 0;
2404 
2405 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2406 	    current_link_up &&
2407 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2408 	    (tp->link_config.active_speed == SPEED_100 ||
2409 	     tp->link_config.active_speed == SPEED_1000)) {
2410 		u32 eeectl;
2411 
2412 		if (tp->link_config.active_speed == SPEED_1000)
2413 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2414 		else
2415 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2416 
2417 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2418 
2419 		tg3_eee_pull_config(tp, NULL);
2420 		if (tp->eee.eee_active)
2421 			tp->setlpicnt = 2;
2422 	}
2423 
2424 	if (!tp->setlpicnt) {
2425 		if (current_link_up &&
2426 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2427 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2428 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2429 		}
2430 
2431 		val = tr32(TG3_CPMU_EEE_MODE);
2432 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2433 	}
2434 }
2435 
2436 static void tg3_phy_eee_enable(struct tg3 *tp)
2437 {
2438 	u32 val;
2439 
2440 	if (tp->link_config.active_speed == SPEED_1000 &&
2441 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2442 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2443 	     tg3_flag(tp, 57765_CLASS)) &&
2444 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2445 		val = MII_TG3_DSP_TAP26_ALNOKO |
2446 		      MII_TG3_DSP_TAP26_RMRXSTO;
2447 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2448 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2449 	}
2450 
2451 	val = tr32(TG3_CPMU_EEE_MODE);
2452 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2453 }
2454 
2455 static int tg3_wait_macro_done(struct tg3 *tp)
2456 {
2457 	int limit = 100;
2458 
2459 	while (limit--) {
2460 		u32 tmp32;
2461 
2462 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2463 			if ((tmp32 & 0x1000) == 0)
2464 				break;
2465 		}
2466 	}
2467 	if (limit < 0)
2468 		return -EBUSY;
2469 
2470 	return 0;
2471 }
2472 
2473 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2474 {
2475 	static const u32 test_pat[4][6] = {
2476 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2477 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2478 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2479 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2480 	};
2481 	int chan;
2482 
2483 	for (chan = 0; chan < 4; chan++) {
2484 		int i;
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 			     (chan * 0x2000) | 0x0200);
2488 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2489 
2490 		for (i = 0; i < 6; i++)
2491 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2492 				     test_pat[chan][i]);
2493 
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2501 			     (chan * 0x2000) | 0x0200);
2502 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2503 		if (tg3_wait_macro_done(tp)) {
2504 			*resetp = 1;
2505 			return -EBUSY;
2506 		}
2507 
2508 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2509 		if (tg3_wait_macro_done(tp)) {
2510 			*resetp = 1;
2511 			return -EBUSY;
2512 		}
2513 
2514 		for (i = 0; i < 6; i += 2) {
2515 			u32 low, high;
2516 
2517 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2518 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2519 			    tg3_wait_macro_done(tp)) {
2520 				*resetp = 1;
2521 				return -EBUSY;
2522 			}
2523 			low &= 0x7fff;
2524 			high &= 0x000f;
2525 			if (low != test_pat[chan][i] ||
2526 			    high != test_pat[chan][i+1]) {
2527 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2528 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2529 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2530 
2531 				return -EBUSY;
2532 			}
2533 		}
2534 	}
2535 
2536 	return 0;
2537 }
2538 
2539 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2540 {
2541 	int chan;
2542 
2543 	for (chan = 0; chan < 4; chan++) {
2544 		int i;
2545 
2546 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2547 			     (chan * 0x2000) | 0x0200);
2548 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2549 		for (i = 0; i < 6; i++)
2550 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2551 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2552 		if (tg3_wait_macro_done(tp))
2553 			return -EBUSY;
2554 	}
2555 
2556 	return 0;
2557 }
2558 
2559 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2560 {
2561 	u32 reg32, phy9_orig;
2562 	int retries, do_phy_reset, err;
2563 
2564 	retries = 10;
2565 	do_phy_reset = 1;
2566 	do {
2567 		if (do_phy_reset) {
2568 			err = tg3_bmcr_reset(tp);
2569 			if (err)
2570 				return err;
2571 			do_phy_reset = 0;
2572 		}
2573 
2574 		/* Disable transmitter and interrupt.  */
2575 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2576 			continue;
2577 
2578 		reg32 |= 0x3000;
2579 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2580 
2581 		/* Set full-duplex, 1000 mbps.  */
2582 		tg3_writephy(tp, MII_BMCR,
2583 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2584 
2585 		/* Set to master mode.  */
2586 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2587 			continue;
2588 
2589 		tg3_writephy(tp, MII_CTRL1000,
2590 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2591 
2592 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2593 		if (err)
2594 			return err;
2595 
2596 		/* Block the PHY control access.  */
2597 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2598 
2599 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2600 		if (!err)
2601 			break;
2602 	} while (--retries);
2603 
2604 	err = tg3_phy_reset_chanpat(tp);
2605 	if (err)
2606 		return err;
2607 
2608 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2609 
2610 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2611 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2612 
2613 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2614 
2615 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2616 
2617 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2618 	if (err)
2619 		return err;
2620 
2621 	reg32 &= ~0x3000;
2622 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2623 
2624 	return 0;
2625 }
2626 
2627 static void tg3_carrier_off(struct tg3 *tp)
2628 {
2629 	netif_carrier_off(tp->dev);
2630 	tp->link_up = false;
2631 }
2632 
2633 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2634 {
2635 	if (tg3_flag(tp, ENABLE_ASF))
2636 		netdev_warn(tp->dev,
2637 			    "Management side-band traffic will be interrupted during phy settings change\n");
2638 }
2639 
2640 /* This will reset the tigon3 PHY if there is no valid
2641  * link unless the FORCE argument is non-zero.
2642  */
2643 static int tg3_phy_reset(struct tg3 *tp)
2644 {
2645 	u32 val, cpmuctrl;
2646 	int err;
2647 
2648 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2649 		val = tr32(GRC_MISC_CFG);
2650 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2651 		udelay(40);
2652 	}
2653 	err  = tg3_readphy(tp, MII_BMSR, &val);
2654 	err |= tg3_readphy(tp, MII_BMSR, &val);
2655 	if (err != 0)
2656 		return -EBUSY;
2657 
2658 	if (netif_running(tp->dev) && tp->link_up) {
2659 		netif_carrier_off(tp->dev);
2660 		tg3_link_report(tp);
2661 	}
2662 
2663 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2664 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2665 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2666 		err = tg3_phy_reset_5703_4_5(tp);
2667 		if (err)
2668 			return err;
2669 		goto out;
2670 	}
2671 
2672 	cpmuctrl = 0;
2673 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2674 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2675 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2676 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2677 			tw32(TG3_CPMU_CTRL,
2678 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2679 	}
2680 
2681 	err = tg3_bmcr_reset(tp);
2682 	if (err)
2683 		return err;
2684 
2685 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2686 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2687 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2688 
2689 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2690 	}
2691 
2692 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2693 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2694 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2695 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2696 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2697 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2698 			udelay(40);
2699 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2700 		}
2701 	}
2702 
2703 	if (tg3_flag(tp, 5717_PLUS) &&
2704 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2705 		return 0;
2706 
2707 	tg3_phy_apply_otp(tp);
2708 
2709 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2710 		tg3_phy_toggle_apd(tp, true);
2711 	else
2712 		tg3_phy_toggle_apd(tp, false);
2713 
2714 out:
2715 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2716 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2717 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2718 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2719 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2720 	}
2721 
2722 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2723 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2724 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2725 	}
2726 
2727 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2728 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2729 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2730 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2731 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2732 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2733 		}
2734 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2735 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2736 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2737 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2738 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2739 				tg3_writephy(tp, MII_TG3_TEST1,
2740 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2741 			} else
2742 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2743 
2744 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2745 		}
2746 	}
2747 
2748 	/* Set Extended packet length bit (bit 14) on all chips that */
2749 	/* support jumbo frames */
2750 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2751 		/* Cannot do read-modify-write on 5401 */
2752 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2753 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2754 		/* Set bit 14 with read-modify-write to preserve other bits */
2755 		err = tg3_phy_auxctl_read(tp,
2756 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2757 		if (!err)
2758 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2759 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2760 	}
2761 
2762 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2763 	 * jumbo frames transmission.
2764 	 */
2765 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2766 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2767 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2768 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2769 	}
2770 
2771 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2772 		/* adjust output voltage */
2773 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2774 	}
2775 
2776 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2777 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2778 
2779 	tg3_phy_toggle_automdix(tp, true);
2780 	tg3_phy_set_wirespeed(tp);
2781 	return 0;
2782 }
2783 
2784 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2785 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2786 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2787 					  TG3_GPIO_MSG_NEED_VAUX)
2788 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2789 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2790 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2791 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2792 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2793 
2794 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2795 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2796 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2797 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2798 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2799 
2800 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2801 {
2802 	u32 status, shift;
2803 
2804 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2805 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2806 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2807 	else
2808 		status = tr32(TG3_CPMU_DRV_STATUS);
2809 
2810 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2811 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2812 	status |= (newstat << shift);
2813 
2814 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2816 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2817 	else
2818 		tw32(TG3_CPMU_DRV_STATUS, status);
2819 
2820 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2821 }
2822 
2823 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2824 {
2825 	if (!tg3_flag(tp, IS_NIC))
2826 		return 0;
2827 
2828 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2829 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2830 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2831 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2832 			return -EIO;
2833 
2834 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2835 
2836 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2837 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2838 
2839 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2840 	} else {
2841 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2842 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2843 	}
2844 
2845 	return 0;
2846 }
2847 
2848 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2849 {
2850 	u32 grc_local_ctrl;
2851 
2852 	if (!tg3_flag(tp, IS_NIC) ||
2853 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2854 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2855 		return;
2856 
2857 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2858 
2859 	tw32_wait_f(GRC_LOCAL_CTRL,
2860 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2861 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2862 
2863 	tw32_wait_f(GRC_LOCAL_CTRL,
2864 		    grc_local_ctrl,
2865 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2866 
2867 	tw32_wait_f(GRC_LOCAL_CTRL,
2868 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2869 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2870 }
2871 
2872 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2873 {
2874 	if (!tg3_flag(tp, IS_NIC))
2875 		return;
2876 
2877 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2878 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2879 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2880 			    (GRC_LCLCTRL_GPIO_OE0 |
2881 			     GRC_LCLCTRL_GPIO_OE1 |
2882 			     GRC_LCLCTRL_GPIO_OE2 |
2883 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2885 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2886 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2887 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2888 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2889 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2890 				     GRC_LCLCTRL_GPIO_OE1 |
2891 				     GRC_LCLCTRL_GPIO_OE2 |
2892 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2893 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2894 				     tp->grc_local_ctrl;
2895 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2896 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2897 
2898 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2899 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2900 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 
2902 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2903 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2904 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2905 	} else {
2906 		u32 no_gpio2;
2907 		u32 grc_local_ctrl = 0;
2908 
2909 		/* Workaround to prevent overdrawing Amps. */
2910 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2911 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2912 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2913 				    grc_local_ctrl,
2914 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2915 		}
2916 
2917 		/* On 5753 and variants, GPIO2 cannot be used. */
2918 		no_gpio2 = tp->nic_sram_data_cfg &
2919 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2920 
2921 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2922 				  GRC_LCLCTRL_GPIO_OE1 |
2923 				  GRC_LCLCTRL_GPIO_OE2 |
2924 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2925 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2926 		if (no_gpio2) {
2927 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2928 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2929 		}
2930 		tw32_wait_f(GRC_LOCAL_CTRL,
2931 			    tp->grc_local_ctrl | grc_local_ctrl,
2932 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2933 
2934 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2935 
2936 		tw32_wait_f(GRC_LOCAL_CTRL,
2937 			    tp->grc_local_ctrl | grc_local_ctrl,
2938 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2939 
2940 		if (!no_gpio2) {
2941 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2942 			tw32_wait_f(GRC_LOCAL_CTRL,
2943 				    tp->grc_local_ctrl | grc_local_ctrl,
2944 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2945 		}
2946 	}
2947 }
2948 
2949 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2950 {
2951 	u32 msg = 0;
2952 
2953 	/* Serialize power state transitions */
2954 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2955 		return;
2956 
2957 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2958 		msg = TG3_GPIO_MSG_NEED_VAUX;
2959 
2960 	msg = tg3_set_function_status(tp, msg);
2961 
2962 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2963 		goto done;
2964 
2965 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2966 		tg3_pwrsrc_switch_to_vaux(tp);
2967 	else
2968 		tg3_pwrsrc_die_with_vmain(tp);
2969 
2970 done:
2971 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2972 }
2973 
2974 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2975 {
2976 	bool need_vaux = false;
2977 
2978 	/* The GPIOs do something completely different on 57765. */
2979 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2980 		return;
2981 
2982 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2983 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2984 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2985 		tg3_frob_aux_power_5717(tp, include_wol ?
2986 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2987 		return;
2988 	}
2989 
2990 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2991 		struct net_device *dev_peer;
2992 
2993 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2994 
2995 		/* remove_one() may have been run on the peer. */
2996 		if (dev_peer) {
2997 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2998 
2999 			if (tg3_flag(tp_peer, INIT_COMPLETE))
3000 				return;
3001 
3002 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
3003 			    tg3_flag(tp_peer, ENABLE_ASF))
3004 				need_vaux = true;
3005 		}
3006 	}
3007 
3008 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3009 	    tg3_flag(tp, ENABLE_ASF))
3010 		need_vaux = true;
3011 
3012 	if (need_vaux)
3013 		tg3_pwrsrc_switch_to_vaux(tp);
3014 	else
3015 		tg3_pwrsrc_die_with_vmain(tp);
3016 }
3017 
3018 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3019 {
3020 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3021 		return 1;
3022 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3023 		if (speed != SPEED_10)
3024 			return 1;
3025 	} else if (speed == SPEED_10)
3026 		return 1;
3027 
3028 	return 0;
3029 }
3030 
3031 static bool tg3_phy_power_bug(struct tg3 *tp)
3032 {
3033 	switch (tg3_asic_rev(tp)) {
3034 	case ASIC_REV_5700:
3035 	case ASIC_REV_5704:
3036 		return true;
3037 	case ASIC_REV_5780:
3038 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3039 			return true;
3040 		return false;
3041 	case ASIC_REV_5717:
3042 		if (!tp->pci_fn)
3043 			return true;
3044 		return false;
3045 	case ASIC_REV_5719:
3046 	case ASIC_REV_5720:
3047 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3048 		    !tp->pci_fn)
3049 			return true;
3050 		return false;
3051 	}
3052 
3053 	return false;
3054 }
3055 
3056 static bool tg3_phy_led_bug(struct tg3 *tp)
3057 {
3058 	switch (tg3_asic_rev(tp)) {
3059 	case ASIC_REV_5719:
3060 	case ASIC_REV_5720:
3061 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3062 		    !tp->pci_fn)
3063 			return true;
3064 		return false;
3065 	}
3066 
3067 	return false;
3068 }
3069 
3070 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3071 {
3072 	u32 val;
3073 
3074 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3075 		return;
3076 
3077 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3078 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3079 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3080 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3081 
3082 			sg_dig_ctrl |=
3083 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3084 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3085 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3086 		}
3087 		return;
3088 	}
3089 
3090 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3091 		tg3_bmcr_reset(tp);
3092 		val = tr32(GRC_MISC_CFG);
3093 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3094 		udelay(40);
3095 		return;
3096 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3097 		u32 phytest;
3098 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3099 			u32 phy;
3100 
3101 			tg3_writephy(tp, MII_ADVERTISE, 0);
3102 			tg3_writephy(tp, MII_BMCR,
3103 				     BMCR_ANENABLE | BMCR_ANRESTART);
3104 
3105 			tg3_writephy(tp, MII_TG3_FET_TEST,
3106 				     phytest | MII_TG3_FET_SHADOW_EN);
3107 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3108 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3109 				tg3_writephy(tp,
3110 					     MII_TG3_FET_SHDW_AUXMODE4,
3111 					     phy);
3112 			}
3113 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3114 		}
3115 		return;
3116 	} else if (do_low_power) {
3117 		if (!tg3_phy_led_bug(tp))
3118 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3119 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3120 
3121 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3122 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3123 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3124 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3125 	}
3126 
3127 	/* The PHY should not be powered down on some chips because
3128 	 * of bugs.
3129 	 */
3130 	if (tg3_phy_power_bug(tp))
3131 		return;
3132 
3133 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3134 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3135 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3136 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3137 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3138 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3139 	}
3140 
3141 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3142 }
3143 
3144 /* tp->lock is held. */
3145 static int tg3_nvram_lock(struct tg3 *tp)
3146 {
3147 	if (tg3_flag(tp, NVRAM)) {
3148 		int i;
3149 
3150 		if (tp->nvram_lock_cnt == 0) {
3151 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3152 			for (i = 0; i < 8000; i++) {
3153 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3154 					break;
3155 				udelay(20);
3156 			}
3157 			if (i == 8000) {
3158 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3159 				return -ENODEV;
3160 			}
3161 		}
3162 		tp->nvram_lock_cnt++;
3163 	}
3164 	return 0;
3165 }
3166 
3167 /* tp->lock is held. */
3168 static void tg3_nvram_unlock(struct tg3 *tp)
3169 {
3170 	if (tg3_flag(tp, NVRAM)) {
3171 		if (tp->nvram_lock_cnt > 0)
3172 			tp->nvram_lock_cnt--;
3173 		if (tp->nvram_lock_cnt == 0)
3174 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3175 	}
3176 }
3177 
3178 /* tp->lock is held. */
3179 static void tg3_enable_nvram_access(struct tg3 *tp)
3180 {
3181 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3182 		u32 nvaccess = tr32(NVRAM_ACCESS);
3183 
3184 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3185 	}
3186 }
3187 
3188 /* tp->lock is held. */
3189 static void tg3_disable_nvram_access(struct tg3 *tp)
3190 {
3191 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3192 		u32 nvaccess = tr32(NVRAM_ACCESS);
3193 
3194 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3195 	}
3196 }
3197 
3198 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3199 					u32 offset, u32 *val)
3200 {
3201 	u32 tmp;
3202 	int i;
3203 
3204 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3205 		return -EINVAL;
3206 
3207 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3208 					EEPROM_ADDR_DEVID_MASK |
3209 					EEPROM_ADDR_READ);
3210 	tw32(GRC_EEPROM_ADDR,
3211 	     tmp |
3212 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3213 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3214 	      EEPROM_ADDR_ADDR_MASK) |
3215 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3216 
3217 	for (i = 0; i < 1000; i++) {
3218 		tmp = tr32(GRC_EEPROM_ADDR);
3219 
3220 		if (tmp & EEPROM_ADDR_COMPLETE)
3221 			break;
3222 		msleep(1);
3223 	}
3224 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3225 		return -EBUSY;
3226 
3227 	tmp = tr32(GRC_EEPROM_DATA);
3228 
3229 	/*
3230 	 * The data will always be opposite the native endian
3231 	 * format.  Perform a blind byteswap to compensate.
3232 	 */
3233 	*val = swab32(tmp);
3234 
3235 	return 0;
3236 }
3237 
3238 #define NVRAM_CMD_TIMEOUT 10000
3239 
3240 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3241 {
3242 	int i;
3243 
3244 	tw32(NVRAM_CMD, nvram_cmd);
3245 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3246 		usleep_range(10, 40);
3247 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3248 			udelay(10);
3249 			break;
3250 		}
3251 	}
3252 
3253 	if (i == NVRAM_CMD_TIMEOUT)
3254 		return -EBUSY;
3255 
3256 	return 0;
3257 }
3258 
3259 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3260 {
3261 	if (tg3_flag(tp, NVRAM) &&
3262 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3263 	    tg3_flag(tp, FLASH) &&
3264 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3266 
3267 		addr = ((addr / tp->nvram_pagesize) <<
3268 			ATMEL_AT45DB0X1B_PAGE_POS) +
3269 		       (addr % tp->nvram_pagesize);
3270 
3271 	return addr;
3272 }
3273 
3274 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3275 {
3276 	if (tg3_flag(tp, NVRAM) &&
3277 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3278 	    tg3_flag(tp, FLASH) &&
3279 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3280 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3281 
3282 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3283 			tp->nvram_pagesize) +
3284 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3285 
3286 	return addr;
3287 }
3288 
3289 /* NOTE: Data read in from NVRAM is byteswapped according to
3290  * the byteswapping settings for all other register accesses.
3291  * tg3 devices are BE devices, so on a BE machine, the data
3292  * returned will be exactly as it is seen in NVRAM.  On a LE
3293  * machine, the 32-bit value will be byteswapped.
3294  */
3295 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3296 {
3297 	int ret;
3298 
3299 	if (!tg3_flag(tp, NVRAM))
3300 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3301 
3302 	offset = tg3_nvram_phys_addr(tp, offset);
3303 
3304 	if (offset > NVRAM_ADDR_MSK)
3305 		return -EINVAL;
3306 
3307 	ret = tg3_nvram_lock(tp);
3308 	if (ret)
3309 		return ret;
3310 
3311 	tg3_enable_nvram_access(tp);
3312 
3313 	tw32(NVRAM_ADDR, offset);
3314 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3315 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3316 
3317 	if (ret == 0)
3318 		*val = tr32(NVRAM_RDDATA);
3319 
3320 	tg3_disable_nvram_access(tp);
3321 
3322 	tg3_nvram_unlock(tp);
3323 
3324 	return ret;
3325 }
3326 
3327 /* Ensures NVRAM data is in bytestream format. */
3328 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3329 {
3330 	u32 v;
3331 	int res = tg3_nvram_read(tp, offset, &v);
3332 	if (!res)
3333 		*val = cpu_to_be32(v);
3334 	return res;
3335 }
3336 
3337 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3338 				    u32 offset, u32 len, u8 *buf)
3339 {
3340 	int i, j, rc = 0;
3341 	u32 val;
3342 
3343 	for (i = 0; i < len; i += 4) {
3344 		u32 addr;
3345 		__be32 data;
3346 
3347 		addr = offset + i;
3348 
3349 		memcpy(&data, buf + i, 4);
3350 
3351 		/*
3352 		 * The SEEPROM interface expects the data to always be opposite
3353 		 * the native endian format.  We accomplish this by reversing
3354 		 * all the operations that would have been performed on the
3355 		 * data from a call to tg3_nvram_read_be32().
3356 		 */
3357 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3358 
3359 		val = tr32(GRC_EEPROM_ADDR);
3360 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3361 
3362 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3363 			EEPROM_ADDR_READ);
3364 		tw32(GRC_EEPROM_ADDR, val |
3365 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3366 			(addr & EEPROM_ADDR_ADDR_MASK) |
3367 			EEPROM_ADDR_START |
3368 			EEPROM_ADDR_WRITE);
3369 
3370 		for (j = 0; j < 1000; j++) {
3371 			val = tr32(GRC_EEPROM_ADDR);
3372 
3373 			if (val & EEPROM_ADDR_COMPLETE)
3374 				break;
3375 			msleep(1);
3376 		}
3377 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3378 			rc = -EBUSY;
3379 			break;
3380 		}
3381 	}
3382 
3383 	return rc;
3384 }
3385 
3386 /* offset and length are dword aligned */
3387 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3388 		u8 *buf)
3389 {
3390 	int ret = 0;
3391 	u32 pagesize = tp->nvram_pagesize;
3392 	u32 pagemask = pagesize - 1;
3393 	u32 nvram_cmd;
3394 	u8 *tmp;
3395 
3396 	tmp = kmalloc(pagesize, GFP_KERNEL);
3397 	if (tmp == NULL)
3398 		return -ENOMEM;
3399 
3400 	while (len) {
3401 		int j;
3402 		u32 phy_addr, page_off, size;
3403 
3404 		phy_addr = offset & ~pagemask;
3405 
3406 		for (j = 0; j < pagesize; j += 4) {
3407 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3408 						  (__be32 *) (tmp + j));
3409 			if (ret)
3410 				break;
3411 		}
3412 		if (ret)
3413 			break;
3414 
3415 		page_off = offset & pagemask;
3416 		size = pagesize;
3417 		if (len < size)
3418 			size = len;
3419 
3420 		len -= size;
3421 
3422 		memcpy(tmp + page_off, buf, size);
3423 
3424 		offset = offset + (pagesize - page_off);
3425 
3426 		tg3_enable_nvram_access(tp);
3427 
3428 		/*
3429 		 * Before we can erase the flash page, we need
3430 		 * to issue a special "write enable" command.
3431 		 */
3432 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433 
3434 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 			break;
3436 
3437 		/* Erase the target page */
3438 		tw32(NVRAM_ADDR, phy_addr);
3439 
3440 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3441 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3442 
3443 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3444 			break;
3445 
3446 		/* Issue another write enable to start the write. */
3447 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3448 
3449 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3450 			break;
3451 
3452 		for (j = 0; j < pagesize; j += 4) {
3453 			__be32 data;
3454 
3455 			data = *((__be32 *) (tmp + j));
3456 
3457 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3458 
3459 			tw32(NVRAM_ADDR, phy_addr + j);
3460 
3461 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3462 				NVRAM_CMD_WR;
3463 
3464 			if (j == 0)
3465 				nvram_cmd |= NVRAM_CMD_FIRST;
3466 			else if (j == (pagesize - 4))
3467 				nvram_cmd |= NVRAM_CMD_LAST;
3468 
3469 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 			if (ret)
3471 				break;
3472 		}
3473 		if (ret)
3474 			break;
3475 	}
3476 
3477 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3478 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3479 
3480 	kfree(tmp);
3481 
3482 	return ret;
3483 }
3484 
3485 /* offset and length are dword aligned */
3486 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3487 		u8 *buf)
3488 {
3489 	int i, ret = 0;
3490 
3491 	for (i = 0; i < len; i += 4, offset += 4) {
3492 		u32 page_off, phy_addr, nvram_cmd;
3493 		__be32 data;
3494 
3495 		memcpy(&data, buf + i, 4);
3496 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3497 
3498 		page_off = offset % tp->nvram_pagesize;
3499 
3500 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3501 
3502 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3503 
3504 		if (page_off == 0 || i == 0)
3505 			nvram_cmd |= NVRAM_CMD_FIRST;
3506 		if (page_off == (tp->nvram_pagesize - 4))
3507 			nvram_cmd |= NVRAM_CMD_LAST;
3508 
3509 		if (i == (len - 4))
3510 			nvram_cmd |= NVRAM_CMD_LAST;
3511 
3512 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3513 		    !tg3_flag(tp, FLASH) ||
3514 		    !tg3_flag(tp, 57765_PLUS))
3515 			tw32(NVRAM_ADDR, phy_addr);
3516 
3517 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3518 		    !tg3_flag(tp, 5755_PLUS) &&
3519 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3520 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3521 			u32 cmd;
3522 
3523 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3524 			ret = tg3_nvram_exec_cmd(tp, cmd);
3525 			if (ret)
3526 				break;
3527 		}
3528 		if (!tg3_flag(tp, FLASH)) {
3529 			/* We always do complete word writes to eeprom. */
3530 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3531 		}
3532 
3533 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3534 		if (ret)
3535 			break;
3536 	}
3537 	return ret;
3538 }
3539 
3540 /* offset and length are dword aligned */
3541 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3542 {
3543 	int ret;
3544 
3545 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3546 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3547 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3548 		udelay(40);
3549 	}
3550 
3551 	if (!tg3_flag(tp, NVRAM)) {
3552 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3553 	} else {
3554 		u32 grc_mode;
3555 
3556 		ret = tg3_nvram_lock(tp);
3557 		if (ret)
3558 			return ret;
3559 
3560 		tg3_enable_nvram_access(tp);
3561 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3562 			tw32(NVRAM_WRITE1, 0x406);
3563 
3564 		grc_mode = tr32(GRC_MODE);
3565 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3566 
3567 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3568 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3569 				buf);
3570 		} else {
3571 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3572 				buf);
3573 		}
3574 
3575 		grc_mode = tr32(GRC_MODE);
3576 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3577 
3578 		tg3_disable_nvram_access(tp);
3579 		tg3_nvram_unlock(tp);
3580 	}
3581 
3582 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3583 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3584 		udelay(40);
3585 	}
3586 
3587 	return ret;
3588 }
3589 
3590 #define RX_CPU_SCRATCH_BASE	0x30000
3591 #define RX_CPU_SCRATCH_SIZE	0x04000
3592 #define TX_CPU_SCRATCH_BASE	0x34000
3593 #define TX_CPU_SCRATCH_SIZE	0x04000
3594 
3595 /* tp->lock is held. */
3596 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3597 {
3598 	int i;
3599 	const int iters = 10000;
3600 
3601 	for (i = 0; i < iters; i++) {
3602 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3603 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3604 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3605 			break;
3606 		if (pci_channel_offline(tp->pdev))
3607 			return -EBUSY;
3608 	}
3609 
3610 	return (i == iters) ? -EBUSY : 0;
3611 }
3612 
3613 /* tp->lock is held. */
3614 static int tg3_rxcpu_pause(struct tg3 *tp)
3615 {
3616 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3617 
3618 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3619 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3620 	udelay(10);
3621 
3622 	return rc;
3623 }
3624 
3625 /* tp->lock is held. */
3626 static int tg3_txcpu_pause(struct tg3 *tp)
3627 {
3628 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3629 }
3630 
3631 /* tp->lock is held. */
3632 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3633 {
3634 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3635 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3636 }
3637 
3638 /* tp->lock is held. */
3639 static void tg3_rxcpu_resume(struct tg3 *tp)
3640 {
3641 	tg3_resume_cpu(tp, RX_CPU_BASE);
3642 }
3643 
3644 /* tp->lock is held. */
3645 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3646 {
3647 	int rc;
3648 
3649 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3650 
3651 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3652 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3653 
3654 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3655 		return 0;
3656 	}
3657 	if (cpu_base == RX_CPU_BASE) {
3658 		rc = tg3_rxcpu_pause(tp);
3659 	} else {
3660 		/*
3661 		 * There is only an Rx CPU for the 5750 derivative in the
3662 		 * BCM4785.
3663 		 */
3664 		if (tg3_flag(tp, IS_SSB_CORE))
3665 			return 0;
3666 
3667 		rc = tg3_txcpu_pause(tp);
3668 	}
3669 
3670 	if (rc) {
3671 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3672 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3673 		return -ENODEV;
3674 	}
3675 
3676 	/* Clear firmware's nvram arbitration. */
3677 	if (tg3_flag(tp, NVRAM))
3678 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3679 	return 0;
3680 }
3681 
3682 static int tg3_fw_data_len(struct tg3 *tp,
3683 			   const struct tg3_firmware_hdr *fw_hdr)
3684 {
3685 	int fw_len;
3686 
3687 	/* Non fragmented firmware have one firmware header followed by a
3688 	 * contiguous chunk of data to be written. The length field in that
3689 	 * header is not the length of data to be written but the complete
3690 	 * length of the bss. The data length is determined based on
3691 	 * tp->fw->size minus headers.
3692 	 *
3693 	 * Fragmented firmware have a main header followed by multiple
3694 	 * fragments. Each fragment is identical to non fragmented firmware
3695 	 * with a firmware header followed by a contiguous chunk of data. In
3696 	 * the main header, the length field is unused and set to 0xffffffff.
3697 	 * In each fragment header the length is the entire size of that
3698 	 * fragment i.e. fragment data + header length. Data length is
3699 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3700 	 */
3701 	if (tp->fw_len == 0xffffffff)
3702 		fw_len = be32_to_cpu(fw_hdr->len);
3703 	else
3704 		fw_len = tp->fw->size;
3705 
3706 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3707 }
3708 
3709 /* tp->lock is held. */
3710 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3711 				 u32 cpu_scratch_base, int cpu_scratch_size,
3712 				 const struct tg3_firmware_hdr *fw_hdr)
3713 {
3714 	int err, i;
3715 	void (*write_op)(struct tg3 *, u32, u32);
3716 	int total_len = tp->fw->size;
3717 
3718 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3719 		netdev_err(tp->dev,
3720 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3721 			   __func__);
3722 		return -EINVAL;
3723 	}
3724 
3725 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3726 		write_op = tg3_write_mem;
3727 	else
3728 		write_op = tg3_write_indirect_reg32;
3729 
3730 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3731 		/* It is possible that bootcode is still loading at this point.
3732 		 * Get the nvram lock first before halting the cpu.
3733 		 */
3734 		int lock_err = tg3_nvram_lock(tp);
3735 		err = tg3_halt_cpu(tp, cpu_base);
3736 		if (!lock_err)
3737 			tg3_nvram_unlock(tp);
3738 		if (err)
3739 			goto out;
3740 
3741 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3742 			write_op(tp, cpu_scratch_base + i, 0);
3743 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3744 		tw32(cpu_base + CPU_MODE,
3745 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3746 	} else {
3747 		/* Subtract additional main header for fragmented firmware and
3748 		 * advance to the first fragment
3749 		 */
3750 		total_len -= TG3_FW_HDR_LEN;
3751 		fw_hdr++;
3752 	}
3753 
3754 	do {
3755 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3756 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3757 			write_op(tp, cpu_scratch_base +
3758 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3759 				     (i * sizeof(u32)),
3760 				 be32_to_cpu(fw_data[i]));
3761 
3762 		total_len -= be32_to_cpu(fw_hdr->len);
3763 
3764 		/* Advance to next fragment */
3765 		fw_hdr = (struct tg3_firmware_hdr *)
3766 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3767 	} while (total_len > 0);
3768 
3769 	err = 0;
3770 
3771 out:
3772 	return err;
3773 }
3774 
3775 /* tp->lock is held. */
3776 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3777 {
3778 	int i;
3779 	const int iters = 5;
3780 
3781 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3782 	tw32_f(cpu_base + CPU_PC, pc);
3783 
3784 	for (i = 0; i < iters; i++) {
3785 		if (tr32(cpu_base + CPU_PC) == pc)
3786 			break;
3787 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3788 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3789 		tw32_f(cpu_base + CPU_PC, pc);
3790 		udelay(1000);
3791 	}
3792 
3793 	return (i == iters) ? -EBUSY : 0;
3794 }
3795 
3796 /* tp->lock is held. */
3797 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3798 {
3799 	const struct tg3_firmware_hdr *fw_hdr;
3800 	int err;
3801 
3802 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3803 
3804 	/* Firmware blob starts with version numbers, followed by
3805 	   start address and length. We are setting complete length.
3806 	   length = end_address_of_bss - start_address_of_text.
3807 	   Remainder is the blob to be loaded contiguously
3808 	   from start address. */
3809 
3810 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3811 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3812 				    fw_hdr);
3813 	if (err)
3814 		return err;
3815 
3816 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3817 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3818 				    fw_hdr);
3819 	if (err)
3820 		return err;
3821 
3822 	/* Now startup only the RX cpu. */
3823 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3824 				       be32_to_cpu(fw_hdr->base_addr));
3825 	if (err) {
3826 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3827 			   "should be %08x\n", __func__,
3828 			   tr32(RX_CPU_BASE + CPU_PC),
3829 				be32_to_cpu(fw_hdr->base_addr));
3830 		return -ENODEV;
3831 	}
3832 
3833 	tg3_rxcpu_resume(tp);
3834 
3835 	return 0;
3836 }
3837 
3838 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3839 {
3840 	const int iters = 1000;
3841 	int i;
3842 	u32 val;
3843 
3844 	/* Wait for boot code to complete initialization and enter service
3845 	 * loop. It is then safe to download service patches
3846 	 */
3847 	for (i = 0; i < iters; i++) {
3848 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3849 			break;
3850 
3851 		udelay(10);
3852 	}
3853 
3854 	if (i == iters) {
3855 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3856 		return -EBUSY;
3857 	}
3858 
3859 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3860 	if (val & 0xff) {
3861 		netdev_warn(tp->dev,
3862 			    "Other patches exist. Not downloading EEE patch\n");
3863 		return -EEXIST;
3864 	}
3865 
3866 	return 0;
3867 }
3868 
3869 /* tp->lock is held. */
3870 static void tg3_load_57766_firmware(struct tg3 *tp)
3871 {
3872 	struct tg3_firmware_hdr *fw_hdr;
3873 
3874 	if (!tg3_flag(tp, NO_NVRAM))
3875 		return;
3876 
3877 	if (tg3_validate_rxcpu_state(tp))
3878 		return;
3879 
3880 	if (!tp->fw)
3881 		return;
3882 
3883 	/* This firmware blob has a different format than older firmware
3884 	 * releases as given below. The main difference is we have fragmented
3885 	 * data to be written to non-contiguous locations.
3886 	 *
3887 	 * In the beginning we have a firmware header identical to other
3888 	 * firmware which consists of version, base addr and length. The length
3889 	 * here is unused and set to 0xffffffff.
3890 	 *
3891 	 * This is followed by a series of firmware fragments which are
3892 	 * individually identical to previous firmware. i.e. they have the
3893 	 * firmware header and followed by data for that fragment. The version
3894 	 * field of the individual fragment header is unused.
3895 	 */
3896 
3897 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3898 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3899 		return;
3900 
3901 	if (tg3_rxcpu_pause(tp))
3902 		return;
3903 
3904 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3905 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3906 
3907 	tg3_rxcpu_resume(tp);
3908 }
3909 
3910 /* tp->lock is held. */
3911 static int tg3_load_tso_firmware(struct tg3 *tp)
3912 {
3913 	const struct tg3_firmware_hdr *fw_hdr;
3914 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3915 	int err;
3916 
3917 	if (!tg3_flag(tp, FW_TSO))
3918 		return 0;
3919 
3920 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3921 
3922 	/* Firmware blob starts with version numbers, followed by
3923 	   start address and length. We are setting complete length.
3924 	   length = end_address_of_bss - start_address_of_text.
3925 	   Remainder is the blob to be loaded contiguously
3926 	   from start address. */
3927 
3928 	cpu_scratch_size = tp->fw_len;
3929 
3930 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3931 		cpu_base = RX_CPU_BASE;
3932 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3933 	} else {
3934 		cpu_base = TX_CPU_BASE;
3935 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3936 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3937 	}
3938 
3939 	err = tg3_load_firmware_cpu(tp, cpu_base,
3940 				    cpu_scratch_base, cpu_scratch_size,
3941 				    fw_hdr);
3942 	if (err)
3943 		return err;
3944 
3945 	/* Now startup the cpu. */
3946 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3947 				       be32_to_cpu(fw_hdr->base_addr));
3948 	if (err) {
3949 		netdev_err(tp->dev,
3950 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3951 			   __func__, tr32(cpu_base + CPU_PC),
3952 			   be32_to_cpu(fw_hdr->base_addr));
3953 		return -ENODEV;
3954 	}
3955 
3956 	tg3_resume_cpu(tp, cpu_base);
3957 	return 0;
3958 }
3959 
3960 /* tp->lock is held. */
3961 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3962 {
3963 	u32 addr_high, addr_low;
3964 
3965 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3966 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3967 		    (mac_addr[4] <<  8) | mac_addr[5]);
3968 
3969 	if (index < 4) {
3970 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3971 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3972 	} else {
3973 		index -= 4;
3974 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3975 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3976 	}
3977 }
3978 
3979 /* tp->lock is held. */
3980 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3981 {
3982 	u32 addr_high;
3983 	int i;
3984 
3985 	for (i = 0; i < 4; i++) {
3986 		if (i == 1 && skip_mac_1)
3987 			continue;
3988 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3989 	}
3990 
3991 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3992 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3993 		for (i = 4; i < 16; i++)
3994 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3995 	}
3996 
3997 	addr_high = (tp->dev->dev_addr[0] +
3998 		     tp->dev->dev_addr[1] +
3999 		     tp->dev->dev_addr[2] +
4000 		     tp->dev->dev_addr[3] +
4001 		     tp->dev->dev_addr[4] +
4002 		     tp->dev->dev_addr[5]) &
4003 		TX_BACKOFF_SEED_MASK;
4004 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
4005 }
4006 
4007 static void tg3_enable_register_access(struct tg3 *tp)
4008 {
4009 	/*
4010 	 * Make sure register accesses (indirect or otherwise) will function
4011 	 * correctly.
4012 	 */
4013 	pci_write_config_dword(tp->pdev,
4014 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4015 }
4016 
4017 static int tg3_power_up(struct tg3 *tp)
4018 {
4019 	int err;
4020 
4021 	tg3_enable_register_access(tp);
4022 
4023 	err = pci_set_power_state(tp->pdev, PCI_D0);
4024 	if (!err) {
4025 		/* Switch out of Vaux if it is a NIC */
4026 		tg3_pwrsrc_switch_to_vmain(tp);
4027 	} else {
4028 		netdev_err(tp->dev, "Transition to D0 failed\n");
4029 	}
4030 
4031 	return err;
4032 }
4033 
4034 static int tg3_setup_phy(struct tg3 *, bool);
4035 
4036 static int tg3_power_down_prepare(struct tg3 *tp)
4037 {
4038 	u32 misc_host_ctrl;
4039 	bool device_should_wake, do_low_power;
4040 
4041 	tg3_enable_register_access(tp);
4042 
4043 	/* Restore the CLKREQ setting. */
4044 	if (tg3_flag(tp, CLKREQ_BUG))
4045 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4046 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4047 
4048 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4049 	tw32(TG3PCI_MISC_HOST_CTRL,
4050 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4051 
4052 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4053 			     tg3_flag(tp, WOL_ENABLE);
4054 
4055 	if (tg3_flag(tp, USE_PHYLIB)) {
4056 		do_low_power = false;
4057 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4058 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4059 			struct phy_device *phydev;
4060 			u32 phyid, advertising;
4061 
4062 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4063 
4064 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4065 
4066 			tp->link_config.speed = phydev->speed;
4067 			tp->link_config.duplex = phydev->duplex;
4068 			tp->link_config.autoneg = phydev->autoneg;
4069 			tp->link_config.advertising = phydev->advertising;
4070 
4071 			advertising = ADVERTISED_TP |
4072 				      ADVERTISED_Pause |
4073 				      ADVERTISED_Autoneg |
4074 				      ADVERTISED_10baseT_Half;
4075 
4076 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4077 				if (tg3_flag(tp, WOL_SPEED_100MB))
4078 					advertising |=
4079 						ADVERTISED_100baseT_Half |
4080 						ADVERTISED_100baseT_Full |
4081 						ADVERTISED_10baseT_Full;
4082 				else
4083 					advertising |= ADVERTISED_10baseT_Full;
4084 			}
4085 
4086 			phydev->advertising = advertising;
4087 
4088 			phy_start_aneg(phydev);
4089 
4090 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4091 			if (phyid != PHY_ID_BCMAC131) {
4092 				phyid &= PHY_BCM_OUI_MASK;
4093 				if (phyid == PHY_BCM_OUI_1 ||
4094 				    phyid == PHY_BCM_OUI_2 ||
4095 				    phyid == PHY_BCM_OUI_3)
4096 					do_low_power = true;
4097 			}
4098 		}
4099 	} else {
4100 		do_low_power = true;
4101 
4102 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4103 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4104 
4105 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4106 			tg3_setup_phy(tp, false);
4107 	}
4108 
4109 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4110 		u32 val;
4111 
4112 		val = tr32(GRC_VCPU_EXT_CTRL);
4113 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4114 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4115 		int i;
4116 		u32 val;
4117 
4118 		for (i = 0; i < 200; i++) {
4119 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4120 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4121 				break;
4122 			msleep(1);
4123 		}
4124 	}
4125 	if (tg3_flag(tp, WOL_CAP))
4126 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4127 						     WOL_DRV_STATE_SHUTDOWN |
4128 						     WOL_DRV_WOL |
4129 						     WOL_SET_MAGIC_PKT);
4130 
4131 	if (device_should_wake) {
4132 		u32 mac_mode;
4133 
4134 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4135 			if (do_low_power &&
4136 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4137 				tg3_phy_auxctl_write(tp,
4138 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4139 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4140 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4141 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4142 				udelay(40);
4143 			}
4144 
4145 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4146 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 			else if (tp->phy_flags &
4148 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4149 				if (tp->link_config.active_speed == SPEED_1000)
4150 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4151 				else
4152 					mac_mode = MAC_MODE_PORT_MODE_MII;
4153 			} else
4154 				mac_mode = MAC_MODE_PORT_MODE_MII;
4155 
4156 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4157 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4158 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4159 					     SPEED_100 : SPEED_10;
4160 				if (tg3_5700_link_polarity(tp, speed))
4161 					mac_mode |= MAC_MODE_LINK_POLARITY;
4162 				else
4163 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4164 			}
4165 		} else {
4166 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4167 		}
4168 
4169 		if (!tg3_flag(tp, 5750_PLUS))
4170 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4171 
4172 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4173 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4174 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4175 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4176 
4177 		if (tg3_flag(tp, ENABLE_APE))
4178 			mac_mode |= MAC_MODE_APE_TX_EN |
4179 				    MAC_MODE_APE_RX_EN |
4180 				    MAC_MODE_TDE_ENABLE;
4181 
4182 		tw32_f(MAC_MODE, mac_mode);
4183 		udelay(100);
4184 
4185 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4186 		udelay(10);
4187 	}
4188 
4189 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4190 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4191 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4192 		u32 base_val;
4193 
4194 		base_val = tp->pci_clock_ctrl;
4195 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4196 			     CLOCK_CTRL_TXCLK_DISABLE);
4197 
4198 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4199 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4200 	} else if (tg3_flag(tp, 5780_CLASS) ||
4201 		   tg3_flag(tp, CPMU_PRESENT) ||
4202 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4203 		/* do nothing */
4204 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4205 		u32 newbits1, newbits2;
4206 
4207 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4208 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4209 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4210 				    CLOCK_CTRL_TXCLK_DISABLE |
4211 				    CLOCK_CTRL_ALTCLK);
4212 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4213 		} else if (tg3_flag(tp, 5705_PLUS)) {
4214 			newbits1 = CLOCK_CTRL_625_CORE;
4215 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4216 		} else {
4217 			newbits1 = CLOCK_CTRL_ALTCLK;
4218 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4219 		}
4220 
4221 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4222 			    40);
4223 
4224 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4225 			    40);
4226 
4227 		if (!tg3_flag(tp, 5705_PLUS)) {
4228 			u32 newbits3;
4229 
4230 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4231 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4232 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4233 					    CLOCK_CTRL_TXCLK_DISABLE |
4234 					    CLOCK_CTRL_44MHZ_CORE);
4235 			} else {
4236 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4237 			}
4238 
4239 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4240 				    tp->pci_clock_ctrl | newbits3, 40);
4241 		}
4242 	}
4243 
4244 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4245 		tg3_power_down_phy(tp, do_low_power);
4246 
4247 	tg3_frob_aux_power(tp, true);
4248 
4249 	/* Workaround for unstable PLL clock */
4250 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4251 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4252 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4253 		u32 val = tr32(0x7d00);
4254 
4255 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4256 		tw32(0x7d00, val);
4257 		if (!tg3_flag(tp, ENABLE_ASF)) {
4258 			int err;
4259 
4260 			err = tg3_nvram_lock(tp);
4261 			tg3_halt_cpu(tp, RX_CPU_BASE);
4262 			if (!err)
4263 				tg3_nvram_unlock(tp);
4264 		}
4265 	}
4266 
4267 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4268 
4269 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4270 
4271 	return 0;
4272 }
4273 
4274 static void tg3_power_down(struct tg3 *tp)
4275 {
4276 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4277 	pci_set_power_state(tp->pdev, PCI_D3hot);
4278 }
4279 
4280 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4281 {
4282 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4283 	case MII_TG3_AUX_STAT_10HALF:
4284 		*speed = SPEED_10;
4285 		*duplex = DUPLEX_HALF;
4286 		break;
4287 
4288 	case MII_TG3_AUX_STAT_10FULL:
4289 		*speed = SPEED_10;
4290 		*duplex = DUPLEX_FULL;
4291 		break;
4292 
4293 	case MII_TG3_AUX_STAT_100HALF:
4294 		*speed = SPEED_100;
4295 		*duplex = DUPLEX_HALF;
4296 		break;
4297 
4298 	case MII_TG3_AUX_STAT_100FULL:
4299 		*speed = SPEED_100;
4300 		*duplex = DUPLEX_FULL;
4301 		break;
4302 
4303 	case MII_TG3_AUX_STAT_1000HALF:
4304 		*speed = SPEED_1000;
4305 		*duplex = DUPLEX_HALF;
4306 		break;
4307 
4308 	case MII_TG3_AUX_STAT_1000FULL:
4309 		*speed = SPEED_1000;
4310 		*duplex = DUPLEX_FULL;
4311 		break;
4312 
4313 	default:
4314 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4315 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4316 				 SPEED_10;
4317 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4318 				  DUPLEX_HALF;
4319 			break;
4320 		}
4321 		*speed = SPEED_UNKNOWN;
4322 		*duplex = DUPLEX_UNKNOWN;
4323 		break;
4324 	}
4325 }
4326 
4327 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4328 {
4329 	int err = 0;
4330 	u32 val, new_adv;
4331 
4332 	new_adv = ADVERTISE_CSMA;
4333 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4334 	new_adv |= mii_advertise_flowctrl(flowctrl);
4335 
4336 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4337 	if (err)
4338 		goto done;
4339 
4340 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4341 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4342 
4343 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4344 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4345 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4346 
4347 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4348 		if (err)
4349 			goto done;
4350 	}
4351 
4352 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4353 		goto done;
4354 
4355 	tw32(TG3_CPMU_EEE_MODE,
4356 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4357 
4358 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4359 	if (!err) {
4360 		u32 err2;
4361 
4362 		val = 0;
4363 		/* Advertise 100-BaseTX EEE ability */
4364 		if (advertise & ADVERTISED_100baseT_Full)
4365 			val |= MDIO_AN_EEE_ADV_100TX;
4366 		/* Advertise 1000-BaseT EEE ability */
4367 		if (advertise & ADVERTISED_1000baseT_Full)
4368 			val |= MDIO_AN_EEE_ADV_1000T;
4369 
4370 		if (!tp->eee.eee_enabled) {
4371 			val = 0;
4372 			tp->eee.advertised = 0;
4373 		} else {
4374 			tp->eee.advertised = advertise &
4375 					     (ADVERTISED_100baseT_Full |
4376 					      ADVERTISED_1000baseT_Full);
4377 		}
4378 
4379 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4380 		if (err)
4381 			val = 0;
4382 
4383 		switch (tg3_asic_rev(tp)) {
4384 		case ASIC_REV_5717:
4385 		case ASIC_REV_57765:
4386 		case ASIC_REV_57766:
4387 		case ASIC_REV_5719:
4388 			/* If we advertised any eee advertisements above... */
4389 			if (val)
4390 				val = MII_TG3_DSP_TAP26_ALNOKO |
4391 				      MII_TG3_DSP_TAP26_RMRXSTO |
4392 				      MII_TG3_DSP_TAP26_OPCSINPT;
4393 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4394 			/* Fall through */
4395 		case ASIC_REV_5720:
4396 		case ASIC_REV_5762:
4397 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4398 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4399 						 MII_TG3_DSP_CH34TP2_HIBW01);
4400 		}
4401 
4402 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4403 		if (!err)
4404 			err = err2;
4405 	}
4406 
4407 done:
4408 	return err;
4409 }
4410 
4411 static void tg3_phy_copper_begin(struct tg3 *tp)
4412 {
4413 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4414 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4415 		u32 adv, fc;
4416 
4417 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4418 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4419 			adv = ADVERTISED_10baseT_Half |
4420 			      ADVERTISED_10baseT_Full;
4421 			if (tg3_flag(tp, WOL_SPEED_100MB))
4422 				adv |= ADVERTISED_100baseT_Half |
4423 				       ADVERTISED_100baseT_Full;
4424 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4425 				if (!(tp->phy_flags &
4426 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4427 					adv |= ADVERTISED_1000baseT_Half;
4428 				adv |= ADVERTISED_1000baseT_Full;
4429 			}
4430 
4431 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4432 		} else {
4433 			adv = tp->link_config.advertising;
4434 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4435 				adv &= ~(ADVERTISED_1000baseT_Half |
4436 					 ADVERTISED_1000baseT_Full);
4437 
4438 			fc = tp->link_config.flowctrl;
4439 		}
4440 
4441 		tg3_phy_autoneg_cfg(tp, adv, fc);
4442 
4443 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4444 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4445 			/* Normally during power down we want to autonegotiate
4446 			 * the lowest possible speed for WOL. However, to avoid
4447 			 * link flap, we leave it untouched.
4448 			 */
4449 			return;
4450 		}
4451 
4452 		tg3_writephy(tp, MII_BMCR,
4453 			     BMCR_ANENABLE | BMCR_ANRESTART);
4454 	} else {
4455 		int i;
4456 		u32 bmcr, orig_bmcr;
4457 
4458 		tp->link_config.active_speed = tp->link_config.speed;
4459 		tp->link_config.active_duplex = tp->link_config.duplex;
4460 
4461 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4462 			/* With autoneg disabled, 5715 only links up when the
4463 			 * advertisement register has the configured speed
4464 			 * enabled.
4465 			 */
4466 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4467 		}
4468 
4469 		bmcr = 0;
4470 		switch (tp->link_config.speed) {
4471 		default:
4472 		case SPEED_10:
4473 			break;
4474 
4475 		case SPEED_100:
4476 			bmcr |= BMCR_SPEED100;
4477 			break;
4478 
4479 		case SPEED_1000:
4480 			bmcr |= BMCR_SPEED1000;
4481 			break;
4482 		}
4483 
4484 		if (tp->link_config.duplex == DUPLEX_FULL)
4485 			bmcr |= BMCR_FULLDPLX;
4486 
4487 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4488 		    (bmcr != orig_bmcr)) {
4489 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4490 			for (i = 0; i < 1500; i++) {
4491 				u32 tmp;
4492 
4493 				udelay(10);
4494 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4495 				    tg3_readphy(tp, MII_BMSR, &tmp))
4496 					continue;
4497 				if (!(tmp & BMSR_LSTATUS)) {
4498 					udelay(40);
4499 					break;
4500 				}
4501 			}
4502 			tg3_writephy(tp, MII_BMCR, bmcr);
4503 			udelay(40);
4504 		}
4505 	}
4506 }
4507 
4508 static int tg3_phy_pull_config(struct tg3 *tp)
4509 {
4510 	int err;
4511 	u32 val;
4512 
4513 	err = tg3_readphy(tp, MII_BMCR, &val);
4514 	if (err)
4515 		goto done;
4516 
4517 	if (!(val & BMCR_ANENABLE)) {
4518 		tp->link_config.autoneg = AUTONEG_DISABLE;
4519 		tp->link_config.advertising = 0;
4520 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4521 
4522 		err = -EIO;
4523 
4524 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4525 		case 0:
4526 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4527 				goto done;
4528 
4529 			tp->link_config.speed = SPEED_10;
4530 			break;
4531 		case BMCR_SPEED100:
4532 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4533 				goto done;
4534 
4535 			tp->link_config.speed = SPEED_100;
4536 			break;
4537 		case BMCR_SPEED1000:
4538 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4539 				tp->link_config.speed = SPEED_1000;
4540 				break;
4541 			}
4542 			/* Fall through */
4543 		default:
4544 			goto done;
4545 		}
4546 
4547 		if (val & BMCR_FULLDPLX)
4548 			tp->link_config.duplex = DUPLEX_FULL;
4549 		else
4550 			tp->link_config.duplex = DUPLEX_HALF;
4551 
4552 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4553 
4554 		err = 0;
4555 		goto done;
4556 	}
4557 
4558 	tp->link_config.autoneg = AUTONEG_ENABLE;
4559 	tp->link_config.advertising = ADVERTISED_Autoneg;
4560 	tg3_flag_set(tp, PAUSE_AUTONEG);
4561 
4562 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563 		u32 adv;
4564 
4565 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4566 		if (err)
4567 			goto done;
4568 
4569 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4570 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4571 
4572 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4573 	} else {
4574 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4575 	}
4576 
4577 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4578 		u32 adv;
4579 
4580 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4581 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4582 			if (err)
4583 				goto done;
4584 
4585 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4586 		} else {
4587 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4588 			if (err)
4589 				goto done;
4590 
4591 			adv = tg3_decode_flowctrl_1000X(val);
4592 			tp->link_config.flowctrl = adv;
4593 
4594 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4595 			adv = mii_adv_to_ethtool_adv_x(val);
4596 		}
4597 
4598 		tp->link_config.advertising |= adv;
4599 	}
4600 
4601 done:
4602 	return err;
4603 }
4604 
4605 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4606 {
4607 	int err;
4608 
4609 	/* Turn off tap power management. */
4610 	/* Set Extended packet length bit */
4611 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4612 
4613 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4614 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4615 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4616 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4617 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4618 
4619 	udelay(40);
4620 
4621 	return err;
4622 }
4623 
4624 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4625 {
4626 	struct ethtool_eee eee;
4627 
4628 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4629 		return true;
4630 
4631 	tg3_eee_pull_config(tp, &eee);
4632 
4633 	if (tp->eee.eee_enabled) {
4634 		if (tp->eee.advertised != eee.advertised ||
4635 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4636 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4637 			return false;
4638 	} else {
4639 		/* EEE is disabled but we're advertising */
4640 		if (eee.advertised)
4641 			return false;
4642 	}
4643 
4644 	return true;
4645 }
4646 
4647 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4648 {
4649 	u32 advmsk, tgtadv, advertising;
4650 
4651 	advertising = tp->link_config.advertising;
4652 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4653 
4654 	advmsk = ADVERTISE_ALL;
4655 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4656 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4657 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4658 	}
4659 
4660 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4661 		return false;
4662 
4663 	if ((*lcladv & advmsk) != tgtadv)
4664 		return false;
4665 
4666 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4667 		u32 tg3_ctrl;
4668 
4669 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4670 
4671 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4672 			return false;
4673 
4674 		if (tgtadv &&
4675 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4676 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4677 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4678 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4679 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4680 		} else {
4681 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4682 		}
4683 
4684 		if (tg3_ctrl != tgtadv)
4685 			return false;
4686 	}
4687 
4688 	return true;
4689 }
4690 
4691 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4692 {
4693 	u32 lpeth = 0;
4694 
4695 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4696 		u32 val;
4697 
4698 		if (tg3_readphy(tp, MII_STAT1000, &val))
4699 			return false;
4700 
4701 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4702 	}
4703 
4704 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4705 		return false;
4706 
4707 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4708 	tp->link_config.rmt_adv = lpeth;
4709 
4710 	return true;
4711 }
4712 
4713 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4714 {
4715 	if (curr_link_up != tp->link_up) {
4716 		if (curr_link_up) {
4717 			netif_carrier_on(tp->dev);
4718 		} else {
4719 			netif_carrier_off(tp->dev);
4720 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4721 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4722 		}
4723 
4724 		tg3_link_report(tp);
4725 		return true;
4726 	}
4727 
4728 	return false;
4729 }
4730 
4731 static void tg3_clear_mac_status(struct tg3 *tp)
4732 {
4733 	tw32(MAC_EVENT, 0);
4734 
4735 	tw32_f(MAC_STATUS,
4736 	       MAC_STATUS_SYNC_CHANGED |
4737 	       MAC_STATUS_CFG_CHANGED |
4738 	       MAC_STATUS_MI_COMPLETION |
4739 	       MAC_STATUS_LNKSTATE_CHANGED);
4740 	udelay(40);
4741 }
4742 
4743 static void tg3_setup_eee(struct tg3 *tp)
4744 {
4745 	u32 val;
4746 
4747 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4748 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4749 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4750 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4751 
4752 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4753 
4754 	tw32_f(TG3_CPMU_EEE_CTRL,
4755 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4756 
4757 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4758 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4759 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4760 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4761 
4762 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4763 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4764 
4765 	if (tg3_flag(tp, ENABLE_APE))
4766 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4767 
4768 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4769 
4770 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4771 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4772 	       (tp->eee.tx_lpi_timer & 0xffff));
4773 
4774 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4775 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4776 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4777 }
4778 
4779 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4780 {
4781 	bool current_link_up;
4782 	u32 bmsr, val;
4783 	u32 lcl_adv, rmt_adv;
4784 	u16 current_speed;
4785 	u8 current_duplex;
4786 	int i, err;
4787 
4788 	tg3_clear_mac_status(tp);
4789 
4790 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4791 		tw32_f(MAC_MI_MODE,
4792 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4793 		udelay(80);
4794 	}
4795 
4796 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4797 
4798 	/* Some third-party PHYs need to be reset on link going
4799 	 * down.
4800 	 */
4801 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4802 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4803 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4804 	    tp->link_up) {
4805 		tg3_readphy(tp, MII_BMSR, &bmsr);
4806 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4807 		    !(bmsr & BMSR_LSTATUS))
4808 			force_reset = true;
4809 	}
4810 	if (force_reset)
4811 		tg3_phy_reset(tp);
4812 
4813 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4814 		tg3_readphy(tp, MII_BMSR, &bmsr);
4815 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4816 		    !tg3_flag(tp, INIT_COMPLETE))
4817 			bmsr = 0;
4818 
4819 		if (!(bmsr & BMSR_LSTATUS)) {
4820 			err = tg3_init_5401phy_dsp(tp);
4821 			if (err)
4822 				return err;
4823 
4824 			tg3_readphy(tp, MII_BMSR, &bmsr);
4825 			for (i = 0; i < 1000; i++) {
4826 				udelay(10);
4827 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4828 				    (bmsr & BMSR_LSTATUS)) {
4829 					udelay(40);
4830 					break;
4831 				}
4832 			}
4833 
4834 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4835 			    TG3_PHY_REV_BCM5401_B0 &&
4836 			    !(bmsr & BMSR_LSTATUS) &&
4837 			    tp->link_config.active_speed == SPEED_1000) {
4838 				err = tg3_phy_reset(tp);
4839 				if (!err)
4840 					err = tg3_init_5401phy_dsp(tp);
4841 				if (err)
4842 					return err;
4843 			}
4844 		}
4845 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4846 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4847 		/* 5701 {A0,B0} CRC bug workaround */
4848 		tg3_writephy(tp, 0x15, 0x0a75);
4849 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4850 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4851 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4852 	}
4853 
4854 	/* Clear pending interrupts... */
4855 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4857 
4858 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4859 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4860 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4861 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4862 
4863 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4864 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4865 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4866 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4867 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4868 		else
4869 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4870 	}
4871 
4872 	current_link_up = false;
4873 	current_speed = SPEED_UNKNOWN;
4874 	current_duplex = DUPLEX_UNKNOWN;
4875 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4876 	tp->link_config.rmt_adv = 0;
4877 
4878 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4879 		err = tg3_phy_auxctl_read(tp,
4880 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4881 					  &val);
4882 		if (!err && !(val & (1 << 10))) {
4883 			tg3_phy_auxctl_write(tp,
4884 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4885 					     val | (1 << 10));
4886 			goto relink;
4887 		}
4888 	}
4889 
4890 	bmsr = 0;
4891 	for (i = 0; i < 100; i++) {
4892 		tg3_readphy(tp, MII_BMSR, &bmsr);
4893 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4894 		    (bmsr & BMSR_LSTATUS))
4895 			break;
4896 		udelay(40);
4897 	}
4898 
4899 	if (bmsr & BMSR_LSTATUS) {
4900 		u32 aux_stat, bmcr;
4901 
4902 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4903 		for (i = 0; i < 2000; i++) {
4904 			udelay(10);
4905 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4906 			    aux_stat)
4907 				break;
4908 		}
4909 
4910 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4911 					     &current_speed,
4912 					     &current_duplex);
4913 
4914 		bmcr = 0;
4915 		for (i = 0; i < 200; i++) {
4916 			tg3_readphy(tp, MII_BMCR, &bmcr);
4917 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4918 				continue;
4919 			if (bmcr && bmcr != 0x7fff)
4920 				break;
4921 			udelay(10);
4922 		}
4923 
4924 		lcl_adv = 0;
4925 		rmt_adv = 0;
4926 
4927 		tp->link_config.active_speed = current_speed;
4928 		tp->link_config.active_duplex = current_duplex;
4929 
4930 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4931 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4932 
4933 			if ((bmcr & BMCR_ANENABLE) &&
4934 			    eee_config_ok &&
4935 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4936 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4937 				current_link_up = true;
4938 
4939 			/* EEE settings changes take effect only after a phy
4940 			 * reset.  If we have skipped a reset due to Link Flap
4941 			 * Avoidance being enabled, do it now.
4942 			 */
4943 			if (!eee_config_ok &&
4944 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4945 			    !force_reset) {
4946 				tg3_setup_eee(tp);
4947 				tg3_phy_reset(tp);
4948 			}
4949 		} else {
4950 			if (!(bmcr & BMCR_ANENABLE) &&
4951 			    tp->link_config.speed == current_speed &&
4952 			    tp->link_config.duplex == current_duplex) {
4953 				current_link_up = true;
4954 			}
4955 		}
4956 
4957 		if (current_link_up &&
4958 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4959 			u32 reg, bit;
4960 
4961 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4962 				reg = MII_TG3_FET_GEN_STAT;
4963 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4964 			} else {
4965 				reg = MII_TG3_EXT_STAT;
4966 				bit = MII_TG3_EXT_STAT_MDIX;
4967 			}
4968 
4969 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4970 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4971 
4972 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4973 		}
4974 	}
4975 
4976 relink:
4977 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4978 		tg3_phy_copper_begin(tp);
4979 
4980 		if (tg3_flag(tp, ROBOSWITCH)) {
4981 			current_link_up = true;
4982 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4983 			current_speed = SPEED_1000;
4984 			current_duplex = DUPLEX_FULL;
4985 			tp->link_config.active_speed = current_speed;
4986 			tp->link_config.active_duplex = current_duplex;
4987 		}
4988 
4989 		tg3_readphy(tp, MII_BMSR, &bmsr);
4990 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4991 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4992 			current_link_up = true;
4993 	}
4994 
4995 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4996 	if (current_link_up) {
4997 		if (tp->link_config.active_speed == SPEED_100 ||
4998 		    tp->link_config.active_speed == SPEED_10)
4999 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000 		else
5001 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5003 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5004 	else
5005 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5006 
5007 	/* In order for the 5750 core in BCM4785 chip to work properly
5008 	 * in RGMII mode, the Led Control Register must be set up.
5009 	 */
5010 	if (tg3_flag(tp, RGMII_MODE)) {
5011 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5012 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5013 
5014 		if (tp->link_config.active_speed == SPEED_10)
5015 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5016 		else if (tp->link_config.active_speed == SPEED_100)
5017 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5018 				     LED_CTRL_100MBPS_ON);
5019 		else if (tp->link_config.active_speed == SPEED_1000)
5020 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5021 				     LED_CTRL_1000MBPS_ON);
5022 
5023 		tw32(MAC_LED_CTRL, led_ctrl);
5024 		udelay(40);
5025 	}
5026 
5027 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5028 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5029 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5030 
5031 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5032 		if (current_link_up &&
5033 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5034 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5035 		else
5036 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5037 	}
5038 
5039 	/* ??? Without this setting Netgear GA302T PHY does not
5040 	 * ??? send/receive packets...
5041 	 */
5042 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5043 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5044 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5045 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5046 		udelay(80);
5047 	}
5048 
5049 	tw32_f(MAC_MODE, tp->mac_mode);
5050 	udelay(40);
5051 
5052 	tg3_phy_eee_adjust(tp, current_link_up);
5053 
5054 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5055 		/* Polled via timer. */
5056 		tw32_f(MAC_EVENT, 0);
5057 	} else {
5058 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5059 	}
5060 	udelay(40);
5061 
5062 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5063 	    current_link_up &&
5064 	    tp->link_config.active_speed == SPEED_1000 &&
5065 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5066 		udelay(120);
5067 		tw32_f(MAC_STATUS,
5068 		     (MAC_STATUS_SYNC_CHANGED |
5069 		      MAC_STATUS_CFG_CHANGED));
5070 		udelay(40);
5071 		tg3_write_mem(tp,
5072 			      NIC_SRAM_FIRMWARE_MBOX,
5073 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5074 	}
5075 
5076 	/* Prevent send BD corruption. */
5077 	if (tg3_flag(tp, CLKREQ_BUG)) {
5078 		if (tp->link_config.active_speed == SPEED_100 ||
5079 		    tp->link_config.active_speed == SPEED_10)
5080 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5081 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5082 		else
5083 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5084 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5085 	}
5086 
5087 	tg3_test_and_report_link_chg(tp, current_link_up);
5088 
5089 	return 0;
5090 }
5091 
5092 struct tg3_fiber_aneginfo {
5093 	int state;
5094 #define ANEG_STATE_UNKNOWN		0
5095 #define ANEG_STATE_AN_ENABLE		1
5096 #define ANEG_STATE_RESTART_INIT		2
5097 #define ANEG_STATE_RESTART		3
5098 #define ANEG_STATE_DISABLE_LINK_OK	4
5099 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5100 #define ANEG_STATE_ABILITY_DETECT	6
5101 #define ANEG_STATE_ACK_DETECT_INIT	7
5102 #define ANEG_STATE_ACK_DETECT		8
5103 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5104 #define ANEG_STATE_COMPLETE_ACK		10
5105 #define ANEG_STATE_IDLE_DETECT_INIT	11
5106 #define ANEG_STATE_IDLE_DETECT		12
5107 #define ANEG_STATE_LINK_OK		13
5108 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5109 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5110 
5111 	u32 flags;
5112 #define MR_AN_ENABLE		0x00000001
5113 #define MR_RESTART_AN		0x00000002
5114 #define MR_AN_COMPLETE		0x00000004
5115 #define MR_PAGE_RX		0x00000008
5116 #define MR_NP_LOADED		0x00000010
5117 #define MR_TOGGLE_TX		0x00000020
5118 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5119 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5120 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5121 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5122 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5123 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5124 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5125 #define MR_TOGGLE_RX		0x00002000
5126 #define MR_NP_RX		0x00004000
5127 
5128 #define MR_LINK_OK		0x80000000
5129 
5130 	unsigned long link_time, cur_time;
5131 
5132 	u32 ability_match_cfg;
5133 	int ability_match_count;
5134 
5135 	char ability_match, idle_match, ack_match;
5136 
5137 	u32 txconfig, rxconfig;
5138 #define ANEG_CFG_NP		0x00000080
5139 #define ANEG_CFG_ACK		0x00000040
5140 #define ANEG_CFG_RF2		0x00000020
5141 #define ANEG_CFG_RF1		0x00000010
5142 #define ANEG_CFG_PS2		0x00000001
5143 #define ANEG_CFG_PS1		0x00008000
5144 #define ANEG_CFG_HD		0x00004000
5145 #define ANEG_CFG_FD		0x00002000
5146 #define ANEG_CFG_INVAL		0x00001f06
5147 
5148 };
5149 #define ANEG_OK		0
5150 #define ANEG_DONE	1
5151 #define ANEG_TIMER_ENAB	2
5152 #define ANEG_FAILED	-1
5153 
5154 #define ANEG_STATE_SETTLE_TIME	10000
5155 
5156 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5157 				   struct tg3_fiber_aneginfo *ap)
5158 {
5159 	u16 flowctrl;
5160 	unsigned long delta;
5161 	u32 rx_cfg_reg;
5162 	int ret;
5163 
5164 	if (ap->state == ANEG_STATE_UNKNOWN) {
5165 		ap->rxconfig = 0;
5166 		ap->link_time = 0;
5167 		ap->cur_time = 0;
5168 		ap->ability_match_cfg = 0;
5169 		ap->ability_match_count = 0;
5170 		ap->ability_match = 0;
5171 		ap->idle_match = 0;
5172 		ap->ack_match = 0;
5173 	}
5174 	ap->cur_time++;
5175 
5176 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5177 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5178 
5179 		if (rx_cfg_reg != ap->ability_match_cfg) {
5180 			ap->ability_match_cfg = rx_cfg_reg;
5181 			ap->ability_match = 0;
5182 			ap->ability_match_count = 0;
5183 		} else {
5184 			if (++ap->ability_match_count > 1) {
5185 				ap->ability_match = 1;
5186 				ap->ability_match_cfg = rx_cfg_reg;
5187 			}
5188 		}
5189 		if (rx_cfg_reg & ANEG_CFG_ACK)
5190 			ap->ack_match = 1;
5191 		else
5192 			ap->ack_match = 0;
5193 
5194 		ap->idle_match = 0;
5195 	} else {
5196 		ap->idle_match = 1;
5197 		ap->ability_match_cfg = 0;
5198 		ap->ability_match_count = 0;
5199 		ap->ability_match = 0;
5200 		ap->ack_match = 0;
5201 
5202 		rx_cfg_reg = 0;
5203 	}
5204 
5205 	ap->rxconfig = rx_cfg_reg;
5206 	ret = ANEG_OK;
5207 
5208 	switch (ap->state) {
5209 	case ANEG_STATE_UNKNOWN:
5210 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5211 			ap->state = ANEG_STATE_AN_ENABLE;
5212 
5213 		/* fallthru */
5214 	case ANEG_STATE_AN_ENABLE:
5215 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5216 		if (ap->flags & MR_AN_ENABLE) {
5217 			ap->link_time = 0;
5218 			ap->cur_time = 0;
5219 			ap->ability_match_cfg = 0;
5220 			ap->ability_match_count = 0;
5221 			ap->ability_match = 0;
5222 			ap->idle_match = 0;
5223 			ap->ack_match = 0;
5224 
5225 			ap->state = ANEG_STATE_RESTART_INIT;
5226 		} else {
5227 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5228 		}
5229 		break;
5230 
5231 	case ANEG_STATE_RESTART_INIT:
5232 		ap->link_time = ap->cur_time;
5233 		ap->flags &= ~(MR_NP_LOADED);
5234 		ap->txconfig = 0;
5235 		tw32(MAC_TX_AUTO_NEG, 0);
5236 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5237 		tw32_f(MAC_MODE, tp->mac_mode);
5238 		udelay(40);
5239 
5240 		ret = ANEG_TIMER_ENAB;
5241 		ap->state = ANEG_STATE_RESTART;
5242 
5243 		/* fallthru */
5244 	case ANEG_STATE_RESTART:
5245 		delta = ap->cur_time - ap->link_time;
5246 		if (delta > ANEG_STATE_SETTLE_TIME)
5247 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5248 		else
5249 			ret = ANEG_TIMER_ENAB;
5250 		break;
5251 
5252 	case ANEG_STATE_DISABLE_LINK_OK:
5253 		ret = ANEG_DONE;
5254 		break;
5255 
5256 	case ANEG_STATE_ABILITY_DETECT_INIT:
5257 		ap->flags &= ~(MR_TOGGLE_TX);
5258 		ap->txconfig = ANEG_CFG_FD;
5259 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5260 		if (flowctrl & ADVERTISE_1000XPAUSE)
5261 			ap->txconfig |= ANEG_CFG_PS1;
5262 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5263 			ap->txconfig |= ANEG_CFG_PS2;
5264 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5265 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5266 		tw32_f(MAC_MODE, tp->mac_mode);
5267 		udelay(40);
5268 
5269 		ap->state = ANEG_STATE_ABILITY_DETECT;
5270 		break;
5271 
5272 	case ANEG_STATE_ABILITY_DETECT:
5273 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5274 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5275 		break;
5276 
5277 	case ANEG_STATE_ACK_DETECT_INIT:
5278 		ap->txconfig |= ANEG_CFG_ACK;
5279 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5280 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5281 		tw32_f(MAC_MODE, tp->mac_mode);
5282 		udelay(40);
5283 
5284 		ap->state = ANEG_STATE_ACK_DETECT;
5285 
5286 		/* fallthru */
5287 	case ANEG_STATE_ACK_DETECT:
5288 		if (ap->ack_match != 0) {
5289 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5290 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5291 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5292 			} else {
5293 				ap->state = ANEG_STATE_AN_ENABLE;
5294 			}
5295 		} else if (ap->ability_match != 0 &&
5296 			   ap->rxconfig == 0) {
5297 			ap->state = ANEG_STATE_AN_ENABLE;
5298 		}
5299 		break;
5300 
5301 	case ANEG_STATE_COMPLETE_ACK_INIT:
5302 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5303 			ret = ANEG_FAILED;
5304 			break;
5305 		}
5306 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5307 			       MR_LP_ADV_HALF_DUPLEX |
5308 			       MR_LP_ADV_SYM_PAUSE |
5309 			       MR_LP_ADV_ASYM_PAUSE |
5310 			       MR_LP_ADV_REMOTE_FAULT1 |
5311 			       MR_LP_ADV_REMOTE_FAULT2 |
5312 			       MR_LP_ADV_NEXT_PAGE |
5313 			       MR_TOGGLE_RX |
5314 			       MR_NP_RX);
5315 		if (ap->rxconfig & ANEG_CFG_FD)
5316 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5317 		if (ap->rxconfig & ANEG_CFG_HD)
5318 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5319 		if (ap->rxconfig & ANEG_CFG_PS1)
5320 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5321 		if (ap->rxconfig & ANEG_CFG_PS2)
5322 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5323 		if (ap->rxconfig & ANEG_CFG_RF1)
5324 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5325 		if (ap->rxconfig & ANEG_CFG_RF2)
5326 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5327 		if (ap->rxconfig & ANEG_CFG_NP)
5328 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5329 
5330 		ap->link_time = ap->cur_time;
5331 
5332 		ap->flags ^= (MR_TOGGLE_TX);
5333 		if (ap->rxconfig & 0x0008)
5334 			ap->flags |= MR_TOGGLE_RX;
5335 		if (ap->rxconfig & ANEG_CFG_NP)
5336 			ap->flags |= MR_NP_RX;
5337 		ap->flags |= MR_PAGE_RX;
5338 
5339 		ap->state = ANEG_STATE_COMPLETE_ACK;
5340 		ret = ANEG_TIMER_ENAB;
5341 		break;
5342 
5343 	case ANEG_STATE_COMPLETE_ACK:
5344 		if (ap->ability_match != 0 &&
5345 		    ap->rxconfig == 0) {
5346 			ap->state = ANEG_STATE_AN_ENABLE;
5347 			break;
5348 		}
5349 		delta = ap->cur_time - ap->link_time;
5350 		if (delta > ANEG_STATE_SETTLE_TIME) {
5351 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5352 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5353 			} else {
5354 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5355 				    !(ap->flags & MR_NP_RX)) {
5356 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5357 				} else {
5358 					ret = ANEG_FAILED;
5359 				}
5360 			}
5361 		}
5362 		break;
5363 
5364 	case ANEG_STATE_IDLE_DETECT_INIT:
5365 		ap->link_time = ap->cur_time;
5366 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5367 		tw32_f(MAC_MODE, tp->mac_mode);
5368 		udelay(40);
5369 
5370 		ap->state = ANEG_STATE_IDLE_DETECT;
5371 		ret = ANEG_TIMER_ENAB;
5372 		break;
5373 
5374 	case ANEG_STATE_IDLE_DETECT:
5375 		if (ap->ability_match != 0 &&
5376 		    ap->rxconfig == 0) {
5377 			ap->state = ANEG_STATE_AN_ENABLE;
5378 			break;
5379 		}
5380 		delta = ap->cur_time - ap->link_time;
5381 		if (delta > ANEG_STATE_SETTLE_TIME) {
5382 			/* XXX another gem from the Broadcom driver :( */
5383 			ap->state = ANEG_STATE_LINK_OK;
5384 		}
5385 		break;
5386 
5387 	case ANEG_STATE_LINK_OK:
5388 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5389 		ret = ANEG_DONE;
5390 		break;
5391 
5392 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5393 		/* ??? unimplemented */
5394 		break;
5395 
5396 	case ANEG_STATE_NEXT_PAGE_WAIT:
5397 		/* ??? unimplemented */
5398 		break;
5399 
5400 	default:
5401 		ret = ANEG_FAILED;
5402 		break;
5403 	}
5404 
5405 	return ret;
5406 }
5407 
5408 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5409 {
5410 	int res = 0;
5411 	struct tg3_fiber_aneginfo aninfo;
5412 	int status = ANEG_FAILED;
5413 	unsigned int tick;
5414 	u32 tmp;
5415 
5416 	tw32_f(MAC_TX_AUTO_NEG, 0);
5417 
5418 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5419 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5420 	udelay(40);
5421 
5422 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5423 	udelay(40);
5424 
5425 	memset(&aninfo, 0, sizeof(aninfo));
5426 	aninfo.flags |= MR_AN_ENABLE;
5427 	aninfo.state = ANEG_STATE_UNKNOWN;
5428 	aninfo.cur_time = 0;
5429 	tick = 0;
5430 	while (++tick < 195000) {
5431 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5432 		if (status == ANEG_DONE || status == ANEG_FAILED)
5433 			break;
5434 
5435 		udelay(1);
5436 	}
5437 
5438 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5439 	tw32_f(MAC_MODE, tp->mac_mode);
5440 	udelay(40);
5441 
5442 	*txflags = aninfo.txconfig;
5443 	*rxflags = aninfo.flags;
5444 
5445 	if (status == ANEG_DONE &&
5446 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5447 			     MR_LP_ADV_FULL_DUPLEX)))
5448 		res = 1;
5449 
5450 	return res;
5451 }
5452 
5453 static void tg3_init_bcm8002(struct tg3 *tp)
5454 {
5455 	u32 mac_status = tr32(MAC_STATUS);
5456 	int i;
5457 
5458 	/* Reset when initting first time or we have a link. */
5459 	if (tg3_flag(tp, INIT_COMPLETE) &&
5460 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5461 		return;
5462 
5463 	/* Set PLL lock range. */
5464 	tg3_writephy(tp, 0x16, 0x8007);
5465 
5466 	/* SW reset */
5467 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5468 
5469 	/* Wait for reset to complete. */
5470 	/* XXX schedule_timeout() ... */
5471 	for (i = 0; i < 500; i++)
5472 		udelay(10);
5473 
5474 	/* Config mode; select PMA/Ch 1 regs. */
5475 	tg3_writephy(tp, 0x10, 0x8411);
5476 
5477 	/* Enable auto-lock and comdet, select txclk for tx. */
5478 	tg3_writephy(tp, 0x11, 0x0a10);
5479 
5480 	tg3_writephy(tp, 0x18, 0x00a0);
5481 	tg3_writephy(tp, 0x16, 0x41ff);
5482 
5483 	/* Assert and deassert POR. */
5484 	tg3_writephy(tp, 0x13, 0x0400);
5485 	udelay(40);
5486 	tg3_writephy(tp, 0x13, 0x0000);
5487 
5488 	tg3_writephy(tp, 0x11, 0x0a50);
5489 	udelay(40);
5490 	tg3_writephy(tp, 0x11, 0x0a10);
5491 
5492 	/* Wait for signal to stabilize */
5493 	/* XXX schedule_timeout() ... */
5494 	for (i = 0; i < 15000; i++)
5495 		udelay(10);
5496 
5497 	/* Deselect the channel register so we can read the PHYID
5498 	 * later.
5499 	 */
5500 	tg3_writephy(tp, 0x10, 0x8011);
5501 }
5502 
5503 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5504 {
5505 	u16 flowctrl;
5506 	bool current_link_up;
5507 	u32 sg_dig_ctrl, sg_dig_status;
5508 	u32 serdes_cfg, expected_sg_dig_ctrl;
5509 	int workaround, port_a;
5510 
5511 	serdes_cfg = 0;
5512 	expected_sg_dig_ctrl = 0;
5513 	workaround = 0;
5514 	port_a = 1;
5515 	current_link_up = false;
5516 
5517 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5518 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5519 		workaround = 1;
5520 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5521 			port_a = 0;
5522 
5523 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5524 		/* preserve bits 20-23 for voltage regulator */
5525 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5526 	}
5527 
5528 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5529 
5530 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5531 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5532 			if (workaround) {
5533 				u32 val = serdes_cfg;
5534 
5535 				if (port_a)
5536 					val |= 0xc010000;
5537 				else
5538 					val |= 0x4010000;
5539 				tw32_f(MAC_SERDES_CFG, val);
5540 			}
5541 
5542 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5543 		}
5544 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5545 			tg3_setup_flow_control(tp, 0, 0);
5546 			current_link_up = true;
5547 		}
5548 		goto out;
5549 	}
5550 
5551 	/* Want auto-negotiation.  */
5552 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5553 
5554 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5555 	if (flowctrl & ADVERTISE_1000XPAUSE)
5556 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5557 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5558 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5559 
5560 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5561 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5562 		    tp->serdes_counter &&
5563 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5564 				    MAC_STATUS_RCVD_CFG)) ==
5565 		     MAC_STATUS_PCS_SYNCED)) {
5566 			tp->serdes_counter--;
5567 			current_link_up = true;
5568 			goto out;
5569 		}
5570 restart_autoneg:
5571 		if (workaround)
5572 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5573 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5574 		udelay(5);
5575 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5576 
5577 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5578 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5579 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5580 				 MAC_STATUS_SIGNAL_DET)) {
5581 		sg_dig_status = tr32(SG_DIG_STATUS);
5582 		mac_status = tr32(MAC_STATUS);
5583 
5584 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5585 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5586 			u32 local_adv = 0, remote_adv = 0;
5587 
5588 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5589 				local_adv |= ADVERTISE_1000XPAUSE;
5590 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5591 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5592 
5593 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5594 				remote_adv |= LPA_1000XPAUSE;
5595 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5596 				remote_adv |= LPA_1000XPAUSE_ASYM;
5597 
5598 			tp->link_config.rmt_adv =
5599 					   mii_adv_to_ethtool_adv_x(remote_adv);
5600 
5601 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5602 			current_link_up = true;
5603 			tp->serdes_counter = 0;
5604 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5605 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5606 			if (tp->serdes_counter)
5607 				tp->serdes_counter--;
5608 			else {
5609 				if (workaround) {
5610 					u32 val = serdes_cfg;
5611 
5612 					if (port_a)
5613 						val |= 0xc010000;
5614 					else
5615 						val |= 0x4010000;
5616 
5617 					tw32_f(MAC_SERDES_CFG, val);
5618 				}
5619 
5620 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5621 				udelay(40);
5622 
5623 				/* Link parallel detection - link is up */
5624 				/* only if we have PCS_SYNC and not */
5625 				/* receiving config code words */
5626 				mac_status = tr32(MAC_STATUS);
5627 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5628 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5629 					tg3_setup_flow_control(tp, 0, 0);
5630 					current_link_up = true;
5631 					tp->phy_flags |=
5632 						TG3_PHYFLG_PARALLEL_DETECT;
5633 					tp->serdes_counter =
5634 						SERDES_PARALLEL_DET_TIMEOUT;
5635 				} else
5636 					goto restart_autoneg;
5637 			}
5638 		}
5639 	} else {
5640 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5641 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5642 	}
5643 
5644 out:
5645 	return current_link_up;
5646 }
5647 
5648 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5649 {
5650 	bool current_link_up = false;
5651 
5652 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5653 		goto out;
5654 
5655 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5656 		u32 txflags, rxflags;
5657 		int i;
5658 
5659 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5660 			u32 local_adv = 0, remote_adv = 0;
5661 
5662 			if (txflags & ANEG_CFG_PS1)
5663 				local_adv |= ADVERTISE_1000XPAUSE;
5664 			if (txflags & ANEG_CFG_PS2)
5665 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5666 
5667 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5668 				remote_adv |= LPA_1000XPAUSE;
5669 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5670 				remote_adv |= LPA_1000XPAUSE_ASYM;
5671 
5672 			tp->link_config.rmt_adv =
5673 					   mii_adv_to_ethtool_adv_x(remote_adv);
5674 
5675 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5676 
5677 			current_link_up = true;
5678 		}
5679 		for (i = 0; i < 30; i++) {
5680 			udelay(20);
5681 			tw32_f(MAC_STATUS,
5682 			       (MAC_STATUS_SYNC_CHANGED |
5683 				MAC_STATUS_CFG_CHANGED));
5684 			udelay(40);
5685 			if ((tr32(MAC_STATUS) &
5686 			     (MAC_STATUS_SYNC_CHANGED |
5687 			      MAC_STATUS_CFG_CHANGED)) == 0)
5688 				break;
5689 		}
5690 
5691 		mac_status = tr32(MAC_STATUS);
5692 		if (!current_link_up &&
5693 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5694 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5695 			current_link_up = true;
5696 	} else {
5697 		tg3_setup_flow_control(tp, 0, 0);
5698 
5699 		/* Forcing 1000FD link up. */
5700 		current_link_up = true;
5701 
5702 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5703 		udelay(40);
5704 
5705 		tw32_f(MAC_MODE, tp->mac_mode);
5706 		udelay(40);
5707 	}
5708 
5709 out:
5710 	return current_link_up;
5711 }
5712 
5713 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5714 {
5715 	u32 orig_pause_cfg;
5716 	u16 orig_active_speed;
5717 	u8 orig_active_duplex;
5718 	u32 mac_status;
5719 	bool current_link_up;
5720 	int i;
5721 
5722 	orig_pause_cfg = tp->link_config.active_flowctrl;
5723 	orig_active_speed = tp->link_config.active_speed;
5724 	orig_active_duplex = tp->link_config.active_duplex;
5725 
5726 	if (!tg3_flag(tp, HW_AUTONEG) &&
5727 	    tp->link_up &&
5728 	    tg3_flag(tp, INIT_COMPLETE)) {
5729 		mac_status = tr32(MAC_STATUS);
5730 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5731 			       MAC_STATUS_SIGNAL_DET |
5732 			       MAC_STATUS_CFG_CHANGED |
5733 			       MAC_STATUS_RCVD_CFG);
5734 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5735 				   MAC_STATUS_SIGNAL_DET)) {
5736 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5737 					    MAC_STATUS_CFG_CHANGED));
5738 			return 0;
5739 		}
5740 	}
5741 
5742 	tw32_f(MAC_TX_AUTO_NEG, 0);
5743 
5744 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5745 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5746 	tw32_f(MAC_MODE, tp->mac_mode);
5747 	udelay(40);
5748 
5749 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5750 		tg3_init_bcm8002(tp);
5751 
5752 	/* Enable link change event even when serdes polling.  */
5753 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5754 	udelay(40);
5755 
5756 	current_link_up = false;
5757 	tp->link_config.rmt_adv = 0;
5758 	mac_status = tr32(MAC_STATUS);
5759 
5760 	if (tg3_flag(tp, HW_AUTONEG))
5761 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5762 	else
5763 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5764 
5765 	tp->napi[0].hw_status->status =
5766 		(SD_STATUS_UPDATED |
5767 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5768 
5769 	for (i = 0; i < 100; i++) {
5770 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5771 				    MAC_STATUS_CFG_CHANGED));
5772 		udelay(5);
5773 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5774 					 MAC_STATUS_CFG_CHANGED |
5775 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5776 			break;
5777 	}
5778 
5779 	mac_status = tr32(MAC_STATUS);
5780 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5781 		current_link_up = false;
5782 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5783 		    tp->serdes_counter == 0) {
5784 			tw32_f(MAC_MODE, (tp->mac_mode |
5785 					  MAC_MODE_SEND_CONFIGS));
5786 			udelay(1);
5787 			tw32_f(MAC_MODE, tp->mac_mode);
5788 		}
5789 	}
5790 
5791 	if (current_link_up) {
5792 		tp->link_config.active_speed = SPEED_1000;
5793 		tp->link_config.active_duplex = DUPLEX_FULL;
5794 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795 				    LED_CTRL_LNKLED_OVERRIDE |
5796 				    LED_CTRL_1000MBPS_ON));
5797 	} else {
5798 		tp->link_config.active_speed = SPEED_UNKNOWN;
5799 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5800 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5801 				    LED_CTRL_LNKLED_OVERRIDE |
5802 				    LED_CTRL_TRAFFIC_OVERRIDE));
5803 	}
5804 
5805 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5806 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5807 		if (orig_pause_cfg != now_pause_cfg ||
5808 		    orig_active_speed != tp->link_config.active_speed ||
5809 		    orig_active_duplex != tp->link_config.active_duplex)
5810 			tg3_link_report(tp);
5811 	}
5812 
5813 	return 0;
5814 }
5815 
5816 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5817 {
5818 	int err = 0;
5819 	u32 bmsr, bmcr;
5820 	u16 current_speed = SPEED_UNKNOWN;
5821 	u8 current_duplex = DUPLEX_UNKNOWN;
5822 	bool current_link_up = false;
5823 	u32 local_adv, remote_adv, sgsr;
5824 
5825 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5826 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5827 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5828 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5829 
5830 		if (force_reset)
5831 			tg3_phy_reset(tp);
5832 
5833 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5834 
5835 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5836 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5837 		} else {
5838 			current_link_up = true;
5839 			if (sgsr & SERDES_TG3_SPEED_1000) {
5840 				current_speed = SPEED_1000;
5841 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5842 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5843 				current_speed = SPEED_100;
5844 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5845 			} else {
5846 				current_speed = SPEED_10;
5847 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5848 			}
5849 
5850 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5851 				current_duplex = DUPLEX_FULL;
5852 			else
5853 				current_duplex = DUPLEX_HALF;
5854 		}
5855 
5856 		tw32_f(MAC_MODE, tp->mac_mode);
5857 		udelay(40);
5858 
5859 		tg3_clear_mac_status(tp);
5860 
5861 		goto fiber_setup_done;
5862 	}
5863 
5864 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5865 	tw32_f(MAC_MODE, tp->mac_mode);
5866 	udelay(40);
5867 
5868 	tg3_clear_mac_status(tp);
5869 
5870 	if (force_reset)
5871 		tg3_phy_reset(tp);
5872 
5873 	tp->link_config.rmt_adv = 0;
5874 
5875 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5876 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5877 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5878 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5879 			bmsr |= BMSR_LSTATUS;
5880 		else
5881 			bmsr &= ~BMSR_LSTATUS;
5882 	}
5883 
5884 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5885 
5886 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5887 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5888 		/* do nothing, just check for link up at the end */
5889 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5890 		u32 adv, newadv;
5891 
5892 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5893 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5894 				 ADVERTISE_1000XPAUSE |
5895 				 ADVERTISE_1000XPSE_ASYM |
5896 				 ADVERTISE_SLCT);
5897 
5898 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5899 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5900 
5901 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5902 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5903 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5904 			tg3_writephy(tp, MII_BMCR, bmcr);
5905 
5906 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5907 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5908 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5909 
5910 			return err;
5911 		}
5912 	} else {
5913 		u32 new_bmcr;
5914 
5915 		bmcr &= ~BMCR_SPEED1000;
5916 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5917 
5918 		if (tp->link_config.duplex == DUPLEX_FULL)
5919 			new_bmcr |= BMCR_FULLDPLX;
5920 
5921 		if (new_bmcr != bmcr) {
5922 			/* BMCR_SPEED1000 is a reserved bit that needs
5923 			 * to be set on write.
5924 			 */
5925 			new_bmcr |= BMCR_SPEED1000;
5926 
5927 			/* Force a linkdown */
5928 			if (tp->link_up) {
5929 				u32 adv;
5930 
5931 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5932 				adv &= ~(ADVERTISE_1000XFULL |
5933 					 ADVERTISE_1000XHALF |
5934 					 ADVERTISE_SLCT);
5935 				tg3_writephy(tp, MII_ADVERTISE, adv);
5936 				tg3_writephy(tp, MII_BMCR, bmcr |
5937 							   BMCR_ANRESTART |
5938 							   BMCR_ANENABLE);
5939 				udelay(10);
5940 				tg3_carrier_off(tp);
5941 			}
5942 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5943 			bmcr = new_bmcr;
5944 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5945 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5946 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5947 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5948 					bmsr |= BMSR_LSTATUS;
5949 				else
5950 					bmsr &= ~BMSR_LSTATUS;
5951 			}
5952 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5953 		}
5954 	}
5955 
5956 	if (bmsr & BMSR_LSTATUS) {
5957 		current_speed = SPEED_1000;
5958 		current_link_up = true;
5959 		if (bmcr & BMCR_FULLDPLX)
5960 			current_duplex = DUPLEX_FULL;
5961 		else
5962 			current_duplex = DUPLEX_HALF;
5963 
5964 		local_adv = 0;
5965 		remote_adv = 0;
5966 
5967 		if (bmcr & BMCR_ANENABLE) {
5968 			u32 common;
5969 
5970 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5971 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5972 			common = local_adv & remote_adv;
5973 			if (common & (ADVERTISE_1000XHALF |
5974 				      ADVERTISE_1000XFULL)) {
5975 				if (common & ADVERTISE_1000XFULL)
5976 					current_duplex = DUPLEX_FULL;
5977 				else
5978 					current_duplex = DUPLEX_HALF;
5979 
5980 				tp->link_config.rmt_adv =
5981 					   mii_adv_to_ethtool_adv_x(remote_adv);
5982 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5983 				/* Link is up via parallel detect */
5984 			} else {
5985 				current_link_up = false;
5986 			}
5987 		}
5988 	}
5989 
5990 fiber_setup_done:
5991 	if (current_link_up && current_duplex == DUPLEX_FULL)
5992 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5993 
5994 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5995 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5996 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5997 
5998 	tw32_f(MAC_MODE, tp->mac_mode);
5999 	udelay(40);
6000 
6001 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6002 
6003 	tp->link_config.active_speed = current_speed;
6004 	tp->link_config.active_duplex = current_duplex;
6005 
6006 	tg3_test_and_report_link_chg(tp, current_link_up);
6007 	return err;
6008 }
6009 
6010 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6011 {
6012 	if (tp->serdes_counter) {
6013 		/* Give autoneg time to complete. */
6014 		tp->serdes_counter--;
6015 		return;
6016 	}
6017 
6018 	if (!tp->link_up &&
6019 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6020 		u32 bmcr;
6021 
6022 		tg3_readphy(tp, MII_BMCR, &bmcr);
6023 		if (bmcr & BMCR_ANENABLE) {
6024 			u32 phy1, phy2;
6025 
6026 			/* Select shadow register 0x1f */
6027 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6028 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6029 
6030 			/* Select expansion interrupt status register */
6031 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6032 					 MII_TG3_DSP_EXP1_INT_STAT);
6033 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6035 
6036 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6037 				/* We have signal detect and not receiving
6038 				 * config code words, link is up by parallel
6039 				 * detection.
6040 				 */
6041 
6042 				bmcr &= ~BMCR_ANENABLE;
6043 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6044 				tg3_writephy(tp, MII_BMCR, bmcr);
6045 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6046 			}
6047 		}
6048 	} else if (tp->link_up &&
6049 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6050 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6051 		u32 phy2;
6052 
6053 		/* Select expansion interrupt status register */
6054 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6055 				 MII_TG3_DSP_EXP1_INT_STAT);
6056 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6057 		if (phy2 & 0x20) {
6058 			u32 bmcr;
6059 
6060 			/* Config code words received, turn on autoneg. */
6061 			tg3_readphy(tp, MII_BMCR, &bmcr);
6062 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6063 
6064 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6065 
6066 		}
6067 	}
6068 }
6069 
6070 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6071 {
6072 	u32 val;
6073 	int err;
6074 
6075 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6076 		err = tg3_setup_fiber_phy(tp, force_reset);
6077 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6078 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6079 	else
6080 		err = tg3_setup_copper_phy(tp, force_reset);
6081 
6082 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6083 		u32 scale;
6084 
6085 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6086 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6087 			scale = 65;
6088 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6089 			scale = 6;
6090 		else
6091 			scale = 12;
6092 
6093 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6094 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6095 		tw32(GRC_MISC_CFG, val);
6096 	}
6097 
6098 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6099 	      (6 << TX_LENGTHS_IPG_SHIFT);
6100 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6101 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6102 		val |= tr32(MAC_TX_LENGTHS) &
6103 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6104 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6105 
6106 	if (tp->link_config.active_speed == SPEED_1000 &&
6107 	    tp->link_config.active_duplex == DUPLEX_HALF)
6108 		tw32(MAC_TX_LENGTHS, val |
6109 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6110 	else
6111 		tw32(MAC_TX_LENGTHS, val |
6112 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6113 
6114 	if (!tg3_flag(tp, 5705_PLUS)) {
6115 		if (tp->link_up) {
6116 			tw32(HOSTCC_STAT_COAL_TICKS,
6117 			     tp->coal.stats_block_coalesce_usecs);
6118 		} else {
6119 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6120 		}
6121 	}
6122 
6123 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6124 		val = tr32(PCIE_PWR_MGMT_THRESH);
6125 		if (!tp->link_up)
6126 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6127 			      tp->pwrmgmt_thresh;
6128 		else
6129 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6130 		tw32(PCIE_PWR_MGMT_THRESH, val);
6131 	}
6132 
6133 	return err;
6134 }
6135 
6136 /* tp->lock must be held */
6137 static u64 tg3_refclk_read(struct tg3 *tp)
6138 {
6139 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6140 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6141 }
6142 
6143 /* tp->lock must be held */
6144 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 {
6146 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147 
6148 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6149 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6150 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6151 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 }
6153 
6154 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6155 static inline void tg3_full_unlock(struct tg3 *tp);
6156 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 {
6158 	struct tg3 *tp = netdev_priv(dev);
6159 
6160 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6161 				SOF_TIMESTAMPING_RX_SOFTWARE |
6162 				SOF_TIMESTAMPING_SOFTWARE;
6163 
6164 	if (tg3_flag(tp, PTP_CAPABLE)) {
6165 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6166 					SOF_TIMESTAMPING_RX_HARDWARE |
6167 					SOF_TIMESTAMPING_RAW_HARDWARE;
6168 	}
6169 
6170 	if (tp->ptp_clock)
6171 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6172 	else
6173 		info->phc_index = -1;
6174 
6175 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176 
6177 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6178 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6179 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6180 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6181 	return 0;
6182 }
6183 
6184 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6185 {
6186 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6187 	bool neg_adj = false;
6188 	u32 correction = 0;
6189 
6190 	if (ppb < 0) {
6191 		neg_adj = true;
6192 		ppb = -ppb;
6193 	}
6194 
6195 	/* Frequency adjustment is performed using hardware with a 24 bit
6196 	 * accumulator and a programmable correction value. On each clk, the
6197 	 * correction value gets added to the accumulator and when it
6198 	 * overflows, the time counter is incremented/decremented.
6199 	 *
6200 	 * So conversion from ppb to correction value is
6201 	 *		ppb * (1 << 24) / 1000000000
6202 	 */
6203 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6204 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6205 
6206 	tg3_full_lock(tp, 0);
6207 
6208 	if (correction)
6209 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6210 		     TG3_EAV_REF_CLK_CORRECT_EN |
6211 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6212 	else
6213 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6214 
6215 	tg3_full_unlock(tp);
6216 
6217 	return 0;
6218 }
6219 
6220 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6221 {
6222 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6223 
6224 	tg3_full_lock(tp, 0);
6225 	tp->ptp_adjust += delta;
6226 	tg3_full_unlock(tp);
6227 
6228 	return 0;
6229 }
6230 
6231 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6232 {
6233 	u64 ns;
6234 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235 
6236 	tg3_full_lock(tp, 0);
6237 	ns = tg3_refclk_read(tp);
6238 	ns += tp->ptp_adjust;
6239 	tg3_full_unlock(tp);
6240 
6241 	*ts = ns_to_timespec64(ns);
6242 
6243 	return 0;
6244 }
6245 
6246 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6247 			   const struct timespec64 *ts)
6248 {
6249 	u64 ns;
6250 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6251 
6252 	ns = timespec64_to_ns(ts);
6253 
6254 	tg3_full_lock(tp, 0);
6255 	tg3_refclk_write(tp, ns);
6256 	tp->ptp_adjust = 0;
6257 	tg3_full_unlock(tp);
6258 
6259 	return 0;
6260 }
6261 
6262 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6263 			  struct ptp_clock_request *rq, int on)
6264 {
6265 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6266 	u32 clock_ctl;
6267 	int rval = 0;
6268 
6269 	switch (rq->type) {
6270 	case PTP_CLK_REQ_PEROUT:
6271 		if (rq->perout.index != 0)
6272 			return -EINVAL;
6273 
6274 		tg3_full_lock(tp, 0);
6275 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6276 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6277 
6278 		if (on) {
6279 			u64 nsec;
6280 
6281 			nsec = rq->perout.start.sec * 1000000000ULL +
6282 			       rq->perout.start.nsec;
6283 
6284 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6285 				netdev_warn(tp->dev,
6286 					    "Device supports only a one-shot timesync output, period must be 0\n");
6287 				rval = -EINVAL;
6288 				goto err_out;
6289 			}
6290 
6291 			if (nsec & (1ULL << 63)) {
6292 				netdev_warn(tp->dev,
6293 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6294 				rval = -EINVAL;
6295 				goto err_out;
6296 			}
6297 
6298 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6299 			tw32(TG3_EAV_WATCHDOG0_MSB,
6300 			     TG3_EAV_WATCHDOG0_EN |
6301 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6302 
6303 			tw32(TG3_EAV_REF_CLCK_CTL,
6304 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6305 		} else {
6306 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6307 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6308 		}
6309 
6310 err_out:
6311 		tg3_full_unlock(tp);
6312 		return rval;
6313 
6314 	default:
6315 		break;
6316 	}
6317 
6318 	return -EOPNOTSUPP;
6319 }
6320 
6321 static const struct ptp_clock_info tg3_ptp_caps = {
6322 	.owner		= THIS_MODULE,
6323 	.name		= "tg3 clock",
6324 	.max_adj	= 250000000,
6325 	.n_alarm	= 0,
6326 	.n_ext_ts	= 0,
6327 	.n_per_out	= 1,
6328 	.n_pins		= 0,
6329 	.pps		= 0,
6330 	.adjfreq	= tg3_ptp_adjfreq,
6331 	.adjtime	= tg3_ptp_adjtime,
6332 	.gettime64	= tg3_ptp_gettime,
6333 	.settime64	= tg3_ptp_settime,
6334 	.enable		= tg3_ptp_enable,
6335 };
6336 
6337 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6338 				     struct skb_shared_hwtstamps *timestamp)
6339 {
6340 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6341 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6342 					   tp->ptp_adjust);
6343 }
6344 
6345 /* tp->lock must be held */
6346 static void tg3_ptp_init(struct tg3 *tp)
6347 {
6348 	if (!tg3_flag(tp, PTP_CAPABLE))
6349 		return;
6350 
6351 	/* Initialize the hardware clock to the system time. */
6352 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6353 	tp->ptp_adjust = 0;
6354 	tp->ptp_info = tg3_ptp_caps;
6355 }
6356 
6357 /* tp->lock must be held */
6358 static void tg3_ptp_resume(struct tg3 *tp)
6359 {
6360 	if (!tg3_flag(tp, PTP_CAPABLE))
6361 		return;
6362 
6363 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6364 	tp->ptp_adjust = 0;
6365 }
6366 
6367 static void tg3_ptp_fini(struct tg3 *tp)
6368 {
6369 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6370 		return;
6371 
6372 	ptp_clock_unregister(tp->ptp_clock);
6373 	tp->ptp_clock = NULL;
6374 	tp->ptp_adjust = 0;
6375 }
6376 
6377 static inline int tg3_irq_sync(struct tg3 *tp)
6378 {
6379 	return tp->irq_sync;
6380 }
6381 
6382 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6383 {
6384 	int i;
6385 
6386 	dst = (u32 *)((u8 *)dst + off);
6387 	for (i = 0; i < len; i += sizeof(u32))
6388 		*dst++ = tr32(off + i);
6389 }
6390 
6391 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6392 {
6393 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6394 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6395 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6396 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6397 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6398 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6399 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6400 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6401 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6402 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6403 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6404 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6405 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6406 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6407 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6408 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6409 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6410 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6411 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6412 
6413 	if (tg3_flag(tp, SUPPORT_MSIX))
6414 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6415 
6416 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6417 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6418 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6419 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6420 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6421 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6422 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6423 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6424 
6425 	if (!tg3_flag(tp, 5705_PLUS)) {
6426 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6427 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6428 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6429 	}
6430 
6431 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6432 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6433 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6434 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6435 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6436 
6437 	if (tg3_flag(tp, NVRAM))
6438 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6439 }
6440 
6441 static void tg3_dump_state(struct tg3 *tp)
6442 {
6443 	int i;
6444 	u32 *regs;
6445 
6446 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6447 	if (!regs)
6448 		return;
6449 
6450 	if (tg3_flag(tp, PCI_EXPRESS)) {
6451 		/* Read up to but not including private PCI registers */
6452 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6453 			regs[i / sizeof(u32)] = tr32(i);
6454 	} else
6455 		tg3_dump_legacy_regs(tp, regs);
6456 
6457 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6458 		if (!regs[i + 0] && !regs[i + 1] &&
6459 		    !regs[i + 2] && !regs[i + 3])
6460 			continue;
6461 
6462 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6463 			   i * 4,
6464 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6465 	}
6466 
6467 	kfree(regs);
6468 
6469 	for (i = 0; i < tp->irq_cnt; i++) {
6470 		struct tg3_napi *tnapi = &tp->napi[i];
6471 
6472 		/* SW status block */
6473 		netdev_err(tp->dev,
6474 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6475 			   i,
6476 			   tnapi->hw_status->status,
6477 			   tnapi->hw_status->status_tag,
6478 			   tnapi->hw_status->rx_jumbo_consumer,
6479 			   tnapi->hw_status->rx_consumer,
6480 			   tnapi->hw_status->rx_mini_consumer,
6481 			   tnapi->hw_status->idx[0].rx_producer,
6482 			   tnapi->hw_status->idx[0].tx_consumer);
6483 
6484 		netdev_err(tp->dev,
6485 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6486 			   i,
6487 			   tnapi->last_tag, tnapi->last_irq_tag,
6488 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6489 			   tnapi->rx_rcb_ptr,
6490 			   tnapi->prodring.rx_std_prod_idx,
6491 			   tnapi->prodring.rx_std_cons_idx,
6492 			   tnapi->prodring.rx_jmb_prod_idx,
6493 			   tnapi->prodring.rx_jmb_cons_idx);
6494 	}
6495 }
6496 
6497 /* This is called whenever we suspect that the system chipset is re-
6498  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6499  * is bogus tx completions. We try to recover by setting the
6500  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6501  * in the workqueue.
6502  */
6503 static void tg3_tx_recover(struct tg3 *tp)
6504 {
6505 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6506 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6507 
6508 	netdev_warn(tp->dev,
6509 		    "The system may be re-ordering memory-mapped I/O "
6510 		    "cycles to the network device, attempting to recover. "
6511 		    "Please report the problem to the driver maintainer "
6512 		    "and include system chipset information.\n");
6513 
6514 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6515 }
6516 
6517 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6518 {
6519 	/* Tell compiler to fetch tx indices from memory. */
6520 	barrier();
6521 	return tnapi->tx_pending -
6522 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6523 }
6524 
6525 /* Tigon3 never reports partial packet sends.  So we do not
6526  * need special logic to handle SKBs that have not had all
6527  * of their frags sent yet, like SunGEM does.
6528  */
6529 static void tg3_tx(struct tg3_napi *tnapi)
6530 {
6531 	struct tg3 *tp = tnapi->tp;
6532 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6533 	u32 sw_idx = tnapi->tx_cons;
6534 	struct netdev_queue *txq;
6535 	int index = tnapi - tp->napi;
6536 	unsigned int pkts_compl = 0, bytes_compl = 0;
6537 
6538 	if (tg3_flag(tp, ENABLE_TSS))
6539 		index--;
6540 
6541 	txq = netdev_get_tx_queue(tp->dev, index);
6542 
6543 	while (sw_idx != hw_idx) {
6544 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6545 		struct sk_buff *skb = ri->skb;
6546 		int i, tx_bug = 0;
6547 
6548 		if (unlikely(skb == NULL)) {
6549 			tg3_tx_recover(tp);
6550 			return;
6551 		}
6552 
6553 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6554 			struct skb_shared_hwtstamps timestamp;
6555 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6556 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6557 
6558 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6559 
6560 			skb_tstamp_tx(skb, &timestamp);
6561 		}
6562 
6563 		pci_unmap_single(tp->pdev,
6564 				 dma_unmap_addr(ri, mapping),
6565 				 skb_headlen(skb),
6566 				 PCI_DMA_TODEVICE);
6567 
6568 		ri->skb = NULL;
6569 
6570 		while (ri->fragmented) {
6571 			ri->fragmented = false;
6572 			sw_idx = NEXT_TX(sw_idx);
6573 			ri = &tnapi->tx_buffers[sw_idx];
6574 		}
6575 
6576 		sw_idx = NEXT_TX(sw_idx);
6577 
6578 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6579 			ri = &tnapi->tx_buffers[sw_idx];
6580 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6581 				tx_bug = 1;
6582 
6583 			pci_unmap_page(tp->pdev,
6584 				       dma_unmap_addr(ri, mapping),
6585 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6586 				       PCI_DMA_TODEVICE);
6587 
6588 			while (ri->fragmented) {
6589 				ri->fragmented = false;
6590 				sw_idx = NEXT_TX(sw_idx);
6591 				ri = &tnapi->tx_buffers[sw_idx];
6592 			}
6593 
6594 			sw_idx = NEXT_TX(sw_idx);
6595 		}
6596 
6597 		pkts_compl++;
6598 		bytes_compl += skb->len;
6599 
6600 		dev_consume_skb_any(skb);
6601 
6602 		if (unlikely(tx_bug)) {
6603 			tg3_tx_recover(tp);
6604 			return;
6605 		}
6606 	}
6607 
6608 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6609 
6610 	tnapi->tx_cons = sw_idx;
6611 
6612 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6613 	 * before checking for netif_queue_stopped().  Without the
6614 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6615 	 * will miss it and cause the queue to be stopped forever.
6616 	 */
6617 	smp_mb();
6618 
6619 	if (unlikely(netif_tx_queue_stopped(txq) &&
6620 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6621 		__netif_tx_lock(txq, smp_processor_id());
6622 		if (netif_tx_queue_stopped(txq) &&
6623 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6624 			netif_tx_wake_queue(txq);
6625 		__netif_tx_unlock(txq);
6626 	}
6627 }
6628 
6629 static void tg3_frag_free(bool is_frag, void *data)
6630 {
6631 	if (is_frag)
6632 		skb_free_frag(data);
6633 	else
6634 		kfree(data);
6635 }
6636 
6637 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6638 {
6639 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6640 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6641 
6642 	if (!ri->data)
6643 		return;
6644 
6645 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6646 			 map_sz, PCI_DMA_FROMDEVICE);
6647 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6648 	ri->data = NULL;
6649 }
6650 
6651 
6652 /* Returns size of skb allocated or < 0 on error.
6653  *
6654  * We only need to fill in the address because the other members
6655  * of the RX descriptor are invariant, see tg3_init_rings.
6656  *
6657  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6658  * posting buffers we only dirty the first cache line of the RX
6659  * descriptor (containing the address).  Whereas for the RX status
6660  * buffers the cpu only reads the last cacheline of the RX descriptor
6661  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6662  */
6663 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6664 			     u32 opaque_key, u32 dest_idx_unmasked,
6665 			     unsigned int *frag_size)
6666 {
6667 	struct tg3_rx_buffer_desc *desc;
6668 	struct ring_info *map;
6669 	u8 *data;
6670 	dma_addr_t mapping;
6671 	int skb_size, data_size, dest_idx;
6672 
6673 	switch (opaque_key) {
6674 	case RXD_OPAQUE_RING_STD:
6675 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6676 		desc = &tpr->rx_std[dest_idx];
6677 		map = &tpr->rx_std_buffers[dest_idx];
6678 		data_size = tp->rx_pkt_map_sz;
6679 		break;
6680 
6681 	case RXD_OPAQUE_RING_JUMBO:
6682 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6683 		desc = &tpr->rx_jmb[dest_idx].std;
6684 		map = &tpr->rx_jmb_buffers[dest_idx];
6685 		data_size = TG3_RX_JMB_MAP_SZ;
6686 		break;
6687 
6688 	default:
6689 		return -EINVAL;
6690 	}
6691 
6692 	/* Do not overwrite any of the map or rp information
6693 	 * until we are sure we can commit to a new buffer.
6694 	 *
6695 	 * Callers depend upon this behavior and assume that
6696 	 * we leave everything unchanged if we fail.
6697 	 */
6698 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6699 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6700 	if (skb_size <= PAGE_SIZE) {
6701 		data = netdev_alloc_frag(skb_size);
6702 		*frag_size = skb_size;
6703 	} else {
6704 		data = kmalloc(skb_size, GFP_ATOMIC);
6705 		*frag_size = 0;
6706 	}
6707 	if (!data)
6708 		return -ENOMEM;
6709 
6710 	mapping = pci_map_single(tp->pdev,
6711 				 data + TG3_RX_OFFSET(tp),
6712 				 data_size,
6713 				 PCI_DMA_FROMDEVICE);
6714 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6715 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6716 		return -EIO;
6717 	}
6718 
6719 	map->data = data;
6720 	dma_unmap_addr_set(map, mapping, mapping);
6721 
6722 	desc->addr_hi = ((u64)mapping >> 32);
6723 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6724 
6725 	return data_size;
6726 }
6727 
6728 /* We only need to move over in the address because the other
6729  * members of the RX descriptor are invariant.  See notes above
6730  * tg3_alloc_rx_data for full details.
6731  */
6732 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6733 			   struct tg3_rx_prodring_set *dpr,
6734 			   u32 opaque_key, int src_idx,
6735 			   u32 dest_idx_unmasked)
6736 {
6737 	struct tg3 *tp = tnapi->tp;
6738 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6739 	struct ring_info *src_map, *dest_map;
6740 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6741 	int dest_idx;
6742 
6743 	switch (opaque_key) {
6744 	case RXD_OPAQUE_RING_STD:
6745 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6746 		dest_desc = &dpr->rx_std[dest_idx];
6747 		dest_map = &dpr->rx_std_buffers[dest_idx];
6748 		src_desc = &spr->rx_std[src_idx];
6749 		src_map = &spr->rx_std_buffers[src_idx];
6750 		break;
6751 
6752 	case RXD_OPAQUE_RING_JUMBO:
6753 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6754 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6755 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6756 		src_desc = &spr->rx_jmb[src_idx].std;
6757 		src_map = &spr->rx_jmb_buffers[src_idx];
6758 		break;
6759 
6760 	default:
6761 		return;
6762 	}
6763 
6764 	dest_map->data = src_map->data;
6765 	dma_unmap_addr_set(dest_map, mapping,
6766 			   dma_unmap_addr(src_map, mapping));
6767 	dest_desc->addr_hi = src_desc->addr_hi;
6768 	dest_desc->addr_lo = src_desc->addr_lo;
6769 
6770 	/* Ensure that the update to the skb happens after the physical
6771 	 * addresses have been transferred to the new BD location.
6772 	 */
6773 	smp_wmb();
6774 
6775 	src_map->data = NULL;
6776 }
6777 
6778 /* The RX ring scheme is composed of multiple rings which post fresh
6779  * buffers to the chip, and one special ring the chip uses to report
6780  * status back to the host.
6781  *
6782  * The special ring reports the status of received packets to the
6783  * host.  The chip does not write into the original descriptor the
6784  * RX buffer was obtained from.  The chip simply takes the original
6785  * descriptor as provided by the host, updates the status and length
6786  * field, then writes this into the next status ring entry.
6787  *
6788  * Each ring the host uses to post buffers to the chip is described
6789  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6790  * it is first placed into the on-chip ram.  When the packet's length
6791  * is known, it walks down the TG3_BDINFO entries to select the ring.
6792  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6793  * which is within the range of the new packet's length is chosen.
6794  *
6795  * The "separate ring for rx status" scheme may sound queer, but it makes
6796  * sense from a cache coherency perspective.  If only the host writes
6797  * to the buffer post rings, and only the chip writes to the rx status
6798  * rings, then cache lines never move beyond shared-modified state.
6799  * If both the host and chip were to write into the same ring, cache line
6800  * eviction could occur since both entities want it in an exclusive state.
6801  */
6802 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6803 {
6804 	struct tg3 *tp = tnapi->tp;
6805 	u32 work_mask, rx_std_posted = 0;
6806 	u32 std_prod_idx, jmb_prod_idx;
6807 	u32 sw_idx = tnapi->rx_rcb_ptr;
6808 	u16 hw_idx;
6809 	int received;
6810 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6811 
6812 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6813 	/*
6814 	 * We need to order the read of hw_idx and the read of
6815 	 * the opaque cookie.
6816 	 */
6817 	rmb();
6818 	work_mask = 0;
6819 	received = 0;
6820 	std_prod_idx = tpr->rx_std_prod_idx;
6821 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6822 	while (sw_idx != hw_idx && budget > 0) {
6823 		struct ring_info *ri;
6824 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6825 		unsigned int len;
6826 		struct sk_buff *skb;
6827 		dma_addr_t dma_addr;
6828 		u32 opaque_key, desc_idx, *post_ptr;
6829 		u8 *data;
6830 		u64 tstamp = 0;
6831 
6832 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6833 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6834 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6835 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6836 			dma_addr = dma_unmap_addr(ri, mapping);
6837 			data = ri->data;
6838 			post_ptr = &std_prod_idx;
6839 			rx_std_posted++;
6840 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6841 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6842 			dma_addr = dma_unmap_addr(ri, mapping);
6843 			data = ri->data;
6844 			post_ptr = &jmb_prod_idx;
6845 		} else
6846 			goto next_pkt_nopost;
6847 
6848 		work_mask |= opaque_key;
6849 
6850 		if (desc->err_vlan & RXD_ERR_MASK) {
6851 		drop_it:
6852 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6853 				       desc_idx, *post_ptr);
6854 		drop_it_no_recycle:
6855 			/* Other statistics kept track of by card. */
6856 			tp->rx_dropped++;
6857 			goto next_pkt;
6858 		}
6859 
6860 		prefetch(data + TG3_RX_OFFSET(tp));
6861 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6862 		      ETH_FCS_LEN;
6863 
6864 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6866 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6867 		     RXD_FLAG_PTPSTAT_PTPV2) {
6868 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6869 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6870 		}
6871 
6872 		if (len > TG3_RX_COPY_THRESH(tp)) {
6873 			int skb_size;
6874 			unsigned int frag_size;
6875 
6876 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6877 						    *post_ptr, &frag_size);
6878 			if (skb_size < 0)
6879 				goto drop_it;
6880 
6881 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6882 					 PCI_DMA_FROMDEVICE);
6883 
6884 			/* Ensure that the update to the data happens
6885 			 * after the usage of the old DMA mapping.
6886 			 */
6887 			smp_wmb();
6888 
6889 			ri->data = NULL;
6890 
6891 			skb = build_skb(data, frag_size);
6892 			if (!skb) {
6893 				tg3_frag_free(frag_size != 0, data);
6894 				goto drop_it_no_recycle;
6895 			}
6896 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6897 		} else {
6898 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6899 				       desc_idx, *post_ptr);
6900 
6901 			skb = netdev_alloc_skb(tp->dev,
6902 					       len + TG3_RAW_IP_ALIGN);
6903 			if (skb == NULL)
6904 				goto drop_it_no_recycle;
6905 
6906 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6907 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6908 			memcpy(skb->data,
6909 			       data + TG3_RX_OFFSET(tp),
6910 			       len);
6911 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6912 		}
6913 
6914 		skb_put(skb, len);
6915 		if (tstamp)
6916 			tg3_hwclock_to_timestamp(tp, tstamp,
6917 						 skb_hwtstamps(skb));
6918 
6919 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6920 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6921 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6922 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6923 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6924 		else
6925 			skb_checksum_none_assert(skb);
6926 
6927 		skb->protocol = eth_type_trans(skb, tp->dev);
6928 
6929 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6930 		    skb->protocol != htons(ETH_P_8021Q) &&
6931 		    skb->protocol != htons(ETH_P_8021AD)) {
6932 			dev_kfree_skb_any(skb);
6933 			goto drop_it_no_recycle;
6934 		}
6935 
6936 		if (desc->type_flags & RXD_FLAG_VLAN &&
6937 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6938 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6939 					       desc->err_vlan & RXD_VLAN_MASK);
6940 
6941 		napi_gro_receive(&tnapi->napi, skb);
6942 
6943 		received++;
6944 		budget--;
6945 
6946 next_pkt:
6947 		(*post_ptr)++;
6948 
6949 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6950 			tpr->rx_std_prod_idx = std_prod_idx &
6951 					       tp->rx_std_ring_mask;
6952 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6953 				     tpr->rx_std_prod_idx);
6954 			work_mask &= ~RXD_OPAQUE_RING_STD;
6955 			rx_std_posted = 0;
6956 		}
6957 next_pkt_nopost:
6958 		sw_idx++;
6959 		sw_idx &= tp->rx_ret_ring_mask;
6960 
6961 		/* Refresh hw_idx to see if there is new work */
6962 		if (sw_idx == hw_idx) {
6963 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6964 			rmb();
6965 		}
6966 	}
6967 
6968 	/* ACK the status ring. */
6969 	tnapi->rx_rcb_ptr = sw_idx;
6970 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6971 
6972 	/* Refill RX ring(s). */
6973 	if (!tg3_flag(tp, ENABLE_RSS)) {
6974 		/* Sync BD data before updating mailbox */
6975 		wmb();
6976 
6977 		if (work_mask & RXD_OPAQUE_RING_STD) {
6978 			tpr->rx_std_prod_idx = std_prod_idx &
6979 					       tp->rx_std_ring_mask;
6980 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6981 				     tpr->rx_std_prod_idx);
6982 		}
6983 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6984 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6985 					       tp->rx_jmb_ring_mask;
6986 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6987 				     tpr->rx_jmb_prod_idx);
6988 		}
6989 		mmiowb();
6990 	} else if (work_mask) {
6991 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6992 		 * updated before the producer indices can be updated.
6993 		 */
6994 		smp_wmb();
6995 
6996 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6997 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6998 
6999 		if (tnapi != &tp->napi[1]) {
7000 			tp->rx_refill = true;
7001 			napi_schedule(&tp->napi[1].napi);
7002 		}
7003 	}
7004 
7005 	return received;
7006 }
7007 
7008 static void tg3_poll_link(struct tg3 *tp)
7009 {
7010 	/* handle link change and other phy events */
7011 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7012 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7013 
7014 		if (sblk->status & SD_STATUS_LINK_CHG) {
7015 			sblk->status = SD_STATUS_UPDATED |
7016 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7017 			spin_lock(&tp->lock);
7018 			if (tg3_flag(tp, USE_PHYLIB)) {
7019 				tw32_f(MAC_STATUS,
7020 				     (MAC_STATUS_SYNC_CHANGED |
7021 				      MAC_STATUS_CFG_CHANGED |
7022 				      MAC_STATUS_MI_COMPLETION |
7023 				      MAC_STATUS_LNKSTATE_CHANGED));
7024 				udelay(40);
7025 			} else
7026 				tg3_setup_phy(tp, false);
7027 			spin_unlock(&tp->lock);
7028 		}
7029 	}
7030 }
7031 
7032 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7033 				struct tg3_rx_prodring_set *dpr,
7034 				struct tg3_rx_prodring_set *spr)
7035 {
7036 	u32 si, di, cpycnt, src_prod_idx;
7037 	int i, err = 0;
7038 
7039 	while (1) {
7040 		src_prod_idx = spr->rx_std_prod_idx;
7041 
7042 		/* Make sure updates to the rx_std_buffers[] entries and the
7043 		 * standard producer index are seen in the correct order.
7044 		 */
7045 		smp_rmb();
7046 
7047 		if (spr->rx_std_cons_idx == src_prod_idx)
7048 			break;
7049 
7050 		if (spr->rx_std_cons_idx < src_prod_idx)
7051 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7052 		else
7053 			cpycnt = tp->rx_std_ring_mask + 1 -
7054 				 spr->rx_std_cons_idx;
7055 
7056 		cpycnt = min(cpycnt,
7057 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7058 
7059 		si = spr->rx_std_cons_idx;
7060 		di = dpr->rx_std_prod_idx;
7061 
7062 		for (i = di; i < di + cpycnt; i++) {
7063 			if (dpr->rx_std_buffers[i].data) {
7064 				cpycnt = i - di;
7065 				err = -ENOSPC;
7066 				break;
7067 			}
7068 		}
7069 
7070 		if (!cpycnt)
7071 			break;
7072 
7073 		/* Ensure that updates to the rx_std_buffers ring and the
7074 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7075 		 * ordered correctly WRT the skb check above.
7076 		 */
7077 		smp_rmb();
7078 
7079 		memcpy(&dpr->rx_std_buffers[di],
7080 		       &spr->rx_std_buffers[si],
7081 		       cpycnt * sizeof(struct ring_info));
7082 
7083 		for (i = 0; i < cpycnt; i++, di++, si++) {
7084 			struct tg3_rx_buffer_desc *sbd, *dbd;
7085 			sbd = &spr->rx_std[si];
7086 			dbd = &dpr->rx_std[di];
7087 			dbd->addr_hi = sbd->addr_hi;
7088 			dbd->addr_lo = sbd->addr_lo;
7089 		}
7090 
7091 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7092 				       tp->rx_std_ring_mask;
7093 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7094 				       tp->rx_std_ring_mask;
7095 	}
7096 
7097 	while (1) {
7098 		src_prod_idx = spr->rx_jmb_prod_idx;
7099 
7100 		/* Make sure updates to the rx_jmb_buffers[] entries and
7101 		 * the jumbo producer index are seen in the correct order.
7102 		 */
7103 		smp_rmb();
7104 
7105 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7106 			break;
7107 
7108 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7109 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7110 		else
7111 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7112 				 spr->rx_jmb_cons_idx;
7113 
7114 		cpycnt = min(cpycnt,
7115 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7116 
7117 		si = spr->rx_jmb_cons_idx;
7118 		di = dpr->rx_jmb_prod_idx;
7119 
7120 		for (i = di; i < di + cpycnt; i++) {
7121 			if (dpr->rx_jmb_buffers[i].data) {
7122 				cpycnt = i - di;
7123 				err = -ENOSPC;
7124 				break;
7125 			}
7126 		}
7127 
7128 		if (!cpycnt)
7129 			break;
7130 
7131 		/* Ensure that updates to the rx_jmb_buffers ring and the
7132 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7133 		 * ordered correctly WRT the skb check above.
7134 		 */
7135 		smp_rmb();
7136 
7137 		memcpy(&dpr->rx_jmb_buffers[di],
7138 		       &spr->rx_jmb_buffers[si],
7139 		       cpycnt * sizeof(struct ring_info));
7140 
7141 		for (i = 0; i < cpycnt; i++, di++, si++) {
7142 			struct tg3_rx_buffer_desc *sbd, *dbd;
7143 			sbd = &spr->rx_jmb[si].std;
7144 			dbd = &dpr->rx_jmb[di].std;
7145 			dbd->addr_hi = sbd->addr_hi;
7146 			dbd->addr_lo = sbd->addr_lo;
7147 		}
7148 
7149 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7150 				       tp->rx_jmb_ring_mask;
7151 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7152 				       tp->rx_jmb_ring_mask;
7153 	}
7154 
7155 	return err;
7156 }
7157 
7158 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7159 {
7160 	struct tg3 *tp = tnapi->tp;
7161 
7162 	/* run TX completion thread */
7163 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7164 		tg3_tx(tnapi);
7165 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7166 			return work_done;
7167 	}
7168 
7169 	if (!tnapi->rx_rcb_prod_idx)
7170 		return work_done;
7171 
7172 	/* run RX thread, within the bounds set by NAPI.
7173 	 * All RX "locking" is done by ensuring outside
7174 	 * code synchronizes with tg3->napi.poll()
7175 	 */
7176 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7177 		work_done += tg3_rx(tnapi, budget - work_done);
7178 
7179 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7180 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7181 		int i, err = 0;
7182 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7183 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7184 
7185 		tp->rx_refill = false;
7186 		for (i = 1; i <= tp->rxq_cnt; i++)
7187 			err |= tg3_rx_prodring_xfer(tp, dpr,
7188 						    &tp->napi[i].prodring);
7189 
7190 		wmb();
7191 
7192 		if (std_prod_idx != dpr->rx_std_prod_idx)
7193 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7194 				     dpr->rx_std_prod_idx);
7195 
7196 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7197 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7198 				     dpr->rx_jmb_prod_idx);
7199 
7200 		mmiowb();
7201 
7202 		if (err)
7203 			tw32_f(HOSTCC_MODE, tp->coal_now);
7204 	}
7205 
7206 	return work_done;
7207 }
7208 
7209 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7210 {
7211 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7212 		schedule_work(&tp->reset_task);
7213 }
7214 
7215 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7216 {
7217 	cancel_work_sync(&tp->reset_task);
7218 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7219 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7220 }
7221 
7222 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7223 {
7224 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7225 	struct tg3 *tp = tnapi->tp;
7226 	int work_done = 0;
7227 	struct tg3_hw_status *sblk = tnapi->hw_status;
7228 
7229 	while (1) {
7230 		work_done = tg3_poll_work(tnapi, work_done, budget);
7231 
7232 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7233 			goto tx_recovery;
7234 
7235 		if (unlikely(work_done >= budget))
7236 			break;
7237 
7238 		/* tp->last_tag is used in tg3_int_reenable() below
7239 		 * to tell the hw how much work has been processed,
7240 		 * so we must read it before checking for more work.
7241 		 */
7242 		tnapi->last_tag = sblk->status_tag;
7243 		tnapi->last_irq_tag = tnapi->last_tag;
7244 		rmb();
7245 
7246 		/* check for RX/TX work to do */
7247 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7248 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7249 
7250 			/* This test here is not race free, but will reduce
7251 			 * the number of interrupts by looping again.
7252 			 */
7253 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7254 				continue;
7255 
7256 			napi_complete_done(napi, work_done);
7257 			/* Reenable interrupts. */
7258 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7259 
7260 			/* This test here is synchronized by napi_schedule()
7261 			 * and napi_complete() to close the race condition.
7262 			 */
7263 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7264 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7265 						  HOSTCC_MODE_ENABLE |
7266 						  tnapi->coal_now);
7267 			}
7268 			mmiowb();
7269 			break;
7270 		}
7271 	}
7272 
7273 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7274 	return work_done;
7275 
7276 tx_recovery:
7277 	/* work_done is guaranteed to be less than budget. */
7278 	napi_complete(napi);
7279 	tg3_reset_task_schedule(tp);
7280 	return work_done;
7281 }
7282 
7283 static void tg3_process_error(struct tg3 *tp)
7284 {
7285 	u32 val;
7286 	bool real_error = false;
7287 
7288 	if (tg3_flag(tp, ERROR_PROCESSED))
7289 		return;
7290 
7291 	/* Check Flow Attention register */
7292 	val = tr32(HOSTCC_FLOW_ATTN);
7293 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7294 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7295 		real_error = true;
7296 	}
7297 
7298 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7299 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7300 		real_error = true;
7301 	}
7302 
7303 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7304 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7305 		real_error = true;
7306 	}
7307 
7308 	if (!real_error)
7309 		return;
7310 
7311 	tg3_dump_state(tp);
7312 
7313 	tg3_flag_set(tp, ERROR_PROCESSED);
7314 	tg3_reset_task_schedule(tp);
7315 }
7316 
7317 static int tg3_poll(struct napi_struct *napi, int budget)
7318 {
7319 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7320 	struct tg3 *tp = tnapi->tp;
7321 	int work_done = 0;
7322 	struct tg3_hw_status *sblk = tnapi->hw_status;
7323 
7324 	while (1) {
7325 		if (sblk->status & SD_STATUS_ERROR)
7326 			tg3_process_error(tp);
7327 
7328 		tg3_poll_link(tp);
7329 
7330 		work_done = tg3_poll_work(tnapi, work_done, budget);
7331 
7332 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7333 			goto tx_recovery;
7334 
7335 		if (unlikely(work_done >= budget))
7336 			break;
7337 
7338 		if (tg3_flag(tp, TAGGED_STATUS)) {
7339 			/* tp->last_tag is used in tg3_int_reenable() below
7340 			 * to tell the hw how much work has been processed,
7341 			 * so we must read it before checking for more work.
7342 			 */
7343 			tnapi->last_tag = sblk->status_tag;
7344 			tnapi->last_irq_tag = tnapi->last_tag;
7345 			rmb();
7346 		} else
7347 			sblk->status &= ~SD_STATUS_UPDATED;
7348 
7349 		if (likely(!tg3_has_work(tnapi))) {
7350 			napi_complete_done(napi, work_done);
7351 			tg3_int_reenable(tnapi);
7352 			break;
7353 		}
7354 	}
7355 
7356 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7357 	return work_done;
7358 
7359 tx_recovery:
7360 	/* work_done is guaranteed to be less than budget. */
7361 	napi_complete(napi);
7362 	tg3_reset_task_schedule(tp);
7363 	return work_done;
7364 }
7365 
7366 static void tg3_napi_disable(struct tg3 *tp)
7367 {
7368 	int i;
7369 
7370 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7371 		napi_disable(&tp->napi[i].napi);
7372 }
7373 
7374 static void tg3_napi_enable(struct tg3 *tp)
7375 {
7376 	int i;
7377 
7378 	for (i = 0; i < tp->irq_cnt; i++)
7379 		napi_enable(&tp->napi[i].napi);
7380 }
7381 
7382 static void tg3_napi_init(struct tg3 *tp)
7383 {
7384 	int i;
7385 
7386 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7387 	for (i = 1; i < tp->irq_cnt; i++)
7388 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7389 }
7390 
7391 static void tg3_napi_fini(struct tg3 *tp)
7392 {
7393 	int i;
7394 
7395 	for (i = 0; i < tp->irq_cnt; i++)
7396 		netif_napi_del(&tp->napi[i].napi);
7397 }
7398 
7399 static inline void tg3_netif_stop(struct tg3 *tp)
7400 {
7401 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7402 	tg3_napi_disable(tp);
7403 	netif_carrier_off(tp->dev);
7404 	netif_tx_disable(tp->dev);
7405 }
7406 
7407 /* tp->lock must be held */
7408 static inline void tg3_netif_start(struct tg3 *tp)
7409 {
7410 	tg3_ptp_resume(tp);
7411 
7412 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7413 	 * appropriate so long as all callers are assured to
7414 	 * have free tx slots (such as after tg3_init_hw)
7415 	 */
7416 	netif_tx_wake_all_queues(tp->dev);
7417 
7418 	if (tp->link_up)
7419 		netif_carrier_on(tp->dev);
7420 
7421 	tg3_napi_enable(tp);
7422 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7423 	tg3_enable_ints(tp);
7424 }
7425 
7426 static void tg3_irq_quiesce(struct tg3 *tp)
7427 	__releases(tp->lock)
7428 	__acquires(tp->lock)
7429 {
7430 	int i;
7431 
7432 	BUG_ON(tp->irq_sync);
7433 
7434 	tp->irq_sync = 1;
7435 	smp_mb();
7436 
7437 	spin_unlock_bh(&tp->lock);
7438 
7439 	for (i = 0; i < tp->irq_cnt; i++)
7440 		synchronize_irq(tp->napi[i].irq_vec);
7441 
7442 	spin_lock_bh(&tp->lock);
7443 }
7444 
7445 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7446  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7447  * with as well.  Most of the time, this is not necessary except when
7448  * shutting down the device.
7449  */
7450 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7451 {
7452 	spin_lock_bh(&tp->lock);
7453 	if (irq_sync)
7454 		tg3_irq_quiesce(tp);
7455 }
7456 
7457 static inline void tg3_full_unlock(struct tg3 *tp)
7458 {
7459 	spin_unlock_bh(&tp->lock);
7460 }
7461 
7462 /* One-shot MSI handler - Chip automatically disables interrupt
7463  * after sending MSI so driver doesn't have to do it.
7464  */
7465 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7466 {
7467 	struct tg3_napi *tnapi = dev_id;
7468 	struct tg3 *tp = tnapi->tp;
7469 
7470 	prefetch(tnapi->hw_status);
7471 	if (tnapi->rx_rcb)
7472 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7473 
7474 	if (likely(!tg3_irq_sync(tp)))
7475 		napi_schedule(&tnapi->napi);
7476 
7477 	return IRQ_HANDLED;
7478 }
7479 
7480 /* MSI ISR - No need to check for interrupt sharing and no need to
7481  * flush status block and interrupt mailbox. PCI ordering rules
7482  * guarantee that MSI will arrive after the status block.
7483  */
7484 static irqreturn_t tg3_msi(int irq, void *dev_id)
7485 {
7486 	struct tg3_napi *tnapi = dev_id;
7487 	struct tg3 *tp = tnapi->tp;
7488 
7489 	prefetch(tnapi->hw_status);
7490 	if (tnapi->rx_rcb)
7491 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7492 	/*
7493 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7494 	 * chip-internal interrupt pending events.
7495 	 * Writing non-zero to intr-mbox-0 additional tells the
7496 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7497 	 * event coalescing.
7498 	 */
7499 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7500 	if (likely(!tg3_irq_sync(tp)))
7501 		napi_schedule(&tnapi->napi);
7502 
7503 	return IRQ_RETVAL(1);
7504 }
7505 
7506 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7507 {
7508 	struct tg3_napi *tnapi = dev_id;
7509 	struct tg3 *tp = tnapi->tp;
7510 	struct tg3_hw_status *sblk = tnapi->hw_status;
7511 	unsigned int handled = 1;
7512 
7513 	/* In INTx mode, it is possible for the interrupt to arrive at
7514 	 * the CPU before the status block posted prior to the interrupt.
7515 	 * Reading the PCI State register will confirm whether the
7516 	 * interrupt is ours and will flush the status block.
7517 	 */
7518 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7519 		if (tg3_flag(tp, CHIP_RESETTING) ||
7520 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7521 			handled = 0;
7522 			goto out;
7523 		}
7524 	}
7525 
7526 	/*
7527 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7528 	 * chip-internal interrupt pending events.
7529 	 * Writing non-zero to intr-mbox-0 additional tells the
7530 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7531 	 * event coalescing.
7532 	 *
7533 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7534 	 * spurious interrupts.  The flush impacts performance but
7535 	 * excessive spurious interrupts can be worse in some cases.
7536 	 */
7537 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7538 	if (tg3_irq_sync(tp))
7539 		goto out;
7540 	sblk->status &= ~SD_STATUS_UPDATED;
7541 	if (likely(tg3_has_work(tnapi))) {
7542 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7543 		napi_schedule(&tnapi->napi);
7544 	} else {
7545 		/* No work, shared interrupt perhaps?  re-enable
7546 		 * interrupts, and flush that PCI write
7547 		 */
7548 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7549 			       0x00000000);
7550 	}
7551 out:
7552 	return IRQ_RETVAL(handled);
7553 }
7554 
7555 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7556 {
7557 	struct tg3_napi *tnapi = dev_id;
7558 	struct tg3 *tp = tnapi->tp;
7559 	struct tg3_hw_status *sblk = tnapi->hw_status;
7560 	unsigned int handled = 1;
7561 
7562 	/* In INTx mode, it is possible for the interrupt to arrive at
7563 	 * the CPU before the status block posted prior to the interrupt.
7564 	 * Reading the PCI State register will confirm whether the
7565 	 * interrupt is ours and will flush the status block.
7566 	 */
7567 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7568 		if (tg3_flag(tp, CHIP_RESETTING) ||
7569 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7570 			handled = 0;
7571 			goto out;
7572 		}
7573 	}
7574 
7575 	/*
7576 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7577 	 * chip-internal interrupt pending events.
7578 	 * writing non-zero to intr-mbox-0 additional tells the
7579 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7580 	 * event coalescing.
7581 	 *
7582 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7583 	 * spurious interrupts.  The flush impacts performance but
7584 	 * excessive spurious interrupts can be worse in some cases.
7585 	 */
7586 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7587 
7588 	/*
7589 	 * In a shared interrupt configuration, sometimes other devices'
7590 	 * interrupts will scream.  We record the current status tag here
7591 	 * so that the above check can report that the screaming interrupts
7592 	 * are unhandled.  Eventually they will be silenced.
7593 	 */
7594 	tnapi->last_irq_tag = sblk->status_tag;
7595 
7596 	if (tg3_irq_sync(tp))
7597 		goto out;
7598 
7599 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7600 
7601 	napi_schedule(&tnapi->napi);
7602 
7603 out:
7604 	return IRQ_RETVAL(handled);
7605 }
7606 
7607 /* ISR for interrupt test */
7608 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7609 {
7610 	struct tg3_napi *tnapi = dev_id;
7611 	struct tg3 *tp = tnapi->tp;
7612 	struct tg3_hw_status *sblk = tnapi->hw_status;
7613 
7614 	if ((sblk->status & SD_STATUS_UPDATED) ||
7615 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7616 		tg3_disable_ints(tp);
7617 		return IRQ_RETVAL(1);
7618 	}
7619 	return IRQ_RETVAL(0);
7620 }
7621 
7622 #ifdef CONFIG_NET_POLL_CONTROLLER
7623 static void tg3_poll_controller(struct net_device *dev)
7624 {
7625 	int i;
7626 	struct tg3 *tp = netdev_priv(dev);
7627 
7628 	if (tg3_irq_sync(tp))
7629 		return;
7630 
7631 	for (i = 0; i < tp->irq_cnt; i++)
7632 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7633 }
7634 #endif
7635 
7636 static void tg3_tx_timeout(struct net_device *dev)
7637 {
7638 	struct tg3 *tp = netdev_priv(dev);
7639 
7640 	if (netif_msg_tx_err(tp)) {
7641 		netdev_err(dev, "transmit timed out, resetting\n");
7642 		tg3_dump_state(tp);
7643 	}
7644 
7645 	tg3_reset_task_schedule(tp);
7646 }
7647 
7648 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7649 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7650 {
7651 	u32 base = (u32) mapping & 0xffffffff;
7652 
7653 	return base + len + 8 < base;
7654 }
7655 
7656 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7657  * of any 4GB boundaries: 4G, 8G, etc
7658  */
7659 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660 					   u32 len, u32 mss)
7661 {
7662 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7663 		u32 base = (u32) mapping & 0xffffffff;
7664 
7665 		return ((base + len + (mss & 0x3fff)) < base);
7666 	}
7667 	return 0;
7668 }
7669 
7670 /* Test for DMA addresses > 40-bit */
7671 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7672 					  int len)
7673 {
7674 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7675 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7676 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7677 	return 0;
7678 #else
7679 	return 0;
7680 #endif
7681 }
7682 
7683 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7684 				 dma_addr_t mapping, u32 len, u32 flags,
7685 				 u32 mss, u32 vlan)
7686 {
7687 	txbd->addr_hi = ((u64) mapping >> 32);
7688 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7689 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7690 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7691 }
7692 
7693 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7694 			    dma_addr_t map, u32 len, u32 flags,
7695 			    u32 mss, u32 vlan)
7696 {
7697 	struct tg3 *tp = tnapi->tp;
7698 	bool hwbug = false;
7699 
7700 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7701 		hwbug = true;
7702 
7703 	if (tg3_4g_overflow_test(map, len))
7704 		hwbug = true;
7705 
7706 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7707 		hwbug = true;
7708 
7709 	if (tg3_40bit_overflow_test(tp, map, len))
7710 		hwbug = true;
7711 
7712 	if (tp->dma_limit) {
7713 		u32 prvidx = *entry;
7714 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7715 		while (len > tp->dma_limit && *budget) {
7716 			u32 frag_len = tp->dma_limit;
7717 			len -= tp->dma_limit;
7718 
7719 			/* Avoid the 8byte DMA problem */
7720 			if (len <= 8) {
7721 				len += tp->dma_limit / 2;
7722 				frag_len = tp->dma_limit / 2;
7723 			}
7724 
7725 			tnapi->tx_buffers[*entry].fragmented = true;
7726 
7727 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7728 				      frag_len, tmp_flag, mss, vlan);
7729 			*budget -= 1;
7730 			prvidx = *entry;
7731 			*entry = NEXT_TX(*entry);
7732 
7733 			map += frag_len;
7734 		}
7735 
7736 		if (len) {
7737 			if (*budget) {
7738 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7739 					      len, flags, mss, vlan);
7740 				*budget -= 1;
7741 				*entry = NEXT_TX(*entry);
7742 			} else {
7743 				hwbug = true;
7744 				tnapi->tx_buffers[prvidx].fragmented = false;
7745 			}
7746 		}
7747 	} else {
7748 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7749 			      len, flags, mss, vlan);
7750 		*entry = NEXT_TX(*entry);
7751 	}
7752 
7753 	return hwbug;
7754 }
7755 
7756 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7757 {
7758 	int i;
7759 	struct sk_buff *skb;
7760 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7761 
7762 	skb = txb->skb;
7763 	txb->skb = NULL;
7764 
7765 	pci_unmap_single(tnapi->tp->pdev,
7766 			 dma_unmap_addr(txb, mapping),
7767 			 skb_headlen(skb),
7768 			 PCI_DMA_TODEVICE);
7769 
7770 	while (txb->fragmented) {
7771 		txb->fragmented = false;
7772 		entry = NEXT_TX(entry);
7773 		txb = &tnapi->tx_buffers[entry];
7774 	}
7775 
7776 	for (i = 0; i <= last; i++) {
7777 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7778 
7779 		entry = NEXT_TX(entry);
7780 		txb = &tnapi->tx_buffers[entry];
7781 
7782 		pci_unmap_page(tnapi->tp->pdev,
7783 			       dma_unmap_addr(txb, mapping),
7784 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7785 
7786 		while (txb->fragmented) {
7787 			txb->fragmented = false;
7788 			entry = NEXT_TX(entry);
7789 			txb = &tnapi->tx_buffers[entry];
7790 		}
7791 	}
7792 }
7793 
7794 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7795 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7796 				       struct sk_buff **pskb,
7797 				       u32 *entry, u32 *budget,
7798 				       u32 base_flags, u32 mss, u32 vlan)
7799 {
7800 	struct tg3 *tp = tnapi->tp;
7801 	struct sk_buff *new_skb, *skb = *pskb;
7802 	dma_addr_t new_addr = 0;
7803 	int ret = 0;
7804 
7805 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7806 		new_skb = skb_copy(skb, GFP_ATOMIC);
7807 	else {
7808 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7809 
7810 		new_skb = skb_copy_expand(skb,
7811 					  skb_headroom(skb) + more_headroom,
7812 					  skb_tailroom(skb), GFP_ATOMIC);
7813 	}
7814 
7815 	if (!new_skb) {
7816 		ret = -1;
7817 	} else {
7818 		/* New SKB is guaranteed to be linear. */
7819 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7820 					  PCI_DMA_TODEVICE);
7821 		/* Make sure the mapping succeeded */
7822 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7823 			dev_kfree_skb_any(new_skb);
7824 			ret = -1;
7825 		} else {
7826 			u32 save_entry = *entry;
7827 
7828 			base_flags |= TXD_FLAG_END;
7829 
7830 			tnapi->tx_buffers[*entry].skb = new_skb;
7831 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7832 					   mapping, new_addr);
7833 
7834 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7835 					    new_skb->len, base_flags,
7836 					    mss, vlan)) {
7837 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7838 				dev_kfree_skb_any(new_skb);
7839 				ret = -1;
7840 			}
7841 		}
7842 	}
7843 
7844 	dev_consume_skb_any(skb);
7845 	*pskb = new_skb;
7846 	return ret;
7847 }
7848 
7849 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7850 {
7851 	/* Check if we will never have enough descriptors,
7852 	 * as gso_segs can be more than current ring size
7853 	 */
7854 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7855 }
7856 
7857 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7858 
7859 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7860  * indicated in tg3_tx_frag_set()
7861  */
7862 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7863 		       struct netdev_queue *txq, struct sk_buff *skb)
7864 {
7865 	struct sk_buff *segs, *nskb;
7866 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7867 
7868 	/* Estimate the number of fragments in the worst case */
7869 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7870 		netif_tx_stop_queue(txq);
7871 
7872 		/* netif_tx_stop_queue() must be done before checking
7873 		 * checking tx index in tg3_tx_avail() below, because in
7874 		 * tg3_tx(), we update tx index before checking for
7875 		 * netif_tx_queue_stopped().
7876 		 */
7877 		smp_mb();
7878 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7879 			return NETDEV_TX_BUSY;
7880 
7881 		netif_tx_wake_queue(txq);
7882 	}
7883 
7884 	segs = skb_gso_segment(skb, tp->dev->features &
7885 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7886 	if (IS_ERR(segs) || !segs)
7887 		goto tg3_tso_bug_end;
7888 
7889 	do {
7890 		nskb = segs;
7891 		segs = segs->next;
7892 		nskb->next = NULL;
7893 		tg3_start_xmit(nskb, tp->dev);
7894 	} while (segs);
7895 
7896 tg3_tso_bug_end:
7897 	dev_consume_skb_any(skb);
7898 
7899 	return NETDEV_TX_OK;
7900 }
7901 
7902 /* hard_start_xmit for all devices */
7903 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7904 {
7905 	struct tg3 *tp = netdev_priv(dev);
7906 	u32 len, entry, base_flags, mss, vlan = 0;
7907 	u32 budget;
7908 	int i = -1, would_hit_hwbug;
7909 	dma_addr_t mapping;
7910 	struct tg3_napi *tnapi;
7911 	struct netdev_queue *txq;
7912 	unsigned int last;
7913 	struct iphdr *iph = NULL;
7914 	struct tcphdr *tcph = NULL;
7915 	__sum16 tcp_csum = 0, ip_csum = 0;
7916 	__be16 ip_tot_len = 0;
7917 
7918 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7919 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7920 	if (tg3_flag(tp, ENABLE_TSS))
7921 		tnapi++;
7922 
7923 	budget = tg3_tx_avail(tnapi);
7924 
7925 	/* We are running in BH disabled context with netif_tx_lock
7926 	 * and TX reclaim runs via tp->napi.poll inside of a software
7927 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7928 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7929 	 */
7930 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7931 		if (!netif_tx_queue_stopped(txq)) {
7932 			netif_tx_stop_queue(txq);
7933 
7934 			/* This is a hard error, log it. */
7935 			netdev_err(dev,
7936 				   "BUG! Tx Ring full when queue awake!\n");
7937 		}
7938 		return NETDEV_TX_BUSY;
7939 	}
7940 
7941 	entry = tnapi->tx_prod;
7942 	base_flags = 0;
7943 
7944 	mss = skb_shinfo(skb)->gso_size;
7945 	if (mss) {
7946 		u32 tcp_opt_len, hdr_len;
7947 
7948 		if (skb_cow_head(skb, 0))
7949 			goto drop;
7950 
7951 		iph = ip_hdr(skb);
7952 		tcp_opt_len = tcp_optlen(skb);
7953 
7954 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7955 
7956 		/* HW/FW can not correctly segment packets that have been
7957 		 * vlan encapsulated.
7958 		 */
7959 		if (skb->protocol == htons(ETH_P_8021Q) ||
7960 		    skb->protocol == htons(ETH_P_8021AD)) {
7961 			if (tg3_tso_bug_gso_check(tnapi, skb))
7962 				return tg3_tso_bug(tp, tnapi, txq, skb);
7963 			goto drop;
7964 		}
7965 
7966 		if (!skb_is_gso_v6(skb)) {
7967 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7968 			    tg3_flag(tp, TSO_BUG)) {
7969 				if (tg3_tso_bug_gso_check(tnapi, skb))
7970 					return tg3_tso_bug(tp, tnapi, txq, skb);
7971 				goto drop;
7972 			}
7973 			ip_csum = iph->check;
7974 			ip_tot_len = iph->tot_len;
7975 			iph->check = 0;
7976 			iph->tot_len = htons(mss + hdr_len);
7977 		}
7978 
7979 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7980 			       TXD_FLAG_CPU_POST_DMA);
7981 
7982 		tcph = tcp_hdr(skb);
7983 		tcp_csum = tcph->check;
7984 
7985 		if (tg3_flag(tp, HW_TSO_1) ||
7986 		    tg3_flag(tp, HW_TSO_2) ||
7987 		    tg3_flag(tp, HW_TSO_3)) {
7988 			tcph->check = 0;
7989 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7990 		} else {
7991 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7992 							 0, IPPROTO_TCP, 0);
7993 		}
7994 
7995 		if (tg3_flag(tp, HW_TSO_3)) {
7996 			mss |= (hdr_len & 0xc) << 12;
7997 			if (hdr_len & 0x10)
7998 				base_flags |= 0x00000010;
7999 			base_flags |= (hdr_len & 0x3e0) << 5;
8000 		} else if (tg3_flag(tp, HW_TSO_2))
8001 			mss |= hdr_len << 9;
8002 		else if (tg3_flag(tp, HW_TSO_1) ||
8003 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8004 			if (tcp_opt_len || iph->ihl > 5) {
8005 				int tsflags;
8006 
8007 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8008 				mss |= (tsflags << 11);
8009 			}
8010 		} else {
8011 			if (tcp_opt_len || iph->ihl > 5) {
8012 				int tsflags;
8013 
8014 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8015 				base_flags |= tsflags << 12;
8016 			}
8017 		}
8018 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8019 		/* HW/FW can not correctly checksum packets that have been
8020 		 * vlan encapsulated.
8021 		 */
8022 		if (skb->protocol == htons(ETH_P_8021Q) ||
8023 		    skb->protocol == htons(ETH_P_8021AD)) {
8024 			if (skb_checksum_help(skb))
8025 				goto drop;
8026 		} else  {
8027 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8028 		}
8029 	}
8030 
8031 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8032 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8033 		base_flags |= TXD_FLAG_JMB_PKT;
8034 
8035 	if (skb_vlan_tag_present(skb)) {
8036 		base_flags |= TXD_FLAG_VLAN;
8037 		vlan = skb_vlan_tag_get(skb);
8038 	}
8039 
8040 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8041 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8042 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8043 		base_flags |= TXD_FLAG_HWTSTAMP;
8044 	}
8045 
8046 	len = skb_headlen(skb);
8047 
8048 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8049 	if (pci_dma_mapping_error(tp->pdev, mapping))
8050 		goto drop;
8051 
8052 
8053 	tnapi->tx_buffers[entry].skb = skb;
8054 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8055 
8056 	would_hit_hwbug = 0;
8057 
8058 	if (tg3_flag(tp, 5701_DMA_BUG))
8059 		would_hit_hwbug = 1;
8060 
8061 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8062 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8063 			    mss, vlan)) {
8064 		would_hit_hwbug = 1;
8065 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8066 		u32 tmp_mss = mss;
8067 
8068 		if (!tg3_flag(tp, HW_TSO_1) &&
8069 		    !tg3_flag(tp, HW_TSO_2) &&
8070 		    !tg3_flag(tp, HW_TSO_3))
8071 			tmp_mss = 0;
8072 
8073 		/* Now loop through additional data
8074 		 * fragments, and queue them.
8075 		 */
8076 		last = skb_shinfo(skb)->nr_frags - 1;
8077 		for (i = 0; i <= last; i++) {
8078 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8079 
8080 			len = skb_frag_size(frag);
8081 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8082 						   len, DMA_TO_DEVICE);
8083 
8084 			tnapi->tx_buffers[entry].skb = NULL;
8085 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8086 					   mapping);
8087 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8088 				goto dma_error;
8089 
8090 			if (!budget ||
8091 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8092 					    len, base_flags |
8093 					    ((i == last) ? TXD_FLAG_END : 0),
8094 					    tmp_mss, vlan)) {
8095 				would_hit_hwbug = 1;
8096 				break;
8097 			}
8098 		}
8099 	}
8100 
8101 	if (would_hit_hwbug) {
8102 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8103 
8104 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8105 			/* If it's a TSO packet, do GSO instead of
8106 			 * allocating and copying to a large linear SKB
8107 			 */
8108 			if (ip_tot_len) {
8109 				iph->check = ip_csum;
8110 				iph->tot_len = ip_tot_len;
8111 			}
8112 			tcph->check = tcp_csum;
8113 			return tg3_tso_bug(tp, tnapi, txq, skb);
8114 		}
8115 
8116 		/* If the workaround fails due to memory/mapping
8117 		 * failure, silently drop this packet.
8118 		 */
8119 		entry = tnapi->tx_prod;
8120 		budget = tg3_tx_avail(tnapi);
8121 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8122 						base_flags, mss, vlan))
8123 			goto drop_nofree;
8124 	}
8125 
8126 	skb_tx_timestamp(skb);
8127 	netdev_tx_sent_queue(txq, skb->len);
8128 
8129 	/* Sync BD data before updating mailbox */
8130 	wmb();
8131 
8132 	tnapi->tx_prod = entry;
8133 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8134 		netif_tx_stop_queue(txq);
8135 
8136 		/* netif_tx_stop_queue() must be done before checking
8137 		 * checking tx index in tg3_tx_avail() below, because in
8138 		 * tg3_tx(), we update tx index before checking for
8139 		 * netif_tx_queue_stopped().
8140 		 */
8141 		smp_mb();
8142 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8143 			netif_tx_wake_queue(txq);
8144 	}
8145 
8146 	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8147 		/* Packets are ready, update Tx producer idx on card. */
8148 		tw32_tx_mbox(tnapi->prodmbox, entry);
8149 		mmiowb();
8150 	}
8151 
8152 	return NETDEV_TX_OK;
8153 
8154 dma_error:
8155 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8156 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8157 drop:
8158 	dev_kfree_skb_any(skb);
8159 drop_nofree:
8160 	tp->tx_dropped++;
8161 	return NETDEV_TX_OK;
8162 }
8163 
8164 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8165 {
8166 	if (enable) {
8167 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8168 				  MAC_MODE_PORT_MODE_MASK);
8169 
8170 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8171 
8172 		if (!tg3_flag(tp, 5705_PLUS))
8173 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8174 
8175 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8176 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8177 		else
8178 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8179 	} else {
8180 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8181 
8182 		if (tg3_flag(tp, 5705_PLUS) ||
8183 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8184 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8185 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8186 	}
8187 
8188 	tw32(MAC_MODE, tp->mac_mode);
8189 	udelay(40);
8190 }
8191 
8192 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8193 {
8194 	u32 val, bmcr, mac_mode, ptest = 0;
8195 
8196 	tg3_phy_toggle_apd(tp, false);
8197 	tg3_phy_toggle_automdix(tp, false);
8198 
8199 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8200 		return -EIO;
8201 
8202 	bmcr = BMCR_FULLDPLX;
8203 	switch (speed) {
8204 	case SPEED_10:
8205 		break;
8206 	case SPEED_100:
8207 		bmcr |= BMCR_SPEED100;
8208 		break;
8209 	case SPEED_1000:
8210 	default:
8211 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8212 			speed = SPEED_100;
8213 			bmcr |= BMCR_SPEED100;
8214 		} else {
8215 			speed = SPEED_1000;
8216 			bmcr |= BMCR_SPEED1000;
8217 		}
8218 	}
8219 
8220 	if (extlpbk) {
8221 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8222 			tg3_readphy(tp, MII_CTRL1000, &val);
8223 			val |= CTL1000_AS_MASTER |
8224 			       CTL1000_ENABLE_MASTER;
8225 			tg3_writephy(tp, MII_CTRL1000, val);
8226 		} else {
8227 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8228 				MII_TG3_FET_PTEST_TRIM_2;
8229 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8230 		}
8231 	} else
8232 		bmcr |= BMCR_LOOPBACK;
8233 
8234 	tg3_writephy(tp, MII_BMCR, bmcr);
8235 
8236 	/* The write needs to be flushed for the FETs */
8237 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8238 		tg3_readphy(tp, MII_BMCR, &bmcr);
8239 
8240 	udelay(40);
8241 
8242 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8243 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8244 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8245 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8246 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8247 
8248 		/* The write needs to be flushed for the AC131 */
8249 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8250 	}
8251 
8252 	/* Reset to prevent losing 1st rx packet intermittently */
8253 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8254 	    tg3_flag(tp, 5780_CLASS)) {
8255 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8256 		udelay(10);
8257 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8258 	}
8259 
8260 	mac_mode = tp->mac_mode &
8261 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8262 	if (speed == SPEED_1000)
8263 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8264 	else
8265 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8266 
8267 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8268 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8269 
8270 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8271 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8272 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8273 			mac_mode |= MAC_MODE_LINK_POLARITY;
8274 
8275 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8276 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8277 	}
8278 
8279 	tw32(MAC_MODE, mac_mode);
8280 	udelay(40);
8281 
8282 	return 0;
8283 }
8284 
8285 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8286 {
8287 	struct tg3 *tp = netdev_priv(dev);
8288 
8289 	if (features & NETIF_F_LOOPBACK) {
8290 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8291 			return;
8292 
8293 		spin_lock_bh(&tp->lock);
8294 		tg3_mac_loopback(tp, true);
8295 		netif_carrier_on(tp->dev);
8296 		spin_unlock_bh(&tp->lock);
8297 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8298 	} else {
8299 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8300 			return;
8301 
8302 		spin_lock_bh(&tp->lock);
8303 		tg3_mac_loopback(tp, false);
8304 		/* Force link status check */
8305 		tg3_setup_phy(tp, true);
8306 		spin_unlock_bh(&tp->lock);
8307 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8308 	}
8309 }
8310 
8311 static netdev_features_t tg3_fix_features(struct net_device *dev,
8312 	netdev_features_t features)
8313 {
8314 	struct tg3 *tp = netdev_priv(dev);
8315 
8316 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8317 		features &= ~NETIF_F_ALL_TSO;
8318 
8319 	return features;
8320 }
8321 
8322 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8323 {
8324 	netdev_features_t changed = dev->features ^ features;
8325 
8326 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8327 		tg3_set_loopback(dev, features);
8328 
8329 	return 0;
8330 }
8331 
8332 static void tg3_rx_prodring_free(struct tg3 *tp,
8333 				 struct tg3_rx_prodring_set *tpr)
8334 {
8335 	int i;
8336 
8337 	if (tpr != &tp->napi[0].prodring) {
8338 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8339 		     i = (i + 1) & tp->rx_std_ring_mask)
8340 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8341 					tp->rx_pkt_map_sz);
8342 
8343 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8344 			for (i = tpr->rx_jmb_cons_idx;
8345 			     i != tpr->rx_jmb_prod_idx;
8346 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8347 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8348 						TG3_RX_JMB_MAP_SZ);
8349 			}
8350 		}
8351 
8352 		return;
8353 	}
8354 
8355 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8356 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8357 				tp->rx_pkt_map_sz);
8358 
8359 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8360 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8361 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8362 					TG3_RX_JMB_MAP_SZ);
8363 	}
8364 }
8365 
8366 /* Initialize rx rings for packet processing.
8367  *
8368  * The chip has been shut down and the driver detached from
8369  * the networking, so no interrupts or new tx packets will
8370  * end up in the driver.  tp->{tx,}lock are held and thus
8371  * we may not sleep.
8372  */
8373 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8374 				 struct tg3_rx_prodring_set *tpr)
8375 {
8376 	u32 i, rx_pkt_dma_sz;
8377 
8378 	tpr->rx_std_cons_idx = 0;
8379 	tpr->rx_std_prod_idx = 0;
8380 	tpr->rx_jmb_cons_idx = 0;
8381 	tpr->rx_jmb_prod_idx = 0;
8382 
8383 	if (tpr != &tp->napi[0].prodring) {
8384 		memset(&tpr->rx_std_buffers[0], 0,
8385 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8386 		if (tpr->rx_jmb_buffers)
8387 			memset(&tpr->rx_jmb_buffers[0], 0,
8388 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8389 		goto done;
8390 	}
8391 
8392 	/* Zero out all descriptors. */
8393 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8394 
8395 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8396 	if (tg3_flag(tp, 5780_CLASS) &&
8397 	    tp->dev->mtu > ETH_DATA_LEN)
8398 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8399 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8400 
8401 	/* Initialize invariants of the rings, we only set this
8402 	 * stuff once.  This works because the card does not
8403 	 * write into the rx buffer posting rings.
8404 	 */
8405 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8406 		struct tg3_rx_buffer_desc *rxd;
8407 
8408 		rxd = &tpr->rx_std[i];
8409 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8410 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8411 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8412 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8413 	}
8414 
8415 	/* Now allocate fresh SKBs for each rx ring. */
8416 	for (i = 0; i < tp->rx_pending; i++) {
8417 		unsigned int frag_size;
8418 
8419 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8420 				      &frag_size) < 0) {
8421 			netdev_warn(tp->dev,
8422 				    "Using a smaller RX standard ring. Only "
8423 				    "%d out of %d buffers were allocated "
8424 				    "successfully\n", i, tp->rx_pending);
8425 			if (i == 0)
8426 				goto initfail;
8427 			tp->rx_pending = i;
8428 			break;
8429 		}
8430 	}
8431 
8432 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8433 		goto done;
8434 
8435 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8436 
8437 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8438 		goto done;
8439 
8440 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8441 		struct tg3_rx_buffer_desc *rxd;
8442 
8443 		rxd = &tpr->rx_jmb[i].std;
8444 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8445 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8446 				  RXD_FLAG_JUMBO;
8447 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8448 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8449 	}
8450 
8451 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8452 		unsigned int frag_size;
8453 
8454 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8455 				      &frag_size) < 0) {
8456 			netdev_warn(tp->dev,
8457 				    "Using a smaller RX jumbo ring. Only %d "
8458 				    "out of %d buffers were allocated "
8459 				    "successfully\n", i, tp->rx_jumbo_pending);
8460 			if (i == 0)
8461 				goto initfail;
8462 			tp->rx_jumbo_pending = i;
8463 			break;
8464 		}
8465 	}
8466 
8467 done:
8468 	return 0;
8469 
8470 initfail:
8471 	tg3_rx_prodring_free(tp, tpr);
8472 	return -ENOMEM;
8473 }
8474 
8475 static void tg3_rx_prodring_fini(struct tg3 *tp,
8476 				 struct tg3_rx_prodring_set *tpr)
8477 {
8478 	kfree(tpr->rx_std_buffers);
8479 	tpr->rx_std_buffers = NULL;
8480 	kfree(tpr->rx_jmb_buffers);
8481 	tpr->rx_jmb_buffers = NULL;
8482 	if (tpr->rx_std) {
8483 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8484 				  tpr->rx_std, tpr->rx_std_mapping);
8485 		tpr->rx_std = NULL;
8486 	}
8487 	if (tpr->rx_jmb) {
8488 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8489 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8490 		tpr->rx_jmb = NULL;
8491 	}
8492 }
8493 
8494 static int tg3_rx_prodring_init(struct tg3 *tp,
8495 				struct tg3_rx_prodring_set *tpr)
8496 {
8497 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8498 				      GFP_KERNEL);
8499 	if (!tpr->rx_std_buffers)
8500 		return -ENOMEM;
8501 
8502 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8503 					 TG3_RX_STD_RING_BYTES(tp),
8504 					 &tpr->rx_std_mapping,
8505 					 GFP_KERNEL);
8506 	if (!tpr->rx_std)
8507 		goto err_out;
8508 
8509 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8510 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8511 					      GFP_KERNEL);
8512 		if (!tpr->rx_jmb_buffers)
8513 			goto err_out;
8514 
8515 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8516 						 TG3_RX_JMB_RING_BYTES(tp),
8517 						 &tpr->rx_jmb_mapping,
8518 						 GFP_KERNEL);
8519 		if (!tpr->rx_jmb)
8520 			goto err_out;
8521 	}
8522 
8523 	return 0;
8524 
8525 err_out:
8526 	tg3_rx_prodring_fini(tp, tpr);
8527 	return -ENOMEM;
8528 }
8529 
8530 /* Free up pending packets in all rx/tx rings.
8531  *
8532  * The chip has been shut down and the driver detached from
8533  * the networking, so no interrupts or new tx packets will
8534  * end up in the driver.  tp->{tx,}lock is not held and we are not
8535  * in an interrupt context and thus may sleep.
8536  */
8537 static void tg3_free_rings(struct tg3 *tp)
8538 {
8539 	int i, j;
8540 
8541 	for (j = 0; j < tp->irq_cnt; j++) {
8542 		struct tg3_napi *tnapi = &tp->napi[j];
8543 
8544 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8545 
8546 		if (!tnapi->tx_buffers)
8547 			continue;
8548 
8549 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8550 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8551 
8552 			if (!skb)
8553 				continue;
8554 
8555 			tg3_tx_skb_unmap(tnapi, i,
8556 					 skb_shinfo(skb)->nr_frags - 1);
8557 
8558 			dev_consume_skb_any(skb);
8559 		}
8560 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8561 	}
8562 }
8563 
8564 /* Initialize tx/rx rings for packet processing.
8565  *
8566  * The chip has been shut down and the driver detached from
8567  * the networking, so no interrupts or new tx packets will
8568  * end up in the driver.  tp->{tx,}lock are held and thus
8569  * we may not sleep.
8570  */
8571 static int tg3_init_rings(struct tg3 *tp)
8572 {
8573 	int i;
8574 
8575 	/* Free up all the SKBs. */
8576 	tg3_free_rings(tp);
8577 
8578 	for (i = 0; i < tp->irq_cnt; i++) {
8579 		struct tg3_napi *tnapi = &tp->napi[i];
8580 
8581 		tnapi->last_tag = 0;
8582 		tnapi->last_irq_tag = 0;
8583 		tnapi->hw_status->status = 0;
8584 		tnapi->hw_status->status_tag = 0;
8585 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8586 
8587 		tnapi->tx_prod = 0;
8588 		tnapi->tx_cons = 0;
8589 		if (tnapi->tx_ring)
8590 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8591 
8592 		tnapi->rx_rcb_ptr = 0;
8593 		if (tnapi->rx_rcb)
8594 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8595 
8596 		if (tnapi->prodring.rx_std &&
8597 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8598 			tg3_free_rings(tp);
8599 			return -ENOMEM;
8600 		}
8601 	}
8602 
8603 	return 0;
8604 }
8605 
8606 static void tg3_mem_tx_release(struct tg3 *tp)
8607 {
8608 	int i;
8609 
8610 	for (i = 0; i < tp->irq_max; i++) {
8611 		struct tg3_napi *tnapi = &tp->napi[i];
8612 
8613 		if (tnapi->tx_ring) {
8614 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8615 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8616 			tnapi->tx_ring = NULL;
8617 		}
8618 
8619 		kfree(tnapi->tx_buffers);
8620 		tnapi->tx_buffers = NULL;
8621 	}
8622 }
8623 
8624 static int tg3_mem_tx_acquire(struct tg3 *tp)
8625 {
8626 	int i;
8627 	struct tg3_napi *tnapi = &tp->napi[0];
8628 
8629 	/* If multivector TSS is enabled, vector 0 does not handle
8630 	 * tx interrupts.  Don't allocate any resources for it.
8631 	 */
8632 	if (tg3_flag(tp, ENABLE_TSS))
8633 		tnapi++;
8634 
8635 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8636 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8637 					    sizeof(struct tg3_tx_ring_info),
8638 					    GFP_KERNEL);
8639 		if (!tnapi->tx_buffers)
8640 			goto err_out;
8641 
8642 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8643 						    TG3_TX_RING_BYTES,
8644 						    &tnapi->tx_desc_mapping,
8645 						    GFP_KERNEL);
8646 		if (!tnapi->tx_ring)
8647 			goto err_out;
8648 	}
8649 
8650 	return 0;
8651 
8652 err_out:
8653 	tg3_mem_tx_release(tp);
8654 	return -ENOMEM;
8655 }
8656 
8657 static void tg3_mem_rx_release(struct tg3 *tp)
8658 {
8659 	int i;
8660 
8661 	for (i = 0; i < tp->irq_max; i++) {
8662 		struct tg3_napi *tnapi = &tp->napi[i];
8663 
8664 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8665 
8666 		if (!tnapi->rx_rcb)
8667 			continue;
8668 
8669 		dma_free_coherent(&tp->pdev->dev,
8670 				  TG3_RX_RCB_RING_BYTES(tp),
8671 				  tnapi->rx_rcb,
8672 				  tnapi->rx_rcb_mapping);
8673 		tnapi->rx_rcb = NULL;
8674 	}
8675 }
8676 
8677 static int tg3_mem_rx_acquire(struct tg3 *tp)
8678 {
8679 	unsigned int i, limit;
8680 
8681 	limit = tp->rxq_cnt;
8682 
8683 	/* If RSS is enabled, we need a (dummy) producer ring
8684 	 * set on vector zero.  This is the true hw prodring.
8685 	 */
8686 	if (tg3_flag(tp, ENABLE_RSS))
8687 		limit++;
8688 
8689 	for (i = 0; i < limit; i++) {
8690 		struct tg3_napi *tnapi = &tp->napi[i];
8691 
8692 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8693 			goto err_out;
8694 
8695 		/* If multivector RSS is enabled, vector 0
8696 		 * does not handle rx or tx interrupts.
8697 		 * Don't allocate any resources for it.
8698 		 */
8699 		if (!i && tg3_flag(tp, ENABLE_RSS))
8700 			continue;
8701 
8702 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8703 						    TG3_RX_RCB_RING_BYTES(tp),
8704 						    &tnapi->rx_rcb_mapping,
8705 						    GFP_KERNEL);
8706 		if (!tnapi->rx_rcb)
8707 			goto err_out;
8708 	}
8709 
8710 	return 0;
8711 
8712 err_out:
8713 	tg3_mem_rx_release(tp);
8714 	return -ENOMEM;
8715 }
8716 
8717 /*
8718  * Must not be invoked with interrupt sources disabled and
8719  * the hardware shutdown down.
8720  */
8721 static void tg3_free_consistent(struct tg3 *tp)
8722 {
8723 	int i;
8724 
8725 	for (i = 0; i < tp->irq_cnt; i++) {
8726 		struct tg3_napi *tnapi = &tp->napi[i];
8727 
8728 		if (tnapi->hw_status) {
8729 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8730 					  tnapi->hw_status,
8731 					  tnapi->status_mapping);
8732 			tnapi->hw_status = NULL;
8733 		}
8734 	}
8735 
8736 	tg3_mem_rx_release(tp);
8737 	tg3_mem_tx_release(tp);
8738 
8739 	/* tp->hw_stats can be referenced safely:
8740 	 *     1. under rtnl_lock
8741 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8742 	 */
8743 	if (tp->hw_stats) {
8744 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8745 				  tp->hw_stats, tp->stats_mapping);
8746 		tp->hw_stats = NULL;
8747 	}
8748 }
8749 
8750 /*
8751  * Must not be invoked with interrupt sources disabled and
8752  * the hardware shutdown down.  Can sleep.
8753  */
8754 static int tg3_alloc_consistent(struct tg3 *tp)
8755 {
8756 	int i;
8757 
8758 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8759 					   sizeof(struct tg3_hw_stats),
8760 					   &tp->stats_mapping, GFP_KERNEL);
8761 	if (!tp->hw_stats)
8762 		goto err_out;
8763 
8764 	for (i = 0; i < tp->irq_cnt; i++) {
8765 		struct tg3_napi *tnapi = &tp->napi[i];
8766 		struct tg3_hw_status *sblk;
8767 
8768 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8769 						       TG3_HW_STATUS_SIZE,
8770 						       &tnapi->status_mapping,
8771 						       GFP_KERNEL);
8772 		if (!tnapi->hw_status)
8773 			goto err_out;
8774 
8775 		sblk = tnapi->hw_status;
8776 
8777 		if (tg3_flag(tp, ENABLE_RSS)) {
8778 			u16 *prodptr = NULL;
8779 
8780 			/*
8781 			 * When RSS is enabled, the status block format changes
8782 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8783 			 * and "rx_mini_consumer" members get mapped to the
8784 			 * other three rx return ring producer indexes.
8785 			 */
8786 			switch (i) {
8787 			case 1:
8788 				prodptr = &sblk->idx[0].rx_producer;
8789 				break;
8790 			case 2:
8791 				prodptr = &sblk->rx_jumbo_consumer;
8792 				break;
8793 			case 3:
8794 				prodptr = &sblk->reserved;
8795 				break;
8796 			case 4:
8797 				prodptr = &sblk->rx_mini_consumer;
8798 				break;
8799 			}
8800 			tnapi->rx_rcb_prod_idx = prodptr;
8801 		} else {
8802 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8803 		}
8804 	}
8805 
8806 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8807 		goto err_out;
8808 
8809 	return 0;
8810 
8811 err_out:
8812 	tg3_free_consistent(tp);
8813 	return -ENOMEM;
8814 }
8815 
8816 #define MAX_WAIT_CNT 1000
8817 
8818 /* To stop a block, clear the enable bit and poll till it
8819  * clears.  tp->lock is held.
8820  */
8821 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8822 {
8823 	unsigned int i;
8824 	u32 val;
8825 
8826 	if (tg3_flag(tp, 5705_PLUS)) {
8827 		switch (ofs) {
8828 		case RCVLSC_MODE:
8829 		case DMAC_MODE:
8830 		case MBFREE_MODE:
8831 		case BUFMGR_MODE:
8832 		case MEMARB_MODE:
8833 			/* We can't enable/disable these bits of the
8834 			 * 5705/5750, just say success.
8835 			 */
8836 			return 0;
8837 
8838 		default:
8839 			break;
8840 		}
8841 	}
8842 
8843 	val = tr32(ofs);
8844 	val &= ~enable_bit;
8845 	tw32_f(ofs, val);
8846 
8847 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8848 		if (pci_channel_offline(tp->pdev)) {
8849 			dev_err(&tp->pdev->dev,
8850 				"tg3_stop_block device offline, "
8851 				"ofs=%lx enable_bit=%x\n",
8852 				ofs, enable_bit);
8853 			return -ENODEV;
8854 		}
8855 
8856 		udelay(100);
8857 		val = tr32(ofs);
8858 		if ((val & enable_bit) == 0)
8859 			break;
8860 	}
8861 
8862 	if (i == MAX_WAIT_CNT && !silent) {
8863 		dev_err(&tp->pdev->dev,
8864 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8865 			ofs, enable_bit);
8866 		return -ENODEV;
8867 	}
8868 
8869 	return 0;
8870 }
8871 
8872 /* tp->lock is held. */
8873 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8874 {
8875 	int i, err;
8876 
8877 	tg3_disable_ints(tp);
8878 
8879 	if (pci_channel_offline(tp->pdev)) {
8880 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8881 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8882 		err = -ENODEV;
8883 		goto err_no_dev;
8884 	}
8885 
8886 	tp->rx_mode &= ~RX_MODE_ENABLE;
8887 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8888 	udelay(10);
8889 
8890 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8891 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8892 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8894 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8895 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8896 
8897 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8898 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8899 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8900 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8901 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8902 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8903 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8904 
8905 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8906 	tw32_f(MAC_MODE, tp->mac_mode);
8907 	udelay(40);
8908 
8909 	tp->tx_mode &= ~TX_MODE_ENABLE;
8910 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8911 
8912 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8913 		udelay(100);
8914 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8915 			break;
8916 	}
8917 	if (i >= MAX_WAIT_CNT) {
8918 		dev_err(&tp->pdev->dev,
8919 			"%s timed out, TX_MODE_ENABLE will not clear "
8920 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8921 		err |= -ENODEV;
8922 	}
8923 
8924 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8925 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8926 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8927 
8928 	tw32(FTQ_RESET, 0xffffffff);
8929 	tw32(FTQ_RESET, 0x00000000);
8930 
8931 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8932 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8933 
8934 err_no_dev:
8935 	for (i = 0; i < tp->irq_cnt; i++) {
8936 		struct tg3_napi *tnapi = &tp->napi[i];
8937 		if (tnapi->hw_status)
8938 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8939 	}
8940 
8941 	return err;
8942 }
8943 
8944 /* Save PCI command register before chip reset */
8945 static void tg3_save_pci_state(struct tg3 *tp)
8946 {
8947 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8948 }
8949 
8950 /* Restore PCI state after chip reset */
8951 static void tg3_restore_pci_state(struct tg3 *tp)
8952 {
8953 	u32 val;
8954 
8955 	/* Re-enable indirect register accesses. */
8956 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8957 			       tp->misc_host_ctrl);
8958 
8959 	/* Set MAX PCI retry to zero. */
8960 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8961 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8962 	    tg3_flag(tp, PCIX_MODE))
8963 		val |= PCISTATE_RETRY_SAME_DMA;
8964 	/* Allow reads and writes to the APE register and memory space. */
8965 	if (tg3_flag(tp, ENABLE_APE))
8966 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8967 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8968 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8969 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8970 
8971 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8972 
8973 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8974 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8975 				      tp->pci_cacheline_sz);
8976 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8977 				      tp->pci_lat_timer);
8978 	}
8979 
8980 	/* Make sure PCI-X relaxed ordering bit is clear. */
8981 	if (tg3_flag(tp, PCIX_MODE)) {
8982 		u16 pcix_cmd;
8983 
8984 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8985 				     &pcix_cmd);
8986 		pcix_cmd &= ~PCI_X_CMD_ERO;
8987 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8988 				      pcix_cmd);
8989 	}
8990 
8991 	if (tg3_flag(tp, 5780_CLASS)) {
8992 
8993 		/* Chip reset on 5780 will reset MSI enable bit,
8994 		 * so need to restore it.
8995 		 */
8996 		if (tg3_flag(tp, USING_MSI)) {
8997 			u16 ctrl;
8998 
8999 			pci_read_config_word(tp->pdev,
9000 					     tp->msi_cap + PCI_MSI_FLAGS,
9001 					     &ctrl);
9002 			pci_write_config_word(tp->pdev,
9003 					      tp->msi_cap + PCI_MSI_FLAGS,
9004 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9005 			val = tr32(MSGINT_MODE);
9006 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9007 		}
9008 	}
9009 }
9010 
9011 static void tg3_override_clk(struct tg3 *tp)
9012 {
9013 	u32 val;
9014 
9015 	switch (tg3_asic_rev(tp)) {
9016 	case ASIC_REV_5717:
9017 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9018 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9019 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9020 		break;
9021 
9022 	case ASIC_REV_5719:
9023 	case ASIC_REV_5720:
9024 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9025 		break;
9026 
9027 	default:
9028 		return;
9029 	}
9030 }
9031 
9032 static void tg3_restore_clk(struct tg3 *tp)
9033 {
9034 	u32 val;
9035 
9036 	switch (tg3_asic_rev(tp)) {
9037 	case ASIC_REV_5717:
9038 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9039 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9040 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9041 		break;
9042 
9043 	case ASIC_REV_5719:
9044 	case ASIC_REV_5720:
9045 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9046 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9047 		break;
9048 
9049 	default:
9050 		return;
9051 	}
9052 }
9053 
9054 /* tp->lock is held. */
9055 static int tg3_chip_reset(struct tg3 *tp)
9056 	__releases(tp->lock)
9057 	__acquires(tp->lock)
9058 {
9059 	u32 val;
9060 	void (*write_op)(struct tg3 *, u32, u32);
9061 	int i, err;
9062 
9063 	if (!pci_device_is_present(tp->pdev))
9064 		return -ENODEV;
9065 
9066 	tg3_nvram_lock(tp);
9067 
9068 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9069 
9070 	/* No matching tg3_nvram_unlock() after this because
9071 	 * chip reset below will undo the nvram lock.
9072 	 */
9073 	tp->nvram_lock_cnt = 0;
9074 
9075 	/* GRC_MISC_CFG core clock reset will clear the memory
9076 	 * enable bit in PCI register 4 and the MSI enable bit
9077 	 * on some chips, so we save relevant registers here.
9078 	 */
9079 	tg3_save_pci_state(tp);
9080 
9081 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9082 	    tg3_flag(tp, 5755_PLUS))
9083 		tw32(GRC_FASTBOOT_PC, 0);
9084 
9085 	/*
9086 	 * We must avoid the readl() that normally takes place.
9087 	 * It locks machines, causes machine checks, and other
9088 	 * fun things.  So, temporarily disable the 5701
9089 	 * hardware workaround, while we do the reset.
9090 	 */
9091 	write_op = tp->write32;
9092 	if (write_op == tg3_write_flush_reg32)
9093 		tp->write32 = tg3_write32;
9094 
9095 	/* Prevent the irq handler from reading or writing PCI registers
9096 	 * during chip reset when the memory enable bit in the PCI command
9097 	 * register may be cleared.  The chip does not generate interrupt
9098 	 * at this time, but the irq handler may still be called due to irq
9099 	 * sharing or irqpoll.
9100 	 */
9101 	tg3_flag_set(tp, CHIP_RESETTING);
9102 	for (i = 0; i < tp->irq_cnt; i++) {
9103 		struct tg3_napi *tnapi = &tp->napi[i];
9104 		if (tnapi->hw_status) {
9105 			tnapi->hw_status->status = 0;
9106 			tnapi->hw_status->status_tag = 0;
9107 		}
9108 		tnapi->last_tag = 0;
9109 		tnapi->last_irq_tag = 0;
9110 	}
9111 	smp_mb();
9112 
9113 	tg3_full_unlock(tp);
9114 
9115 	for (i = 0; i < tp->irq_cnt; i++)
9116 		synchronize_irq(tp->napi[i].irq_vec);
9117 
9118 	tg3_full_lock(tp, 0);
9119 
9120 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9121 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9122 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9123 	}
9124 
9125 	/* do the reset */
9126 	val = GRC_MISC_CFG_CORECLK_RESET;
9127 
9128 	if (tg3_flag(tp, PCI_EXPRESS)) {
9129 		/* Force PCIe 1.0a mode */
9130 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9131 		    !tg3_flag(tp, 57765_PLUS) &&
9132 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9133 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9134 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9135 
9136 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9137 			tw32(GRC_MISC_CFG, (1 << 29));
9138 			val |= (1 << 29);
9139 		}
9140 	}
9141 
9142 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9143 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9144 		tw32(GRC_VCPU_EXT_CTRL,
9145 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9146 	}
9147 
9148 	/* Set the clock to the highest frequency to avoid timeouts. With link
9149 	 * aware mode, the clock speed could be slow and bootcode does not
9150 	 * complete within the expected time. Override the clock to allow the
9151 	 * bootcode to finish sooner and then restore it.
9152 	 */
9153 	tg3_override_clk(tp);
9154 
9155 	/* Manage gphy power for all CPMU absent PCIe devices. */
9156 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9157 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9158 
9159 	tw32(GRC_MISC_CFG, val);
9160 
9161 	/* restore 5701 hardware bug workaround write method */
9162 	tp->write32 = write_op;
9163 
9164 	/* Unfortunately, we have to delay before the PCI read back.
9165 	 * Some 575X chips even will not respond to a PCI cfg access
9166 	 * when the reset command is given to the chip.
9167 	 *
9168 	 * How do these hardware designers expect things to work
9169 	 * properly if the PCI write is posted for a long period
9170 	 * of time?  It is always necessary to have some method by
9171 	 * which a register read back can occur to push the write
9172 	 * out which does the reset.
9173 	 *
9174 	 * For most tg3 variants the trick below was working.
9175 	 * Ho hum...
9176 	 */
9177 	udelay(120);
9178 
9179 	/* Flush PCI posted writes.  The normal MMIO registers
9180 	 * are inaccessible at this time so this is the only
9181 	 * way to make this reliably (actually, this is no longer
9182 	 * the case, see above).  I tried to use indirect
9183 	 * register read/write but this upset some 5701 variants.
9184 	 */
9185 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9186 
9187 	udelay(120);
9188 
9189 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9190 		u16 val16;
9191 
9192 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9193 			int j;
9194 			u32 cfg_val;
9195 
9196 			/* Wait for link training to complete.  */
9197 			for (j = 0; j < 5000; j++)
9198 				udelay(100);
9199 
9200 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9201 			pci_write_config_dword(tp->pdev, 0xc4,
9202 					       cfg_val | (1 << 15));
9203 		}
9204 
9205 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9206 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9207 		/*
9208 		 * Older PCIe devices only support the 128 byte
9209 		 * MPS setting.  Enforce the restriction.
9210 		 */
9211 		if (!tg3_flag(tp, CPMU_PRESENT))
9212 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9213 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9214 
9215 		/* Clear error status */
9216 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9217 				      PCI_EXP_DEVSTA_CED |
9218 				      PCI_EXP_DEVSTA_NFED |
9219 				      PCI_EXP_DEVSTA_FED |
9220 				      PCI_EXP_DEVSTA_URD);
9221 	}
9222 
9223 	tg3_restore_pci_state(tp);
9224 
9225 	tg3_flag_clear(tp, CHIP_RESETTING);
9226 	tg3_flag_clear(tp, ERROR_PROCESSED);
9227 
9228 	val = 0;
9229 	if (tg3_flag(tp, 5780_CLASS))
9230 		val = tr32(MEMARB_MODE);
9231 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9232 
9233 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9234 		tg3_stop_fw(tp);
9235 		tw32(0x5000, 0x400);
9236 	}
9237 
9238 	if (tg3_flag(tp, IS_SSB_CORE)) {
9239 		/*
9240 		 * BCM4785: In order to avoid repercussions from using
9241 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9242 		 * which is not required.
9243 		 */
9244 		tg3_stop_fw(tp);
9245 		tg3_halt_cpu(tp, RX_CPU_BASE);
9246 	}
9247 
9248 	err = tg3_poll_fw(tp);
9249 	if (err)
9250 		return err;
9251 
9252 	tw32(GRC_MODE, tp->grc_mode);
9253 
9254 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9255 		val = tr32(0xc4);
9256 
9257 		tw32(0xc4, val | (1 << 15));
9258 	}
9259 
9260 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9261 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9262 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9263 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9264 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9265 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9266 	}
9267 
9268 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9269 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9270 		val = tp->mac_mode;
9271 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9272 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9273 		val = tp->mac_mode;
9274 	} else
9275 		val = 0;
9276 
9277 	tw32_f(MAC_MODE, val);
9278 	udelay(40);
9279 
9280 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9281 
9282 	tg3_mdio_start(tp);
9283 
9284 	if (tg3_flag(tp, PCI_EXPRESS) &&
9285 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9286 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9287 	    !tg3_flag(tp, 57765_PLUS)) {
9288 		val = tr32(0x7c00);
9289 
9290 		tw32(0x7c00, val | (1 << 25));
9291 	}
9292 
9293 	tg3_restore_clk(tp);
9294 
9295 	/* Reprobe ASF enable state.  */
9296 	tg3_flag_clear(tp, ENABLE_ASF);
9297 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9298 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9299 
9300 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9301 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9302 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9303 		u32 nic_cfg;
9304 
9305 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9306 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9307 			tg3_flag_set(tp, ENABLE_ASF);
9308 			tp->last_event_jiffies = jiffies;
9309 			if (tg3_flag(tp, 5750_PLUS))
9310 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9311 
9312 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9313 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9314 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9315 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9316 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9317 		}
9318 	}
9319 
9320 	return 0;
9321 }
9322 
9323 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9324 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9325 static void __tg3_set_rx_mode(struct net_device *);
9326 
9327 /* tp->lock is held. */
9328 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9329 {
9330 	int err;
9331 
9332 	tg3_stop_fw(tp);
9333 
9334 	tg3_write_sig_pre_reset(tp, kind);
9335 
9336 	tg3_abort_hw(tp, silent);
9337 	err = tg3_chip_reset(tp);
9338 
9339 	__tg3_set_mac_addr(tp, false);
9340 
9341 	tg3_write_sig_legacy(tp, kind);
9342 	tg3_write_sig_post_reset(tp, kind);
9343 
9344 	if (tp->hw_stats) {
9345 		/* Save the stats across chip resets... */
9346 		tg3_get_nstats(tp, &tp->net_stats_prev);
9347 		tg3_get_estats(tp, &tp->estats_prev);
9348 
9349 		/* And make sure the next sample is new data */
9350 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9351 	}
9352 
9353 	return err;
9354 }
9355 
9356 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9357 {
9358 	struct tg3 *tp = netdev_priv(dev);
9359 	struct sockaddr *addr = p;
9360 	int err = 0;
9361 	bool skip_mac_1 = false;
9362 
9363 	if (!is_valid_ether_addr(addr->sa_data))
9364 		return -EADDRNOTAVAIL;
9365 
9366 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9367 
9368 	if (!netif_running(dev))
9369 		return 0;
9370 
9371 	if (tg3_flag(tp, ENABLE_ASF)) {
9372 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9373 
9374 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9375 		addr0_low = tr32(MAC_ADDR_0_LOW);
9376 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9377 		addr1_low = tr32(MAC_ADDR_1_LOW);
9378 
9379 		/* Skip MAC addr 1 if ASF is using it. */
9380 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9381 		    !(addr1_high == 0 && addr1_low == 0))
9382 			skip_mac_1 = true;
9383 	}
9384 	spin_lock_bh(&tp->lock);
9385 	__tg3_set_mac_addr(tp, skip_mac_1);
9386 	__tg3_set_rx_mode(dev);
9387 	spin_unlock_bh(&tp->lock);
9388 
9389 	return err;
9390 }
9391 
9392 /* tp->lock is held. */
9393 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9394 			   dma_addr_t mapping, u32 maxlen_flags,
9395 			   u32 nic_addr)
9396 {
9397 	tg3_write_mem(tp,
9398 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9399 		      ((u64) mapping >> 32));
9400 	tg3_write_mem(tp,
9401 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9402 		      ((u64) mapping & 0xffffffff));
9403 	tg3_write_mem(tp,
9404 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9405 		       maxlen_flags);
9406 
9407 	if (!tg3_flag(tp, 5705_PLUS))
9408 		tg3_write_mem(tp,
9409 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9410 			      nic_addr);
9411 }
9412 
9413 
9414 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9415 {
9416 	int i = 0;
9417 
9418 	if (!tg3_flag(tp, ENABLE_TSS)) {
9419 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9420 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9421 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9422 	} else {
9423 		tw32(HOSTCC_TXCOL_TICKS, 0);
9424 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9425 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9426 
9427 		for (; i < tp->txq_cnt; i++) {
9428 			u32 reg;
9429 
9430 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9431 			tw32(reg, ec->tx_coalesce_usecs);
9432 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9433 			tw32(reg, ec->tx_max_coalesced_frames);
9434 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9435 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9436 		}
9437 	}
9438 
9439 	for (; i < tp->irq_max - 1; i++) {
9440 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9441 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9442 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9443 	}
9444 }
9445 
9446 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9447 {
9448 	int i = 0;
9449 	u32 limit = tp->rxq_cnt;
9450 
9451 	if (!tg3_flag(tp, ENABLE_RSS)) {
9452 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9453 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9454 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9455 		limit--;
9456 	} else {
9457 		tw32(HOSTCC_RXCOL_TICKS, 0);
9458 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9459 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9460 	}
9461 
9462 	for (; i < limit; i++) {
9463 		u32 reg;
9464 
9465 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9466 		tw32(reg, ec->rx_coalesce_usecs);
9467 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9468 		tw32(reg, ec->rx_max_coalesced_frames);
9469 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9470 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9471 	}
9472 
9473 	for (; i < tp->irq_max - 1; i++) {
9474 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9475 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9476 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9477 	}
9478 }
9479 
9480 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9481 {
9482 	tg3_coal_tx_init(tp, ec);
9483 	tg3_coal_rx_init(tp, ec);
9484 
9485 	if (!tg3_flag(tp, 5705_PLUS)) {
9486 		u32 val = ec->stats_block_coalesce_usecs;
9487 
9488 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9489 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9490 
9491 		if (!tp->link_up)
9492 			val = 0;
9493 
9494 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9495 	}
9496 }
9497 
9498 /* tp->lock is held. */
9499 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9500 {
9501 	u32 txrcb, limit;
9502 
9503 	/* Disable all transmit rings but the first. */
9504 	if (!tg3_flag(tp, 5705_PLUS))
9505 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9506 	else if (tg3_flag(tp, 5717_PLUS))
9507 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9508 	else if (tg3_flag(tp, 57765_CLASS) ||
9509 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9510 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9511 	else
9512 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9513 
9514 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9515 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9516 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9517 			      BDINFO_FLAGS_DISABLED);
9518 }
9519 
9520 /* tp->lock is held. */
9521 static void tg3_tx_rcbs_init(struct tg3 *tp)
9522 {
9523 	int i = 0;
9524 	u32 txrcb = NIC_SRAM_SEND_RCB;
9525 
9526 	if (tg3_flag(tp, ENABLE_TSS))
9527 		i++;
9528 
9529 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9530 		struct tg3_napi *tnapi = &tp->napi[i];
9531 
9532 		if (!tnapi->tx_ring)
9533 			continue;
9534 
9535 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9536 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9537 			       NIC_SRAM_TX_BUFFER_DESC);
9538 	}
9539 }
9540 
9541 /* tp->lock is held. */
9542 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9543 {
9544 	u32 rxrcb, limit;
9545 
9546 	/* Disable all receive return rings but the first. */
9547 	if (tg3_flag(tp, 5717_PLUS))
9548 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9549 	else if (!tg3_flag(tp, 5705_PLUS))
9550 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9551 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9552 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9553 		 tg3_flag(tp, 57765_CLASS))
9554 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9555 	else
9556 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9557 
9558 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9559 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9560 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9561 			      BDINFO_FLAGS_DISABLED);
9562 }
9563 
9564 /* tp->lock is held. */
9565 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9566 {
9567 	int i = 0;
9568 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9569 
9570 	if (tg3_flag(tp, ENABLE_RSS))
9571 		i++;
9572 
9573 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9574 		struct tg3_napi *tnapi = &tp->napi[i];
9575 
9576 		if (!tnapi->rx_rcb)
9577 			continue;
9578 
9579 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9580 			       (tp->rx_ret_ring_mask + 1) <<
9581 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9582 	}
9583 }
9584 
9585 /* tp->lock is held. */
9586 static void tg3_rings_reset(struct tg3 *tp)
9587 {
9588 	int i;
9589 	u32 stblk;
9590 	struct tg3_napi *tnapi = &tp->napi[0];
9591 
9592 	tg3_tx_rcbs_disable(tp);
9593 
9594 	tg3_rx_ret_rcbs_disable(tp);
9595 
9596 	/* Disable interrupts */
9597 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9598 	tp->napi[0].chk_msi_cnt = 0;
9599 	tp->napi[0].last_rx_cons = 0;
9600 	tp->napi[0].last_tx_cons = 0;
9601 
9602 	/* Zero mailbox registers. */
9603 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9604 		for (i = 1; i < tp->irq_max; i++) {
9605 			tp->napi[i].tx_prod = 0;
9606 			tp->napi[i].tx_cons = 0;
9607 			if (tg3_flag(tp, ENABLE_TSS))
9608 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9609 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9610 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9611 			tp->napi[i].chk_msi_cnt = 0;
9612 			tp->napi[i].last_rx_cons = 0;
9613 			tp->napi[i].last_tx_cons = 0;
9614 		}
9615 		if (!tg3_flag(tp, ENABLE_TSS))
9616 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9617 	} else {
9618 		tp->napi[0].tx_prod = 0;
9619 		tp->napi[0].tx_cons = 0;
9620 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9621 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9622 	}
9623 
9624 	/* Make sure the NIC-based send BD rings are disabled. */
9625 	if (!tg3_flag(tp, 5705_PLUS)) {
9626 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9627 		for (i = 0; i < 16; i++)
9628 			tw32_tx_mbox(mbox + i * 8, 0);
9629 	}
9630 
9631 	/* Clear status block in ram. */
9632 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9633 
9634 	/* Set status block DMA address */
9635 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9636 	     ((u64) tnapi->status_mapping >> 32));
9637 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9638 	     ((u64) tnapi->status_mapping & 0xffffffff));
9639 
9640 	stblk = HOSTCC_STATBLCK_RING1;
9641 
9642 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9643 		u64 mapping = (u64)tnapi->status_mapping;
9644 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9645 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9646 		stblk += 8;
9647 
9648 		/* Clear status block in ram. */
9649 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9650 	}
9651 
9652 	tg3_tx_rcbs_init(tp);
9653 	tg3_rx_ret_rcbs_init(tp);
9654 }
9655 
9656 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9657 {
9658 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9659 
9660 	if (!tg3_flag(tp, 5750_PLUS) ||
9661 	    tg3_flag(tp, 5780_CLASS) ||
9662 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9663 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9664 	    tg3_flag(tp, 57765_PLUS))
9665 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9666 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9667 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9668 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9669 	else
9670 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9671 
9672 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9673 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9674 
9675 	val = min(nic_rep_thresh, host_rep_thresh);
9676 	tw32(RCVBDI_STD_THRESH, val);
9677 
9678 	if (tg3_flag(tp, 57765_PLUS))
9679 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9680 
9681 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9682 		return;
9683 
9684 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9685 
9686 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9687 
9688 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9689 	tw32(RCVBDI_JUMBO_THRESH, val);
9690 
9691 	if (tg3_flag(tp, 57765_PLUS))
9692 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9693 }
9694 
9695 static inline u32 calc_crc(unsigned char *buf, int len)
9696 {
9697 	u32 reg;
9698 	u32 tmp;
9699 	int j, k;
9700 
9701 	reg = 0xffffffff;
9702 
9703 	for (j = 0; j < len; j++) {
9704 		reg ^= buf[j];
9705 
9706 		for (k = 0; k < 8; k++) {
9707 			tmp = reg & 0x01;
9708 
9709 			reg >>= 1;
9710 
9711 			if (tmp)
9712 				reg ^= 0xedb88320;
9713 		}
9714 	}
9715 
9716 	return ~reg;
9717 }
9718 
9719 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9720 {
9721 	/* accept or reject all multicast frames */
9722 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9723 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9724 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9725 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9726 }
9727 
9728 static void __tg3_set_rx_mode(struct net_device *dev)
9729 {
9730 	struct tg3 *tp = netdev_priv(dev);
9731 	u32 rx_mode;
9732 
9733 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9734 				  RX_MODE_KEEP_VLAN_TAG);
9735 
9736 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9737 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9738 	 * flag clear.
9739 	 */
9740 	if (!tg3_flag(tp, ENABLE_ASF))
9741 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9742 #endif
9743 
9744 	if (dev->flags & IFF_PROMISC) {
9745 		/* Promiscuous mode. */
9746 		rx_mode |= RX_MODE_PROMISC;
9747 	} else if (dev->flags & IFF_ALLMULTI) {
9748 		/* Accept all multicast. */
9749 		tg3_set_multi(tp, 1);
9750 	} else if (netdev_mc_empty(dev)) {
9751 		/* Reject all multicast. */
9752 		tg3_set_multi(tp, 0);
9753 	} else {
9754 		/* Accept one or more multicast(s). */
9755 		struct netdev_hw_addr *ha;
9756 		u32 mc_filter[4] = { 0, };
9757 		u32 regidx;
9758 		u32 bit;
9759 		u32 crc;
9760 
9761 		netdev_for_each_mc_addr(ha, dev) {
9762 			crc = calc_crc(ha->addr, ETH_ALEN);
9763 			bit = ~crc & 0x7f;
9764 			regidx = (bit & 0x60) >> 5;
9765 			bit &= 0x1f;
9766 			mc_filter[regidx] |= (1 << bit);
9767 		}
9768 
9769 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9770 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9771 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9772 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9773 	}
9774 
9775 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9776 		rx_mode |= RX_MODE_PROMISC;
9777 	} else if (!(dev->flags & IFF_PROMISC)) {
9778 		/* Add all entries into to the mac addr filter list */
9779 		int i = 0;
9780 		struct netdev_hw_addr *ha;
9781 
9782 		netdev_for_each_uc_addr(ha, dev) {
9783 			__tg3_set_one_mac_addr(tp, ha->addr,
9784 					       i + TG3_UCAST_ADDR_IDX(tp));
9785 			i++;
9786 		}
9787 	}
9788 
9789 	if (rx_mode != tp->rx_mode) {
9790 		tp->rx_mode = rx_mode;
9791 		tw32_f(MAC_RX_MODE, rx_mode);
9792 		udelay(10);
9793 	}
9794 }
9795 
9796 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9797 {
9798 	int i;
9799 
9800 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9801 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9802 }
9803 
9804 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9805 {
9806 	int i;
9807 
9808 	if (!tg3_flag(tp, SUPPORT_MSIX))
9809 		return;
9810 
9811 	if (tp->rxq_cnt == 1) {
9812 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9813 		return;
9814 	}
9815 
9816 	/* Validate table against current IRQ count */
9817 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9818 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9819 			break;
9820 	}
9821 
9822 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9823 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9824 }
9825 
9826 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9827 {
9828 	int i = 0;
9829 	u32 reg = MAC_RSS_INDIR_TBL_0;
9830 
9831 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9832 		u32 val = tp->rss_ind_tbl[i];
9833 		i++;
9834 		for (; i % 8; i++) {
9835 			val <<= 4;
9836 			val |= tp->rss_ind_tbl[i];
9837 		}
9838 		tw32(reg, val);
9839 		reg += 4;
9840 	}
9841 }
9842 
9843 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9844 {
9845 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9846 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9847 	else
9848 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9849 }
9850 
9851 /* tp->lock is held. */
9852 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9853 {
9854 	u32 val, rdmac_mode;
9855 	int i, err, limit;
9856 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9857 
9858 	tg3_disable_ints(tp);
9859 
9860 	tg3_stop_fw(tp);
9861 
9862 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9863 
9864 	if (tg3_flag(tp, INIT_COMPLETE))
9865 		tg3_abort_hw(tp, 1);
9866 
9867 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9868 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9869 		tg3_phy_pull_config(tp);
9870 		tg3_eee_pull_config(tp, NULL);
9871 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9872 	}
9873 
9874 	/* Enable MAC control of LPI */
9875 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9876 		tg3_setup_eee(tp);
9877 
9878 	if (reset_phy)
9879 		tg3_phy_reset(tp);
9880 
9881 	err = tg3_chip_reset(tp);
9882 	if (err)
9883 		return err;
9884 
9885 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9886 
9887 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9888 		val = tr32(TG3_CPMU_CTRL);
9889 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9890 		tw32(TG3_CPMU_CTRL, val);
9891 
9892 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9893 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9894 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9895 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9896 
9897 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9898 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9899 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9900 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9901 
9902 		val = tr32(TG3_CPMU_HST_ACC);
9903 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9904 		val |= CPMU_HST_ACC_MACCLK_6_25;
9905 		tw32(TG3_CPMU_HST_ACC, val);
9906 	}
9907 
9908 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9909 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9910 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9911 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9912 		tw32(PCIE_PWR_MGMT_THRESH, val);
9913 
9914 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9915 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9916 
9917 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9918 
9919 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9920 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9921 	}
9922 
9923 	if (tg3_flag(tp, L1PLLPD_EN)) {
9924 		u32 grc_mode = tr32(GRC_MODE);
9925 
9926 		/* Access the lower 1K of PL PCIE block registers. */
9927 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9928 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9929 
9930 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9931 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9932 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9933 
9934 		tw32(GRC_MODE, grc_mode);
9935 	}
9936 
9937 	if (tg3_flag(tp, 57765_CLASS)) {
9938 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9939 			u32 grc_mode = tr32(GRC_MODE);
9940 
9941 			/* Access the lower 1K of PL PCIE block registers. */
9942 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9943 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9944 
9945 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9946 				   TG3_PCIE_PL_LO_PHYCTL5);
9947 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9948 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9949 
9950 			tw32(GRC_MODE, grc_mode);
9951 		}
9952 
9953 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9954 			u32 grc_mode;
9955 
9956 			/* Fix transmit hangs */
9957 			val = tr32(TG3_CPMU_PADRNG_CTL);
9958 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9959 			tw32(TG3_CPMU_PADRNG_CTL, val);
9960 
9961 			grc_mode = tr32(GRC_MODE);
9962 
9963 			/* Access the lower 1K of DL PCIE block registers. */
9964 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9965 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9966 
9967 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9968 				   TG3_PCIE_DL_LO_FTSMAX);
9969 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9970 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9971 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9972 
9973 			tw32(GRC_MODE, grc_mode);
9974 		}
9975 
9976 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9977 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9978 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9979 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9980 	}
9981 
9982 	/* This works around an issue with Athlon chipsets on
9983 	 * B3 tigon3 silicon.  This bit has no effect on any
9984 	 * other revision.  But do not set this on PCI Express
9985 	 * chips and don't even touch the clocks if the CPMU is present.
9986 	 */
9987 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9988 		if (!tg3_flag(tp, PCI_EXPRESS))
9989 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9990 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9991 	}
9992 
9993 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9994 	    tg3_flag(tp, PCIX_MODE)) {
9995 		val = tr32(TG3PCI_PCISTATE);
9996 		val |= PCISTATE_RETRY_SAME_DMA;
9997 		tw32(TG3PCI_PCISTATE, val);
9998 	}
9999 
10000 	if (tg3_flag(tp, ENABLE_APE)) {
10001 		/* Allow reads and writes to the
10002 		 * APE register and memory space.
10003 		 */
10004 		val = tr32(TG3PCI_PCISTATE);
10005 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10006 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10007 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10008 		tw32(TG3PCI_PCISTATE, val);
10009 	}
10010 
10011 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10012 		/* Enable some hw fixes.  */
10013 		val = tr32(TG3PCI_MSI_DATA);
10014 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10015 		tw32(TG3PCI_MSI_DATA, val);
10016 	}
10017 
10018 	/* Descriptor ring init may make accesses to the
10019 	 * NIC SRAM area to setup the TX descriptors, so we
10020 	 * can only do this after the hardware has been
10021 	 * successfully reset.
10022 	 */
10023 	err = tg3_init_rings(tp);
10024 	if (err)
10025 		return err;
10026 
10027 	if (tg3_flag(tp, 57765_PLUS)) {
10028 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10029 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10030 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10031 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10032 		if (!tg3_flag(tp, 57765_CLASS) &&
10033 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10034 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10035 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10036 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10037 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10038 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10039 		/* This value is determined during the probe time DMA
10040 		 * engine test, tg3_test_dma.
10041 		 */
10042 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10043 	}
10044 
10045 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10046 			  GRC_MODE_4X_NIC_SEND_RINGS |
10047 			  GRC_MODE_NO_TX_PHDR_CSUM |
10048 			  GRC_MODE_NO_RX_PHDR_CSUM);
10049 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10050 
10051 	/* Pseudo-header checksum is done by hardware logic and not
10052 	 * the offload processers, so make the chip do the pseudo-
10053 	 * header checksums on receive.  For transmit it is more
10054 	 * convenient to do the pseudo-header checksum in software
10055 	 * as Linux does that on transmit for us in all cases.
10056 	 */
10057 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10058 
10059 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10060 	if (tp->rxptpctl)
10061 		tw32(TG3_RX_PTP_CTL,
10062 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10063 
10064 	if (tg3_flag(tp, PTP_CAPABLE))
10065 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10066 
10067 	tw32(GRC_MODE, tp->grc_mode | val);
10068 
10069 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10070 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10071 	 * to 2048 instead of default 4096.
10072 	 */
10073 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10074 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10075 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10076 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10077 	}
10078 
10079 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10080 	val = tr32(GRC_MISC_CFG);
10081 	val &= ~0xff;
10082 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10083 	tw32(GRC_MISC_CFG, val);
10084 
10085 	/* Initialize MBUF/DESC pool. */
10086 	if (tg3_flag(tp, 5750_PLUS)) {
10087 		/* Do nothing.  */
10088 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10089 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10090 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10091 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10092 		else
10093 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10094 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10095 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10096 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10097 		int fw_len;
10098 
10099 		fw_len = tp->fw_len;
10100 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10101 		tw32(BUFMGR_MB_POOL_ADDR,
10102 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10103 		tw32(BUFMGR_MB_POOL_SIZE,
10104 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10105 	}
10106 
10107 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10108 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10109 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10110 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10111 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10112 		tw32(BUFMGR_MB_HIGH_WATER,
10113 		     tp->bufmgr_config.mbuf_high_water);
10114 	} else {
10115 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10116 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10117 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10118 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10119 		tw32(BUFMGR_MB_HIGH_WATER,
10120 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10121 	}
10122 	tw32(BUFMGR_DMA_LOW_WATER,
10123 	     tp->bufmgr_config.dma_low_water);
10124 	tw32(BUFMGR_DMA_HIGH_WATER,
10125 	     tp->bufmgr_config.dma_high_water);
10126 
10127 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10128 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10129 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10130 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10131 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10132 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10133 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10134 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10135 	tw32(BUFMGR_MODE, val);
10136 	for (i = 0; i < 2000; i++) {
10137 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10138 			break;
10139 		udelay(10);
10140 	}
10141 	if (i >= 2000) {
10142 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10143 		return -ENODEV;
10144 	}
10145 
10146 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10147 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10148 
10149 	tg3_setup_rxbd_thresholds(tp);
10150 
10151 	/* Initialize TG3_BDINFO's at:
10152 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10153 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10154 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10155 	 *
10156 	 * like so:
10157 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10158 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10159 	 *                              ring attribute flags
10160 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10161 	 *
10162 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10163 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10164 	 *
10165 	 * The size of each ring is fixed in the firmware, but the location is
10166 	 * configurable.
10167 	 */
10168 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10169 	     ((u64) tpr->rx_std_mapping >> 32));
10170 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10171 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10172 	if (!tg3_flag(tp, 5717_PLUS))
10173 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10174 		     NIC_SRAM_RX_BUFFER_DESC);
10175 
10176 	/* Disable the mini ring */
10177 	if (!tg3_flag(tp, 5705_PLUS))
10178 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10179 		     BDINFO_FLAGS_DISABLED);
10180 
10181 	/* Program the jumbo buffer descriptor ring control
10182 	 * blocks on those devices that have them.
10183 	 */
10184 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10185 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10186 
10187 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10188 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10189 			     ((u64) tpr->rx_jmb_mapping >> 32));
10190 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10191 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10192 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10193 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10194 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10195 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10196 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10197 			    tg3_flag(tp, 57765_CLASS) ||
10198 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10199 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10200 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10201 		} else {
10202 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10203 			     BDINFO_FLAGS_DISABLED);
10204 		}
10205 
10206 		if (tg3_flag(tp, 57765_PLUS)) {
10207 			val = TG3_RX_STD_RING_SIZE(tp);
10208 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10209 			val |= (TG3_RX_STD_DMA_SZ << 2);
10210 		} else
10211 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10212 	} else
10213 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10214 
10215 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10216 
10217 	tpr->rx_std_prod_idx = tp->rx_pending;
10218 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10219 
10220 	tpr->rx_jmb_prod_idx =
10221 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10222 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10223 
10224 	tg3_rings_reset(tp);
10225 
10226 	/* Initialize MAC address and backoff seed. */
10227 	__tg3_set_mac_addr(tp, false);
10228 
10229 	/* MTU + ethernet header + FCS + optional VLAN tag */
10230 	tw32(MAC_RX_MTU_SIZE,
10231 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10232 
10233 	/* The slot time is changed by tg3_setup_phy if we
10234 	 * run at gigabit with half duplex.
10235 	 */
10236 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10237 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10238 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10239 
10240 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10241 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10242 		val |= tr32(MAC_TX_LENGTHS) &
10243 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10244 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10245 
10246 	tw32(MAC_TX_LENGTHS, val);
10247 
10248 	/* Receive rules. */
10249 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10250 	tw32(RCVLPC_CONFIG, 0x0181);
10251 
10252 	/* Calculate RDMAC_MODE setting early, we need it to determine
10253 	 * the RCVLPC_STATE_ENABLE mask.
10254 	 */
10255 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10256 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10257 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10258 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10259 		      RDMAC_MODE_LNGREAD_ENAB);
10260 
10261 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10262 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10263 
10264 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10265 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10266 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10267 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10268 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10269 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10270 
10271 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10272 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10273 		if (tg3_flag(tp, TSO_CAPABLE) &&
10274 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10275 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10276 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10277 			   !tg3_flag(tp, IS_5788)) {
10278 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10279 		}
10280 	}
10281 
10282 	if (tg3_flag(tp, PCI_EXPRESS))
10283 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10284 
10285 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10286 		tp->dma_limit = 0;
10287 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10288 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10289 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10290 		}
10291 	}
10292 
10293 	if (tg3_flag(tp, HW_TSO_1) ||
10294 	    tg3_flag(tp, HW_TSO_2) ||
10295 	    tg3_flag(tp, HW_TSO_3))
10296 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10297 
10298 	if (tg3_flag(tp, 57765_PLUS) ||
10299 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10300 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10301 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10302 
10303 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10304 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10305 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10306 
10307 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10308 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10309 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10310 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10311 	    tg3_flag(tp, 57765_PLUS)) {
10312 		u32 tgtreg;
10313 
10314 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10315 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10316 		else
10317 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10318 
10319 		val = tr32(tgtreg);
10320 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10321 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10322 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10323 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10324 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10325 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10326 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10327 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10328 		}
10329 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10330 	}
10331 
10332 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10333 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10334 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10335 		u32 tgtreg;
10336 
10337 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10338 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10339 		else
10340 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10341 
10342 		val = tr32(tgtreg);
10343 		tw32(tgtreg, val |
10344 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10345 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10346 	}
10347 
10348 	/* Receive/send statistics. */
10349 	if (tg3_flag(tp, 5750_PLUS)) {
10350 		val = tr32(RCVLPC_STATS_ENABLE);
10351 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10352 		tw32(RCVLPC_STATS_ENABLE, val);
10353 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10354 		   tg3_flag(tp, TSO_CAPABLE)) {
10355 		val = tr32(RCVLPC_STATS_ENABLE);
10356 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10357 		tw32(RCVLPC_STATS_ENABLE, val);
10358 	} else {
10359 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10360 	}
10361 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10362 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10363 	tw32(SNDDATAI_STATSCTRL,
10364 	     (SNDDATAI_SCTRL_ENABLE |
10365 	      SNDDATAI_SCTRL_FASTUPD));
10366 
10367 	/* Setup host coalescing engine. */
10368 	tw32(HOSTCC_MODE, 0);
10369 	for (i = 0; i < 2000; i++) {
10370 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10371 			break;
10372 		udelay(10);
10373 	}
10374 
10375 	__tg3_set_coalesce(tp, &tp->coal);
10376 
10377 	if (!tg3_flag(tp, 5705_PLUS)) {
10378 		/* Status/statistics block address.  See tg3_timer,
10379 		 * the tg3_periodic_fetch_stats call there, and
10380 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10381 		 */
10382 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10383 		     ((u64) tp->stats_mapping >> 32));
10384 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10385 		     ((u64) tp->stats_mapping & 0xffffffff));
10386 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10387 
10388 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10389 
10390 		/* Clear statistics and status block memory areas */
10391 		for (i = NIC_SRAM_STATS_BLK;
10392 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10393 		     i += sizeof(u32)) {
10394 			tg3_write_mem(tp, i, 0);
10395 			udelay(40);
10396 		}
10397 	}
10398 
10399 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10400 
10401 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10402 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10403 	if (!tg3_flag(tp, 5705_PLUS))
10404 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10405 
10406 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10407 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10408 		/* reset to prevent losing 1st rx packet intermittently */
10409 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10410 		udelay(10);
10411 	}
10412 
10413 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10414 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10415 			MAC_MODE_FHDE_ENABLE;
10416 	if (tg3_flag(tp, ENABLE_APE))
10417 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10418 	if (!tg3_flag(tp, 5705_PLUS) &&
10419 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10420 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10421 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10422 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10423 	udelay(40);
10424 
10425 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10426 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10427 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10428 	 * whether used as inputs or outputs, are set by boot code after
10429 	 * reset.
10430 	 */
10431 	if (!tg3_flag(tp, IS_NIC)) {
10432 		u32 gpio_mask;
10433 
10434 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10435 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10436 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10437 
10438 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10439 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10440 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10441 
10442 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10443 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10444 
10445 		tp->grc_local_ctrl &= ~gpio_mask;
10446 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10447 
10448 		/* GPIO1 must be driven high for eeprom write protect */
10449 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10450 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10451 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10452 	}
10453 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10454 	udelay(100);
10455 
10456 	if (tg3_flag(tp, USING_MSIX)) {
10457 		val = tr32(MSGINT_MODE);
10458 		val |= MSGINT_MODE_ENABLE;
10459 		if (tp->irq_cnt > 1)
10460 			val |= MSGINT_MODE_MULTIVEC_EN;
10461 		if (!tg3_flag(tp, 1SHOT_MSI))
10462 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10463 		tw32(MSGINT_MODE, val);
10464 	}
10465 
10466 	if (!tg3_flag(tp, 5705_PLUS)) {
10467 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10468 		udelay(40);
10469 	}
10470 
10471 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10472 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10473 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10474 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10475 	       WDMAC_MODE_LNGREAD_ENAB);
10476 
10477 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10478 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10479 		if (tg3_flag(tp, TSO_CAPABLE) &&
10480 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10481 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10482 			/* nothing */
10483 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10484 			   !tg3_flag(tp, IS_5788)) {
10485 			val |= WDMAC_MODE_RX_ACCEL;
10486 		}
10487 	}
10488 
10489 	/* Enable host coalescing bug fix */
10490 	if (tg3_flag(tp, 5755_PLUS))
10491 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10492 
10493 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10494 		val |= WDMAC_MODE_BURST_ALL_DATA;
10495 
10496 	tw32_f(WDMAC_MODE, val);
10497 	udelay(40);
10498 
10499 	if (tg3_flag(tp, PCIX_MODE)) {
10500 		u16 pcix_cmd;
10501 
10502 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10503 				     &pcix_cmd);
10504 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10505 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10506 			pcix_cmd |= PCI_X_CMD_READ_2K;
10507 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10508 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10509 			pcix_cmd |= PCI_X_CMD_READ_2K;
10510 		}
10511 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10512 				      pcix_cmd);
10513 	}
10514 
10515 	tw32_f(RDMAC_MODE, rdmac_mode);
10516 	udelay(40);
10517 
10518 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10519 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10520 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10521 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10522 				break;
10523 		}
10524 		if (i < TG3_NUM_RDMA_CHANNELS) {
10525 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10526 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10527 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10528 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10529 		}
10530 	}
10531 
10532 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10533 	if (!tg3_flag(tp, 5705_PLUS))
10534 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10535 
10536 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10537 		tw32(SNDDATAC_MODE,
10538 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10539 	else
10540 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10541 
10542 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10543 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10544 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10545 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10546 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10547 	tw32(RCVDBDI_MODE, val);
10548 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10549 	if (tg3_flag(tp, HW_TSO_1) ||
10550 	    tg3_flag(tp, HW_TSO_2) ||
10551 	    tg3_flag(tp, HW_TSO_3))
10552 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10553 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10554 	if (tg3_flag(tp, ENABLE_TSS))
10555 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10556 	tw32(SNDBDI_MODE, val);
10557 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10558 
10559 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10560 		err = tg3_load_5701_a0_firmware_fix(tp);
10561 		if (err)
10562 			return err;
10563 	}
10564 
10565 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10566 		/* Ignore any errors for the firmware download. If download
10567 		 * fails, the device will operate with EEE disabled
10568 		 */
10569 		tg3_load_57766_firmware(tp);
10570 	}
10571 
10572 	if (tg3_flag(tp, TSO_CAPABLE)) {
10573 		err = tg3_load_tso_firmware(tp);
10574 		if (err)
10575 			return err;
10576 	}
10577 
10578 	tp->tx_mode = TX_MODE_ENABLE;
10579 
10580 	if (tg3_flag(tp, 5755_PLUS) ||
10581 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10582 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10583 
10584 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10585 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10586 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10587 		tp->tx_mode &= ~val;
10588 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10589 	}
10590 
10591 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10592 	udelay(100);
10593 
10594 	if (tg3_flag(tp, ENABLE_RSS)) {
10595 		u32 rss_key[10];
10596 
10597 		tg3_rss_write_indir_tbl(tp);
10598 
10599 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10600 
10601 		for (i = 0; i < 10 ; i++)
10602 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10603 	}
10604 
10605 	tp->rx_mode = RX_MODE_ENABLE;
10606 	if (tg3_flag(tp, 5755_PLUS))
10607 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10608 
10609 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10610 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10611 
10612 	if (tg3_flag(tp, ENABLE_RSS))
10613 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10614 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10615 			       RX_MODE_RSS_IPV6_HASH_EN |
10616 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10617 			       RX_MODE_RSS_IPV4_HASH_EN |
10618 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10619 
10620 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10621 	udelay(10);
10622 
10623 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10624 
10625 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10626 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10627 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10628 		udelay(10);
10629 	}
10630 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10631 	udelay(10);
10632 
10633 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10634 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10635 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10636 			/* Set drive transmission level to 1.2V  */
10637 			/* only if the signal pre-emphasis bit is not set  */
10638 			val = tr32(MAC_SERDES_CFG);
10639 			val &= 0xfffff000;
10640 			val |= 0x880;
10641 			tw32(MAC_SERDES_CFG, val);
10642 		}
10643 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10644 			tw32(MAC_SERDES_CFG, 0x616000);
10645 	}
10646 
10647 	/* Prevent chip from dropping frames when flow control
10648 	 * is enabled.
10649 	 */
10650 	if (tg3_flag(tp, 57765_CLASS))
10651 		val = 1;
10652 	else
10653 		val = 2;
10654 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10655 
10656 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10657 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10658 		/* Use hardware link auto-negotiation */
10659 		tg3_flag_set(tp, HW_AUTONEG);
10660 	}
10661 
10662 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10663 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10664 		u32 tmp;
10665 
10666 		tmp = tr32(SERDES_RX_CTRL);
10667 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10668 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10669 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10670 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10671 	}
10672 
10673 	if (!tg3_flag(tp, USE_PHYLIB)) {
10674 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10675 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10676 
10677 		err = tg3_setup_phy(tp, false);
10678 		if (err)
10679 			return err;
10680 
10681 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10682 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10683 			u32 tmp;
10684 
10685 			/* Clear CRC stats. */
10686 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10687 				tg3_writephy(tp, MII_TG3_TEST1,
10688 					     tmp | MII_TG3_TEST1_CRC_EN);
10689 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10690 			}
10691 		}
10692 	}
10693 
10694 	__tg3_set_rx_mode(tp->dev);
10695 
10696 	/* Initialize receive rules. */
10697 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10698 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10699 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10700 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10701 
10702 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10703 		limit = 8;
10704 	else
10705 		limit = 16;
10706 	if (tg3_flag(tp, ENABLE_ASF))
10707 		limit -= 4;
10708 	switch (limit) {
10709 	case 16:
10710 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10711 		/* fall through */
10712 	case 15:
10713 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10714 		/* fall through */
10715 	case 14:
10716 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10717 		/* fall through */
10718 	case 13:
10719 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10720 		/* fall through */
10721 	case 12:
10722 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10723 		/* fall through */
10724 	case 11:
10725 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10726 		/* fall through */
10727 	case 10:
10728 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10729 		/* fall through */
10730 	case 9:
10731 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10732 		/* fall through */
10733 	case 8:
10734 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10735 		/* fall through */
10736 	case 7:
10737 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10738 		/* fall through */
10739 	case 6:
10740 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10741 		/* fall through */
10742 	case 5:
10743 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10744 		/* fall through */
10745 	case 4:
10746 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10747 	case 3:
10748 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10749 	case 2:
10750 	case 1:
10751 
10752 	default:
10753 		break;
10754 	}
10755 
10756 	if (tg3_flag(tp, ENABLE_APE))
10757 		/* Write our heartbeat update interval to APE. */
10758 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10759 				APE_HOST_HEARTBEAT_INT_5SEC);
10760 
10761 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10762 
10763 	return 0;
10764 }
10765 
10766 /* Called at device open time to get the chip ready for
10767  * packet processing.  Invoked with tp->lock held.
10768  */
10769 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10770 {
10771 	/* Chip may have been just powered on. If so, the boot code may still
10772 	 * be running initialization. Wait for it to finish to avoid races in
10773 	 * accessing the hardware.
10774 	 */
10775 	tg3_enable_register_access(tp);
10776 	tg3_poll_fw(tp);
10777 
10778 	tg3_switch_clocks(tp);
10779 
10780 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10781 
10782 	return tg3_reset_hw(tp, reset_phy);
10783 }
10784 
10785 #ifdef CONFIG_TIGON3_HWMON
10786 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10787 {
10788 	int i;
10789 
10790 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10791 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10792 
10793 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10794 		off += len;
10795 
10796 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10797 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10798 			memset(ocir, 0, TG3_OCIR_LEN);
10799 	}
10800 }
10801 
10802 /* sysfs attributes for hwmon */
10803 static ssize_t tg3_show_temp(struct device *dev,
10804 			     struct device_attribute *devattr, char *buf)
10805 {
10806 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10807 	struct tg3 *tp = dev_get_drvdata(dev);
10808 	u32 temperature;
10809 
10810 	spin_lock_bh(&tp->lock);
10811 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10812 				sizeof(temperature));
10813 	spin_unlock_bh(&tp->lock);
10814 	return sprintf(buf, "%u\n", temperature * 1000);
10815 }
10816 
10817 
10818 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10819 			  TG3_TEMP_SENSOR_OFFSET);
10820 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10821 			  TG3_TEMP_CAUTION_OFFSET);
10822 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10823 			  TG3_TEMP_MAX_OFFSET);
10824 
10825 static struct attribute *tg3_attrs[] = {
10826 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10827 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10828 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10829 	NULL
10830 };
10831 ATTRIBUTE_GROUPS(tg3);
10832 
10833 static void tg3_hwmon_close(struct tg3 *tp)
10834 {
10835 	if (tp->hwmon_dev) {
10836 		hwmon_device_unregister(tp->hwmon_dev);
10837 		tp->hwmon_dev = NULL;
10838 	}
10839 }
10840 
10841 static void tg3_hwmon_open(struct tg3 *tp)
10842 {
10843 	int i;
10844 	u32 size = 0;
10845 	struct pci_dev *pdev = tp->pdev;
10846 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10847 
10848 	tg3_sd_scan_scratchpad(tp, ocirs);
10849 
10850 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10851 		if (!ocirs[i].src_data_length)
10852 			continue;
10853 
10854 		size += ocirs[i].src_hdr_length;
10855 		size += ocirs[i].src_data_length;
10856 	}
10857 
10858 	if (!size)
10859 		return;
10860 
10861 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10862 							  tp, tg3_groups);
10863 	if (IS_ERR(tp->hwmon_dev)) {
10864 		tp->hwmon_dev = NULL;
10865 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10866 	}
10867 }
10868 #else
10869 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10870 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10871 #endif /* CONFIG_TIGON3_HWMON */
10872 
10873 
10874 #define TG3_STAT_ADD32(PSTAT, REG) \
10875 do {	u32 __val = tr32(REG); \
10876 	(PSTAT)->low += __val; \
10877 	if ((PSTAT)->low < __val) \
10878 		(PSTAT)->high += 1; \
10879 } while (0)
10880 
10881 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10882 {
10883 	struct tg3_hw_stats *sp = tp->hw_stats;
10884 
10885 	if (!tp->link_up)
10886 		return;
10887 
10888 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10889 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10890 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10891 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10892 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10893 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10894 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10895 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10896 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10897 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10898 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10899 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10900 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10901 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10902 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10903 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10904 		u32 val;
10905 
10906 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10907 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10908 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10909 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10910 	}
10911 
10912 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10913 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10914 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10915 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10916 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10917 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10918 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10919 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10920 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10921 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10922 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10923 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10924 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10925 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10926 
10927 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10928 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10929 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10930 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10931 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10932 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10933 	} else {
10934 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10935 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10936 		if (val) {
10937 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10938 			sp->rx_discards.low += val;
10939 			if (sp->rx_discards.low < val)
10940 				sp->rx_discards.high += 1;
10941 		}
10942 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10943 	}
10944 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10945 }
10946 
10947 static void tg3_chk_missed_msi(struct tg3 *tp)
10948 {
10949 	u32 i;
10950 
10951 	for (i = 0; i < tp->irq_cnt; i++) {
10952 		struct tg3_napi *tnapi = &tp->napi[i];
10953 
10954 		if (tg3_has_work(tnapi)) {
10955 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10956 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10957 				if (tnapi->chk_msi_cnt < 1) {
10958 					tnapi->chk_msi_cnt++;
10959 					return;
10960 				}
10961 				tg3_msi(0, tnapi);
10962 			}
10963 		}
10964 		tnapi->chk_msi_cnt = 0;
10965 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10966 		tnapi->last_tx_cons = tnapi->tx_cons;
10967 	}
10968 }
10969 
10970 static void tg3_timer(struct timer_list *t)
10971 {
10972 	struct tg3 *tp = from_timer(tp, t, timer);
10973 
10974 	spin_lock(&tp->lock);
10975 
10976 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10977 		spin_unlock(&tp->lock);
10978 		goto restart_timer;
10979 	}
10980 
10981 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10982 	    tg3_flag(tp, 57765_CLASS))
10983 		tg3_chk_missed_msi(tp);
10984 
10985 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10986 		/* BCM4785: Flush posted writes from GbE to host memory. */
10987 		tr32(HOSTCC_MODE);
10988 	}
10989 
10990 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10991 		/* All of this garbage is because when using non-tagged
10992 		 * IRQ status the mailbox/status_block protocol the chip
10993 		 * uses with the cpu is race prone.
10994 		 */
10995 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10996 			tw32(GRC_LOCAL_CTRL,
10997 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10998 		} else {
10999 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11000 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11001 		}
11002 
11003 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11004 			spin_unlock(&tp->lock);
11005 			tg3_reset_task_schedule(tp);
11006 			goto restart_timer;
11007 		}
11008 	}
11009 
11010 	/* This part only runs once per second. */
11011 	if (!--tp->timer_counter) {
11012 		if (tg3_flag(tp, 5705_PLUS))
11013 			tg3_periodic_fetch_stats(tp);
11014 
11015 		if (tp->setlpicnt && !--tp->setlpicnt)
11016 			tg3_phy_eee_enable(tp);
11017 
11018 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11019 			u32 mac_stat;
11020 			int phy_event;
11021 
11022 			mac_stat = tr32(MAC_STATUS);
11023 
11024 			phy_event = 0;
11025 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11026 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11027 					phy_event = 1;
11028 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11029 				phy_event = 1;
11030 
11031 			if (phy_event)
11032 				tg3_setup_phy(tp, false);
11033 		} else if (tg3_flag(tp, POLL_SERDES)) {
11034 			u32 mac_stat = tr32(MAC_STATUS);
11035 			int need_setup = 0;
11036 
11037 			if (tp->link_up &&
11038 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11039 				need_setup = 1;
11040 			}
11041 			if (!tp->link_up &&
11042 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11043 					 MAC_STATUS_SIGNAL_DET))) {
11044 				need_setup = 1;
11045 			}
11046 			if (need_setup) {
11047 				if (!tp->serdes_counter) {
11048 					tw32_f(MAC_MODE,
11049 					     (tp->mac_mode &
11050 					      ~MAC_MODE_PORT_MODE_MASK));
11051 					udelay(40);
11052 					tw32_f(MAC_MODE, tp->mac_mode);
11053 					udelay(40);
11054 				}
11055 				tg3_setup_phy(tp, false);
11056 			}
11057 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11058 			   tg3_flag(tp, 5780_CLASS)) {
11059 			tg3_serdes_parallel_detect(tp);
11060 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11061 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11062 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11063 					 TG3_CPMU_STATUS_LINK_MASK);
11064 
11065 			if (link_up != tp->link_up)
11066 				tg3_setup_phy(tp, false);
11067 		}
11068 
11069 		tp->timer_counter = tp->timer_multiplier;
11070 	}
11071 
11072 	/* Heartbeat is only sent once every 2 seconds.
11073 	 *
11074 	 * The heartbeat is to tell the ASF firmware that the host
11075 	 * driver is still alive.  In the event that the OS crashes,
11076 	 * ASF needs to reset the hardware to free up the FIFO space
11077 	 * that may be filled with rx packets destined for the host.
11078 	 * If the FIFO is full, ASF will no longer function properly.
11079 	 *
11080 	 * Unintended resets have been reported on real time kernels
11081 	 * where the timer doesn't run on time.  Netpoll will also have
11082 	 * same problem.
11083 	 *
11084 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11085 	 * to check the ring condition when the heartbeat is expiring
11086 	 * before doing the reset.  This will prevent most unintended
11087 	 * resets.
11088 	 */
11089 	if (!--tp->asf_counter) {
11090 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11091 			tg3_wait_for_event_ack(tp);
11092 
11093 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11094 				      FWCMD_NICDRV_ALIVE3);
11095 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11096 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11097 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11098 
11099 			tg3_generate_fw_event(tp);
11100 		}
11101 		tp->asf_counter = tp->asf_multiplier;
11102 	}
11103 
11104 	/* Update the APE heartbeat every 5 seconds.*/
11105 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11106 
11107 	spin_unlock(&tp->lock);
11108 
11109 restart_timer:
11110 	tp->timer.expires = jiffies + tp->timer_offset;
11111 	add_timer(&tp->timer);
11112 }
11113 
11114 static void tg3_timer_init(struct tg3 *tp)
11115 {
11116 	if (tg3_flag(tp, TAGGED_STATUS) &&
11117 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11118 	    !tg3_flag(tp, 57765_CLASS))
11119 		tp->timer_offset = HZ;
11120 	else
11121 		tp->timer_offset = HZ / 10;
11122 
11123 	BUG_ON(tp->timer_offset > HZ);
11124 
11125 	tp->timer_multiplier = (HZ / tp->timer_offset);
11126 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11127 			     TG3_FW_UPDATE_FREQ_SEC;
11128 
11129 	timer_setup(&tp->timer, tg3_timer, 0);
11130 }
11131 
11132 static void tg3_timer_start(struct tg3 *tp)
11133 {
11134 	tp->asf_counter   = tp->asf_multiplier;
11135 	tp->timer_counter = tp->timer_multiplier;
11136 
11137 	tp->timer.expires = jiffies + tp->timer_offset;
11138 	add_timer(&tp->timer);
11139 }
11140 
11141 static void tg3_timer_stop(struct tg3 *tp)
11142 {
11143 	del_timer_sync(&tp->timer);
11144 }
11145 
11146 /* Restart hardware after configuration changes, self-test, etc.
11147  * Invoked with tp->lock held.
11148  */
11149 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11150 	__releases(tp->lock)
11151 	__acquires(tp->lock)
11152 {
11153 	int err;
11154 
11155 	err = tg3_init_hw(tp, reset_phy);
11156 	if (err) {
11157 		netdev_err(tp->dev,
11158 			   "Failed to re-initialize device, aborting\n");
11159 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11160 		tg3_full_unlock(tp);
11161 		tg3_timer_stop(tp);
11162 		tp->irq_sync = 0;
11163 		tg3_napi_enable(tp);
11164 		dev_close(tp->dev);
11165 		tg3_full_lock(tp, 0);
11166 	}
11167 	return err;
11168 }
11169 
11170 static void tg3_reset_task(struct work_struct *work)
11171 {
11172 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11173 	int err;
11174 
11175 	rtnl_lock();
11176 	tg3_full_lock(tp, 0);
11177 
11178 	if (!netif_running(tp->dev)) {
11179 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11180 		tg3_full_unlock(tp);
11181 		rtnl_unlock();
11182 		return;
11183 	}
11184 
11185 	tg3_full_unlock(tp);
11186 
11187 	tg3_phy_stop(tp);
11188 
11189 	tg3_netif_stop(tp);
11190 
11191 	tg3_full_lock(tp, 1);
11192 
11193 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11194 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11195 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11196 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11197 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11198 	}
11199 
11200 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11201 	err = tg3_init_hw(tp, true);
11202 	if (err)
11203 		goto out;
11204 
11205 	tg3_netif_start(tp);
11206 
11207 out:
11208 	tg3_full_unlock(tp);
11209 
11210 	if (!err)
11211 		tg3_phy_start(tp);
11212 
11213 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11214 	rtnl_unlock();
11215 }
11216 
11217 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11218 {
11219 	irq_handler_t fn;
11220 	unsigned long flags;
11221 	char *name;
11222 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11223 
11224 	if (tp->irq_cnt == 1)
11225 		name = tp->dev->name;
11226 	else {
11227 		name = &tnapi->irq_lbl[0];
11228 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11229 			snprintf(name, IFNAMSIZ,
11230 				 "%s-txrx-%d", tp->dev->name, irq_num);
11231 		else if (tnapi->tx_buffers)
11232 			snprintf(name, IFNAMSIZ,
11233 				 "%s-tx-%d", tp->dev->name, irq_num);
11234 		else if (tnapi->rx_rcb)
11235 			snprintf(name, IFNAMSIZ,
11236 				 "%s-rx-%d", tp->dev->name, irq_num);
11237 		else
11238 			snprintf(name, IFNAMSIZ,
11239 				 "%s-%d", tp->dev->name, irq_num);
11240 		name[IFNAMSIZ-1] = 0;
11241 	}
11242 
11243 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11244 		fn = tg3_msi;
11245 		if (tg3_flag(tp, 1SHOT_MSI))
11246 			fn = tg3_msi_1shot;
11247 		flags = 0;
11248 	} else {
11249 		fn = tg3_interrupt;
11250 		if (tg3_flag(tp, TAGGED_STATUS))
11251 			fn = tg3_interrupt_tagged;
11252 		flags = IRQF_SHARED;
11253 	}
11254 
11255 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11256 }
11257 
11258 static int tg3_test_interrupt(struct tg3 *tp)
11259 {
11260 	struct tg3_napi *tnapi = &tp->napi[0];
11261 	struct net_device *dev = tp->dev;
11262 	int err, i, intr_ok = 0;
11263 	u32 val;
11264 
11265 	if (!netif_running(dev))
11266 		return -ENODEV;
11267 
11268 	tg3_disable_ints(tp);
11269 
11270 	free_irq(tnapi->irq_vec, tnapi);
11271 
11272 	/*
11273 	 * Turn off MSI one shot mode.  Otherwise this test has no
11274 	 * observable way to know whether the interrupt was delivered.
11275 	 */
11276 	if (tg3_flag(tp, 57765_PLUS)) {
11277 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11278 		tw32(MSGINT_MODE, val);
11279 	}
11280 
11281 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11282 			  IRQF_SHARED, dev->name, tnapi);
11283 	if (err)
11284 		return err;
11285 
11286 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11287 	tg3_enable_ints(tp);
11288 
11289 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11290 	       tnapi->coal_now);
11291 
11292 	for (i = 0; i < 5; i++) {
11293 		u32 int_mbox, misc_host_ctrl;
11294 
11295 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11296 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11297 
11298 		if ((int_mbox != 0) ||
11299 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11300 			intr_ok = 1;
11301 			break;
11302 		}
11303 
11304 		if (tg3_flag(tp, 57765_PLUS) &&
11305 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11306 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11307 
11308 		msleep(10);
11309 	}
11310 
11311 	tg3_disable_ints(tp);
11312 
11313 	free_irq(tnapi->irq_vec, tnapi);
11314 
11315 	err = tg3_request_irq(tp, 0);
11316 
11317 	if (err)
11318 		return err;
11319 
11320 	if (intr_ok) {
11321 		/* Reenable MSI one shot mode. */
11322 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11323 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11324 			tw32(MSGINT_MODE, val);
11325 		}
11326 		return 0;
11327 	}
11328 
11329 	return -EIO;
11330 }
11331 
11332 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11333  * successfully restored
11334  */
11335 static int tg3_test_msi(struct tg3 *tp)
11336 {
11337 	int err;
11338 	u16 pci_cmd;
11339 
11340 	if (!tg3_flag(tp, USING_MSI))
11341 		return 0;
11342 
11343 	/* Turn off SERR reporting in case MSI terminates with Master
11344 	 * Abort.
11345 	 */
11346 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11347 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11348 			      pci_cmd & ~PCI_COMMAND_SERR);
11349 
11350 	err = tg3_test_interrupt(tp);
11351 
11352 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11353 
11354 	if (!err)
11355 		return 0;
11356 
11357 	/* other failures */
11358 	if (err != -EIO)
11359 		return err;
11360 
11361 	/* MSI test failed, go back to INTx mode */
11362 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11363 		    "to INTx mode. Please report this failure to the PCI "
11364 		    "maintainer and include system chipset information\n");
11365 
11366 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11367 
11368 	pci_disable_msi(tp->pdev);
11369 
11370 	tg3_flag_clear(tp, USING_MSI);
11371 	tp->napi[0].irq_vec = tp->pdev->irq;
11372 
11373 	err = tg3_request_irq(tp, 0);
11374 	if (err)
11375 		return err;
11376 
11377 	/* Need to reset the chip because the MSI cycle may have terminated
11378 	 * with Master Abort.
11379 	 */
11380 	tg3_full_lock(tp, 1);
11381 
11382 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11383 	err = tg3_init_hw(tp, true);
11384 
11385 	tg3_full_unlock(tp);
11386 
11387 	if (err)
11388 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11389 
11390 	return err;
11391 }
11392 
11393 static int tg3_request_firmware(struct tg3 *tp)
11394 {
11395 	const struct tg3_firmware_hdr *fw_hdr;
11396 
11397 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11398 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11399 			   tp->fw_needed);
11400 		return -ENOENT;
11401 	}
11402 
11403 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11404 
11405 	/* Firmware blob starts with version numbers, followed by
11406 	 * start address and _full_ length including BSS sections
11407 	 * (which must be longer than the actual data, of course
11408 	 */
11409 
11410 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11411 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11412 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11413 			   tp->fw_len, tp->fw_needed);
11414 		release_firmware(tp->fw);
11415 		tp->fw = NULL;
11416 		return -EINVAL;
11417 	}
11418 
11419 	/* We no longer need firmware; we have it. */
11420 	tp->fw_needed = NULL;
11421 	return 0;
11422 }
11423 
11424 static u32 tg3_irq_count(struct tg3 *tp)
11425 {
11426 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11427 
11428 	if (irq_cnt > 1) {
11429 		/* We want as many rx rings enabled as there are cpus.
11430 		 * In multiqueue MSI-X mode, the first MSI-X vector
11431 		 * only deals with link interrupts, etc, so we add
11432 		 * one to the number of vectors we are requesting.
11433 		 */
11434 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11435 	}
11436 
11437 	return irq_cnt;
11438 }
11439 
11440 static bool tg3_enable_msix(struct tg3 *tp)
11441 {
11442 	int i, rc;
11443 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11444 
11445 	tp->txq_cnt = tp->txq_req;
11446 	tp->rxq_cnt = tp->rxq_req;
11447 	if (!tp->rxq_cnt)
11448 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11449 	if (tp->rxq_cnt > tp->rxq_max)
11450 		tp->rxq_cnt = tp->rxq_max;
11451 
11452 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11453 	 * scheduling of the TX rings can cause starvation of rings with
11454 	 * small packets when other rings have TSO or jumbo packets.
11455 	 */
11456 	if (!tp->txq_req)
11457 		tp->txq_cnt = 1;
11458 
11459 	tp->irq_cnt = tg3_irq_count(tp);
11460 
11461 	for (i = 0; i < tp->irq_max; i++) {
11462 		msix_ent[i].entry  = i;
11463 		msix_ent[i].vector = 0;
11464 	}
11465 
11466 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11467 	if (rc < 0) {
11468 		return false;
11469 	} else if (rc < tp->irq_cnt) {
11470 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11471 			      tp->irq_cnt, rc);
11472 		tp->irq_cnt = rc;
11473 		tp->rxq_cnt = max(rc - 1, 1);
11474 		if (tp->txq_cnt)
11475 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11476 	}
11477 
11478 	for (i = 0; i < tp->irq_max; i++)
11479 		tp->napi[i].irq_vec = msix_ent[i].vector;
11480 
11481 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11482 		pci_disable_msix(tp->pdev);
11483 		return false;
11484 	}
11485 
11486 	if (tp->irq_cnt == 1)
11487 		return true;
11488 
11489 	tg3_flag_set(tp, ENABLE_RSS);
11490 
11491 	if (tp->txq_cnt > 1)
11492 		tg3_flag_set(tp, ENABLE_TSS);
11493 
11494 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11495 
11496 	return true;
11497 }
11498 
11499 static void tg3_ints_init(struct tg3 *tp)
11500 {
11501 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11502 	    !tg3_flag(tp, TAGGED_STATUS)) {
11503 		/* All MSI supporting chips should support tagged
11504 		 * status.  Assert that this is the case.
11505 		 */
11506 		netdev_warn(tp->dev,
11507 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11508 		goto defcfg;
11509 	}
11510 
11511 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11512 		tg3_flag_set(tp, USING_MSIX);
11513 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11514 		tg3_flag_set(tp, USING_MSI);
11515 
11516 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11517 		u32 msi_mode = tr32(MSGINT_MODE);
11518 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11519 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11520 		if (!tg3_flag(tp, 1SHOT_MSI))
11521 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11522 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11523 	}
11524 defcfg:
11525 	if (!tg3_flag(tp, USING_MSIX)) {
11526 		tp->irq_cnt = 1;
11527 		tp->napi[0].irq_vec = tp->pdev->irq;
11528 	}
11529 
11530 	if (tp->irq_cnt == 1) {
11531 		tp->txq_cnt = 1;
11532 		tp->rxq_cnt = 1;
11533 		netif_set_real_num_tx_queues(tp->dev, 1);
11534 		netif_set_real_num_rx_queues(tp->dev, 1);
11535 	}
11536 }
11537 
11538 static void tg3_ints_fini(struct tg3 *tp)
11539 {
11540 	if (tg3_flag(tp, USING_MSIX))
11541 		pci_disable_msix(tp->pdev);
11542 	else if (tg3_flag(tp, USING_MSI))
11543 		pci_disable_msi(tp->pdev);
11544 	tg3_flag_clear(tp, USING_MSI);
11545 	tg3_flag_clear(tp, USING_MSIX);
11546 	tg3_flag_clear(tp, ENABLE_RSS);
11547 	tg3_flag_clear(tp, ENABLE_TSS);
11548 }
11549 
11550 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11551 		     bool init)
11552 {
11553 	struct net_device *dev = tp->dev;
11554 	int i, err;
11555 
11556 	/*
11557 	 * Setup interrupts first so we know how
11558 	 * many NAPI resources to allocate
11559 	 */
11560 	tg3_ints_init(tp);
11561 
11562 	tg3_rss_check_indir_tbl(tp);
11563 
11564 	/* The placement of this call is tied
11565 	 * to the setup and use of Host TX descriptors.
11566 	 */
11567 	err = tg3_alloc_consistent(tp);
11568 	if (err)
11569 		goto out_ints_fini;
11570 
11571 	tg3_napi_init(tp);
11572 
11573 	tg3_napi_enable(tp);
11574 
11575 	for (i = 0; i < tp->irq_cnt; i++) {
11576 		err = tg3_request_irq(tp, i);
11577 		if (err) {
11578 			for (i--; i >= 0; i--) {
11579 				struct tg3_napi *tnapi = &tp->napi[i];
11580 
11581 				free_irq(tnapi->irq_vec, tnapi);
11582 			}
11583 			goto out_napi_fini;
11584 		}
11585 	}
11586 
11587 	tg3_full_lock(tp, 0);
11588 
11589 	if (init)
11590 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11591 
11592 	err = tg3_init_hw(tp, reset_phy);
11593 	if (err) {
11594 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11595 		tg3_free_rings(tp);
11596 	}
11597 
11598 	tg3_full_unlock(tp);
11599 
11600 	if (err)
11601 		goto out_free_irq;
11602 
11603 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11604 		err = tg3_test_msi(tp);
11605 
11606 		if (err) {
11607 			tg3_full_lock(tp, 0);
11608 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11609 			tg3_free_rings(tp);
11610 			tg3_full_unlock(tp);
11611 
11612 			goto out_napi_fini;
11613 		}
11614 
11615 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11616 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11617 
11618 			tw32(PCIE_TRANSACTION_CFG,
11619 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11620 		}
11621 	}
11622 
11623 	tg3_phy_start(tp);
11624 
11625 	tg3_hwmon_open(tp);
11626 
11627 	tg3_full_lock(tp, 0);
11628 
11629 	tg3_timer_start(tp);
11630 	tg3_flag_set(tp, INIT_COMPLETE);
11631 	tg3_enable_ints(tp);
11632 
11633 	tg3_ptp_resume(tp);
11634 
11635 	tg3_full_unlock(tp);
11636 
11637 	netif_tx_start_all_queues(dev);
11638 
11639 	/*
11640 	 * Reset loopback feature if it was turned on while the device was down
11641 	 * make sure that it's installed properly now.
11642 	 */
11643 	if (dev->features & NETIF_F_LOOPBACK)
11644 		tg3_set_loopback(dev, dev->features);
11645 
11646 	return 0;
11647 
11648 out_free_irq:
11649 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11650 		struct tg3_napi *tnapi = &tp->napi[i];
11651 		free_irq(tnapi->irq_vec, tnapi);
11652 	}
11653 
11654 out_napi_fini:
11655 	tg3_napi_disable(tp);
11656 	tg3_napi_fini(tp);
11657 	tg3_free_consistent(tp);
11658 
11659 out_ints_fini:
11660 	tg3_ints_fini(tp);
11661 
11662 	return err;
11663 }
11664 
11665 static void tg3_stop(struct tg3 *tp)
11666 {
11667 	int i;
11668 
11669 	tg3_reset_task_cancel(tp);
11670 	tg3_netif_stop(tp);
11671 
11672 	tg3_timer_stop(tp);
11673 
11674 	tg3_hwmon_close(tp);
11675 
11676 	tg3_phy_stop(tp);
11677 
11678 	tg3_full_lock(tp, 1);
11679 
11680 	tg3_disable_ints(tp);
11681 
11682 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11683 	tg3_free_rings(tp);
11684 	tg3_flag_clear(tp, INIT_COMPLETE);
11685 
11686 	tg3_full_unlock(tp);
11687 
11688 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11689 		struct tg3_napi *tnapi = &tp->napi[i];
11690 		free_irq(tnapi->irq_vec, tnapi);
11691 	}
11692 
11693 	tg3_ints_fini(tp);
11694 
11695 	tg3_napi_fini(tp);
11696 
11697 	tg3_free_consistent(tp);
11698 }
11699 
11700 static int tg3_open(struct net_device *dev)
11701 {
11702 	struct tg3 *tp = netdev_priv(dev);
11703 	int err;
11704 
11705 	if (tp->pcierr_recovery) {
11706 		netdev_err(dev, "Failed to open device. PCI error recovery "
11707 			   "in progress\n");
11708 		return -EAGAIN;
11709 	}
11710 
11711 	if (tp->fw_needed) {
11712 		err = tg3_request_firmware(tp);
11713 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11714 			if (err) {
11715 				netdev_warn(tp->dev, "EEE capability disabled\n");
11716 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11717 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11718 				netdev_warn(tp->dev, "EEE capability restored\n");
11719 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11720 			}
11721 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11722 			if (err)
11723 				return err;
11724 		} else if (err) {
11725 			netdev_warn(tp->dev, "TSO capability disabled\n");
11726 			tg3_flag_clear(tp, TSO_CAPABLE);
11727 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11728 			netdev_notice(tp->dev, "TSO capability restored\n");
11729 			tg3_flag_set(tp, TSO_CAPABLE);
11730 		}
11731 	}
11732 
11733 	tg3_carrier_off(tp);
11734 
11735 	err = tg3_power_up(tp);
11736 	if (err)
11737 		return err;
11738 
11739 	tg3_full_lock(tp, 0);
11740 
11741 	tg3_disable_ints(tp);
11742 	tg3_flag_clear(tp, INIT_COMPLETE);
11743 
11744 	tg3_full_unlock(tp);
11745 
11746 	err = tg3_start(tp,
11747 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11748 			true, true);
11749 	if (err) {
11750 		tg3_frob_aux_power(tp, false);
11751 		pci_set_power_state(tp->pdev, PCI_D3hot);
11752 	}
11753 
11754 	return err;
11755 }
11756 
11757 static int tg3_close(struct net_device *dev)
11758 {
11759 	struct tg3 *tp = netdev_priv(dev);
11760 
11761 	if (tp->pcierr_recovery) {
11762 		netdev_err(dev, "Failed to close device. PCI error recovery "
11763 			   "in progress\n");
11764 		return -EAGAIN;
11765 	}
11766 
11767 	tg3_stop(tp);
11768 
11769 	if (pci_device_is_present(tp->pdev)) {
11770 		tg3_power_down_prepare(tp);
11771 
11772 		tg3_carrier_off(tp);
11773 	}
11774 	return 0;
11775 }
11776 
11777 static inline u64 get_stat64(tg3_stat64_t *val)
11778 {
11779        return ((u64)val->high << 32) | ((u64)val->low);
11780 }
11781 
11782 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11783 {
11784 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11785 
11786 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11787 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11788 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11789 		u32 val;
11790 
11791 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11792 			tg3_writephy(tp, MII_TG3_TEST1,
11793 				     val | MII_TG3_TEST1_CRC_EN);
11794 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11795 		} else
11796 			val = 0;
11797 
11798 		tp->phy_crc_errors += val;
11799 
11800 		return tp->phy_crc_errors;
11801 	}
11802 
11803 	return get_stat64(&hw_stats->rx_fcs_errors);
11804 }
11805 
11806 #define ESTAT_ADD(member) \
11807 	estats->member =	old_estats->member + \
11808 				get_stat64(&hw_stats->member)
11809 
11810 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11811 {
11812 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11813 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11814 
11815 	ESTAT_ADD(rx_octets);
11816 	ESTAT_ADD(rx_fragments);
11817 	ESTAT_ADD(rx_ucast_packets);
11818 	ESTAT_ADD(rx_mcast_packets);
11819 	ESTAT_ADD(rx_bcast_packets);
11820 	ESTAT_ADD(rx_fcs_errors);
11821 	ESTAT_ADD(rx_align_errors);
11822 	ESTAT_ADD(rx_xon_pause_rcvd);
11823 	ESTAT_ADD(rx_xoff_pause_rcvd);
11824 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11825 	ESTAT_ADD(rx_xoff_entered);
11826 	ESTAT_ADD(rx_frame_too_long_errors);
11827 	ESTAT_ADD(rx_jabbers);
11828 	ESTAT_ADD(rx_undersize_packets);
11829 	ESTAT_ADD(rx_in_length_errors);
11830 	ESTAT_ADD(rx_out_length_errors);
11831 	ESTAT_ADD(rx_64_or_less_octet_packets);
11832 	ESTAT_ADD(rx_65_to_127_octet_packets);
11833 	ESTAT_ADD(rx_128_to_255_octet_packets);
11834 	ESTAT_ADD(rx_256_to_511_octet_packets);
11835 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11836 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11837 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11838 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11839 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11840 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11841 
11842 	ESTAT_ADD(tx_octets);
11843 	ESTAT_ADD(tx_collisions);
11844 	ESTAT_ADD(tx_xon_sent);
11845 	ESTAT_ADD(tx_xoff_sent);
11846 	ESTAT_ADD(tx_flow_control);
11847 	ESTAT_ADD(tx_mac_errors);
11848 	ESTAT_ADD(tx_single_collisions);
11849 	ESTAT_ADD(tx_mult_collisions);
11850 	ESTAT_ADD(tx_deferred);
11851 	ESTAT_ADD(tx_excessive_collisions);
11852 	ESTAT_ADD(tx_late_collisions);
11853 	ESTAT_ADD(tx_collide_2times);
11854 	ESTAT_ADD(tx_collide_3times);
11855 	ESTAT_ADD(tx_collide_4times);
11856 	ESTAT_ADD(tx_collide_5times);
11857 	ESTAT_ADD(tx_collide_6times);
11858 	ESTAT_ADD(tx_collide_7times);
11859 	ESTAT_ADD(tx_collide_8times);
11860 	ESTAT_ADD(tx_collide_9times);
11861 	ESTAT_ADD(tx_collide_10times);
11862 	ESTAT_ADD(tx_collide_11times);
11863 	ESTAT_ADD(tx_collide_12times);
11864 	ESTAT_ADD(tx_collide_13times);
11865 	ESTAT_ADD(tx_collide_14times);
11866 	ESTAT_ADD(tx_collide_15times);
11867 	ESTAT_ADD(tx_ucast_packets);
11868 	ESTAT_ADD(tx_mcast_packets);
11869 	ESTAT_ADD(tx_bcast_packets);
11870 	ESTAT_ADD(tx_carrier_sense_errors);
11871 	ESTAT_ADD(tx_discards);
11872 	ESTAT_ADD(tx_errors);
11873 
11874 	ESTAT_ADD(dma_writeq_full);
11875 	ESTAT_ADD(dma_write_prioq_full);
11876 	ESTAT_ADD(rxbds_empty);
11877 	ESTAT_ADD(rx_discards);
11878 	ESTAT_ADD(rx_errors);
11879 	ESTAT_ADD(rx_threshold_hit);
11880 
11881 	ESTAT_ADD(dma_readq_full);
11882 	ESTAT_ADD(dma_read_prioq_full);
11883 	ESTAT_ADD(tx_comp_queue_full);
11884 
11885 	ESTAT_ADD(ring_set_send_prod_index);
11886 	ESTAT_ADD(ring_status_update);
11887 	ESTAT_ADD(nic_irqs);
11888 	ESTAT_ADD(nic_avoided_irqs);
11889 	ESTAT_ADD(nic_tx_threshold_hit);
11890 
11891 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11892 }
11893 
11894 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11895 {
11896 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11897 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11898 
11899 	stats->rx_packets = old_stats->rx_packets +
11900 		get_stat64(&hw_stats->rx_ucast_packets) +
11901 		get_stat64(&hw_stats->rx_mcast_packets) +
11902 		get_stat64(&hw_stats->rx_bcast_packets);
11903 
11904 	stats->tx_packets = old_stats->tx_packets +
11905 		get_stat64(&hw_stats->tx_ucast_packets) +
11906 		get_stat64(&hw_stats->tx_mcast_packets) +
11907 		get_stat64(&hw_stats->tx_bcast_packets);
11908 
11909 	stats->rx_bytes = old_stats->rx_bytes +
11910 		get_stat64(&hw_stats->rx_octets);
11911 	stats->tx_bytes = old_stats->tx_bytes +
11912 		get_stat64(&hw_stats->tx_octets);
11913 
11914 	stats->rx_errors = old_stats->rx_errors +
11915 		get_stat64(&hw_stats->rx_errors);
11916 	stats->tx_errors = old_stats->tx_errors +
11917 		get_stat64(&hw_stats->tx_errors) +
11918 		get_stat64(&hw_stats->tx_mac_errors) +
11919 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11920 		get_stat64(&hw_stats->tx_discards);
11921 
11922 	stats->multicast = old_stats->multicast +
11923 		get_stat64(&hw_stats->rx_mcast_packets);
11924 	stats->collisions = old_stats->collisions +
11925 		get_stat64(&hw_stats->tx_collisions);
11926 
11927 	stats->rx_length_errors = old_stats->rx_length_errors +
11928 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11929 		get_stat64(&hw_stats->rx_undersize_packets);
11930 
11931 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11932 		get_stat64(&hw_stats->rx_align_errors);
11933 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11934 		get_stat64(&hw_stats->tx_discards);
11935 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11936 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11937 
11938 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11939 		tg3_calc_crc_errors(tp);
11940 
11941 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11942 		get_stat64(&hw_stats->rx_discards);
11943 
11944 	stats->rx_dropped = tp->rx_dropped;
11945 	stats->tx_dropped = tp->tx_dropped;
11946 }
11947 
11948 static int tg3_get_regs_len(struct net_device *dev)
11949 {
11950 	return TG3_REG_BLK_SIZE;
11951 }
11952 
11953 static void tg3_get_regs(struct net_device *dev,
11954 		struct ethtool_regs *regs, void *_p)
11955 {
11956 	struct tg3 *tp = netdev_priv(dev);
11957 
11958 	regs->version = 0;
11959 
11960 	memset(_p, 0, TG3_REG_BLK_SIZE);
11961 
11962 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11963 		return;
11964 
11965 	tg3_full_lock(tp, 0);
11966 
11967 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11968 
11969 	tg3_full_unlock(tp);
11970 }
11971 
11972 static int tg3_get_eeprom_len(struct net_device *dev)
11973 {
11974 	struct tg3 *tp = netdev_priv(dev);
11975 
11976 	return tp->nvram_size;
11977 }
11978 
11979 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11980 {
11981 	struct tg3 *tp = netdev_priv(dev);
11982 	int ret, cpmu_restore = 0;
11983 	u8  *pd;
11984 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11985 	__be32 val;
11986 
11987 	if (tg3_flag(tp, NO_NVRAM))
11988 		return -EINVAL;
11989 
11990 	offset = eeprom->offset;
11991 	len = eeprom->len;
11992 	eeprom->len = 0;
11993 
11994 	eeprom->magic = TG3_EEPROM_MAGIC;
11995 
11996 	/* Override clock, link aware and link idle modes */
11997 	if (tg3_flag(tp, CPMU_PRESENT)) {
11998 		cpmu_val = tr32(TG3_CPMU_CTRL);
11999 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12000 				CPMU_CTRL_LINK_IDLE_MODE)) {
12001 			tw32(TG3_CPMU_CTRL, cpmu_val &
12002 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12003 					     CPMU_CTRL_LINK_IDLE_MODE));
12004 			cpmu_restore = 1;
12005 		}
12006 	}
12007 	tg3_override_clk(tp);
12008 
12009 	if (offset & 3) {
12010 		/* adjustments to start on required 4 byte boundary */
12011 		b_offset = offset & 3;
12012 		b_count = 4 - b_offset;
12013 		if (b_count > len) {
12014 			/* i.e. offset=1 len=2 */
12015 			b_count = len;
12016 		}
12017 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12018 		if (ret)
12019 			goto eeprom_done;
12020 		memcpy(data, ((char *)&val) + b_offset, b_count);
12021 		len -= b_count;
12022 		offset += b_count;
12023 		eeprom->len += b_count;
12024 	}
12025 
12026 	/* read bytes up to the last 4 byte boundary */
12027 	pd = &data[eeprom->len];
12028 	for (i = 0; i < (len - (len & 3)); i += 4) {
12029 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12030 		if (ret) {
12031 			if (i)
12032 				i -= 4;
12033 			eeprom->len += i;
12034 			goto eeprom_done;
12035 		}
12036 		memcpy(pd + i, &val, 4);
12037 		if (need_resched()) {
12038 			if (signal_pending(current)) {
12039 				eeprom->len += i;
12040 				ret = -EINTR;
12041 				goto eeprom_done;
12042 			}
12043 			cond_resched();
12044 		}
12045 	}
12046 	eeprom->len += i;
12047 
12048 	if (len & 3) {
12049 		/* read last bytes not ending on 4 byte boundary */
12050 		pd = &data[eeprom->len];
12051 		b_count = len & 3;
12052 		b_offset = offset + len - b_count;
12053 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12054 		if (ret)
12055 			goto eeprom_done;
12056 		memcpy(pd, &val, b_count);
12057 		eeprom->len += b_count;
12058 	}
12059 	ret = 0;
12060 
12061 eeprom_done:
12062 	/* Restore clock, link aware and link idle modes */
12063 	tg3_restore_clk(tp);
12064 	if (cpmu_restore)
12065 		tw32(TG3_CPMU_CTRL, cpmu_val);
12066 
12067 	return ret;
12068 }
12069 
12070 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12071 {
12072 	struct tg3 *tp = netdev_priv(dev);
12073 	int ret;
12074 	u32 offset, len, b_offset, odd_len;
12075 	u8 *buf;
12076 	__be32 start = 0, end;
12077 
12078 	if (tg3_flag(tp, NO_NVRAM) ||
12079 	    eeprom->magic != TG3_EEPROM_MAGIC)
12080 		return -EINVAL;
12081 
12082 	offset = eeprom->offset;
12083 	len = eeprom->len;
12084 
12085 	if ((b_offset = (offset & 3))) {
12086 		/* adjustments to start on required 4 byte boundary */
12087 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12088 		if (ret)
12089 			return ret;
12090 		len += b_offset;
12091 		offset &= ~3;
12092 		if (len < 4)
12093 			len = 4;
12094 	}
12095 
12096 	odd_len = 0;
12097 	if (len & 3) {
12098 		/* adjustments to end on required 4 byte boundary */
12099 		odd_len = 1;
12100 		len = (len + 3) & ~3;
12101 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12102 		if (ret)
12103 			return ret;
12104 	}
12105 
12106 	buf = data;
12107 	if (b_offset || odd_len) {
12108 		buf = kmalloc(len, GFP_KERNEL);
12109 		if (!buf)
12110 			return -ENOMEM;
12111 		if (b_offset)
12112 			memcpy(buf, &start, 4);
12113 		if (odd_len)
12114 			memcpy(buf+len-4, &end, 4);
12115 		memcpy(buf + b_offset, data, eeprom->len);
12116 	}
12117 
12118 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12119 
12120 	if (buf != data)
12121 		kfree(buf);
12122 
12123 	return ret;
12124 }
12125 
12126 static int tg3_get_link_ksettings(struct net_device *dev,
12127 				  struct ethtool_link_ksettings *cmd)
12128 {
12129 	struct tg3 *tp = netdev_priv(dev);
12130 	u32 supported, advertising;
12131 
12132 	if (tg3_flag(tp, USE_PHYLIB)) {
12133 		struct phy_device *phydev;
12134 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12135 			return -EAGAIN;
12136 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12137 		phy_ethtool_ksettings_get(phydev, cmd);
12138 
12139 		return 0;
12140 	}
12141 
12142 	supported = (SUPPORTED_Autoneg);
12143 
12144 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12145 		supported |= (SUPPORTED_1000baseT_Half |
12146 			      SUPPORTED_1000baseT_Full);
12147 
12148 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12149 		supported |= (SUPPORTED_100baseT_Half |
12150 			      SUPPORTED_100baseT_Full |
12151 			      SUPPORTED_10baseT_Half |
12152 			      SUPPORTED_10baseT_Full |
12153 			      SUPPORTED_TP);
12154 		cmd->base.port = PORT_TP;
12155 	} else {
12156 		supported |= SUPPORTED_FIBRE;
12157 		cmd->base.port = PORT_FIBRE;
12158 	}
12159 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12160 						supported);
12161 
12162 	advertising = tp->link_config.advertising;
12163 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12164 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12165 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12166 				advertising |= ADVERTISED_Pause;
12167 			} else {
12168 				advertising |= ADVERTISED_Pause |
12169 					ADVERTISED_Asym_Pause;
12170 			}
12171 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12172 			advertising |= ADVERTISED_Asym_Pause;
12173 		}
12174 	}
12175 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12176 						advertising);
12177 
12178 	if (netif_running(dev) && tp->link_up) {
12179 		cmd->base.speed = tp->link_config.active_speed;
12180 		cmd->base.duplex = tp->link_config.active_duplex;
12181 		ethtool_convert_legacy_u32_to_link_mode(
12182 			cmd->link_modes.lp_advertising,
12183 			tp->link_config.rmt_adv);
12184 
12185 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12186 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12187 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12188 			else
12189 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12190 		}
12191 	} else {
12192 		cmd->base.speed = SPEED_UNKNOWN;
12193 		cmd->base.duplex = DUPLEX_UNKNOWN;
12194 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12195 	}
12196 	cmd->base.phy_address = tp->phy_addr;
12197 	cmd->base.autoneg = tp->link_config.autoneg;
12198 	return 0;
12199 }
12200 
12201 static int tg3_set_link_ksettings(struct net_device *dev,
12202 				  const struct ethtool_link_ksettings *cmd)
12203 {
12204 	struct tg3 *tp = netdev_priv(dev);
12205 	u32 speed = cmd->base.speed;
12206 	u32 advertising;
12207 
12208 	if (tg3_flag(tp, USE_PHYLIB)) {
12209 		struct phy_device *phydev;
12210 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12211 			return -EAGAIN;
12212 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12213 		return phy_ethtool_ksettings_set(phydev, cmd);
12214 	}
12215 
12216 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12217 	    cmd->base.autoneg != AUTONEG_DISABLE)
12218 		return -EINVAL;
12219 
12220 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12221 	    cmd->base.duplex != DUPLEX_FULL &&
12222 	    cmd->base.duplex != DUPLEX_HALF)
12223 		return -EINVAL;
12224 
12225 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12226 						cmd->link_modes.advertising);
12227 
12228 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12229 		u32 mask = ADVERTISED_Autoneg |
12230 			   ADVERTISED_Pause |
12231 			   ADVERTISED_Asym_Pause;
12232 
12233 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12234 			mask |= ADVERTISED_1000baseT_Half |
12235 				ADVERTISED_1000baseT_Full;
12236 
12237 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12238 			mask |= ADVERTISED_100baseT_Half |
12239 				ADVERTISED_100baseT_Full |
12240 				ADVERTISED_10baseT_Half |
12241 				ADVERTISED_10baseT_Full |
12242 				ADVERTISED_TP;
12243 		else
12244 			mask |= ADVERTISED_FIBRE;
12245 
12246 		if (advertising & ~mask)
12247 			return -EINVAL;
12248 
12249 		mask &= (ADVERTISED_1000baseT_Half |
12250 			 ADVERTISED_1000baseT_Full |
12251 			 ADVERTISED_100baseT_Half |
12252 			 ADVERTISED_100baseT_Full |
12253 			 ADVERTISED_10baseT_Half |
12254 			 ADVERTISED_10baseT_Full);
12255 
12256 		advertising &= mask;
12257 	} else {
12258 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12259 			if (speed != SPEED_1000)
12260 				return -EINVAL;
12261 
12262 			if (cmd->base.duplex != DUPLEX_FULL)
12263 				return -EINVAL;
12264 		} else {
12265 			if (speed != SPEED_100 &&
12266 			    speed != SPEED_10)
12267 				return -EINVAL;
12268 		}
12269 	}
12270 
12271 	tg3_full_lock(tp, 0);
12272 
12273 	tp->link_config.autoneg = cmd->base.autoneg;
12274 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12275 		tp->link_config.advertising = (advertising |
12276 					      ADVERTISED_Autoneg);
12277 		tp->link_config.speed = SPEED_UNKNOWN;
12278 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12279 	} else {
12280 		tp->link_config.advertising = 0;
12281 		tp->link_config.speed = speed;
12282 		tp->link_config.duplex = cmd->base.duplex;
12283 	}
12284 
12285 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12286 
12287 	tg3_warn_mgmt_link_flap(tp);
12288 
12289 	if (netif_running(dev))
12290 		tg3_setup_phy(tp, true);
12291 
12292 	tg3_full_unlock(tp);
12293 
12294 	return 0;
12295 }
12296 
12297 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12298 {
12299 	struct tg3 *tp = netdev_priv(dev);
12300 
12301 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12302 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12303 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12304 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12305 }
12306 
12307 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12308 {
12309 	struct tg3 *tp = netdev_priv(dev);
12310 
12311 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12312 		wol->supported = WAKE_MAGIC;
12313 	else
12314 		wol->supported = 0;
12315 	wol->wolopts = 0;
12316 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12317 		wol->wolopts = WAKE_MAGIC;
12318 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12319 }
12320 
12321 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12322 {
12323 	struct tg3 *tp = netdev_priv(dev);
12324 	struct device *dp = &tp->pdev->dev;
12325 
12326 	if (wol->wolopts & ~WAKE_MAGIC)
12327 		return -EINVAL;
12328 	if ((wol->wolopts & WAKE_MAGIC) &&
12329 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12330 		return -EINVAL;
12331 
12332 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12333 
12334 	if (device_may_wakeup(dp))
12335 		tg3_flag_set(tp, WOL_ENABLE);
12336 	else
12337 		tg3_flag_clear(tp, WOL_ENABLE);
12338 
12339 	return 0;
12340 }
12341 
12342 static u32 tg3_get_msglevel(struct net_device *dev)
12343 {
12344 	struct tg3 *tp = netdev_priv(dev);
12345 	return tp->msg_enable;
12346 }
12347 
12348 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12349 {
12350 	struct tg3 *tp = netdev_priv(dev);
12351 	tp->msg_enable = value;
12352 }
12353 
12354 static int tg3_nway_reset(struct net_device *dev)
12355 {
12356 	struct tg3 *tp = netdev_priv(dev);
12357 	int r;
12358 
12359 	if (!netif_running(dev))
12360 		return -EAGAIN;
12361 
12362 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12363 		return -EINVAL;
12364 
12365 	tg3_warn_mgmt_link_flap(tp);
12366 
12367 	if (tg3_flag(tp, USE_PHYLIB)) {
12368 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12369 			return -EAGAIN;
12370 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12371 	} else {
12372 		u32 bmcr;
12373 
12374 		spin_lock_bh(&tp->lock);
12375 		r = -EINVAL;
12376 		tg3_readphy(tp, MII_BMCR, &bmcr);
12377 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12378 		    ((bmcr & BMCR_ANENABLE) ||
12379 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12380 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12381 						   BMCR_ANENABLE);
12382 			r = 0;
12383 		}
12384 		spin_unlock_bh(&tp->lock);
12385 	}
12386 
12387 	return r;
12388 }
12389 
12390 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12391 {
12392 	struct tg3 *tp = netdev_priv(dev);
12393 
12394 	ering->rx_max_pending = tp->rx_std_ring_mask;
12395 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12396 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12397 	else
12398 		ering->rx_jumbo_max_pending = 0;
12399 
12400 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12401 
12402 	ering->rx_pending = tp->rx_pending;
12403 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12404 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12405 	else
12406 		ering->rx_jumbo_pending = 0;
12407 
12408 	ering->tx_pending = tp->napi[0].tx_pending;
12409 }
12410 
12411 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12412 {
12413 	struct tg3 *tp = netdev_priv(dev);
12414 	int i, irq_sync = 0, err = 0;
12415 
12416 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12417 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12418 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12419 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12420 	    (tg3_flag(tp, TSO_BUG) &&
12421 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12422 		return -EINVAL;
12423 
12424 	if (netif_running(dev)) {
12425 		tg3_phy_stop(tp);
12426 		tg3_netif_stop(tp);
12427 		irq_sync = 1;
12428 	}
12429 
12430 	tg3_full_lock(tp, irq_sync);
12431 
12432 	tp->rx_pending = ering->rx_pending;
12433 
12434 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12435 	    tp->rx_pending > 63)
12436 		tp->rx_pending = 63;
12437 
12438 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12439 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12440 
12441 	for (i = 0; i < tp->irq_max; i++)
12442 		tp->napi[i].tx_pending = ering->tx_pending;
12443 
12444 	if (netif_running(dev)) {
12445 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12446 		err = tg3_restart_hw(tp, false);
12447 		if (!err)
12448 			tg3_netif_start(tp);
12449 	}
12450 
12451 	tg3_full_unlock(tp);
12452 
12453 	if (irq_sync && !err)
12454 		tg3_phy_start(tp);
12455 
12456 	return err;
12457 }
12458 
12459 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12460 {
12461 	struct tg3 *tp = netdev_priv(dev);
12462 
12463 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12464 
12465 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12466 		epause->rx_pause = 1;
12467 	else
12468 		epause->rx_pause = 0;
12469 
12470 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12471 		epause->tx_pause = 1;
12472 	else
12473 		epause->tx_pause = 0;
12474 }
12475 
12476 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12477 {
12478 	struct tg3 *tp = netdev_priv(dev);
12479 	int err = 0;
12480 
12481 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12482 		tg3_warn_mgmt_link_flap(tp);
12483 
12484 	if (tg3_flag(tp, USE_PHYLIB)) {
12485 		u32 newadv;
12486 		struct phy_device *phydev;
12487 
12488 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12489 
12490 		if (!(phydev->supported & SUPPORTED_Pause) ||
12491 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12492 		     (epause->rx_pause != epause->tx_pause)))
12493 			return -EINVAL;
12494 
12495 		tp->link_config.flowctrl = 0;
12496 		if (epause->rx_pause) {
12497 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12498 
12499 			if (epause->tx_pause) {
12500 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12501 				newadv = ADVERTISED_Pause;
12502 			} else
12503 				newadv = ADVERTISED_Pause |
12504 					 ADVERTISED_Asym_Pause;
12505 		} else if (epause->tx_pause) {
12506 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12507 			newadv = ADVERTISED_Asym_Pause;
12508 		} else
12509 			newadv = 0;
12510 
12511 		if (epause->autoneg)
12512 			tg3_flag_set(tp, PAUSE_AUTONEG);
12513 		else
12514 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12515 
12516 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12517 			u32 oldadv = phydev->advertising &
12518 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12519 			if (oldadv != newadv) {
12520 				phydev->advertising &=
12521 					~(ADVERTISED_Pause |
12522 					  ADVERTISED_Asym_Pause);
12523 				phydev->advertising |= newadv;
12524 				if (phydev->autoneg) {
12525 					/*
12526 					 * Always renegotiate the link to
12527 					 * inform our link partner of our
12528 					 * flow control settings, even if the
12529 					 * flow control is forced.  Let
12530 					 * tg3_adjust_link() do the final
12531 					 * flow control setup.
12532 					 */
12533 					return phy_start_aneg(phydev);
12534 				}
12535 			}
12536 
12537 			if (!epause->autoneg)
12538 				tg3_setup_flow_control(tp, 0, 0);
12539 		} else {
12540 			tp->link_config.advertising &=
12541 					~(ADVERTISED_Pause |
12542 					  ADVERTISED_Asym_Pause);
12543 			tp->link_config.advertising |= newadv;
12544 		}
12545 	} else {
12546 		int irq_sync = 0;
12547 
12548 		if (netif_running(dev)) {
12549 			tg3_netif_stop(tp);
12550 			irq_sync = 1;
12551 		}
12552 
12553 		tg3_full_lock(tp, irq_sync);
12554 
12555 		if (epause->autoneg)
12556 			tg3_flag_set(tp, PAUSE_AUTONEG);
12557 		else
12558 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12559 		if (epause->rx_pause)
12560 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12561 		else
12562 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12563 		if (epause->tx_pause)
12564 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12565 		else
12566 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12567 
12568 		if (netif_running(dev)) {
12569 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12570 			err = tg3_restart_hw(tp, false);
12571 			if (!err)
12572 				tg3_netif_start(tp);
12573 		}
12574 
12575 		tg3_full_unlock(tp);
12576 	}
12577 
12578 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12579 
12580 	return err;
12581 }
12582 
12583 static int tg3_get_sset_count(struct net_device *dev, int sset)
12584 {
12585 	switch (sset) {
12586 	case ETH_SS_TEST:
12587 		return TG3_NUM_TEST;
12588 	case ETH_SS_STATS:
12589 		return TG3_NUM_STATS;
12590 	default:
12591 		return -EOPNOTSUPP;
12592 	}
12593 }
12594 
12595 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12596 			 u32 *rules __always_unused)
12597 {
12598 	struct tg3 *tp = netdev_priv(dev);
12599 
12600 	if (!tg3_flag(tp, SUPPORT_MSIX))
12601 		return -EOPNOTSUPP;
12602 
12603 	switch (info->cmd) {
12604 	case ETHTOOL_GRXRINGS:
12605 		if (netif_running(tp->dev))
12606 			info->data = tp->rxq_cnt;
12607 		else {
12608 			info->data = num_online_cpus();
12609 			if (info->data > TG3_RSS_MAX_NUM_QS)
12610 				info->data = TG3_RSS_MAX_NUM_QS;
12611 		}
12612 
12613 		return 0;
12614 
12615 	default:
12616 		return -EOPNOTSUPP;
12617 	}
12618 }
12619 
12620 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12621 {
12622 	u32 size = 0;
12623 	struct tg3 *tp = netdev_priv(dev);
12624 
12625 	if (tg3_flag(tp, SUPPORT_MSIX))
12626 		size = TG3_RSS_INDIR_TBL_SIZE;
12627 
12628 	return size;
12629 }
12630 
12631 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12632 {
12633 	struct tg3 *tp = netdev_priv(dev);
12634 	int i;
12635 
12636 	if (hfunc)
12637 		*hfunc = ETH_RSS_HASH_TOP;
12638 	if (!indir)
12639 		return 0;
12640 
12641 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12642 		indir[i] = tp->rss_ind_tbl[i];
12643 
12644 	return 0;
12645 }
12646 
12647 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12648 			const u8 hfunc)
12649 {
12650 	struct tg3 *tp = netdev_priv(dev);
12651 	size_t i;
12652 
12653 	/* We require at least one supported parameter to be changed and no
12654 	 * change in any of the unsupported parameters
12655 	 */
12656 	if (key ||
12657 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12658 		return -EOPNOTSUPP;
12659 
12660 	if (!indir)
12661 		return 0;
12662 
12663 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12664 		tp->rss_ind_tbl[i] = indir[i];
12665 
12666 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12667 		return 0;
12668 
12669 	/* It is legal to write the indirection
12670 	 * table while the device is running.
12671 	 */
12672 	tg3_full_lock(tp, 0);
12673 	tg3_rss_write_indir_tbl(tp);
12674 	tg3_full_unlock(tp);
12675 
12676 	return 0;
12677 }
12678 
12679 static void tg3_get_channels(struct net_device *dev,
12680 			     struct ethtool_channels *channel)
12681 {
12682 	struct tg3 *tp = netdev_priv(dev);
12683 	u32 deflt_qs = netif_get_num_default_rss_queues();
12684 
12685 	channel->max_rx = tp->rxq_max;
12686 	channel->max_tx = tp->txq_max;
12687 
12688 	if (netif_running(dev)) {
12689 		channel->rx_count = tp->rxq_cnt;
12690 		channel->tx_count = tp->txq_cnt;
12691 	} else {
12692 		if (tp->rxq_req)
12693 			channel->rx_count = tp->rxq_req;
12694 		else
12695 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12696 
12697 		if (tp->txq_req)
12698 			channel->tx_count = tp->txq_req;
12699 		else
12700 			channel->tx_count = min(deflt_qs, tp->txq_max);
12701 	}
12702 }
12703 
12704 static int tg3_set_channels(struct net_device *dev,
12705 			    struct ethtool_channels *channel)
12706 {
12707 	struct tg3 *tp = netdev_priv(dev);
12708 
12709 	if (!tg3_flag(tp, SUPPORT_MSIX))
12710 		return -EOPNOTSUPP;
12711 
12712 	if (channel->rx_count > tp->rxq_max ||
12713 	    channel->tx_count > tp->txq_max)
12714 		return -EINVAL;
12715 
12716 	tp->rxq_req = channel->rx_count;
12717 	tp->txq_req = channel->tx_count;
12718 
12719 	if (!netif_running(dev))
12720 		return 0;
12721 
12722 	tg3_stop(tp);
12723 
12724 	tg3_carrier_off(tp);
12725 
12726 	tg3_start(tp, true, false, false);
12727 
12728 	return 0;
12729 }
12730 
12731 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12732 {
12733 	switch (stringset) {
12734 	case ETH_SS_STATS:
12735 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12736 		break;
12737 	case ETH_SS_TEST:
12738 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12739 		break;
12740 	default:
12741 		WARN_ON(1);	/* we need a WARN() */
12742 		break;
12743 	}
12744 }
12745 
12746 static int tg3_set_phys_id(struct net_device *dev,
12747 			    enum ethtool_phys_id_state state)
12748 {
12749 	struct tg3 *tp = netdev_priv(dev);
12750 
12751 	if (!netif_running(tp->dev))
12752 		return -EAGAIN;
12753 
12754 	switch (state) {
12755 	case ETHTOOL_ID_ACTIVE:
12756 		return 1;	/* cycle on/off once per second */
12757 
12758 	case ETHTOOL_ID_ON:
12759 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12760 		     LED_CTRL_1000MBPS_ON |
12761 		     LED_CTRL_100MBPS_ON |
12762 		     LED_CTRL_10MBPS_ON |
12763 		     LED_CTRL_TRAFFIC_OVERRIDE |
12764 		     LED_CTRL_TRAFFIC_BLINK |
12765 		     LED_CTRL_TRAFFIC_LED);
12766 		break;
12767 
12768 	case ETHTOOL_ID_OFF:
12769 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12770 		     LED_CTRL_TRAFFIC_OVERRIDE);
12771 		break;
12772 
12773 	case ETHTOOL_ID_INACTIVE:
12774 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12775 		break;
12776 	}
12777 
12778 	return 0;
12779 }
12780 
12781 static void tg3_get_ethtool_stats(struct net_device *dev,
12782 				   struct ethtool_stats *estats, u64 *tmp_stats)
12783 {
12784 	struct tg3 *tp = netdev_priv(dev);
12785 
12786 	if (tp->hw_stats)
12787 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12788 	else
12789 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12790 }
12791 
12792 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12793 {
12794 	int i;
12795 	__be32 *buf;
12796 	u32 offset = 0, len = 0;
12797 	u32 magic, val;
12798 
12799 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12800 		return NULL;
12801 
12802 	if (magic == TG3_EEPROM_MAGIC) {
12803 		for (offset = TG3_NVM_DIR_START;
12804 		     offset < TG3_NVM_DIR_END;
12805 		     offset += TG3_NVM_DIRENT_SIZE) {
12806 			if (tg3_nvram_read(tp, offset, &val))
12807 				return NULL;
12808 
12809 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12810 			    TG3_NVM_DIRTYPE_EXTVPD)
12811 				break;
12812 		}
12813 
12814 		if (offset != TG3_NVM_DIR_END) {
12815 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12816 			if (tg3_nvram_read(tp, offset + 4, &offset))
12817 				return NULL;
12818 
12819 			offset = tg3_nvram_logical_addr(tp, offset);
12820 		}
12821 	}
12822 
12823 	if (!offset || !len) {
12824 		offset = TG3_NVM_VPD_OFF;
12825 		len = TG3_NVM_VPD_LEN;
12826 	}
12827 
12828 	buf = kmalloc(len, GFP_KERNEL);
12829 	if (buf == NULL)
12830 		return NULL;
12831 
12832 	if (magic == TG3_EEPROM_MAGIC) {
12833 		for (i = 0; i < len; i += 4) {
12834 			/* The data is in little-endian format in NVRAM.
12835 			 * Use the big-endian read routines to preserve
12836 			 * the byte order as it exists in NVRAM.
12837 			 */
12838 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12839 				goto error;
12840 		}
12841 	} else {
12842 		u8 *ptr;
12843 		ssize_t cnt;
12844 		unsigned int pos = 0;
12845 
12846 		ptr = (u8 *)&buf[0];
12847 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12848 			cnt = pci_read_vpd(tp->pdev, pos,
12849 					   len - pos, ptr);
12850 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12851 				cnt = 0;
12852 			else if (cnt < 0)
12853 				goto error;
12854 		}
12855 		if (pos != len)
12856 			goto error;
12857 	}
12858 
12859 	*vpdlen = len;
12860 
12861 	return buf;
12862 
12863 error:
12864 	kfree(buf);
12865 	return NULL;
12866 }
12867 
12868 #define NVRAM_TEST_SIZE 0x100
12869 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12870 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12871 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12872 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12873 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12874 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12875 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12876 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12877 
12878 static int tg3_test_nvram(struct tg3 *tp)
12879 {
12880 	u32 csum, magic, len;
12881 	__be32 *buf;
12882 	int i, j, k, err = 0, size;
12883 
12884 	if (tg3_flag(tp, NO_NVRAM))
12885 		return 0;
12886 
12887 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12888 		return -EIO;
12889 
12890 	if (magic == TG3_EEPROM_MAGIC)
12891 		size = NVRAM_TEST_SIZE;
12892 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12893 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12894 		    TG3_EEPROM_SB_FORMAT_1) {
12895 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12896 			case TG3_EEPROM_SB_REVISION_0:
12897 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12898 				break;
12899 			case TG3_EEPROM_SB_REVISION_2:
12900 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12901 				break;
12902 			case TG3_EEPROM_SB_REVISION_3:
12903 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12904 				break;
12905 			case TG3_EEPROM_SB_REVISION_4:
12906 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12907 				break;
12908 			case TG3_EEPROM_SB_REVISION_5:
12909 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12910 				break;
12911 			case TG3_EEPROM_SB_REVISION_6:
12912 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12913 				break;
12914 			default:
12915 				return -EIO;
12916 			}
12917 		} else
12918 			return 0;
12919 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12920 		size = NVRAM_SELFBOOT_HW_SIZE;
12921 	else
12922 		return -EIO;
12923 
12924 	buf = kmalloc(size, GFP_KERNEL);
12925 	if (buf == NULL)
12926 		return -ENOMEM;
12927 
12928 	err = -EIO;
12929 	for (i = 0, j = 0; i < size; i += 4, j++) {
12930 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12931 		if (err)
12932 			break;
12933 	}
12934 	if (i < size)
12935 		goto out;
12936 
12937 	/* Selfboot format */
12938 	magic = be32_to_cpu(buf[0]);
12939 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12940 	    TG3_EEPROM_MAGIC_FW) {
12941 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12942 
12943 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12944 		    TG3_EEPROM_SB_REVISION_2) {
12945 			/* For rev 2, the csum doesn't include the MBA. */
12946 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12947 				csum8 += buf8[i];
12948 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12949 				csum8 += buf8[i];
12950 		} else {
12951 			for (i = 0; i < size; i++)
12952 				csum8 += buf8[i];
12953 		}
12954 
12955 		if (csum8 == 0) {
12956 			err = 0;
12957 			goto out;
12958 		}
12959 
12960 		err = -EIO;
12961 		goto out;
12962 	}
12963 
12964 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12965 	    TG3_EEPROM_MAGIC_HW) {
12966 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12967 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12968 		u8 *buf8 = (u8 *) buf;
12969 
12970 		/* Separate the parity bits and the data bytes.  */
12971 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12972 			if ((i == 0) || (i == 8)) {
12973 				int l;
12974 				u8 msk;
12975 
12976 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12977 					parity[k++] = buf8[i] & msk;
12978 				i++;
12979 			} else if (i == 16) {
12980 				int l;
12981 				u8 msk;
12982 
12983 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12984 					parity[k++] = buf8[i] & msk;
12985 				i++;
12986 
12987 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12988 					parity[k++] = buf8[i] & msk;
12989 				i++;
12990 			}
12991 			data[j++] = buf8[i];
12992 		}
12993 
12994 		err = -EIO;
12995 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12996 			u8 hw8 = hweight8(data[i]);
12997 
12998 			if ((hw8 & 0x1) && parity[i])
12999 				goto out;
13000 			else if (!(hw8 & 0x1) && !parity[i])
13001 				goto out;
13002 		}
13003 		err = 0;
13004 		goto out;
13005 	}
13006 
13007 	err = -EIO;
13008 
13009 	/* Bootstrap checksum at offset 0x10 */
13010 	csum = calc_crc((unsigned char *) buf, 0x10);
13011 	if (csum != le32_to_cpu(buf[0x10/4]))
13012 		goto out;
13013 
13014 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13015 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13016 	if (csum != le32_to_cpu(buf[0xfc/4]))
13017 		goto out;
13018 
13019 	kfree(buf);
13020 
13021 	buf = tg3_vpd_readblock(tp, &len);
13022 	if (!buf)
13023 		return -ENOMEM;
13024 
13025 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13026 	if (i > 0) {
13027 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13028 		if (j < 0)
13029 			goto out;
13030 
13031 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13032 			goto out;
13033 
13034 		i += PCI_VPD_LRDT_TAG_SIZE;
13035 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13036 					      PCI_VPD_RO_KEYWORD_CHKSUM);
13037 		if (j > 0) {
13038 			u8 csum8 = 0;
13039 
13040 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
13041 
13042 			for (i = 0; i <= j; i++)
13043 				csum8 += ((u8 *)buf)[i];
13044 
13045 			if (csum8)
13046 				goto out;
13047 		}
13048 	}
13049 
13050 	err = 0;
13051 
13052 out:
13053 	kfree(buf);
13054 	return err;
13055 }
13056 
13057 #define TG3_SERDES_TIMEOUT_SEC	2
13058 #define TG3_COPPER_TIMEOUT_SEC	6
13059 
13060 static int tg3_test_link(struct tg3 *tp)
13061 {
13062 	int i, max;
13063 
13064 	if (!netif_running(tp->dev))
13065 		return -ENODEV;
13066 
13067 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13068 		max = TG3_SERDES_TIMEOUT_SEC;
13069 	else
13070 		max = TG3_COPPER_TIMEOUT_SEC;
13071 
13072 	for (i = 0; i < max; i++) {
13073 		if (tp->link_up)
13074 			return 0;
13075 
13076 		if (msleep_interruptible(1000))
13077 			break;
13078 	}
13079 
13080 	return -EIO;
13081 }
13082 
13083 /* Only test the commonly used registers */
13084 static int tg3_test_registers(struct tg3 *tp)
13085 {
13086 	int i, is_5705, is_5750;
13087 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13088 	static struct {
13089 		u16 offset;
13090 		u16 flags;
13091 #define TG3_FL_5705	0x1
13092 #define TG3_FL_NOT_5705	0x2
13093 #define TG3_FL_NOT_5788	0x4
13094 #define TG3_FL_NOT_5750	0x8
13095 		u32 read_mask;
13096 		u32 write_mask;
13097 	} reg_tbl[] = {
13098 		/* MAC Control Registers */
13099 		{ MAC_MODE, TG3_FL_NOT_5705,
13100 			0x00000000, 0x00ef6f8c },
13101 		{ MAC_MODE, TG3_FL_5705,
13102 			0x00000000, 0x01ef6b8c },
13103 		{ MAC_STATUS, TG3_FL_NOT_5705,
13104 			0x03800107, 0x00000000 },
13105 		{ MAC_STATUS, TG3_FL_5705,
13106 			0x03800100, 0x00000000 },
13107 		{ MAC_ADDR_0_HIGH, 0x0000,
13108 			0x00000000, 0x0000ffff },
13109 		{ MAC_ADDR_0_LOW, 0x0000,
13110 			0x00000000, 0xffffffff },
13111 		{ MAC_RX_MTU_SIZE, 0x0000,
13112 			0x00000000, 0x0000ffff },
13113 		{ MAC_TX_MODE, 0x0000,
13114 			0x00000000, 0x00000070 },
13115 		{ MAC_TX_LENGTHS, 0x0000,
13116 			0x00000000, 0x00003fff },
13117 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13118 			0x00000000, 0x000007fc },
13119 		{ MAC_RX_MODE, TG3_FL_5705,
13120 			0x00000000, 0x000007dc },
13121 		{ MAC_HASH_REG_0, 0x0000,
13122 			0x00000000, 0xffffffff },
13123 		{ MAC_HASH_REG_1, 0x0000,
13124 			0x00000000, 0xffffffff },
13125 		{ MAC_HASH_REG_2, 0x0000,
13126 			0x00000000, 0xffffffff },
13127 		{ MAC_HASH_REG_3, 0x0000,
13128 			0x00000000, 0xffffffff },
13129 
13130 		/* Receive Data and Receive BD Initiator Control Registers. */
13131 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13132 			0x00000000, 0xffffffff },
13133 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13134 			0x00000000, 0xffffffff },
13135 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13136 			0x00000000, 0x00000003 },
13137 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13138 			0x00000000, 0xffffffff },
13139 		{ RCVDBDI_STD_BD+0, 0x0000,
13140 			0x00000000, 0xffffffff },
13141 		{ RCVDBDI_STD_BD+4, 0x0000,
13142 			0x00000000, 0xffffffff },
13143 		{ RCVDBDI_STD_BD+8, 0x0000,
13144 			0x00000000, 0xffff0002 },
13145 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13146 			0x00000000, 0xffffffff },
13147 
13148 		/* Receive BD Initiator Control Registers. */
13149 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13150 			0x00000000, 0xffffffff },
13151 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13152 			0x00000000, 0x000003ff },
13153 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13154 			0x00000000, 0xffffffff },
13155 
13156 		/* Host Coalescing Control Registers. */
13157 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13158 			0x00000000, 0x00000004 },
13159 		{ HOSTCC_MODE, TG3_FL_5705,
13160 			0x00000000, 0x000000f6 },
13161 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13162 			0x00000000, 0xffffffff },
13163 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13164 			0x00000000, 0x000003ff },
13165 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13166 			0x00000000, 0xffffffff },
13167 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13168 			0x00000000, 0x000003ff },
13169 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13170 			0x00000000, 0xffffffff },
13171 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13172 			0x00000000, 0x000000ff },
13173 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13174 			0x00000000, 0xffffffff },
13175 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13176 			0x00000000, 0x000000ff },
13177 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13178 			0x00000000, 0xffffffff },
13179 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13180 			0x00000000, 0xffffffff },
13181 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13182 			0x00000000, 0xffffffff },
13183 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13184 			0x00000000, 0x000000ff },
13185 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13186 			0x00000000, 0xffffffff },
13187 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13188 			0x00000000, 0x000000ff },
13189 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13190 			0x00000000, 0xffffffff },
13191 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13192 			0x00000000, 0xffffffff },
13193 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13194 			0x00000000, 0xffffffff },
13195 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13196 			0x00000000, 0xffffffff },
13197 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13198 			0x00000000, 0xffffffff },
13199 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13200 			0xffffffff, 0x00000000 },
13201 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13202 			0xffffffff, 0x00000000 },
13203 
13204 		/* Buffer Manager Control Registers. */
13205 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13206 			0x00000000, 0x007fff80 },
13207 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13208 			0x00000000, 0x007fffff },
13209 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13210 			0x00000000, 0x0000003f },
13211 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13212 			0x00000000, 0x000001ff },
13213 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13214 			0x00000000, 0x000001ff },
13215 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13216 			0xffffffff, 0x00000000 },
13217 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13218 			0xffffffff, 0x00000000 },
13219 
13220 		/* Mailbox Registers */
13221 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13222 			0x00000000, 0x000001ff },
13223 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13224 			0x00000000, 0x000001ff },
13225 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13226 			0x00000000, 0x000007ff },
13227 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13228 			0x00000000, 0x000001ff },
13229 
13230 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13231 	};
13232 
13233 	is_5705 = is_5750 = 0;
13234 	if (tg3_flag(tp, 5705_PLUS)) {
13235 		is_5705 = 1;
13236 		if (tg3_flag(tp, 5750_PLUS))
13237 			is_5750 = 1;
13238 	}
13239 
13240 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13241 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13242 			continue;
13243 
13244 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13245 			continue;
13246 
13247 		if (tg3_flag(tp, IS_5788) &&
13248 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13249 			continue;
13250 
13251 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13252 			continue;
13253 
13254 		offset = (u32) reg_tbl[i].offset;
13255 		read_mask = reg_tbl[i].read_mask;
13256 		write_mask = reg_tbl[i].write_mask;
13257 
13258 		/* Save the original register content */
13259 		save_val = tr32(offset);
13260 
13261 		/* Determine the read-only value. */
13262 		read_val = save_val & read_mask;
13263 
13264 		/* Write zero to the register, then make sure the read-only bits
13265 		 * are not changed and the read/write bits are all zeros.
13266 		 */
13267 		tw32(offset, 0);
13268 
13269 		val = tr32(offset);
13270 
13271 		/* Test the read-only and read/write bits. */
13272 		if (((val & read_mask) != read_val) || (val & write_mask))
13273 			goto out;
13274 
13275 		/* Write ones to all the bits defined by RdMask and WrMask, then
13276 		 * make sure the read-only bits are not changed and the
13277 		 * read/write bits are all ones.
13278 		 */
13279 		tw32(offset, read_mask | write_mask);
13280 
13281 		val = tr32(offset);
13282 
13283 		/* Test the read-only bits. */
13284 		if ((val & read_mask) != read_val)
13285 			goto out;
13286 
13287 		/* Test the read/write bits. */
13288 		if ((val & write_mask) != write_mask)
13289 			goto out;
13290 
13291 		tw32(offset, save_val);
13292 	}
13293 
13294 	return 0;
13295 
13296 out:
13297 	if (netif_msg_hw(tp))
13298 		netdev_err(tp->dev,
13299 			   "Register test failed at offset %x\n", offset);
13300 	tw32(offset, save_val);
13301 	return -EIO;
13302 }
13303 
13304 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13305 {
13306 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13307 	int i;
13308 	u32 j;
13309 
13310 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13311 		for (j = 0; j < len; j += 4) {
13312 			u32 val;
13313 
13314 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13315 			tg3_read_mem(tp, offset + j, &val);
13316 			if (val != test_pattern[i])
13317 				return -EIO;
13318 		}
13319 	}
13320 	return 0;
13321 }
13322 
13323 static int tg3_test_memory(struct tg3 *tp)
13324 {
13325 	static struct mem_entry {
13326 		u32 offset;
13327 		u32 len;
13328 	} mem_tbl_570x[] = {
13329 		{ 0x00000000, 0x00b50},
13330 		{ 0x00002000, 0x1c000},
13331 		{ 0xffffffff, 0x00000}
13332 	}, mem_tbl_5705[] = {
13333 		{ 0x00000100, 0x0000c},
13334 		{ 0x00000200, 0x00008},
13335 		{ 0x00004000, 0x00800},
13336 		{ 0x00006000, 0x01000},
13337 		{ 0x00008000, 0x02000},
13338 		{ 0x00010000, 0x0e000},
13339 		{ 0xffffffff, 0x00000}
13340 	}, mem_tbl_5755[] = {
13341 		{ 0x00000200, 0x00008},
13342 		{ 0x00004000, 0x00800},
13343 		{ 0x00006000, 0x00800},
13344 		{ 0x00008000, 0x02000},
13345 		{ 0x00010000, 0x0c000},
13346 		{ 0xffffffff, 0x00000}
13347 	}, mem_tbl_5906[] = {
13348 		{ 0x00000200, 0x00008},
13349 		{ 0x00004000, 0x00400},
13350 		{ 0x00006000, 0x00400},
13351 		{ 0x00008000, 0x01000},
13352 		{ 0x00010000, 0x01000},
13353 		{ 0xffffffff, 0x00000}
13354 	}, mem_tbl_5717[] = {
13355 		{ 0x00000200, 0x00008},
13356 		{ 0x00010000, 0x0a000},
13357 		{ 0x00020000, 0x13c00},
13358 		{ 0xffffffff, 0x00000}
13359 	}, mem_tbl_57765[] = {
13360 		{ 0x00000200, 0x00008},
13361 		{ 0x00004000, 0x00800},
13362 		{ 0x00006000, 0x09800},
13363 		{ 0x00010000, 0x0a000},
13364 		{ 0xffffffff, 0x00000}
13365 	};
13366 	struct mem_entry *mem_tbl;
13367 	int err = 0;
13368 	int i;
13369 
13370 	if (tg3_flag(tp, 5717_PLUS))
13371 		mem_tbl = mem_tbl_5717;
13372 	else if (tg3_flag(tp, 57765_CLASS) ||
13373 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13374 		mem_tbl = mem_tbl_57765;
13375 	else if (tg3_flag(tp, 5755_PLUS))
13376 		mem_tbl = mem_tbl_5755;
13377 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13378 		mem_tbl = mem_tbl_5906;
13379 	else if (tg3_flag(tp, 5705_PLUS))
13380 		mem_tbl = mem_tbl_5705;
13381 	else
13382 		mem_tbl = mem_tbl_570x;
13383 
13384 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13385 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13386 		if (err)
13387 			break;
13388 	}
13389 
13390 	return err;
13391 }
13392 
13393 #define TG3_TSO_MSS		500
13394 
13395 #define TG3_TSO_IP_HDR_LEN	20
13396 #define TG3_TSO_TCP_HDR_LEN	20
13397 #define TG3_TSO_TCP_OPT_LEN	12
13398 
13399 static const u8 tg3_tso_header[] = {
13400 0x08, 0x00,
13401 0x45, 0x00, 0x00, 0x00,
13402 0x00, 0x00, 0x40, 0x00,
13403 0x40, 0x06, 0x00, 0x00,
13404 0x0a, 0x00, 0x00, 0x01,
13405 0x0a, 0x00, 0x00, 0x02,
13406 0x0d, 0x00, 0xe0, 0x00,
13407 0x00, 0x00, 0x01, 0x00,
13408 0x00, 0x00, 0x02, 0x00,
13409 0x80, 0x10, 0x10, 0x00,
13410 0x14, 0x09, 0x00, 0x00,
13411 0x01, 0x01, 0x08, 0x0a,
13412 0x11, 0x11, 0x11, 0x11,
13413 0x11, 0x11, 0x11, 0x11,
13414 };
13415 
13416 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13417 {
13418 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13419 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13420 	u32 budget;
13421 	struct sk_buff *skb;
13422 	u8 *tx_data, *rx_data;
13423 	dma_addr_t map;
13424 	int num_pkts, tx_len, rx_len, i, err;
13425 	struct tg3_rx_buffer_desc *desc;
13426 	struct tg3_napi *tnapi, *rnapi;
13427 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13428 
13429 	tnapi = &tp->napi[0];
13430 	rnapi = &tp->napi[0];
13431 	if (tp->irq_cnt > 1) {
13432 		if (tg3_flag(tp, ENABLE_RSS))
13433 			rnapi = &tp->napi[1];
13434 		if (tg3_flag(tp, ENABLE_TSS))
13435 			tnapi = &tp->napi[1];
13436 	}
13437 	coal_now = tnapi->coal_now | rnapi->coal_now;
13438 
13439 	err = -EIO;
13440 
13441 	tx_len = pktsz;
13442 	skb = netdev_alloc_skb(tp->dev, tx_len);
13443 	if (!skb)
13444 		return -ENOMEM;
13445 
13446 	tx_data = skb_put(skb, tx_len);
13447 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13448 	memset(tx_data + ETH_ALEN, 0x0, 8);
13449 
13450 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13451 
13452 	if (tso_loopback) {
13453 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13454 
13455 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13456 			      TG3_TSO_TCP_OPT_LEN;
13457 
13458 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13459 		       sizeof(tg3_tso_header));
13460 		mss = TG3_TSO_MSS;
13461 
13462 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13463 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13464 
13465 		/* Set the total length field in the IP header */
13466 		iph->tot_len = htons((u16)(mss + hdr_len));
13467 
13468 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13469 			      TXD_FLAG_CPU_POST_DMA);
13470 
13471 		if (tg3_flag(tp, HW_TSO_1) ||
13472 		    tg3_flag(tp, HW_TSO_2) ||
13473 		    tg3_flag(tp, HW_TSO_3)) {
13474 			struct tcphdr *th;
13475 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13476 			th = (struct tcphdr *)&tx_data[val];
13477 			th->check = 0;
13478 		} else
13479 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13480 
13481 		if (tg3_flag(tp, HW_TSO_3)) {
13482 			mss |= (hdr_len & 0xc) << 12;
13483 			if (hdr_len & 0x10)
13484 				base_flags |= 0x00000010;
13485 			base_flags |= (hdr_len & 0x3e0) << 5;
13486 		} else if (tg3_flag(tp, HW_TSO_2))
13487 			mss |= hdr_len << 9;
13488 		else if (tg3_flag(tp, HW_TSO_1) ||
13489 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13490 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13491 		} else {
13492 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13493 		}
13494 
13495 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13496 	} else {
13497 		num_pkts = 1;
13498 		data_off = ETH_HLEN;
13499 
13500 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13501 		    tx_len > VLAN_ETH_FRAME_LEN)
13502 			base_flags |= TXD_FLAG_JMB_PKT;
13503 	}
13504 
13505 	for (i = data_off; i < tx_len; i++)
13506 		tx_data[i] = (u8) (i & 0xff);
13507 
13508 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13509 	if (pci_dma_mapping_error(tp->pdev, map)) {
13510 		dev_kfree_skb(skb);
13511 		return -EIO;
13512 	}
13513 
13514 	val = tnapi->tx_prod;
13515 	tnapi->tx_buffers[val].skb = skb;
13516 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13517 
13518 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13519 	       rnapi->coal_now);
13520 
13521 	udelay(10);
13522 
13523 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13524 
13525 	budget = tg3_tx_avail(tnapi);
13526 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13527 			    base_flags | TXD_FLAG_END, mss, 0)) {
13528 		tnapi->tx_buffers[val].skb = NULL;
13529 		dev_kfree_skb(skb);
13530 		return -EIO;
13531 	}
13532 
13533 	tnapi->tx_prod++;
13534 
13535 	/* Sync BD data before updating mailbox */
13536 	wmb();
13537 
13538 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13539 	tr32_mailbox(tnapi->prodmbox);
13540 
13541 	udelay(10);
13542 
13543 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13544 	for (i = 0; i < 35; i++) {
13545 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13546 		       coal_now);
13547 
13548 		udelay(10);
13549 
13550 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13551 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13552 		if ((tx_idx == tnapi->tx_prod) &&
13553 		    (rx_idx == (rx_start_idx + num_pkts)))
13554 			break;
13555 	}
13556 
13557 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13558 	dev_kfree_skb(skb);
13559 
13560 	if (tx_idx != tnapi->tx_prod)
13561 		goto out;
13562 
13563 	if (rx_idx != rx_start_idx + num_pkts)
13564 		goto out;
13565 
13566 	val = data_off;
13567 	while (rx_idx != rx_start_idx) {
13568 		desc = &rnapi->rx_rcb[rx_start_idx++];
13569 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13570 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13571 
13572 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13573 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13574 			goto out;
13575 
13576 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13577 			 - ETH_FCS_LEN;
13578 
13579 		if (!tso_loopback) {
13580 			if (rx_len != tx_len)
13581 				goto out;
13582 
13583 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13584 				if (opaque_key != RXD_OPAQUE_RING_STD)
13585 					goto out;
13586 			} else {
13587 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13588 					goto out;
13589 			}
13590 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13591 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13592 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13593 			goto out;
13594 		}
13595 
13596 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13597 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13598 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13599 					     mapping);
13600 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13601 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13602 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13603 					     mapping);
13604 		} else
13605 			goto out;
13606 
13607 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13608 					    PCI_DMA_FROMDEVICE);
13609 
13610 		rx_data += TG3_RX_OFFSET(tp);
13611 		for (i = data_off; i < rx_len; i++, val++) {
13612 			if (*(rx_data + i) != (u8) (val & 0xff))
13613 				goto out;
13614 		}
13615 	}
13616 
13617 	err = 0;
13618 
13619 	/* tg3_free_rings will unmap and free the rx_data */
13620 out:
13621 	return err;
13622 }
13623 
13624 #define TG3_STD_LOOPBACK_FAILED		1
13625 #define TG3_JMB_LOOPBACK_FAILED		2
13626 #define TG3_TSO_LOOPBACK_FAILED		4
13627 #define TG3_LOOPBACK_FAILED \
13628 	(TG3_STD_LOOPBACK_FAILED | \
13629 	 TG3_JMB_LOOPBACK_FAILED | \
13630 	 TG3_TSO_LOOPBACK_FAILED)
13631 
13632 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13633 {
13634 	int err = -EIO;
13635 	u32 eee_cap;
13636 	u32 jmb_pkt_sz = 9000;
13637 
13638 	if (tp->dma_limit)
13639 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13640 
13641 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13642 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13643 
13644 	if (!netif_running(tp->dev)) {
13645 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13647 		if (do_extlpbk)
13648 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13649 		goto done;
13650 	}
13651 
13652 	err = tg3_reset_hw(tp, true);
13653 	if (err) {
13654 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13655 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13656 		if (do_extlpbk)
13657 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13658 		goto done;
13659 	}
13660 
13661 	if (tg3_flag(tp, ENABLE_RSS)) {
13662 		int i;
13663 
13664 		/* Reroute all rx packets to the 1st queue */
13665 		for (i = MAC_RSS_INDIR_TBL_0;
13666 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13667 			tw32(i, 0x0);
13668 	}
13669 
13670 	/* HW errata - mac loopback fails in some cases on 5780.
13671 	 * Normal traffic and PHY loopback are not affected by
13672 	 * errata.  Also, the MAC loopback test is deprecated for
13673 	 * all newer ASIC revisions.
13674 	 */
13675 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13676 	    !tg3_flag(tp, CPMU_PRESENT)) {
13677 		tg3_mac_loopback(tp, true);
13678 
13679 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13680 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13681 
13682 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13683 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13684 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13685 
13686 		tg3_mac_loopback(tp, false);
13687 	}
13688 
13689 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13690 	    !tg3_flag(tp, USE_PHYLIB)) {
13691 		int i;
13692 
13693 		tg3_phy_lpbk_set(tp, 0, false);
13694 
13695 		/* Wait for link */
13696 		for (i = 0; i < 100; i++) {
13697 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13698 				break;
13699 			mdelay(1);
13700 		}
13701 
13702 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13703 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13704 		if (tg3_flag(tp, TSO_CAPABLE) &&
13705 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13706 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13707 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13708 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13709 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13710 
13711 		if (do_extlpbk) {
13712 			tg3_phy_lpbk_set(tp, 0, true);
13713 
13714 			/* All link indications report up, but the hardware
13715 			 * isn't really ready for about 20 msec.  Double it
13716 			 * to be sure.
13717 			 */
13718 			mdelay(40);
13719 
13720 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13721 				data[TG3_EXT_LOOPB_TEST] |=
13722 							TG3_STD_LOOPBACK_FAILED;
13723 			if (tg3_flag(tp, TSO_CAPABLE) &&
13724 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13725 				data[TG3_EXT_LOOPB_TEST] |=
13726 							TG3_TSO_LOOPBACK_FAILED;
13727 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13728 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13729 				data[TG3_EXT_LOOPB_TEST] |=
13730 							TG3_JMB_LOOPBACK_FAILED;
13731 		}
13732 
13733 		/* Re-enable gphy autopowerdown. */
13734 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13735 			tg3_phy_toggle_apd(tp, true);
13736 	}
13737 
13738 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13739 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13740 
13741 done:
13742 	tp->phy_flags |= eee_cap;
13743 
13744 	return err;
13745 }
13746 
13747 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13748 			  u64 *data)
13749 {
13750 	struct tg3 *tp = netdev_priv(dev);
13751 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13752 
13753 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13754 		if (tg3_power_up(tp)) {
13755 			etest->flags |= ETH_TEST_FL_FAILED;
13756 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13757 			return;
13758 		}
13759 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13760 	}
13761 
13762 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13763 
13764 	if (tg3_test_nvram(tp) != 0) {
13765 		etest->flags |= ETH_TEST_FL_FAILED;
13766 		data[TG3_NVRAM_TEST] = 1;
13767 	}
13768 	if (!doextlpbk && tg3_test_link(tp)) {
13769 		etest->flags |= ETH_TEST_FL_FAILED;
13770 		data[TG3_LINK_TEST] = 1;
13771 	}
13772 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13773 		int err, err2 = 0, irq_sync = 0;
13774 
13775 		if (netif_running(dev)) {
13776 			tg3_phy_stop(tp);
13777 			tg3_netif_stop(tp);
13778 			irq_sync = 1;
13779 		}
13780 
13781 		tg3_full_lock(tp, irq_sync);
13782 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13783 		err = tg3_nvram_lock(tp);
13784 		tg3_halt_cpu(tp, RX_CPU_BASE);
13785 		if (!tg3_flag(tp, 5705_PLUS))
13786 			tg3_halt_cpu(tp, TX_CPU_BASE);
13787 		if (!err)
13788 			tg3_nvram_unlock(tp);
13789 
13790 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13791 			tg3_phy_reset(tp);
13792 
13793 		if (tg3_test_registers(tp) != 0) {
13794 			etest->flags |= ETH_TEST_FL_FAILED;
13795 			data[TG3_REGISTER_TEST] = 1;
13796 		}
13797 
13798 		if (tg3_test_memory(tp) != 0) {
13799 			etest->flags |= ETH_TEST_FL_FAILED;
13800 			data[TG3_MEMORY_TEST] = 1;
13801 		}
13802 
13803 		if (doextlpbk)
13804 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13805 
13806 		if (tg3_test_loopback(tp, data, doextlpbk))
13807 			etest->flags |= ETH_TEST_FL_FAILED;
13808 
13809 		tg3_full_unlock(tp);
13810 
13811 		if (tg3_test_interrupt(tp) != 0) {
13812 			etest->flags |= ETH_TEST_FL_FAILED;
13813 			data[TG3_INTERRUPT_TEST] = 1;
13814 		}
13815 
13816 		tg3_full_lock(tp, 0);
13817 
13818 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13819 		if (netif_running(dev)) {
13820 			tg3_flag_set(tp, INIT_COMPLETE);
13821 			err2 = tg3_restart_hw(tp, true);
13822 			if (!err2)
13823 				tg3_netif_start(tp);
13824 		}
13825 
13826 		tg3_full_unlock(tp);
13827 
13828 		if (irq_sync && !err2)
13829 			tg3_phy_start(tp);
13830 	}
13831 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13832 		tg3_power_down_prepare(tp);
13833 
13834 }
13835 
13836 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13837 {
13838 	struct tg3 *tp = netdev_priv(dev);
13839 	struct hwtstamp_config stmpconf;
13840 
13841 	if (!tg3_flag(tp, PTP_CAPABLE))
13842 		return -EOPNOTSUPP;
13843 
13844 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13845 		return -EFAULT;
13846 
13847 	if (stmpconf.flags)
13848 		return -EINVAL;
13849 
13850 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13851 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13852 		return -ERANGE;
13853 
13854 	switch (stmpconf.rx_filter) {
13855 	case HWTSTAMP_FILTER_NONE:
13856 		tp->rxptpctl = 0;
13857 		break;
13858 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13859 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13860 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13861 		break;
13862 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13863 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13864 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13865 		break;
13866 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13867 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13868 			       TG3_RX_PTP_CTL_DELAY_REQ;
13869 		break;
13870 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13871 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13872 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13873 		break;
13874 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13875 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13876 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13877 		break;
13878 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13879 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13880 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13881 		break;
13882 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13883 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13884 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13885 		break;
13886 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13887 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13888 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13889 		break;
13890 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13891 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13892 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13893 		break;
13894 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13895 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13896 			       TG3_RX_PTP_CTL_DELAY_REQ;
13897 		break;
13898 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13899 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13900 			       TG3_RX_PTP_CTL_DELAY_REQ;
13901 		break;
13902 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13903 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13904 			       TG3_RX_PTP_CTL_DELAY_REQ;
13905 		break;
13906 	default:
13907 		return -ERANGE;
13908 	}
13909 
13910 	if (netif_running(dev) && tp->rxptpctl)
13911 		tw32(TG3_RX_PTP_CTL,
13912 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13913 
13914 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13915 		tg3_flag_set(tp, TX_TSTAMP_EN);
13916 	else
13917 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13918 
13919 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13920 		-EFAULT : 0;
13921 }
13922 
13923 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13924 {
13925 	struct tg3 *tp = netdev_priv(dev);
13926 	struct hwtstamp_config stmpconf;
13927 
13928 	if (!tg3_flag(tp, PTP_CAPABLE))
13929 		return -EOPNOTSUPP;
13930 
13931 	stmpconf.flags = 0;
13932 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13933 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13934 
13935 	switch (tp->rxptpctl) {
13936 	case 0:
13937 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13938 		break;
13939 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13940 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13941 		break;
13942 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13943 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13944 		break;
13945 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13946 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13947 		break;
13948 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13949 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13950 		break;
13951 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13952 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13953 		break;
13954 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13955 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13956 		break;
13957 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13958 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13959 		break;
13960 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13961 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13962 		break;
13963 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13964 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13965 		break;
13966 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13967 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13968 		break;
13969 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13970 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13971 		break;
13972 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13973 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13974 		break;
13975 	default:
13976 		WARN_ON_ONCE(1);
13977 		return -ERANGE;
13978 	}
13979 
13980 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13981 		-EFAULT : 0;
13982 }
13983 
13984 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13985 {
13986 	struct mii_ioctl_data *data = if_mii(ifr);
13987 	struct tg3 *tp = netdev_priv(dev);
13988 	int err;
13989 
13990 	if (tg3_flag(tp, USE_PHYLIB)) {
13991 		struct phy_device *phydev;
13992 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13993 			return -EAGAIN;
13994 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13995 		return phy_mii_ioctl(phydev, ifr, cmd);
13996 	}
13997 
13998 	switch (cmd) {
13999 	case SIOCGMIIPHY:
14000 		data->phy_id = tp->phy_addr;
14001 
14002 		/* fallthru */
14003 	case SIOCGMIIREG: {
14004 		u32 mii_regval;
14005 
14006 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14007 			break;			/* We have no PHY */
14008 
14009 		if (!netif_running(dev))
14010 			return -EAGAIN;
14011 
14012 		spin_lock_bh(&tp->lock);
14013 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14014 				    data->reg_num & 0x1f, &mii_regval);
14015 		spin_unlock_bh(&tp->lock);
14016 
14017 		data->val_out = mii_regval;
14018 
14019 		return err;
14020 	}
14021 
14022 	case SIOCSMIIREG:
14023 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14024 			break;			/* We have no PHY */
14025 
14026 		if (!netif_running(dev))
14027 			return -EAGAIN;
14028 
14029 		spin_lock_bh(&tp->lock);
14030 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14031 				     data->reg_num & 0x1f, data->val_in);
14032 		spin_unlock_bh(&tp->lock);
14033 
14034 		return err;
14035 
14036 	case SIOCSHWTSTAMP:
14037 		return tg3_hwtstamp_set(dev, ifr);
14038 
14039 	case SIOCGHWTSTAMP:
14040 		return tg3_hwtstamp_get(dev, ifr);
14041 
14042 	default:
14043 		/* do nothing */
14044 		break;
14045 	}
14046 	return -EOPNOTSUPP;
14047 }
14048 
14049 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14050 {
14051 	struct tg3 *tp = netdev_priv(dev);
14052 
14053 	memcpy(ec, &tp->coal, sizeof(*ec));
14054 	return 0;
14055 }
14056 
14057 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14058 {
14059 	struct tg3 *tp = netdev_priv(dev);
14060 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14061 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14062 
14063 	if (!tg3_flag(tp, 5705_PLUS)) {
14064 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14065 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14066 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14067 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14068 	}
14069 
14070 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14071 	    (!ec->rx_coalesce_usecs) ||
14072 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14073 	    (!ec->tx_coalesce_usecs) ||
14074 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14075 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14076 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14077 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14078 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14079 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14080 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14081 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14082 		return -EINVAL;
14083 
14084 	/* Only copy relevant parameters, ignore all others. */
14085 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14086 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14087 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14088 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14089 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14090 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14091 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14092 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14093 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14094 
14095 	if (netif_running(dev)) {
14096 		tg3_full_lock(tp, 0);
14097 		__tg3_set_coalesce(tp, &tp->coal);
14098 		tg3_full_unlock(tp);
14099 	}
14100 	return 0;
14101 }
14102 
14103 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14104 {
14105 	struct tg3 *tp = netdev_priv(dev);
14106 
14107 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14108 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14109 		return -EOPNOTSUPP;
14110 	}
14111 
14112 	if (edata->advertised != tp->eee.advertised) {
14113 		netdev_warn(tp->dev,
14114 			    "Direct manipulation of EEE advertisement is not supported\n");
14115 		return -EINVAL;
14116 	}
14117 
14118 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14119 		netdev_warn(tp->dev,
14120 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14121 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14122 		return -EINVAL;
14123 	}
14124 
14125 	tp->eee = *edata;
14126 
14127 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14128 	tg3_warn_mgmt_link_flap(tp);
14129 
14130 	if (netif_running(tp->dev)) {
14131 		tg3_full_lock(tp, 0);
14132 		tg3_setup_eee(tp);
14133 		tg3_phy_reset(tp);
14134 		tg3_full_unlock(tp);
14135 	}
14136 
14137 	return 0;
14138 }
14139 
14140 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14141 {
14142 	struct tg3 *tp = netdev_priv(dev);
14143 
14144 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14145 		netdev_warn(tp->dev,
14146 			    "Board does not support EEE!\n");
14147 		return -EOPNOTSUPP;
14148 	}
14149 
14150 	*edata = tp->eee;
14151 	return 0;
14152 }
14153 
14154 static const struct ethtool_ops tg3_ethtool_ops = {
14155 	.get_drvinfo		= tg3_get_drvinfo,
14156 	.get_regs_len		= tg3_get_regs_len,
14157 	.get_regs		= tg3_get_regs,
14158 	.get_wol		= tg3_get_wol,
14159 	.set_wol		= tg3_set_wol,
14160 	.get_msglevel		= tg3_get_msglevel,
14161 	.set_msglevel		= tg3_set_msglevel,
14162 	.nway_reset		= tg3_nway_reset,
14163 	.get_link		= ethtool_op_get_link,
14164 	.get_eeprom_len		= tg3_get_eeprom_len,
14165 	.get_eeprom		= tg3_get_eeprom,
14166 	.set_eeprom		= tg3_set_eeprom,
14167 	.get_ringparam		= tg3_get_ringparam,
14168 	.set_ringparam		= tg3_set_ringparam,
14169 	.get_pauseparam		= tg3_get_pauseparam,
14170 	.set_pauseparam		= tg3_set_pauseparam,
14171 	.self_test		= tg3_self_test,
14172 	.get_strings		= tg3_get_strings,
14173 	.set_phys_id		= tg3_set_phys_id,
14174 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14175 	.get_coalesce		= tg3_get_coalesce,
14176 	.set_coalesce		= tg3_set_coalesce,
14177 	.get_sset_count		= tg3_get_sset_count,
14178 	.get_rxnfc		= tg3_get_rxnfc,
14179 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14180 	.get_rxfh		= tg3_get_rxfh,
14181 	.set_rxfh		= tg3_set_rxfh,
14182 	.get_channels		= tg3_get_channels,
14183 	.set_channels		= tg3_set_channels,
14184 	.get_ts_info		= tg3_get_ts_info,
14185 	.get_eee		= tg3_get_eee,
14186 	.set_eee		= tg3_set_eee,
14187 	.get_link_ksettings	= tg3_get_link_ksettings,
14188 	.set_link_ksettings	= tg3_set_link_ksettings,
14189 };
14190 
14191 static void tg3_get_stats64(struct net_device *dev,
14192 			    struct rtnl_link_stats64 *stats)
14193 {
14194 	struct tg3 *tp = netdev_priv(dev);
14195 
14196 	spin_lock_bh(&tp->lock);
14197 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14198 		*stats = tp->net_stats_prev;
14199 		spin_unlock_bh(&tp->lock);
14200 		return;
14201 	}
14202 
14203 	tg3_get_nstats(tp, stats);
14204 	spin_unlock_bh(&tp->lock);
14205 }
14206 
14207 static void tg3_set_rx_mode(struct net_device *dev)
14208 {
14209 	struct tg3 *tp = netdev_priv(dev);
14210 
14211 	if (!netif_running(dev))
14212 		return;
14213 
14214 	tg3_full_lock(tp, 0);
14215 	__tg3_set_rx_mode(dev);
14216 	tg3_full_unlock(tp);
14217 }
14218 
14219 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14220 			       int new_mtu)
14221 {
14222 	dev->mtu = new_mtu;
14223 
14224 	if (new_mtu > ETH_DATA_LEN) {
14225 		if (tg3_flag(tp, 5780_CLASS)) {
14226 			netdev_update_features(dev);
14227 			tg3_flag_clear(tp, TSO_CAPABLE);
14228 		} else {
14229 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14230 		}
14231 	} else {
14232 		if (tg3_flag(tp, 5780_CLASS)) {
14233 			tg3_flag_set(tp, TSO_CAPABLE);
14234 			netdev_update_features(dev);
14235 		}
14236 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14237 	}
14238 }
14239 
14240 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14241 {
14242 	struct tg3 *tp = netdev_priv(dev);
14243 	int err;
14244 	bool reset_phy = false;
14245 
14246 	if (!netif_running(dev)) {
14247 		/* We'll just catch it later when the
14248 		 * device is up'd.
14249 		 */
14250 		tg3_set_mtu(dev, tp, new_mtu);
14251 		return 0;
14252 	}
14253 
14254 	tg3_phy_stop(tp);
14255 
14256 	tg3_netif_stop(tp);
14257 
14258 	tg3_set_mtu(dev, tp, new_mtu);
14259 
14260 	tg3_full_lock(tp, 1);
14261 
14262 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14263 
14264 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14265 	 * breaks all requests to 256 bytes.
14266 	 */
14267 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14268 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14269 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14270 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14271 		reset_phy = true;
14272 
14273 	err = tg3_restart_hw(tp, reset_phy);
14274 
14275 	if (!err)
14276 		tg3_netif_start(tp);
14277 
14278 	tg3_full_unlock(tp);
14279 
14280 	if (!err)
14281 		tg3_phy_start(tp);
14282 
14283 	return err;
14284 }
14285 
14286 static const struct net_device_ops tg3_netdev_ops = {
14287 	.ndo_open		= tg3_open,
14288 	.ndo_stop		= tg3_close,
14289 	.ndo_start_xmit		= tg3_start_xmit,
14290 	.ndo_get_stats64	= tg3_get_stats64,
14291 	.ndo_validate_addr	= eth_validate_addr,
14292 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14293 	.ndo_set_mac_address	= tg3_set_mac_addr,
14294 	.ndo_do_ioctl		= tg3_ioctl,
14295 	.ndo_tx_timeout		= tg3_tx_timeout,
14296 	.ndo_change_mtu		= tg3_change_mtu,
14297 	.ndo_fix_features	= tg3_fix_features,
14298 	.ndo_set_features	= tg3_set_features,
14299 #ifdef CONFIG_NET_POLL_CONTROLLER
14300 	.ndo_poll_controller	= tg3_poll_controller,
14301 #endif
14302 };
14303 
14304 static void tg3_get_eeprom_size(struct tg3 *tp)
14305 {
14306 	u32 cursize, val, magic;
14307 
14308 	tp->nvram_size = EEPROM_CHIP_SIZE;
14309 
14310 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14311 		return;
14312 
14313 	if ((magic != TG3_EEPROM_MAGIC) &&
14314 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14315 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14316 		return;
14317 
14318 	/*
14319 	 * Size the chip by reading offsets at increasing powers of two.
14320 	 * When we encounter our validation signature, we know the addressing
14321 	 * has wrapped around, and thus have our chip size.
14322 	 */
14323 	cursize = 0x10;
14324 
14325 	while (cursize < tp->nvram_size) {
14326 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14327 			return;
14328 
14329 		if (val == magic)
14330 			break;
14331 
14332 		cursize <<= 1;
14333 	}
14334 
14335 	tp->nvram_size = cursize;
14336 }
14337 
14338 static void tg3_get_nvram_size(struct tg3 *tp)
14339 {
14340 	u32 val;
14341 
14342 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14343 		return;
14344 
14345 	/* Selfboot format */
14346 	if (val != TG3_EEPROM_MAGIC) {
14347 		tg3_get_eeprom_size(tp);
14348 		return;
14349 	}
14350 
14351 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14352 		if (val != 0) {
14353 			/* This is confusing.  We want to operate on the
14354 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14355 			 * call will read from NVRAM and byteswap the data
14356 			 * according to the byteswapping settings for all
14357 			 * other register accesses.  This ensures the data we
14358 			 * want will always reside in the lower 16-bits.
14359 			 * However, the data in NVRAM is in LE format, which
14360 			 * means the data from the NVRAM read will always be
14361 			 * opposite the endianness of the CPU.  The 16-bit
14362 			 * byteswap then brings the data to CPU endianness.
14363 			 */
14364 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14365 			return;
14366 		}
14367 	}
14368 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14369 }
14370 
14371 static void tg3_get_nvram_info(struct tg3 *tp)
14372 {
14373 	u32 nvcfg1;
14374 
14375 	nvcfg1 = tr32(NVRAM_CFG1);
14376 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14377 		tg3_flag_set(tp, FLASH);
14378 	} else {
14379 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14380 		tw32(NVRAM_CFG1, nvcfg1);
14381 	}
14382 
14383 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14384 	    tg3_flag(tp, 5780_CLASS)) {
14385 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14386 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14387 			tp->nvram_jedecnum = JEDEC_ATMEL;
14388 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14389 			tg3_flag_set(tp, NVRAM_BUFFERED);
14390 			break;
14391 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14392 			tp->nvram_jedecnum = JEDEC_ATMEL;
14393 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14394 			break;
14395 		case FLASH_VENDOR_ATMEL_EEPROM:
14396 			tp->nvram_jedecnum = JEDEC_ATMEL;
14397 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14398 			tg3_flag_set(tp, NVRAM_BUFFERED);
14399 			break;
14400 		case FLASH_VENDOR_ST:
14401 			tp->nvram_jedecnum = JEDEC_ST;
14402 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14403 			tg3_flag_set(tp, NVRAM_BUFFERED);
14404 			break;
14405 		case FLASH_VENDOR_SAIFUN:
14406 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14407 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14408 			break;
14409 		case FLASH_VENDOR_SST_SMALL:
14410 		case FLASH_VENDOR_SST_LARGE:
14411 			tp->nvram_jedecnum = JEDEC_SST;
14412 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14413 			break;
14414 		}
14415 	} else {
14416 		tp->nvram_jedecnum = JEDEC_ATMEL;
14417 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14418 		tg3_flag_set(tp, NVRAM_BUFFERED);
14419 	}
14420 }
14421 
14422 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14423 {
14424 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14425 	case FLASH_5752PAGE_SIZE_256:
14426 		tp->nvram_pagesize = 256;
14427 		break;
14428 	case FLASH_5752PAGE_SIZE_512:
14429 		tp->nvram_pagesize = 512;
14430 		break;
14431 	case FLASH_5752PAGE_SIZE_1K:
14432 		tp->nvram_pagesize = 1024;
14433 		break;
14434 	case FLASH_5752PAGE_SIZE_2K:
14435 		tp->nvram_pagesize = 2048;
14436 		break;
14437 	case FLASH_5752PAGE_SIZE_4K:
14438 		tp->nvram_pagesize = 4096;
14439 		break;
14440 	case FLASH_5752PAGE_SIZE_264:
14441 		tp->nvram_pagesize = 264;
14442 		break;
14443 	case FLASH_5752PAGE_SIZE_528:
14444 		tp->nvram_pagesize = 528;
14445 		break;
14446 	}
14447 }
14448 
14449 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14450 {
14451 	u32 nvcfg1;
14452 
14453 	nvcfg1 = tr32(NVRAM_CFG1);
14454 
14455 	/* NVRAM protection for TPM */
14456 	if (nvcfg1 & (1 << 27))
14457 		tg3_flag_set(tp, PROTECTED_NVRAM);
14458 
14459 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14460 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14461 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14462 		tp->nvram_jedecnum = JEDEC_ATMEL;
14463 		tg3_flag_set(tp, NVRAM_BUFFERED);
14464 		break;
14465 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14466 		tp->nvram_jedecnum = JEDEC_ATMEL;
14467 		tg3_flag_set(tp, NVRAM_BUFFERED);
14468 		tg3_flag_set(tp, FLASH);
14469 		break;
14470 	case FLASH_5752VENDOR_ST_M45PE10:
14471 	case FLASH_5752VENDOR_ST_M45PE20:
14472 	case FLASH_5752VENDOR_ST_M45PE40:
14473 		tp->nvram_jedecnum = JEDEC_ST;
14474 		tg3_flag_set(tp, NVRAM_BUFFERED);
14475 		tg3_flag_set(tp, FLASH);
14476 		break;
14477 	}
14478 
14479 	if (tg3_flag(tp, FLASH)) {
14480 		tg3_nvram_get_pagesize(tp, nvcfg1);
14481 	} else {
14482 		/* For eeprom, set pagesize to maximum eeprom size */
14483 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14484 
14485 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14486 		tw32(NVRAM_CFG1, nvcfg1);
14487 	}
14488 }
14489 
14490 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14491 {
14492 	u32 nvcfg1, protect = 0;
14493 
14494 	nvcfg1 = tr32(NVRAM_CFG1);
14495 
14496 	/* NVRAM protection for TPM */
14497 	if (nvcfg1 & (1 << 27)) {
14498 		tg3_flag_set(tp, PROTECTED_NVRAM);
14499 		protect = 1;
14500 	}
14501 
14502 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14503 	switch (nvcfg1) {
14504 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14505 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14506 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14507 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14508 		tp->nvram_jedecnum = JEDEC_ATMEL;
14509 		tg3_flag_set(tp, NVRAM_BUFFERED);
14510 		tg3_flag_set(tp, FLASH);
14511 		tp->nvram_pagesize = 264;
14512 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14513 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14514 			tp->nvram_size = (protect ? 0x3e200 :
14515 					  TG3_NVRAM_SIZE_512KB);
14516 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14517 			tp->nvram_size = (protect ? 0x1f200 :
14518 					  TG3_NVRAM_SIZE_256KB);
14519 		else
14520 			tp->nvram_size = (protect ? 0x1f200 :
14521 					  TG3_NVRAM_SIZE_128KB);
14522 		break;
14523 	case FLASH_5752VENDOR_ST_M45PE10:
14524 	case FLASH_5752VENDOR_ST_M45PE20:
14525 	case FLASH_5752VENDOR_ST_M45PE40:
14526 		tp->nvram_jedecnum = JEDEC_ST;
14527 		tg3_flag_set(tp, NVRAM_BUFFERED);
14528 		tg3_flag_set(tp, FLASH);
14529 		tp->nvram_pagesize = 256;
14530 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14531 			tp->nvram_size = (protect ?
14532 					  TG3_NVRAM_SIZE_64KB :
14533 					  TG3_NVRAM_SIZE_128KB);
14534 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14535 			tp->nvram_size = (protect ?
14536 					  TG3_NVRAM_SIZE_64KB :
14537 					  TG3_NVRAM_SIZE_256KB);
14538 		else
14539 			tp->nvram_size = (protect ?
14540 					  TG3_NVRAM_SIZE_128KB :
14541 					  TG3_NVRAM_SIZE_512KB);
14542 		break;
14543 	}
14544 }
14545 
14546 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14547 {
14548 	u32 nvcfg1;
14549 
14550 	nvcfg1 = tr32(NVRAM_CFG1);
14551 
14552 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14553 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14554 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14555 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14556 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14557 		tp->nvram_jedecnum = JEDEC_ATMEL;
14558 		tg3_flag_set(tp, NVRAM_BUFFERED);
14559 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14560 
14561 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14562 		tw32(NVRAM_CFG1, nvcfg1);
14563 		break;
14564 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14565 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14566 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14567 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14568 		tp->nvram_jedecnum = JEDEC_ATMEL;
14569 		tg3_flag_set(tp, NVRAM_BUFFERED);
14570 		tg3_flag_set(tp, FLASH);
14571 		tp->nvram_pagesize = 264;
14572 		break;
14573 	case FLASH_5752VENDOR_ST_M45PE10:
14574 	case FLASH_5752VENDOR_ST_M45PE20:
14575 	case FLASH_5752VENDOR_ST_M45PE40:
14576 		tp->nvram_jedecnum = JEDEC_ST;
14577 		tg3_flag_set(tp, NVRAM_BUFFERED);
14578 		tg3_flag_set(tp, FLASH);
14579 		tp->nvram_pagesize = 256;
14580 		break;
14581 	}
14582 }
14583 
14584 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14585 {
14586 	u32 nvcfg1, protect = 0;
14587 
14588 	nvcfg1 = tr32(NVRAM_CFG1);
14589 
14590 	/* NVRAM protection for TPM */
14591 	if (nvcfg1 & (1 << 27)) {
14592 		tg3_flag_set(tp, PROTECTED_NVRAM);
14593 		protect = 1;
14594 	}
14595 
14596 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14597 	switch (nvcfg1) {
14598 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14599 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14600 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14601 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14602 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14603 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14604 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14605 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14606 		tp->nvram_jedecnum = JEDEC_ATMEL;
14607 		tg3_flag_set(tp, NVRAM_BUFFERED);
14608 		tg3_flag_set(tp, FLASH);
14609 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14610 		tp->nvram_pagesize = 256;
14611 		break;
14612 	case FLASH_5761VENDOR_ST_A_M45PE20:
14613 	case FLASH_5761VENDOR_ST_A_M45PE40:
14614 	case FLASH_5761VENDOR_ST_A_M45PE80:
14615 	case FLASH_5761VENDOR_ST_A_M45PE16:
14616 	case FLASH_5761VENDOR_ST_M_M45PE20:
14617 	case FLASH_5761VENDOR_ST_M_M45PE40:
14618 	case FLASH_5761VENDOR_ST_M_M45PE80:
14619 	case FLASH_5761VENDOR_ST_M_M45PE16:
14620 		tp->nvram_jedecnum = JEDEC_ST;
14621 		tg3_flag_set(tp, NVRAM_BUFFERED);
14622 		tg3_flag_set(tp, FLASH);
14623 		tp->nvram_pagesize = 256;
14624 		break;
14625 	}
14626 
14627 	if (protect) {
14628 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14629 	} else {
14630 		switch (nvcfg1) {
14631 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14632 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14633 		case FLASH_5761VENDOR_ST_A_M45PE16:
14634 		case FLASH_5761VENDOR_ST_M_M45PE16:
14635 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14636 			break;
14637 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14638 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14639 		case FLASH_5761VENDOR_ST_A_M45PE80:
14640 		case FLASH_5761VENDOR_ST_M_M45PE80:
14641 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14642 			break;
14643 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14644 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14645 		case FLASH_5761VENDOR_ST_A_M45PE40:
14646 		case FLASH_5761VENDOR_ST_M_M45PE40:
14647 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14648 			break;
14649 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14650 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14651 		case FLASH_5761VENDOR_ST_A_M45PE20:
14652 		case FLASH_5761VENDOR_ST_M_M45PE20:
14653 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14654 			break;
14655 		}
14656 	}
14657 }
14658 
14659 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14660 {
14661 	tp->nvram_jedecnum = JEDEC_ATMEL;
14662 	tg3_flag_set(tp, NVRAM_BUFFERED);
14663 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14664 }
14665 
14666 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14667 {
14668 	u32 nvcfg1;
14669 
14670 	nvcfg1 = tr32(NVRAM_CFG1);
14671 
14672 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14673 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14674 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14675 		tp->nvram_jedecnum = JEDEC_ATMEL;
14676 		tg3_flag_set(tp, NVRAM_BUFFERED);
14677 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14678 
14679 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14680 		tw32(NVRAM_CFG1, nvcfg1);
14681 		return;
14682 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14683 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14684 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14685 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14686 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14687 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14688 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14689 		tp->nvram_jedecnum = JEDEC_ATMEL;
14690 		tg3_flag_set(tp, NVRAM_BUFFERED);
14691 		tg3_flag_set(tp, FLASH);
14692 
14693 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14694 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14695 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14696 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14697 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14698 			break;
14699 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14700 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14701 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14702 			break;
14703 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14704 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14705 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14706 			break;
14707 		}
14708 		break;
14709 	case FLASH_5752VENDOR_ST_M45PE10:
14710 	case FLASH_5752VENDOR_ST_M45PE20:
14711 	case FLASH_5752VENDOR_ST_M45PE40:
14712 		tp->nvram_jedecnum = JEDEC_ST;
14713 		tg3_flag_set(tp, NVRAM_BUFFERED);
14714 		tg3_flag_set(tp, FLASH);
14715 
14716 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14717 		case FLASH_5752VENDOR_ST_M45PE10:
14718 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14719 			break;
14720 		case FLASH_5752VENDOR_ST_M45PE20:
14721 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14722 			break;
14723 		case FLASH_5752VENDOR_ST_M45PE40:
14724 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14725 			break;
14726 		}
14727 		break;
14728 	default:
14729 		tg3_flag_set(tp, NO_NVRAM);
14730 		return;
14731 	}
14732 
14733 	tg3_nvram_get_pagesize(tp, nvcfg1);
14734 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14735 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14736 }
14737 
14738 
14739 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14740 {
14741 	u32 nvcfg1;
14742 
14743 	nvcfg1 = tr32(NVRAM_CFG1);
14744 
14745 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14746 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14747 	case FLASH_5717VENDOR_MICRO_EEPROM:
14748 		tp->nvram_jedecnum = JEDEC_ATMEL;
14749 		tg3_flag_set(tp, NVRAM_BUFFERED);
14750 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14751 
14752 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14753 		tw32(NVRAM_CFG1, nvcfg1);
14754 		return;
14755 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14756 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14757 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14758 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14759 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14760 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14761 	case FLASH_5717VENDOR_ATMEL_45USPT:
14762 		tp->nvram_jedecnum = JEDEC_ATMEL;
14763 		tg3_flag_set(tp, NVRAM_BUFFERED);
14764 		tg3_flag_set(tp, FLASH);
14765 
14766 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14767 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14768 			/* Detect size with tg3_nvram_get_size() */
14769 			break;
14770 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14771 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14772 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14773 			break;
14774 		default:
14775 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14776 			break;
14777 		}
14778 		break;
14779 	case FLASH_5717VENDOR_ST_M_M25PE10:
14780 	case FLASH_5717VENDOR_ST_A_M25PE10:
14781 	case FLASH_5717VENDOR_ST_M_M45PE10:
14782 	case FLASH_5717VENDOR_ST_A_M45PE10:
14783 	case FLASH_5717VENDOR_ST_M_M25PE20:
14784 	case FLASH_5717VENDOR_ST_A_M25PE20:
14785 	case FLASH_5717VENDOR_ST_M_M45PE20:
14786 	case FLASH_5717VENDOR_ST_A_M45PE20:
14787 	case FLASH_5717VENDOR_ST_25USPT:
14788 	case FLASH_5717VENDOR_ST_45USPT:
14789 		tp->nvram_jedecnum = JEDEC_ST;
14790 		tg3_flag_set(tp, NVRAM_BUFFERED);
14791 		tg3_flag_set(tp, FLASH);
14792 
14793 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14794 		case FLASH_5717VENDOR_ST_M_M25PE20:
14795 		case FLASH_5717VENDOR_ST_M_M45PE20:
14796 			/* Detect size with tg3_nvram_get_size() */
14797 			break;
14798 		case FLASH_5717VENDOR_ST_A_M25PE20:
14799 		case FLASH_5717VENDOR_ST_A_M45PE20:
14800 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14801 			break;
14802 		default:
14803 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14804 			break;
14805 		}
14806 		break;
14807 	default:
14808 		tg3_flag_set(tp, NO_NVRAM);
14809 		return;
14810 	}
14811 
14812 	tg3_nvram_get_pagesize(tp, nvcfg1);
14813 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14814 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14815 }
14816 
14817 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14818 {
14819 	u32 nvcfg1, nvmpinstrp, nv_status;
14820 
14821 	nvcfg1 = tr32(NVRAM_CFG1);
14822 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14823 
14824 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14825 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14826 			tg3_flag_set(tp, NO_NVRAM);
14827 			return;
14828 		}
14829 
14830 		switch (nvmpinstrp) {
14831 		case FLASH_5762_MX25L_100:
14832 		case FLASH_5762_MX25L_200:
14833 		case FLASH_5762_MX25L_400:
14834 		case FLASH_5762_MX25L_800:
14835 		case FLASH_5762_MX25L_160_320:
14836 			tp->nvram_pagesize = 4096;
14837 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14838 			tg3_flag_set(tp, NVRAM_BUFFERED);
14839 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14840 			tg3_flag_set(tp, FLASH);
14841 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14842 			tp->nvram_size =
14843 				(1 << (nv_status >> AUTOSENSE_DEVID &
14844 						AUTOSENSE_DEVID_MASK)
14845 					<< AUTOSENSE_SIZE_IN_MB);
14846 			return;
14847 
14848 		case FLASH_5762_EEPROM_HD:
14849 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14850 			break;
14851 		case FLASH_5762_EEPROM_LD:
14852 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14853 			break;
14854 		case FLASH_5720VENDOR_M_ST_M45PE20:
14855 			/* This pinstrap supports multiple sizes, so force it
14856 			 * to read the actual size from location 0xf0.
14857 			 */
14858 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14859 			break;
14860 		}
14861 	}
14862 
14863 	switch (nvmpinstrp) {
14864 	case FLASH_5720_EEPROM_HD:
14865 	case FLASH_5720_EEPROM_LD:
14866 		tp->nvram_jedecnum = JEDEC_ATMEL;
14867 		tg3_flag_set(tp, NVRAM_BUFFERED);
14868 
14869 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14870 		tw32(NVRAM_CFG1, nvcfg1);
14871 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14872 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14873 		else
14874 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14875 		return;
14876 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14877 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14878 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14879 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14880 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14881 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14882 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14883 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14884 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14885 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14886 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14887 	case FLASH_5720VENDOR_ATMEL_45USPT:
14888 		tp->nvram_jedecnum = JEDEC_ATMEL;
14889 		tg3_flag_set(tp, NVRAM_BUFFERED);
14890 		tg3_flag_set(tp, FLASH);
14891 
14892 		switch (nvmpinstrp) {
14893 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14894 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14895 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14896 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14897 			break;
14898 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14899 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14900 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14901 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14902 			break;
14903 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14904 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14905 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14906 			break;
14907 		default:
14908 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14909 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14910 			break;
14911 		}
14912 		break;
14913 	case FLASH_5720VENDOR_M_ST_M25PE10:
14914 	case FLASH_5720VENDOR_M_ST_M45PE10:
14915 	case FLASH_5720VENDOR_A_ST_M25PE10:
14916 	case FLASH_5720VENDOR_A_ST_M45PE10:
14917 	case FLASH_5720VENDOR_M_ST_M25PE20:
14918 	case FLASH_5720VENDOR_M_ST_M45PE20:
14919 	case FLASH_5720VENDOR_A_ST_M25PE20:
14920 	case FLASH_5720VENDOR_A_ST_M45PE20:
14921 	case FLASH_5720VENDOR_M_ST_M25PE40:
14922 	case FLASH_5720VENDOR_M_ST_M45PE40:
14923 	case FLASH_5720VENDOR_A_ST_M25PE40:
14924 	case FLASH_5720VENDOR_A_ST_M45PE40:
14925 	case FLASH_5720VENDOR_M_ST_M25PE80:
14926 	case FLASH_5720VENDOR_M_ST_M45PE80:
14927 	case FLASH_5720VENDOR_A_ST_M25PE80:
14928 	case FLASH_5720VENDOR_A_ST_M45PE80:
14929 	case FLASH_5720VENDOR_ST_25USPT:
14930 	case FLASH_5720VENDOR_ST_45USPT:
14931 		tp->nvram_jedecnum = JEDEC_ST;
14932 		tg3_flag_set(tp, NVRAM_BUFFERED);
14933 		tg3_flag_set(tp, FLASH);
14934 
14935 		switch (nvmpinstrp) {
14936 		case FLASH_5720VENDOR_M_ST_M25PE20:
14937 		case FLASH_5720VENDOR_M_ST_M45PE20:
14938 		case FLASH_5720VENDOR_A_ST_M25PE20:
14939 		case FLASH_5720VENDOR_A_ST_M45PE20:
14940 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14941 			break;
14942 		case FLASH_5720VENDOR_M_ST_M25PE40:
14943 		case FLASH_5720VENDOR_M_ST_M45PE40:
14944 		case FLASH_5720VENDOR_A_ST_M25PE40:
14945 		case FLASH_5720VENDOR_A_ST_M45PE40:
14946 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14947 			break;
14948 		case FLASH_5720VENDOR_M_ST_M25PE80:
14949 		case FLASH_5720VENDOR_M_ST_M45PE80:
14950 		case FLASH_5720VENDOR_A_ST_M25PE80:
14951 		case FLASH_5720VENDOR_A_ST_M45PE80:
14952 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14953 			break;
14954 		default:
14955 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14956 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14957 			break;
14958 		}
14959 		break;
14960 	default:
14961 		tg3_flag_set(tp, NO_NVRAM);
14962 		return;
14963 	}
14964 
14965 	tg3_nvram_get_pagesize(tp, nvcfg1);
14966 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14967 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14968 
14969 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14970 		u32 val;
14971 
14972 		if (tg3_nvram_read(tp, 0, &val))
14973 			return;
14974 
14975 		if (val != TG3_EEPROM_MAGIC &&
14976 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14977 			tg3_flag_set(tp, NO_NVRAM);
14978 	}
14979 }
14980 
14981 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14982 static void tg3_nvram_init(struct tg3 *tp)
14983 {
14984 	if (tg3_flag(tp, IS_SSB_CORE)) {
14985 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14986 		tg3_flag_clear(tp, NVRAM);
14987 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14988 		tg3_flag_set(tp, NO_NVRAM);
14989 		return;
14990 	}
14991 
14992 	tw32_f(GRC_EEPROM_ADDR,
14993 	     (EEPROM_ADDR_FSM_RESET |
14994 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14995 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14996 
14997 	msleep(1);
14998 
14999 	/* Enable seeprom accesses. */
15000 	tw32_f(GRC_LOCAL_CTRL,
15001 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15002 	udelay(100);
15003 
15004 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15005 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15006 		tg3_flag_set(tp, NVRAM);
15007 
15008 		if (tg3_nvram_lock(tp)) {
15009 			netdev_warn(tp->dev,
15010 				    "Cannot get nvram lock, %s failed\n",
15011 				    __func__);
15012 			return;
15013 		}
15014 		tg3_enable_nvram_access(tp);
15015 
15016 		tp->nvram_size = 0;
15017 
15018 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15019 			tg3_get_5752_nvram_info(tp);
15020 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15021 			tg3_get_5755_nvram_info(tp);
15022 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15023 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15024 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15025 			tg3_get_5787_nvram_info(tp);
15026 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15027 			tg3_get_5761_nvram_info(tp);
15028 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15029 			tg3_get_5906_nvram_info(tp);
15030 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15031 			 tg3_flag(tp, 57765_CLASS))
15032 			tg3_get_57780_nvram_info(tp);
15033 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15034 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15035 			tg3_get_5717_nvram_info(tp);
15036 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15037 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15038 			tg3_get_5720_nvram_info(tp);
15039 		else
15040 			tg3_get_nvram_info(tp);
15041 
15042 		if (tp->nvram_size == 0)
15043 			tg3_get_nvram_size(tp);
15044 
15045 		tg3_disable_nvram_access(tp);
15046 		tg3_nvram_unlock(tp);
15047 
15048 	} else {
15049 		tg3_flag_clear(tp, NVRAM);
15050 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15051 
15052 		tg3_get_eeprom_size(tp);
15053 	}
15054 }
15055 
15056 struct subsys_tbl_ent {
15057 	u16 subsys_vendor, subsys_devid;
15058 	u32 phy_id;
15059 };
15060 
15061 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15062 	/* Broadcom boards. */
15063 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15064 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15065 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15066 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15067 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15068 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15069 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15070 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15071 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15072 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15073 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15074 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15075 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15076 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15077 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15078 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15079 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15080 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15081 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15082 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15083 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15084 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15085 
15086 	/* 3com boards. */
15087 	{ TG3PCI_SUBVENDOR_ID_3COM,
15088 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15089 	{ TG3PCI_SUBVENDOR_ID_3COM,
15090 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15091 	{ TG3PCI_SUBVENDOR_ID_3COM,
15092 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15093 	{ TG3PCI_SUBVENDOR_ID_3COM,
15094 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15095 	{ TG3PCI_SUBVENDOR_ID_3COM,
15096 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15097 
15098 	/* DELL boards. */
15099 	{ TG3PCI_SUBVENDOR_ID_DELL,
15100 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15101 	{ TG3PCI_SUBVENDOR_ID_DELL,
15102 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15103 	{ TG3PCI_SUBVENDOR_ID_DELL,
15104 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15105 	{ TG3PCI_SUBVENDOR_ID_DELL,
15106 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15107 
15108 	/* Compaq boards. */
15109 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15110 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15111 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15112 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15113 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15114 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15115 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15116 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15117 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15118 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15119 
15120 	/* IBM boards. */
15121 	{ TG3PCI_SUBVENDOR_ID_IBM,
15122 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15123 };
15124 
15125 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15126 {
15127 	int i;
15128 
15129 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15130 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15131 		     tp->pdev->subsystem_vendor) &&
15132 		    (subsys_id_to_phy_id[i].subsys_devid ==
15133 		     tp->pdev->subsystem_device))
15134 			return &subsys_id_to_phy_id[i];
15135 	}
15136 	return NULL;
15137 }
15138 
15139 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15140 {
15141 	u32 val;
15142 
15143 	tp->phy_id = TG3_PHY_ID_INVALID;
15144 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15145 
15146 	/* Assume an onboard device and WOL capable by default.  */
15147 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15148 	tg3_flag_set(tp, WOL_CAP);
15149 
15150 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15151 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15152 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15153 			tg3_flag_set(tp, IS_NIC);
15154 		}
15155 		val = tr32(VCPU_CFGSHDW);
15156 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15157 			tg3_flag_set(tp, ASPM_WORKAROUND);
15158 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15159 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15160 			tg3_flag_set(tp, WOL_ENABLE);
15161 			device_set_wakeup_enable(&tp->pdev->dev, true);
15162 		}
15163 		goto done;
15164 	}
15165 
15166 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15167 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15168 		u32 nic_cfg, led_cfg;
15169 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15170 		u32 nic_phy_id, ver, eeprom_phy_id;
15171 		int eeprom_phy_serdes = 0;
15172 
15173 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15174 		tp->nic_sram_data_cfg = nic_cfg;
15175 
15176 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15177 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15178 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15179 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15180 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15181 		    (ver > 0) && (ver < 0x100))
15182 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15183 
15184 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15185 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15186 
15187 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15188 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15189 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15190 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15191 
15192 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15193 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15194 			eeprom_phy_serdes = 1;
15195 
15196 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15197 		if (nic_phy_id != 0) {
15198 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15199 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15200 
15201 			eeprom_phy_id  = (id1 >> 16) << 10;
15202 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15203 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15204 		} else
15205 			eeprom_phy_id = 0;
15206 
15207 		tp->phy_id = eeprom_phy_id;
15208 		if (eeprom_phy_serdes) {
15209 			if (!tg3_flag(tp, 5705_PLUS))
15210 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15211 			else
15212 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15213 		}
15214 
15215 		if (tg3_flag(tp, 5750_PLUS))
15216 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15217 				    SHASTA_EXT_LED_MODE_MASK);
15218 		else
15219 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15220 
15221 		switch (led_cfg) {
15222 		default:
15223 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15224 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15225 			break;
15226 
15227 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15228 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15229 			break;
15230 
15231 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15232 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15233 
15234 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15235 			 * read on some older 5700/5701 bootcode.
15236 			 */
15237 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15238 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15239 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15240 
15241 			break;
15242 
15243 		case SHASTA_EXT_LED_SHARED:
15244 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15245 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15246 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15247 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15248 						 LED_CTRL_MODE_PHY_2);
15249 
15250 			if (tg3_flag(tp, 5717_PLUS) ||
15251 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15252 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15253 						LED_CTRL_BLINK_RATE_MASK;
15254 
15255 			break;
15256 
15257 		case SHASTA_EXT_LED_MAC:
15258 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15259 			break;
15260 
15261 		case SHASTA_EXT_LED_COMBO:
15262 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15263 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15264 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15265 						 LED_CTRL_MODE_PHY_2);
15266 			break;
15267 
15268 		}
15269 
15270 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15271 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15272 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15273 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15274 
15275 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15276 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15277 
15278 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15279 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15280 			if ((tp->pdev->subsystem_vendor ==
15281 			     PCI_VENDOR_ID_ARIMA) &&
15282 			    (tp->pdev->subsystem_device == 0x205a ||
15283 			     tp->pdev->subsystem_device == 0x2063))
15284 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15285 		} else {
15286 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15287 			tg3_flag_set(tp, IS_NIC);
15288 		}
15289 
15290 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15291 			tg3_flag_set(tp, ENABLE_ASF);
15292 			if (tg3_flag(tp, 5750_PLUS))
15293 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15294 		}
15295 
15296 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15297 		    tg3_flag(tp, 5750_PLUS))
15298 			tg3_flag_set(tp, ENABLE_APE);
15299 
15300 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15301 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15302 			tg3_flag_clear(tp, WOL_CAP);
15303 
15304 		if (tg3_flag(tp, WOL_CAP) &&
15305 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15306 			tg3_flag_set(tp, WOL_ENABLE);
15307 			device_set_wakeup_enable(&tp->pdev->dev, true);
15308 		}
15309 
15310 		if (cfg2 & (1 << 17))
15311 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15312 
15313 		/* serdes signal pre-emphasis in register 0x590 set by */
15314 		/* bootcode if bit 18 is set */
15315 		if (cfg2 & (1 << 18))
15316 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15317 
15318 		if ((tg3_flag(tp, 57765_PLUS) ||
15319 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15320 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15321 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15322 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15323 
15324 		if (tg3_flag(tp, PCI_EXPRESS)) {
15325 			u32 cfg3;
15326 
15327 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15328 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15329 			    !tg3_flag(tp, 57765_PLUS) &&
15330 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15331 				tg3_flag_set(tp, ASPM_WORKAROUND);
15332 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15333 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15334 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15335 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15336 		}
15337 
15338 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15339 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15340 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15341 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15342 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15343 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15344 
15345 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15346 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15347 	}
15348 done:
15349 	if (tg3_flag(tp, WOL_CAP))
15350 		device_set_wakeup_enable(&tp->pdev->dev,
15351 					 tg3_flag(tp, WOL_ENABLE));
15352 	else
15353 		device_set_wakeup_capable(&tp->pdev->dev, false);
15354 }
15355 
15356 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15357 {
15358 	int i, err;
15359 	u32 val2, off = offset * 8;
15360 
15361 	err = tg3_nvram_lock(tp);
15362 	if (err)
15363 		return err;
15364 
15365 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15366 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15367 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15368 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15369 	udelay(10);
15370 
15371 	for (i = 0; i < 100; i++) {
15372 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15373 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15374 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15375 			break;
15376 		}
15377 		udelay(10);
15378 	}
15379 
15380 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15381 
15382 	tg3_nvram_unlock(tp);
15383 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15384 		return 0;
15385 
15386 	return -EBUSY;
15387 }
15388 
15389 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15390 {
15391 	int i;
15392 	u32 val;
15393 
15394 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15395 	tw32(OTP_CTRL, cmd);
15396 
15397 	/* Wait for up to 1 ms for command to execute. */
15398 	for (i = 0; i < 100; i++) {
15399 		val = tr32(OTP_STATUS);
15400 		if (val & OTP_STATUS_CMD_DONE)
15401 			break;
15402 		udelay(10);
15403 	}
15404 
15405 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15406 }
15407 
15408 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15409  * configuration is a 32-bit value that straddles the alignment boundary.
15410  * We do two 32-bit reads and then shift and merge the results.
15411  */
15412 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15413 {
15414 	u32 bhalf_otp, thalf_otp;
15415 
15416 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15417 
15418 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15419 		return 0;
15420 
15421 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15422 
15423 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15424 		return 0;
15425 
15426 	thalf_otp = tr32(OTP_READ_DATA);
15427 
15428 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15429 
15430 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15431 		return 0;
15432 
15433 	bhalf_otp = tr32(OTP_READ_DATA);
15434 
15435 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15436 }
15437 
15438 static void tg3_phy_init_link_config(struct tg3 *tp)
15439 {
15440 	u32 adv = ADVERTISED_Autoneg;
15441 
15442 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15443 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15444 			adv |= ADVERTISED_1000baseT_Half;
15445 		adv |= ADVERTISED_1000baseT_Full;
15446 	}
15447 
15448 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15449 		adv |= ADVERTISED_100baseT_Half |
15450 		       ADVERTISED_100baseT_Full |
15451 		       ADVERTISED_10baseT_Half |
15452 		       ADVERTISED_10baseT_Full |
15453 		       ADVERTISED_TP;
15454 	else
15455 		adv |= ADVERTISED_FIBRE;
15456 
15457 	tp->link_config.advertising = adv;
15458 	tp->link_config.speed = SPEED_UNKNOWN;
15459 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15460 	tp->link_config.autoneg = AUTONEG_ENABLE;
15461 	tp->link_config.active_speed = SPEED_UNKNOWN;
15462 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15463 
15464 	tp->old_link = -1;
15465 }
15466 
15467 static int tg3_phy_probe(struct tg3 *tp)
15468 {
15469 	u32 hw_phy_id_1, hw_phy_id_2;
15470 	u32 hw_phy_id, hw_phy_id_masked;
15471 	int err;
15472 
15473 	/* flow control autonegotiation is default behavior */
15474 	tg3_flag_set(tp, PAUSE_AUTONEG);
15475 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15476 
15477 	if (tg3_flag(tp, ENABLE_APE)) {
15478 		switch (tp->pci_fn) {
15479 		case 0:
15480 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15481 			break;
15482 		case 1:
15483 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15484 			break;
15485 		case 2:
15486 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15487 			break;
15488 		case 3:
15489 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15490 			break;
15491 		}
15492 	}
15493 
15494 	if (!tg3_flag(tp, ENABLE_ASF) &&
15495 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15496 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15497 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15498 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15499 
15500 	if (tg3_flag(tp, USE_PHYLIB))
15501 		return tg3_phy_init(tp);
15502 
15503 	/* Reading the PHY ID register can conflict with ASF
15504 	 * firmware access to the PHY hardware.
15505 	 */
15506 	err = 0;
15507 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15508 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15509 	} else {
15510 		/* Now read the physical PHY_ID from the chip and verify
15511 		 * that it is sane.  If it doesn't look good, we fall back
15512 		 * to either the hard-coded table based PHY_ID and failing
15513 		 * that the value found in the eeprom area.
15514 		 */
15515 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15516 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15517 
15518 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15519 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15520 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15521 
15522 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15523 	}
15524 
15525 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15526 		tp->phy_id = hw_phy_id;
15527 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15528 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15529 		else
15530 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15531 	} else {
15532 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15533 			/* Do nothing, phy ID already set up in
15534 			 * tg3_get_eeprom_hw_cfg().
15535 			 */
15536 		} else {
15537 			struct subsys_tbl_ent *p;
15538 
15539 			/* No eeprom signature?  Try the hardcoded
15540 			 * subsys device table.
15541 			 */
15542 			p = tg3_lookup_by_subsys(tp);
15543 			if (p) {
15544 				tp->phy_id = p->phy_id;
15545 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15546 				/* For now we saw the IDs 0xbc050cd0,
15547 				 * 0xbc050f80 and 0xbc050c30 on devices
15548 				 * connected to an BCM4785 and there are
15549 				 * probably more. Just assume that the phy is
15550 				 * supported when it is connected to a SSB core
15551 				 * for now.
15552 				 */
15553 				return -ENODEV;
15554 			}
15555 
15556 			if (!tp->phy_id ||
15557 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15558 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15559 		}
15560 	}
15561 
15562 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15563 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15564 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15565 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15566 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15567 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15568 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15569 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15570 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15571 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15572 
15573 		tp->eee.supported = SUPPORTED_100baseT_Full |
15574 				    SUPPORTED_1000baseT_Full;
15575 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15576 				     ADVERTISED_1000baseT_Full;
15577 		tp->eee.eee_enabled = 1;
15578 		tp->eee.tx_lpi_enabled = 1;
15579 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15580 	}
15581 
15582 	tg3_phy_init_link_config(tp);
15583 
15584 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15585 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15586 	    !tg3_flag(tp, ENABLE_APE) &&
15587 	    !tg3_flag(tp, ENABLE_ASF)) {
15588 		u32 bmsr, dummy;
15589 
15590 		tg3_readphy(tp, MII_BMSR, &bmsr);
15591 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15592 		    (bmsr & BMSR_LSTATUS))
15593 			goto skip_phy_reset;
15594 
15595 		err = tg3_phy_reset(tp);
15596 		if (err)
15597 			return err;
15598 
15599 		tg3_phy_set_wirespeed(tp);
15600 
15601 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15602 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15603 					    tp->link_config.flowctrl);
15604 
15605 			tg3_writephy(tp, MII_BMCR,
15606 				     BMCR_ANENABLE | BMCR_ANRESTART);
15607 		}
15608 	}
15609 
15610 skip_phy_reset:
15611 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15612 		err = tg3_init_5401phy_dsp(tp);
15613 		if (err)
15614 			return err;
15615 
15616 		err = tg3_init_5401phy_dsp(tp);
15617 	}
15618 
15619 	return err;
15620 }
15621 
15622 static void tg3_read_vpd(struct tg3 *tp)
15623 {
15624 	u8 *vpd_data;
15625 	unsigned int block_end, rosize, len;
15626 	u32 vpdlen;
15627 	int j, i = 0;
15628 
15629 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15630 	if (!vpd_data)
15631 		goto out_no_vpd;
15632 
15633 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15634 	if (i < 0)
15635 		goto out_not_found;
15636 
15637 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15638 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15639 	i += PCI_VPD_LRDT_TAG_SIZE;
15640 
15641 	if (block_end > vpdlen)
15642 		goto out_not_found;
15643 
15644 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15645 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15646 	if (j > 0) {
15647 		len = pci_vpd_info_field_size(&vpd_data[j]);
15648 
15649 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15650 		if (j + len > block_end || len != 4 ||
15651 		    memcmp(&vpd_data[j], "1028", 4))
15652 			goto partno;
15653 
15654 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15655 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15656 		if (j < 0)
15657 			goto partno;
15658 
15659 		len = pci_vpd_info_field_size(&vpd_data[j]);
15660 
15661 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15662 		if (j + len > block_end)
15663 			goto partno;
15664 
15665 		if (len >= sizeof(tp->fw_ver))
15666 			len = sizeof(tp->fw_ver) - 1;
15667 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15668 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15669 			 &vpd_data[j]);
15670 	}
15671 
15672 partno:
15673 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15674 				      PCI_VPD_RO_KEYWORD_PARTNO);
15675 	if (i < 0)
15676 		goto out_not_found;
15677 
15678 	len = pci_vpd_info_field_size(&vpd_data[i]);
15679 
15680 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15681 	if (len > TG3_BPN_SIZE ||
15682 	    (len + i) > vpdlen)
15683 		goto out_not_found;
15684 
15685 	memcpy(tp->board_part_number, &vpd_data[i], len);
15686 
15687 out_not_found:
15688 	kfree(vpd_data);
15689 	if (tp->board_part_number[0])
15690 		return;
15691 
15692 out_no_vpd:
15693 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15694 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15695 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15696 			strcpy(tp->board_part_number, "BCM5717");
15697 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15698 			strcpy(tp->board_part_number, "BCM5718");
15699 		else
15700 			goto nomatch;
15701 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15702 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15703 			strcpy(tp->board_part_number, "BCM57780");
15704 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15705 			strcpy(tp->board_part_number, "BCM57760");
15706 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15707 			strcpy(tp->board_part_number, "BCM57790");
15708 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15709 			strcpy(tp->board_part_number, "BCM57788");
15710 		else
15711 			goto nomatch;
15712 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15713 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15714 			strcpy(tp->board_part_number, "BCM57761");
15715 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15716 			strcpy(tp->board_part_number, "BCM57765");
15717 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15718 			strcpy(tp->board_part_number, "BCM57781");
15719 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15720 			strcpy(tp->board_part_number, "BCM57785");
15721 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15722 			strcpy(tp->board_part_number, "BCM57791");
15723 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15724 			strcpy(tp->board_part_number, "BCM57795");
15725 		else
15726 			goto nomatch;
15727 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15728 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15729 			strcpy(tp->board_part_number, "BCM57762");
15730 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15731 			strcpy(tp->board_part_number, "BCM57766");
15732 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15733 			strcpy(tp->board_part_number, "BCM57782");
15734 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15735 			strcpy(tp->board_part_number, "BCM57786");
15736 		else
15737 			goto nomatch;
15738 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15739 		strcpy(tp->board_part_number, "BCM95906");
15740 	} else {
15741 nomatch:
15742 		strcpy(tp->board_part_number, "none");
15743 	}
15744 }
15745 
15746 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15747 {
15748 	u32 val;
15749 
15750 	if (tg3_nvram_read(tp, offset, &val) ||
15751 	    (val & 0xfc000000) != 0x0c000000 ||
15752 	    tg3_nvram_read(tp, offset + 4, &val) ||
15753 	    val != 0)
15754 		return 0;
15755 
15756 	return 1;
15757 }
15758 
15759 static void tg3_read_bc_ver(struct tg3 *tp)
15760 {
15761 	u32 val, offset, start, ver_offset;
15762 	int i, dst_off;
15763 	bool newver = false;
15764 
15765 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15766 	    tg3_nvram_read(tp, 0x4, &start))
15767 		return;
15768 
15769 	offset = tg3_nvram_logical_addr(tp, offset);
15770 
15771 	if (tg3_nvram_read(tp, offset, &val))
15772 		return;
15773 
15774 	if ((val & 0xfc000000) == 0x0c000000) {
15775 		if (tg3_nvram_read(tp, offset + 4, &val))
15776 			return;
15777 
15778 		if (val == 0)
15779 			newver = true;
15780 	}
15781 
15782 	dst_off = strlen(tp->fw_ver);
15783 
15784 	if (newver) {
15785 		if (TG3_VER_SIZE - dst_off < 16 ||
15786 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15787 			return;
15788 
15789 		offset = offset + ver_offset - start;
15790 		for (i = 0; i < 16; i += 4) {
15791 			__be32 v;
15792 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15793 				return;
15794 
15795 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15796 		}
15797 	} else {
15798 		u32 major, minor;
15799 
15800 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15801 			return;
15802 
15803 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15804 			TG3_NVM_BCVER_MAJSFT;
15805 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15806 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15807 			 "v%d.%02d", major, minor);
15808 	}
15809 }
15810 
15811 static void tg3_read_hwsb_ver(struct tg3 *tp)
15812 {
15813 	u32 val, major, minor;
15814 
15815 	/* Use native endian representation */
15816 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15817 		return;
15818 
15819 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15820 		TG3_NVM_HWSB_CFG1_MAJSFT;
15821 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15822 		TG3_NVM_HWSB_CFG1_MINSFT;
15823 
15824 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15825 }
15826 
15827 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15828 {
15829 	u32 offset, major, minor, build;
15830 
15831 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15832 
15833 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15834 		return;
15835 
15836 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15837 	case TG3_EEPROM_SB_REVISION_0:
15838 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15839 		break;
15840 	case TG3_EEPROM_SB_REVISION_2:
15841 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15842 		break;
15843 	case TG3_EEPROM_SB_REVISION_3:
15844 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15845 		break;
15846 	case TG3_EEPROM_SB_REVISION_4:
15847 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15848 		break;
15849 	case TG3_EEPROM_SB_REVISION_5:
15850 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15851 		break;
15852 	case TG3_EEPROM_SB_REVISION_6:
15853 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15854 		break;
15855 	default:
15856 		return;
15857 	}
15858 
15859 	if (tg3_nvram_read(tp, offset, &val))
15860 		return;
15861 
15862 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15863 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15864 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15865 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15866 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15867 
15868 	if (minor > 99 || build > 26)
15869 		return;
15870 
15871 	offset = strlen(tp->fw_ver);
15872 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15873 		 " v%d.%02d", major, minor);
15874 
15875 	if (build > 0) {
15876 		offset = strlen(tp->fw_ver);
15877 		if (offset < TG3_VER_SIZE - 1)
15878 			tp->fw_ver[offset] = 'a' + build - 1;
15879 	}
15880 }
15881 
15882 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15883 {
15884 	u32 val, offset, start;
15885 	int i, vlen;
15886 
15887 	for (offset = TG3_NVM_DIR_START;
15888 	     offset < TG3_NVM_DIR_END;
15889 	     offset += TG3_NVM_DIRENT_SIZE) {
15890 		if (tg3_nvram_read(tp, offset, &val))
15891 			return;
15892 
15893 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15894 			break;
15895 	}
15896 
15897 	if (offset == TG3_NVM_DIR_END)
15898 		return;
15899 
15900 	if (!tg3_flag(tp, 5705_PLUS))
15901 		start = 0x08000000;
15902 	else if (tg3_nvram_read(tp, offset - 4, &start))
15903 		return;
15904 
15905 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15906 	    !tg3_fw_img_is_valid(tp, offset) ||
15907 	    tg3_nvram_read(tp, offset + 8, &val))
15908 		return;
15909 
15910 	offset += val - start;
15911 
15912 	vlen = strlen(tp->fw_ver);
15913 
15914 	tp->fw_ver[vlen++] = ',';
15915 	tp->fw_ver[vlen++] = ' ';
15916 
15917 	for (i = 0; i < 4; i++) {
15918 		__be32 v;
15919 		if (tg3_nvram_read_be32(tp, offset, &v))
15920 			return;
15921 
15922 		offset += sizeof(v);
15923 
15924 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15925 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15926 			break;
15927 		}
15928 
15929 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15930 		vlen += sizeof(v);
15931 	}
15932 }
15933 
15934 static void tg3_probe_ncsi(struct tg3 *tp)
15935 {
15936 	u32 apedata;
15937 
15938 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15939 	if (apedata != APE_SEG_SIG_MAGIC)
15940 		return;
15941 
15942 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15943 	if (!(apedata & APE_FW_STATUS_READY))
15944 		return;
15945 
15946 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15947 		tg3_flag_set(tp, APE_HAS_NCSI);
15948 }
15949 
15950 static void tg3_read_dash_ver(struct tg3 *tp)
15951 {
15952 	int vlen;
15953 	u32 apedata;
15954 	char *fwtype;
15955 
15956 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15957 
15958 	if (tg3_flag(tp, APE_HAS_NCSI))
15959 		fwtype = "NCSI";
15960 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15961 		fwtype = "SMASH";
15962 	else
15963 		fwtype = "DASH";
15964 
15965 	vlen = strlen(tp->fw_ver);
15966 
15967 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15968 		 fwtype,
15969 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15970 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15971 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15972 		 (apedata & APE_FW_VERSION_BLDMSK));
15973 }
15974 
15975 static void tg3_read_otp_ver(struct tg3 *tp)
15976 {
15977 	u32 val, val2;
15978 
15979 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15980 		return;
15981 
15982 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15983 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15984 	    TG3_OTP_MAGIC0_VALID(val)) {
15985 		u64 val64 = (u64) val << 32 | val2;
15986 		u32 ver = 0;
15987 		int i, vlen;
15988 
15989 		for (i = 0; i < 7; i++) {
15990 			if ((val64 & 0xff) == 0)
15991 				break;
15992 			ver = val64 & 0xff;
15993 			val64 >>= 8;
15994 		}
15995 		vlen = strlen(tp->fw_ver);
15996 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15997 	}
15998 }
15999 
16000 static void tg3_read_fw_ver(struct tg3 *tp)
16001 {
16002 	u32 val;
16003 	bool vpd_vers = false;
16004 
16005 	if (tp->fw_ver[0] != 0)
16006 		vpd_vers = true;
16007 
16008 	if (tg3_flag(tp, NO_NVRAM)) {
16009 		strcat(tp->fw_ver, "sb");
16010 		tg3_read_otp_ver(tp);
16011 		return;
16012 	}
16013 
16014 	if (tg3_nvram_read(tp, 0, &val))
16015 		return;
16016 
16017 	if (val == TG3_EEPROM_MAGIC)
16018 		tg3_read_bc_ver(tp);
16019 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16020 		tg3_read_sb_ver(tp, val);
16021 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16022 		tg3_read_hwsb_ver(tp);
16023 
16024 	if (tg3_flag(tp, ENABLE_ASF)) {
16025 		if (tg3_flag(tp, ENABLE_APE)) {
16026 			tg3_probe_ncsi(tp);
16027 			if (!vpd_vers)
16028 				tg3_read_dash_ver(tp);
16029 		} else if (!vpd_vers) {
16030 			tg3_read_mgmtfw_ver(tp);
16031 		}
16032 	}
16033 
16034 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16035 }
16036 
16037 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16038 {
16039 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16040 		return TG3_RX_RET_MAX_SIZE_5717;
16041 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16042 		return TG3_RX_RET_MAX_SIZE_5700;
16043 	else
16044 		return TG3_RX_RET_MAX_SIZE_5705;
16045 }
16046 
16047 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16048 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16049 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16050 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16051 	{ },
16052 };
16053 
16054 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16055 {
16056 	struct pci_dev *peer;
16057 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16058 
16059 	for (func = 0; func < 8; func++) {
16060 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16061 		if (peer && peer != tp->pdev)
16062 			break;
16063 		pci_dev_put(peer);
16064 	}
16065 	/* 5704 can be configured in single-port mode, set peer to
16066 	 * tp->pdev in that case.
16067 	 */
16068 	if (!peer) {
16069 		peer = tp->pdev;
16070 		return peer;
16071 	}
16072 
16073 	/*
16074 	 * We don't need to keep the refcount elevated; there's no way
16075 	 * to remove one half of this device without removing the other
16076 	 */
16077 	pci_dev_put(peer);
16078 
16079 	return peer;
16080 }
16081 
16082 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16083 {
16084 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16085 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16086 		u32 reg;
16087 
16088 		/* All devices that use the alternate
16089 		 * ASIC REV location have a CPMU.
16090 		 */
16091 		tg3_flag_set(tp, CPMU_PRESENT);
16092 
16093 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16094 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16095 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16096 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16097 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16098 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16099 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16100 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16101 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16102 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16103 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16104 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16105 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16106 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16107 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16108 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16109 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16110 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16111 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16112 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16113 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16114 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16115 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16116 		else
16117 			reg = TG3PCI_PRODID_ASICREV;
16118 
16119 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16120 	}
16121 
16122 	/* Wrong chip ID in 5752 A0. This code can be removed later
16123 	 * as A0 is not in production.
16124 	 */
16125 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16126 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16127 
16128 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16129 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16130 
16131 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16132 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16133 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16134 		tg3_flag_set(tp, 5717_PLUS);
16135 
16136 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16137 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16138 		tg3_flag_set(tp, 57765_CLASS);
16139 
16140 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16141 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16142 		tg3_flag_set(tp, 57765_PLUS);
16143 
16144 	/* Intentionally exclude ASIC_REV_5906 */
16145 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16146 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16147 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16148 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16149 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16150 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16151 	    tg3_flag(tp, 57765_PLUS))
16152 		tg3_flag_set(tp, 5755_PLUS);
16153 
16154 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16155 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16156 		tg3_flag_set(tp, 5780_CLASS);
16157 
16158 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16159 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16160 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16161 	    tg3_flag(tp, 5755_PLUS) ||
16162 	    tg3_flag(tp, 5780_CLASS))
16163 		tg3_flag_set(tp, 5750_PLUS);
16164 
16165 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16166 	    tg3_flag(tp, 5750_PLUS))
16167 		tg3_flag_set(tp, 5705_PLUS);
16168 }
16169 
16170 static bool tg3_10_100_only_device(struct tg3 *tp,
16171 				   const struct pci_device_id *ent)
16172 {
16173 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16174 
16175 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16176 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16177 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16178 		return true;
16179 
16180 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16181 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16182 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16183 				return true;
16184 		} else {
16185 			return true;
16186 		}
16187 	}
16188 
16189 	return false;
16190 }
16191 
16192 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16193 {
16194 	u32 misc_ctrl_reg;
16195 	u32 pci_state_reg, grc_misc_cfg;
16196 	u32 val;
16197 	u16 pci_cmd;
16198 	int err;
16199 
16200 	/* Force memory write invalidate off.  If we leave it on,
16201 	 * then on 5700_BX chips we have to enable a workaround.
16202 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16203 	 * to match the cacheline size.  The Broadcom driver have this
16204 	 * workaround but turns MWI off all the times so never uses
16205 	 * it.  This seems to suggest that the workaround is insufficient.
16206 	 */
16207 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16208 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16209 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16210 
16211 	/* Important! -- Make sure register accesses are byteswapped
16212 	 * correctly.  Also, for those chips that require it, make
16213 	 * sure that indirect register accesses are enabled before
16214 	 * the first operation.
16215 	 */
16216 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16217 			      &misc_ctrl_reg);
16218 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16219 			       MISC_HOST_CTRL_CHIPREV);
16220 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16221 			       tp->misc_host_ctrl);
16222 
16223 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16224 
16225 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16226 	 * we need to disable memory and use config. cycles
16227 	 * only to access all registers. The 5702/03 chips
16228 	 * can mistakenly decode the special cycles from the
16229 	 * ICH chipsets as memory write cycles, causing corruption
16230 	 * of register and memory space. Only certain ICH bridges
16231 	 * will drive special cycles with non-zero data during the
16232 	 * address phase which can fall within the 5703's address
16233 	 * range. This is not an ICH bug as the PCI spec allows
16234 	 * non-zero address during special cycles. However, only
16235 	 * these ICH bridges are known to drive non-zero addresses
16236 	 * during special cycles.
16237 	 *
16238 	 * Since special cycles do not cross PCI bridges, we only
16239 	 * enable this workaround if the 5703 is on the secondary
16240 	 * bus of these ICH bridges.
16241 	 */
16242 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16243 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16244 		static struct tg3_dev_id {
16245 			u32	vendor;
16246 			u32	device;
16247 			u32	rev;
16248 		} ich_chipsets[] = {
16249 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16250 			  PCI_ANY_ID },
16251 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16252 			  PCI_ANY_ID },
16253 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16254 			  0xa },
16255 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16256 			  PCI_ANY_ID },
16257 			{ },
16258 		};
16259 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16260 		struct pci_dev *bridge = NULL;
16261 
16262 		while (pci_id->vendor != 0) {
16263 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16264 						bridge);
16265 			if (!bridge) {
16266 				pci_id++;
16267 				continue;
16268 			}
16269 			if (pci_id->rev != PCI_ANY_ID) {
16270 				if (bridge->revision > pci_id->rev)
16271 					continue;
16272 			}
16273 			if (bridge->subordinate &&
16274 			    (bridge->subordinate->number ==
16275 			     tp->pdev->bus->number)) {
16276 				tg3_flag_set(tp, ICH_WORKAROUND);
16277 				pci_dev_put(bridge);
16278 				break;
16279 			}
16280 		}
16281 	}
16282 
16283 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16284 		static struct tg3_dev_id {
16285 			u32	vendor;
16286 			u32	device;
16287 		} bridge_chipsets[] = {
16288 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16289 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16290 			{ },
16291 		};
16292 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16293 		struct pci_dev *bridge = NULL;
16294 
16295 		while (pci_id->vendor != 0) {
16296 			bridge = pci_get_device(pci_id->vendor,
16297 						pci_id->device,
16298 						bridge);
16299 			if (!bridge) {
16300 				pci_id++;
16301 				continue;
16302 			}
16303 			if (bridge->subordinate &&
16304 			    (bridge->subordinate->number <=
16305 			     tp->pdev->bus->number) &&
16306 			    (bridge->subordinate->busn_res.end >=
16307 			     tp->pdev->bus->number)) {
16308 				tg3_flag_set(tp, 5701_DMA_BUG);
16309 				pci_dev_put(bridge);
16310 				break;
16311 			}
16312 		}
16313 	}
16314 
16315 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16316 	 * DMA addresses > 40-bit. This bridge may have other additional
16317 	 * 57xx devices behind it in some 4-port NIC designs for example.
16318 	 * Any tg3 device found behind the bridge will also need the 40-bit
16319 	 * DMA workaround.
16320 	 */
16321 	if (tg3_flag(tp, 5780_CLASS)) {
16322 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16323 		tp->msi_cap = tp->pdev->msi_cap;
16324 	} else {
16325 		struct pci_dev *bridge = NULL;
16326 
16327 		do {
16328 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16329 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16330 						bridge);
16331 			if (bridge && bridge->subordinate &&
16332 			    (bridge->subordinate->number <=
16333 			     tp->pdev->bus->number) &&
16334 			    (bridge->subordinate->busn_res.end >=
16335 			     tp->pdev->bus->number)) {
16336 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16337 				pci_dev_put(bridge);
16338 				break;
16339 			}
16340 		} while (bridge);
16341 	}
16342 
16343 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16344 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16345 		tp->pdev_peer = tg3_find_peer(tp);
16346 
16347 	/* Determine TSO capabilities */
16348 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16349 		; /* Do nothing. HW bug. */
16350 	else if (tg3_flag(tp, 57765_PLUS))
16351 		tg3_flag_set(tp, HW_TSO_3);
16352 	else if (tg3_flag(tp, 5755_PLUS) ||
16353 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16354 		tg3_flag_set(tp, HW_TSO_2);
16355 	else if (tg3_flag(tp, 5750_PLUS)) {
16356 		tg3_flag_set(tp, HW_TSO_1);
16357 		tg3_flag_set(tp, TSO_BUG);
16358 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16359 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16360 			tg3_flag_clear(tp, TSO_BUG);
16361 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16362 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16363 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16364 		tg3_flag_set(tp, FW_TSO);
16365 		tg3_flag_set(tp, TSO_BUG);
16366 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16367 			tp->fw_needed = FIRMWARE_TG3TSO5;
16368 		else
16369 			tp->fw_needed = FIRMWARE_TG3TSO;
16370 	}
16371 
16372 	/* Selectively allow TSO based on operating conditions */
16373 	if (tg3_flag(tp, HW_TSO_1) ||
16374 	    tg3_flag(tp, HW_TSO_2) ||
16375 	    tg3_flag(tp, HW_TSO_3) ||
16376 	    tg3_flag(tp, FW_TSO)) {
16377 		/* For firmware TSO, assume ASF is disabled.
16378 		 * We'll disable TSO later if we discover ASF
16379 		 * is enabled in tg3_get_eeprom_hw_cfg().
16380 		 */
16381 		tg3_flag_set(tp, TSO_CAPABLE);
16382 	} else {
16383 		tg3_flag_clear(tp, TSO_CAPABLE);
16384 		tg3_flag_clear(tp, TSO_BUG);
16385 		tp->fw_needed = NULL;
16386 	}
16387 
16388 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16389 		tp->fw_needed = FIRMWARE_TG3;
16390 
16391 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16392 		tp->fw_needed = FIRMWARE_TG357766;
16393 
16394 	tp->irq_max = 1;
16395 
16396 	if (tg3_flag(tp, 5750_PLUS)) {
16397 		tg3_flag_set(tp, SUPPORT_MSI);
16398 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16399 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16400 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16401 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16402 		     tp->pdev_peer == tp->pdev))
16403 			tg3_flag_clear(tp, SUPPORT_MSI);
16404 
16405 		if (tg3_flag(tp, 5755_PLUS) ||
16406 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16407 			tg3_flag_set(tp, 1SHOT_MSI);
16408 		}
16409 
16410 		if (tg3_flag(tp, 57765_PLUS)) {
16411 			tg3_flag_set(tp, SUPPORT_MSIX);
16412 			tp->irq_max = TG3_IRQ_MAX_VECS;
16413 		}
16414 	}
16415 
16416 	tp->txq_max = 1;
16417 	tp->rxq_max = 1;
16418 	if (tp->irq_max > 1) {
16419 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16420 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16421 
16422 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16423 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16424 			tp->txq_max = tp->irq_max - 1;
16425 	}
16426 
16427 	if (tg3_flag(tp, 5755_PLUS) ||
16428 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16429 		tg3_flag_set(tp, SHORT_DMA_BUG);
16430 
16431 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16432 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16433 
16434 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16435 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16436 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16437 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16438 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16439 
16440 	if (tg3_flag(tp, 57765_PLUS) &&
16441 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16442 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16443 
16444 	if (!tg3_flag(tp, 5705_PLUS) ||
16445 	    tg3_flag(tp, 5780_CLASS) ||
16446 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16447 		tg3_flag_set(tp, JUMBO_CAPABLE);
16448 
16449 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16450 			      &pci_state_reg);
16451 
16452 	if (pci_is_pcie(tp->pdev)) {
16453 		u16 lnkctl;
16454 
16455 		tg3_flag_set(tp, PCI_EXPRESS);
16456 
16457 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16458 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16459 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16460 				tg3_flag_clear(tp, HW_TSO_2);
16461 				tg3_flag_clear(tp, TSO_CAPABLE);
16462 			}
16463 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16464 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16465 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16466 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16467 				tg3_flag_set(tp, CLKREQ_BUG);
16468 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16469 			tg3_flag_set(tp, L1PLLPD_EN);
16470 		}
16471 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16472 		/* BCM5785 devices are effectively PCIe devices, and should
16473 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16474 		 * section.
16475 		 */
16476 		tg3_flag_set(tp, PCI_EXPRESS);
16477 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16478 		   tg3_flag(tp, 5780_CLASS)) {
16479 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16480 		if (!tp->pcix_cap) {
16481 			dev_err(&tp->pdev->dev,
16482 				"Cannot find PCI-X capability, aborting\n");
16483 			return -EIO;
16484 		}
16485 
16486 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16487 			tg3_flag_set(tp, PCIX_MODE);
16488 	}
16489 
16490 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16491 	 * reordering to the mailbox registers done by the host
16492 	 * controller can cause major troubles.  We read back from
16493 	 * every mailbox register write to force the writes to be
16494 	 * posted to the chip in order.
16495 	 */
16496 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16497 	    !tg3_flag(tp, PCI_EXPRESS))
16498 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16499 
16500 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16501 			     &tp->pci_cacheline_sz);
16502 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16503 			     &tp->pci_lat_timer);
16504 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16505 	    tp->pci_lat_timer < 64) {
16506 		tp->pci_lat_timer = 64;
16507 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16508 				      tp->pci_lat_timer);
16509 	}
16510 
16511 	/* Important! -- It is critical that the PCI-X hw workaround
16512 	 * situation is decided before the first MMIO register access.
16513 	 */
16514 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16515 		/* 5700 BX chips need to have their TX producer index
16516 		 * mailboxes written twice to workaround a bug.
16517 		 */
16518 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16519 
16520 		/* If we are in PCI-X mode, enable register write workaround.
16521 		 *
16522 		 * The workaround is to use indirect register accesses
16523 		 * for all chip writes not to mailbox registers.
16524 		 */
16525 		if (tg3_flag(tp, PCIX_MODE)) {
16526 			u32 pm_reg;
16527 
16528 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16529 
16530 			/* The chip can have it's power management PCI config
16531 			 * space registers clobbered due to this bug.
16532 			 * So explicitly force the chip into D0 here.
16533 			 */
16534 			pci_read_config_dword(tp->pdev,
16535 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16536 					      &pm_reg);
16537 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16538 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16539 			pci_write_config_dword(tp->pdev,
16540 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16541 					       pm_reg);
16542 
16543 			/* Also, force SERR#/PERR# in PCI command. */
16544 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16545 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16546 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16547 		}
16548 	}
16549 
16550 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16551 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16552 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16553 		tg3_flag_set(tp, PCI_32BIT);
16554 
16555 	/* Chip-specific fixup from Broadcom driver */
16556 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16557 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16558 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16559 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16560 	}
16561 
16562 	/* Default fast path register access methods */
16563 	tp->read32 = tg3_read32;
16564 	tp->write32 = tg3_write32;
16565 	tp->read32_mbox = tg3_read32;
16566 	tp->write32_mbox = tg3_write32;
16567 	tp->write32_tx_mbox = tg3_write32;
16568 	tp->write32_rx_mbox = tg3_write32;
16569 
16570 	/* Various workaround register access methods */
16571 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16572 		tp->write32 = tg3_write_indirect_reg32;
16573 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16574 		 (tg3_flag(tp, PCI_EXPRESS) &&
16575 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16576 		/*
16577 		 * Back to back register writes can cause problems on these
16578 		 * chips, the workaround is to read back all reg writes
16579 		 * except those to mailbox regs.
16580 		 *
16581 		 * See tg3_write_indirect_reg32().
16582 		 */
16583 		tp->write32 = tg3_write_flush_reg32;
16584 	}
16585 
16586 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16587 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16588 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16589 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16590 	}
16591 
16592 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16593 		tp->read32 = tg3_read_indirect_reg32;
16594 		tp->write32 = tg3_write_indirect_reg32;
16595 		tp->read32_mbox = tg3_read_indirect_mbox;
16596 		tp->write32_mbox = tg3_write_indirect_mbox;
16597 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16598 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16599 
16600 		iounmap(tp->regs);
16601 		tp->regs = NULL;
16602 
16603 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16604 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16605 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16606 	}
16607 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16608 		tp->read32_mbox = tg3_read32_mbox_5906;
16609 		tp->write32_mbox = tg3_write32_mbox_5906;
16610 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16611 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16612 	}
16613 
16614 	if (tp->write32 == tg3_write_indirect_reg32 ||
16615 	    (tg3_flag(tp, PCIX_MODE) &&
16616 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16617 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16618 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16619 
16620 	/* The memory arbiter has to be enabled in order for SRAM accesses
16621 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16622 	 * sure it is enabled, but other entities such as system netboot
16623 	 * code might disable it.
16624 	 */
16625 	val = tr32(MEMARB_MODE);
16626 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16627 
16628 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16629 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16630 	    tg3_flag(tp, 5780_CLASS)) {
16631 		if (tg3_flag(tp, PCIX_MODE)) {
16632 			pci_read_config_dword(tp->pdev,
16633 					      tp->pcix_cap + PCI_X_STATUS,
16634 					      &val);
16635 			tp->pci_fn = val & 0x7;
16636 		}
16637 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16638 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16639 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16640 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16641 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16642 			val = tr32(TG3_CPMU_STATUS);
16643 
16644 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16645 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16646 		else
16647 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16648 				     TG3_CPMU_STATUS_FSHFT_5719;
16649 	}
16650 
16651 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16652 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16653 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16654 	}
16655 
16656 	/* Get eeprom hw config before calling tg3_set_power_state().
16657 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16658 	 * determined before calling tg3_set_power_state() so that
16659 	 * we know whether or not to switch out of Vaux power.
16660 	 * When the flag is set, it means that GPIO1 is used for eeprom
16661 	 * write protect and also implies that it is a LOM where GPIOs
16662 	 * are not used to switch power.
16663 	 */
16664 	tg3_get_eeprom_hw_cfg(tp);
16665 
16666 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16667 		tg3_flag_clear(tp, TSO_CAPABLE);
16668 		tg3_flag_clear(tp, TSO_BUG);
16669 		tp->fw_needed = NULL;
16670 	}
16671 
16672 	if (tg3_flag(tp, ENABLE_APE)) {
16673 		/* Allow reads and writes to the
16674 		 * APE register and memory space.
16675 		 */
16676 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16677 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16678 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16679 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16680 				       pci_state_reg);
16681 
16682 		tg3_ape_lock_init(tp);
16683 		tp->ape_hb_interval =
16684 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16685 	}
16686 
16687 	/* Set up tp->grc_local_ctrl before calling
16688 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16689 	 * will bring 5700's external PHY out of reset.
16690 	 * It is also used as eeprom write protect on LOMs.
16691 	 */
16692 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16693 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16694 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16695 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16696 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16697 	/* Unused GPIO3 must be driven as output on 5752 because there
16698 	 * are no pull-up resistors on unused GPIO pins.
16699 	 */
16700 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16701 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16702 
16703 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16704 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16705 	    tg3_flag(tp, 57765_CLASS))
16706 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16707 
16708 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16709 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16710 		/* Turn off the debug UART. */
16711 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16712 		if (tg3_flag(tp, IS_NIC))
16713 			/* Keep VMain power. */
16714 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16715 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16716 	}
16717 
16718 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16719 		tp->grc_local_ctrl |=
16720 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16721 
16722 	/* Switch out of Vaux if it is a NIC */
16723 	tg3_pwrsrc_switch_to_vmain(tp);
16724 
16725 	/* Derive initial jumbo mode from MTU assigned in
16726 	 * ether_setup() via the alloc_etherdev() call
16727 	 */
16728 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16729 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16730 
16731 	/* Determine WakeOnLan speed to use. */
16732 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16733 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16734 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16735 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16736 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16737 	} else {
16738 		tg3_flag_set(tp, WOL_SPEED_100MB);
16739 	}
16740 
16741 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16742 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16743 
16744 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16745 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16746 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16747 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16748 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16749 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16750 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16751 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16752 
16753 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16754 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16755 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16756 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16757 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16758 
16759 	if (tg3_flag(tp, 5705_PLUS) &&
16760 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16761 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16762 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16763 	    !tg3_flag(tp, 57765_PLUS)) {
16764 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16765 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16766 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16767 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16768 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16769 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16770 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16771 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16772 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16773 		} else
16774 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16775 	}
16776 
16777 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16778 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16779 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16780 		if (tp->phy_otp == 0)
16781 			tp->phy_otp = TG3_OTP_DEFAULT;
16782 	}
16783 
16784 	if (tg3_flag(tp, CPMU_PRESENT))
16785 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16786 	else
16787 		tp->mi_mode = MAC_MI_MODE_BASE;
16788 
16789 	tp->coalesce_mode = 0;
16790 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16791 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16792 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16793 
16794 	/* Set these bits to enable statistics workaround. */
16795 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16796 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16797 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16798 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16799 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16800 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16801 	}
16802 
16803 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16804 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16805 		tg3_flag_set(tp, USE_PHYLIB);
16806 
16807 	err = tg3_mdio_init(tp);
16808 	if (err)
16809 		return err;
16810 
16811 	/* Initialize data/descriptor byte/word swapping. */
16812 	val = tr32(GRC_MODE);
16813 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16814 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16815 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16816 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16817 			GRC_MODE_B2HRX_ENABLE |
16818 			GRC_MODE_HTX2B_ENABLE |
16819 			GRC_MODE_HOST_STACKUP);
16820 	else
16821 		val &= GRC_MODE_HOST_STACKUP;
16822 
16823 	tw32(GRC_MODE, val | tp->grc_mode);
16824 
16825 	tg3_switch_clocks(tp);
16826 
16827 	/* Clear this out for sanity. */
16828 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16829 
16830 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16831 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16832 
16833 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16834 			      &pci_state_reg);
16835 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16836 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16837 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16838 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16839 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16840 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16841 			void __iomem *sram_base;
16842 
16843 			/* Write some dummy words into the SRAM status block
16844 			 * area, see if it reads back correctly.  If the return
16845 			 * value is bad, force enable the PCIX workaround.
16846 			 */
16847 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16848 
16849 			writel(0x00000000, sram_base);
16850 			writel(0x00000000, sram_base + 4);
16851 			writel(0xffffffff, sram_base + 4);
16852 			if (readl(sram_base) != 0x00000000)
16853 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16854 		}
16855 	}
16856 
16857 	udelay(50);
16858 	tg3_nvram_init(tp);
16859 
16860 	/* If the device has an NVRAM, no need to load patch firmware */
16861 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16862 	    !tg3_flag(tp, NO_NVRAM))
16863 		tp->fw_needed = NULL;
16864 
16865 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16866 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16867 
16868 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16869 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16870 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16871 		tg3_flag_set(tp, IS_5788);
16872 
16873 	if (!tg3_flag(tp, IS_5788) &&
16874 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16875 		tg3_flag_set(tp, TAGGED_STATUS);
16876 	if (tg3_flag(tp, TAGGED_STATUS)) {
16877 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16878 				      HOSTCC_MODE_CLRTICK_TXBD);
16879 
16880 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16881 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16882 				       tp->misc_host_ctrl);
16883 	}
16884 
16885 	/* Preserve the APE MAC_MODE bits */
16886 	if (tg3_flag(tp, ENABLE_APE))
16887 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16888 	else
16889 		tp->mac_mode = 0;
16890 
16891 	if (tg3_10_100_only_device(tp, ent))
16892 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16893 
16894 	err = tg3_phy_probe(tp);
16895 	if (err) {
16896 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16897 		/* ... but do not return immediately ... */
16898 		tg3_mdio_fini(tp);
16899 	}
16900 
16901 	tg3_read_vpd(tp);
16902 	tg3_read_fw_ver(tp);
16903 
16904 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16905 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16906 	} else {
16907 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16908 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16909 		else
16910 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16911 	}
16912 
16913 	/* 5700 {AX,BX} chips have a broken status block link
16914 	 * change bit implementation, so we must use the
16915 	 * status register in those cases.
16916 	 */
16917 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16918 		tg3_flag_set(tp, USE_LINKCHG_REG);
16919 	else
16920 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16921 
16922 	/* The led_ctrl is set during tg3_phy_probe, here we might
16923 	 * have to force the link status polling mechanism based
16924 	 * upon subsystem IDs.
16925 	 */
16926 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16927 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16928 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16929 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16930 		tg3_flag_set(tp, USE_LINKCHG_REG);
16931 	}
16932 
16933 	/* For all SERDES we poll the MAC status register. */
16934 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16935 		tg3_flag_set(tp, POLL_SERDES);
16936 	else
16937 		tg3_flag_clear(tp, POLL_SERDES);
16938 
16939 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16940 		tg3_flag_set(tp, POLL_CPMU_LINK);
16941 
16942 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16943 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16944 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16945 	    tg3_flag(tp, PCIX_MODE)) {
16946 		tp->rx_offset = NET_SKB_PAD;
16947 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16948 		tp->rx_copy_thresh = ~(u16)0;
16949 #endif
16950 	}
16951 
16952 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16953 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16954 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16955 
16956 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16957 
16958 	/* Increment the rx prod index on the rx std ring by at most
16959 	 * 8 for these chips to workaround hw errata.
16960 	 */
16961 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16962 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16963 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16964 		tp->rx_std_max_post = 8;
16965 
16966 	if (tg3_flag(tp, ASPM_WORKAROUND))
16967 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16968 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16969 
16970 	return err;
16971 }
16972 
16973 #ifdef CONFIG_SPARC
16974 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16975 {
16976 	struct net_device *dev = tp->dev;
16977 	struct pci_dev *pdev = tp->pdev;
16978 	struct device_node *dp = pci_device_to_OF_node(pdev);
16979 	const unsigned char *addr;
16980 	int len;
16981 
16982 	addr = of_get_property(dp, "local-mac-address", &len);
16983 	if (addr && len == ETH_ALEN) {
16984 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16985 		return 0;
16986 	}
16987 	return -ENODEV;
16988 }
16989 
16990 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16991 {
16992 	struct net_device *dev = tp->dev;
16993 
16994 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16995 	return 0;
16996 }
16997 #endif
16998 
16999 static int tg3_get_device_address(struct tg3 *tp)
17000 {
17001 	struct net_device *dev = tp->dev;
17002 	u32 hi, lo, mac_offset;
17003 	int addr_ok = 0;
17004 	int err;
17005 
17006 #ifdef CONFIG_SPARC
17007 	if (!tg3_get_macaddr_sparc(tp))
17008 		return 0;
17009 #endif
17010 
17011 	if (tg3_flag(tp, IS_SSB_CORE)) {
17012 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
17013 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
17014 			return 0;
17015 	}
17016 
17017 	mac_offset = 0x7c;
17018 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17019 	    tg3_flag(tp, 5780_CLASS)) {
17020 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17021 			mac_offset = 0xcc;
17022 		if (tg3_nvram_lock(tp))
17023 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17024 		else
17025 			tg3_nvram_unlock(tp);
17026 	} else if (tg3_flag(tp, 5717_PLUS)) {
17027 		if (tp->pci_fn & 1)
17028 			mac_offset = 0xcc;
17029 		if (tp->pci_fn > 1)
17030 			mac_offset += 0x18c;
17031 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17032 		mac_offset = 0x10;
17033 
17034 	/* First try to get it from MAC address mailbox. */
17035 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17036 	if ((hi >> 16) == 0x484b) {
17037 		dev->dev_addr[0] = (hi >>  8) & 0xff;
17038 		dev->dev_addr[1] = (hi >>  0) & 0xff;
17039 
17040 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17041 		dev->dev_addr[2] = (lo >> 24) & 0xff;
17042 		dev->dev_addr[3] = (lo >> 16) & 0xff;
17043 		dev->dev_addr[4] = (lo >>  8) & 0xff;
17044 		dev->dev_addr[5] = (lo >>  0) & 0xff;
17045 
17046 		/* Some old bootcode may report a 0 MAC address in SRAM */
17047 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17048 	}
17049 	if (!addr_ok) {
17050 		/* Next, try NVRAM. */
17051 		if (!tg3_flag(tp, NO_NVRAM) &&
17052 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17053 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17054 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17055 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17056 		}
17057 		/* Finally just fetch it out of the MAC control regs. */
17058 		else {
17059 			hi = tr32(MAC_ADDR_0_HIGH);
17060 			lo = tr32(MAC_ADDR_0_LOW);
17061 
17062 			dev->dev_addr[5] = lo & 0xff;
17063 			dev->dev_addr[4] = (lo >> 8) & 0xff;
17064 			dev->dev_addr[3] = (lo >> 16) & 0xff;
17065 			dev->dev_addr[2] = (lo >> 24) & 0xff;
17066 			dev->dev_addr[1] = hi & 0xff;
17067 			dev->dev_addr[0] = (hi >> 8) & 0xff;
17068 		}
17069 	}
17070 
17071 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17072 #ifdef CONFIG_SPARC
17073 		if (!tg3_get_default_macaddr_sparc(tp))
17074 			return 0;
17075 #endif
17076 		return -EINVAL;
17077 	}
17078 	return 0;
17079 }
17080 
17081 #define BOUNDARY_SINGLE_CACHELINE	1
17082 #define BOUNDARY_MULTI_CACHELINE	2
17083 
17084 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17085 {
17086 	int cacheline_size;
17087 	u8 byte;
17088 	int goal;
17089 
17090 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17091 	if (byte == 0)
17092 		cacheline_size = 1024;
17093 	else
17094 		cacheline_size = (int) byte * 4;
17095 
17096 	/* On 5703 and later chips, the boundary bits have no
17097 	 * effect.
17098 	 */
17099 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17100 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17101 	    !tg3_flag(tp, PCI_EXPRESS))
17102 		goto out;
17103 
17104 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17105 	goal = BOUNDARY_MULTI_CACHELINE;
17106 #else
17107 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17108 	goal = BOUNDARY_SINGLE_CACHELINE;
17109 #else
17110 	goal = 0;
17111 #endif
17112 #endif
17113 
17114 	if (tg3_flag(tp, 57765_PLUS)) {
17115 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17116 		goto out;
17117 	}
17118 
17119 	if (!goal)
17120 		goto out;
17121 
17122 	/* PCI controllers on most RISC systems tend to disconnect
17123 	 * when a device tries to burst across a cache-line boundary.
17124 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17125 	 *
17126 	 * Unfortunately, for PCI-E there are only limited
17127 	 * write-side controls for this, and thus for reads
17128 	 * we will still get the disconnects.  We'll also waste
17129 	 * these PCI cycles for both read and write for chips
17130 	 * other than 5700 and 5701 which do not implement the
17131 	 * boundary bits.
17132 	 */
17133 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17134 		switch (cacheline_size) {
17135 		case 16:
17136 		case 32:
17137 		case 64:
17138 		case 128:
17139 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17140 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17141 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17142 			} else {
17143 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17144 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17145 			}
17146 			break;
17147 
17148 		case 256:
17149 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17150 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17151 			break;
17152 
17153 		default:
17154 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17155 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17156 			break;
17157 		}
17158 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17159 		switch (cacheline_size) {
17160 		case 16:
17161 		case 32:
17162 		case 64:
17163 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17165 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17166 				break;
17167 			}
17168 			/* fallthrough */
17169 		case 128:
17170 		default:
17171 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17172 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17173 			break;
17174 		}
17175 	} else {
17176 		switch (cacheline_size) {
17177 		case 16:
17178 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17179 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17180 					DMA_RWCTRL_WRITE_BNDRY_16);
17181 				break;
17182 			}
17183 			/* fallthrough */
17184 		case 32:
17185 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17186 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17187 					DMA_RWCTRL_WRITE_BNDRY_32);
17188 				break;
17189 			}
17190 			/* fallthrough */
17191 		case 64:
17192 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17193 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17194 					DMA_RWCTRL_WRITE_BNDRY_64);
17195 				break;
17196 			}
17197 			/* fallthrough */
17198 		case 128:
17199 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17200 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17201 					DMA_RWCTRL_WRITE_BNDRY_128);
17202 				break;
17203 			}
17204 			/* fallthrough */
17205 		case 256:
17206 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17207 				DMA_RWCTRL_WRITE_BNDRY_256);
17208 			break;
17209 		case 512:
17210 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17211 				DMA_RWCTRL_WRITE_BNDRY_512);
17212 			break;
17213 		case 1024:
17214 		default:
17215 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17216 				DMA_RWCTRL_WRITE_BNDRY_1024);
17217 			break;
17218 		}
17219 	}
17220 
17221 out:
17222 	return val;
17223 }
17224 
17225 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17226 			   int size, bool to_device)
17227 {
17228 	struct tg3_internal_buffer_desc test_desc;
17229 	u32 sram_dma_descs;
17230 	int i, ret;
17231 
17232 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17233 
17234 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17235 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17236 	tw32(RDMAC_STATUS, 0);
17237 	tw32(WDMAC_STATUS, 0);
17238 
17239 	tw32(BUFMGR_MODE, 0);
17240 	tw32(FTQ_RESET, 0);
17241 
17242 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17243 	test_desc.addr_lo = buf_dma & 0xffffffff;
17244 	test_desc.nic_mbuf = 0x00002100;
17245 	test_desc.len = size;
17246 
17247 	/*
17248 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17249 	 * the *second* time the tg3 driver was getting loaded after an
17250 	 * initial scan.
17251 	 *
17252 	 * Broadcom tells me:
17253 	 *   ...the DMA engine is connected to the GRC block and a DMA
17254 	 *   reset may affect the GRC block in some unpredictable way...
17255 	 *   The behavior of resets to individual blocks has not been tested.
17256 	 *
17257 	 * Broadcom noted the GRC reset will also reset all sub-components.
17258 	 */
17259 	if (to_device) {
17260 		test_desc.cqid_sqid = (13 << 8) | 2;
17261 
17262 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17263 		udelay(40);
17264 	} else {
17265 		test_desc.cqid_sqid = (16 << 8) | 7;
17266 
17267 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17268 		udelay(40);
17269 	}
17270 	test_desc.flags = 0x00000005;
17271 
17272 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17273 		u32 val;
17274 
17275 		val = *(((u32 *)&test_desc) + i);
17276 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17277 				       sram_dma_descs + (i * sizeof(u32)));
17278 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17279 	}
17280 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17281 
17282 	if (to_device)
17283 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17284 	else
17285 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17286 
17287 	ret = -ENODEV;
17288 	for (i = 0; i < 40; i++) {
17289 		u32 val;
17290 
17291 		if (to_device)
17292 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17293 		else
17294 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17295 		if ((val & 0xffff) == sram_dma_descs) {
17296 			ret = 0;
17297 			break;
17298 		}
17299 
17300 		udelay(100);
17301 	}
17302 
17303 	return ret;
17304 }
17305 
17306 #define TEST_BUFFER_SIZE	0x2000
17307 
17308 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17309 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17310 	{ },
17311 };
17312 
17313 static int tg3_test_dma(struct tg3 *tp)
17314 {
17315 	dma_addr_t buf_dma;
17316 	u32 *buf, saved_dma_rwctrl;
17317 	int ret = 0;
17318 
17319 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17320 				 &buf_dma, GFP_KERNEL);
17321 	if (!buf) {
17322 		ret = -ENOMEM;
17323 		goto out_nofree;
17324 	}
17325 
17326 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17327 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17328 
17329 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17330 
17331 	if (tg3_flag(tp, 57765_PLUS))
17332 		goto out;
17333 
17334 	if (tg3_flag(tp, PCI_EXPRESS)) {
17335 		/* DMA read watermark not used on PCIE */
17336 		tp->dma_rwctrl |= 0x00180000;
17337 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17338 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17339 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17340 			tp->dma_rwctrl |= 0x003f0000;
17341 		else
17342 			tp->dma_rwctrl |= 0x003f000f;
17343 	} else {
17344 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17345 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17346 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17347 			u32 read_water = 0x7;
17348 
17349 			/* If the 5704 is behind the EPB bridge, we can
17350 			 * do the less restrictive ONE_DMA workaround for
17351 			 * better performance.
17352 			 */
17353 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17354 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17355 				tp->dma_rwctrl |= 0x8000;
17356 			else if (ccval == 0x6 || ccval == 0x7)
17357 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17358 
17359 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17360 				read_water = 4;
17361 			/* Set bit 23 to enable PCIX hw bug fix */
17362 			tp->dma_rwctrl |=
17363 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17364 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17365 				(1 << 23);
17366 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17367 			/* 5780 always in PCIX mode */
17368 			tp->dma_rwctrl |= 0x00144000;
17369 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17370 			/* 5714 always in PCIX mode */
17371 			tp->dma_rwctrl |= 0x00148000;
17372 		} else {
17373 			tp->dma_rwctrl |= 0x001b000f;
17374 		}
17375 	}
17376 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17377 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17378 
17379 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17380 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17381 		tp->dma_rwctrl &= 0xfffffff0;
17382 
17383 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17384 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17385 		/* Remove this if it causes problems for some boards. */
17386 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17387 
17388 		/* On 5700/5701 chips, we need to set this bit.
17389 		 * Otherwise the chip will issue cacheline transactions
17390 		 * to streamable DMA memory with not all the byte
17391 		 * enables turned on.  This is an error on several
17392 		 * RISC PCI controllers, in particular sparc64.
17393 		 *
17394 		 * On 5703/5704 chips, this bit has been reassigned
17395 		 * a different meaning.  In particular, it is used
17396 		 * on those chips to enable a PCI-X workaround.
17397 		 */
17398 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17399 	}
17400 
17401 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17402 
17403 
17404 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17405 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17406 		goto out;
17407 
17408 	/* It is best to perform DMA test with maximum write burst size
17409 	 * to expose the 5700/5701 write DMA bug.
17410 	 */
17411 	saved_dma_rwctrl = tp->dma_rwctrl;
17412 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17413 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17414 
17415 	while (1) {
17416 		u32 *p = buf, i;
17417 
17418 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17419 			p[i] = i;
17420 
17421 		/* Send the buffer to the chip. */
17422 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17423 		if (ret) {
17424 			dev_err(&tp->pdev->dev,
17425 				"%s: Buffer write failed. err = %d\n",
17426 				__func__, ret);
17427 			break;
17428 		}
17429 
17430 		/* Now read it back. */
17431 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17432 		if (ret) {
17433 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17434 				"err = %d\n", __func__, ret);
17435 			break;
17436 		}
17437 
17438 		/* Verify it. */
17439 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17440 			if (p[i] == i)
17441 				continue;
17442 
17443 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17444 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17445 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17446 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17447 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17448 				break;
17449 			} else {
17450 				dev_err(&tp->pdev->dev,
17451 					"%s: Buffer corrupted on read back! "
17452 					"(%d != %d)\n", __func__, p[i], i);
17453 				ret = -ENODEV;
17454 				goto out;
17455 			}
17456 		}
17457 
17458 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17459 			/* Success. */
17460 			ret = 0;
17461 			break;
17462 		}
17463 	}
17464 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17465 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17466 		/* DMA test passed without adjusting DMA boundary,
17467 		 * now look for chipsets that are known to expose the
17468 		 * DMA bug without failing the test.
17469 		 */
17470 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17471 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17472 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17473 		} else {
17474 			/* Safe to use the calculated DMA boundary. */
17475 			tp->dma_rwctrl = saved_dma_rwctrl;
17476 		}
17477 
17478 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17479 	}
17480 
17481 out:
17482 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17483 out_nofree:
17484 	return ret;
17485 }
17486 
17487 static void tg3_init_bufmgr_config(struct tg3 *tp)
17488 {
17489 	if (tg3_flag(tp, 57765_PLUS)) {
17490 		tp->bufmgr_config.mbuf_read_dma_low_water =
17491 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17492 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17493 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17494 		tp->bufmgr_config.mbuf_high_water =
17495 			DEFAULT_MB_HIGH_WATER_57765;
17496 
17497 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17498 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17499 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17500 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17501 		tp->bufmgr_config.mbuf_high_water_jumbo =
17502 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17503 	} else if (tg3_flag(tp, 5705_PLUS)) {
17504 		tp->bufmgr_config.mbuf_read_dma_low_water =
17505 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17506 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17507 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17508 		tp->bufmgr_config.mbuf_high_water =
17509 			DEFAULT_MB_HIGH_WATER_5705;
17510 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17511 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17512 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17513 			tp->bufmgr_config.mbuf_high_water =
17514 				DEFAULT_MB_HIGH_WATER_5906;
17515 		}
17516 
17517 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17518 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17519 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17520 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17521 		tp->bufmgr_config.mbuf_high_water_jumbo =
17522 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17523 	} else {
17524 		tp->bufmgr_config.mbuf_read_dma_low_water =
17525 			DEFAULT_MB_RDMA_LOW_WATER;
17526 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17527 			DEFAULT_MB_MACRX_LOW_WATER;
17528 		tp->bufmgr_config.mbuf_high_water =
17529 			DEFAULT_MB_HIGH_WATER;
17530 
17531 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17532 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17533 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17534 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17535 		tp->bufmgr_config.mbuf_high_water_jumbo =
17536 			DEFAULT_MB_HIGH_WATER_JUMBO;
17537 	}
17538 
17539 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17540 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17541 }
17542 
17543 static char *tg3_phy_string(struct tg3 *tp)
17544 {
17545 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17546 	case TG3_PHY_ID_BCM5400:	return "5400";
17547 	case TG3_PHY_ID_BCM5401:	return "5401";
17548 	case TG3_PHY_ID_BCM5411:	return "5411";
17549 	case TG3_PHY_ID_BCM5701:	return "5701";
17550 	case TG3_PHY_ID_BCM5703:	return "5703";
17551 	case TG3_PHY_ID_BCM5704:	return "5704";
17552 	case TG3_PHY_ID_BCM5705:	return "5705";
17553 	case TG3_PHY_ID_BCM5750:	return "5750";
17554 	case TG3_PHY_ID_BCM5752:	return "5752";
17555 	case TG3_PHY_ID_BCM5714:	return "5714";
17556 	case TG3_PHY_ID_BCM5780:	return "5780";
17557 	case TG3_PHY_ID_BCM5755:	return "5755";
17558 	case TG3_PHY_ID_BCM5787:	return "5787";
17559 	case TG3_PHY_ID_BCM5784:	return "5784";
17560 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17561 	case TG3_PHY_ID_BCM5906:	return "5906";
17562 	case TG3_PHY_ID_BCM5761:	return "5761";
17563 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17564 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17565 	case TG3_PHY_ID_BCM57765:	return "57765";
17566 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17567 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17568 	case TG3_PHY_ID_BCM5762:	return "5762C";
17569 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17570 	case 0:			return "serdes";
17571 	default:		return "unknown";
17572 	}
17573 }
17574 
17575 static char *tg3_bus_string(struct tg3 *tp, char *str)
17576 {
17577 	if (tg3_flag(tp, PCI_EXPRESS)) {
17578 		strcpy(str, "PCI Express");
17579 		return str;
17580 	} else if (tg3_flag(tp, PCIX_MODE)) {
17581 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17582 
17583 		strcpy(str, "PCIX:");
17584 
17585 		if ((clock_ctrl == 7) ||
17586 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17587 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17588 			strcat(str, "133MHz");
17589 		else if (clock_ctrl == 0)
17590 			strcat(str, "33MHz");
17591 		else if (clock_ctrl == 2)
17592 			strcat(str, "50MHz");
17593 		else if (clock_ctrl == 4)
17594 			strcat(str, "66MHz");
17595 		else if (clock_ctrl == 6)
17596 			strcat(str, "100MHz");
17597 	} else {
17598 		strcpy(str, "PCI:");
17599 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17600 			strcat(str, "66MHz");
17601 		else
17602 			strcat(str, "33MHz");
17603 	}
17604 	if (tg3_flag(tp, PCI_32BIT))
17605 		strcat(str, ":32-bit");
17606 	else
17607 		strcat(str, ":64-bit");
17608 	return str;
17609 }
17610 
17611 static void tg3_init_coal(struct tg3 *tp)
17612 {
17613 	struct ethtool_coalesce *ec = &tp->coal;
17614 
17615 	memset(ec, 0, sizeof(*ec));
17616 	ec->cmd = ETHTOOL_GCOALESCE;
17617 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17618 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17619 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17620 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17621 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17622 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17623 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17624 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17625 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17626 
17627 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17628 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17629 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17630 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17631 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17632 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17633 	}
17634 
17635 	if (tg3_flag(tp, 5705_PLUS)) {
17636 		ec->rx_coalesce_usecs_irq = 0;
17637 		ec->tx_coalesce_usecs_irq = 0;
17638 		ec->stats_block_coalesce_usecs = 0;
17639 	}
17640 }
17641 
17642 static int tg3_init_one(struct pci_dev *pdev,
17643 				  const struct pci_device_id *ent)
17644 {
17645 	struct net_device *dev;
17646 	struct tg3 *tp;
17647 	int i, err;
17648 	u32 sndmbx, rcvmbx, intmbx;
17649 	char str[40];
17650 	u64 dma_mask, persist_dma_mask;
17651 	netdev_features_t features = 0;
17652 
17653 	printk_once(KERN_INFO "%s\n", version);
17654 
17655 	err = pci_enable_device(pdev);
17656 	if (err) {
17657 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17658 		return err;
17659 	}
17660 
17661 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17662 	if (err) {
17663 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17664 		goto err_out_disable_pdev;
17665 	}
17666 
17667 	pci_set_master(pdev);
17668 
17669 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17670 	if (!dev) {
17671 		err = -ENOMEM;
17672 		goto err_out_free_res;
17673 	}
17674 
17675 	SET_NETDEV_DEV(dev, &pdev->dev);
17676 
17677 	tp = netdev_priv(dev);
17678 	tp->pdev = pdev;
17679 	tp->dev = dev;
17680 	tp->rx_mode = TG3_DEF_RX_MODE;
17681 	tp->tx_mode = TG3_DEF_TX_MODE;
17682 	tp->irq_sync = 1;
17683 	tp->pcierr_recovery = false;
17684 
17685 	if (tg3_debug > 0)
17686 		tp->msg_enable = tg3_debug;
17687 	else
17688 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17689 
17690 	if (pdev_is_ssb_gige_core(pdev)) {
17691 		tg3_flag_set(tp, IS_SSB_CORE);
17692 		if (ssb_gige_must_flush_posted_writes(pdev))
17693 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17694 		if (ssb_gige_one_dma_at_once(pdev))
17695 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17696 		if (ssb_gige_have_roboswitch(pdev)) {
17697 			tg3_flag_set(tp, USE_PHYLIB);
17698 			tg3_flag_set(tp, ROBOSWITCH);
17699 		}
17700 		if (ssb_gige_is_rgmii(pdev))
17701 			tg3_flag_set(tp, RGMII_MODE);
17702 	}
17703 
17704 	/* The word/byte swap controls here control register access byte
17705 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17706 	 * setting below.
17707 	 */
17708 	tp->misc_host_ctrl =
17709 		MISC_HOST_CTRL_MASK_PCI_INT |
17710 		MISC_HOST_CTRL_WORD_SWAP |
17711 		MISC_HOST_CTRL_INDIR_ACCESS |
17712 		MISC_HOST_CTRL_PCISTATE_RW;
17713 
17714 	/* The NONFRM (non-frame) byte/word swap controls take effect
17715 	 * on descriptor entries, anything which isn't packet data.
17716 	 *
17717 	 * The StrongARM chips on the board (one for tx, one for rx)
17718 	 * are running in big-endian mode.
17719 	 */
17720 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17721 			GRC_MODE_WSWAP_NONFRM_DATA);
17722 #ifdef __BIG_ENDIAN
17723 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17724 #endif
17725 	spin_lock_init(&tp->lock);
17726 	spin_lock_init(&tp->indirect_lock);
17727 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17728 
17729 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17730 	if (!tp->regs) {
17731 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17732 		err = -ENOMEM;
17733 		goto err_out_free_dev;
17734 	}
17735 
17736 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17737 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17738 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17739 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17740 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17741 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17742 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17743 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17744 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17745 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17746 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17747 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17748 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17749 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17750 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17751 		tg3_flag_set(tp, ENABLE_APE);
17752 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17753 		if (!tp->aperegs) {
17754 			dev_err(&pdev->dev,
17755 				"Cannot map APE registers, aborting\n");
17756 			err = -ENOMEM;
17757 			goto err_out_iounmap;
17758 		}
17759 	}
17760 
17761 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17762 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17763 
17764 	dev->ethtool_ops = &tg3_ethtool_ops;
17765 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17766 	dev->netdev_ops = &tg3_netdev_ops;
17767 	dev->irq = pdev->irq;
17768 
17769 	err = tg3_get_invariants(tp, ent);
17770 	if (err) {
17771 		dev_err(&pdev->dev,
17772 			"Problem fetching invariants of chip, aborting\n");
17773 		goto err_out_apeunmap;
17774 	}
17775 
17776 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17777 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17778 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17779 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17780 	 * do DMA address check in tg3_start_xmit().
17781 	 */
17782 	if (tg3_flag(tp, IS_5788))
17783 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17784 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17785 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17786 #ifdef CONFIG_HIGHMEM
17787 		dma_mask = DMA_BIT_MASK(64);
17788 #endif
17789 	} else
17790 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17791 
17792 	/* Configure DMA attributes. */
17793 	if (dma_mask > DMA_BIT_MASK(32)) {
17794 		err = pci_set_dma_mask(pdev, dma_mask);
17795 		if (!err) {
17796 			features |= NETIF_F_HIGHDMA;
17797 			err = pci_set_consistent_dma_mask(pdev,
17798 							  persist_dma_mask);
17799 			if (err < 0) {
17800 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17801 					"DMA for consistent allocations\n");
17802 				goto err_out_apeunmap;
17803 			}
17804 		}
17805 	}
17806 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17807 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17808 		if (err) {
17809 			dev_err(&pdev->dev,
17810 				"No usable DMA configuration, aborting\n");
17811 			goto err_out_apeunmap;
17812 		}
17813 	}
17814 
17815 	tg3_init_bufmgr_config(tp);
17816 
17817 	/* 5700 B0 chips do not support checksumming correctly due
17818 	 * to hardware bugs.
17819 	 */
17820 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17821 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17822 
17823 		if (tg3_flag(tp, 5755_PLUS))
17824 			features |= NETIF_F_IPV6_CSUM;
17825 	}
17826 
17827 	/* TSO is on by default on chips that support hardware TSO.
17828 	 * Firmware TSO on older chips gives lower performance, so it
17829 	 * is off by default, but can be enabled using ethtool.
17830 	 */
17831 	if ((tg3_flag(tp, HW_TSO_1) ||
17832 	     tg3_flag(tp, HW_TSO_2) ||
17833 	     tg3_flag(tp, HW_TSO_3)) &&
17834 	    (features & NETIF_F_IP_CSUM))
17835 		features |= NETIF_F_TSO;
17836 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17837 		if (features & NETIF_F_IPV6_CSUM)
17838 			features |= NETIF_F_TSO6;
17839 		if (tg3_flag(tp, HW_TSO_3) ||
17840 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17841 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17842 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17843 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17844 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17845 			features |= NETIF_F_TSO_ECN;
17846 	}
17847 
17848 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17849 			 NETIF_F_HW_VLAN_CTAG_RX;
17850 	dev->vlan_features |= features;
17851 
17852 	/*
17853 	 * Add loopback capability only for a subset of devices that support
17854 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17855 	 * loopback for the remaining devices.
17856 	 */
17857 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17858 	    !tg3_flag(tp, CPMU_PRESENT))
17859 		/* Add the loopback capability */
17860 		features |= NETIF_F_LOOPBACK;
17861 
17862 	dev->hw_features |= features;
17863 	dev->priv_flags |= IFF_UNICAST_FLT;
17864 
17865 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17866 	dev->min_mtu = TG3_MIN_MTU;
17867 	dev->max_mtu = TG3_MAX_MTU(tp);
17868 
17869 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17870 	    !tg3_flag(tp, TSO_CAPABLE) &&
17871 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17872 		tg3_flag_set(tp, MAX_RXPEND_64);
17873 		tp->rx_pending = 63;
17874 	}
17875 
17876 	err = tg3_get_device_address(tp);
17877 	if (err) {
17878 		dev_err(&pdev->dev,
17879 			"Could not obtain valid ethernet address, aborting\n");
17880 		goto err_out_apeunmap;
17881 	}
17882 
17883 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17884 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17885 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17886 	for (i = 0; i < tp->irq_max; i++) {
17887 		struct tg3_napi *tnapi = &tp->napi[i];
17888 
17889 		tnapi->tp = tp;
17890 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17891 
17892 		tnapi->int_mbox = intmbx;
17893 		if (i <= 4)
17894 			intmbx += 0x8;
17895 		else
17896 			intmbx += 0x4;
17897 
17898 		tnapi->consmbox = rcvmbx;
17899 		tnapi->prodmbox = sndmbx;
17900 
17901 		if (i)
17902 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17903 		else
17904 			tnapi->coal_now = HOSTCC_MODE_NOW;
17905 
17906 		if (!tg3_flag(tp, SUPPORT_MSIX))
17907 			break;
17908 
17909 		/*
17910 		 * If we support MSIX, we'll be using RSS.  If we're using
17911 		 * RSS, the first vector only handles link interrupts and the
17912 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17913 		 * mailbox values for the next iteration.  The values we setup
17914 		 * above are still useful for the single vectored mode.
17915 		 */
17916 		if (!i)
17917 			continue;
17918 
17919 		rcvmbx += 0x8;
17920 
17921 		if (sndmbx & 0x4)
17922 			sndmbx -= 0x4;
17923 		else
17924 			sndmbx += 0xc;
17925 	}
17926 
17927 	/*
17928 	 * Reset chip in case UNDI or EFI driver did not shutdown
17929 	 * DMA self test will enable WDMAC and we'll see (spurious)
17930 	 * pending DMA on the PCI bus at that point.
17931 	 */
17932 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17933 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17934 		tg3_full_lock(tp, 0);
17935 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17936 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17937 		tg3_full_unlock(tp);
17938 	}
17939 
17940 	err = tg3_test_dma(tp);
17941 	if (err) {
17942 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17943 		goto err_out_apeunmap;
17944 	}
17945 
17946 	tg3_init_coal(tp);
17947 
17948 	pci_set_drvdata(pdev, dev);
17949 
17950 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17951 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17952 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17953 		tg3_flag_set(tp, PTP_CAPABLE);
17954 
17955 	tg3_timer_init(tp);
17956 
17957 	tg3_carrier_off(tp);
17958 
17959 	err = register_netdev(dev);
17960 	if (err) {
17961 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17962 		goto err_out_apeunmap;
17963 	}
17964 
17965 	if (tg3_flag(tp, PTP_CAPABLE)) {
17966 		tg3_ptp_init(tp);
17967 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17968 						   &tp->pdev->dev);
17969 		if (IS_ERR(tp->ptp_clock))
17970 			tp->ptp_clock = NULL;
17971 	}
17972 
17973 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17974 		    tp->board_part_number,
17975 		    tg3_chip_rev_id(tp),
17976 		    tg3_bus_string(tp, str),
17977 		    dev->dev_addr);
17978 
17979 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17980 		char *ethtype;
17981 
17982 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17983 			ethtype = "10/100Base-TX";
17984 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17985 			ethtype = "1000Base-SX";
17986 		else
17987 			ethtype = "10/100/1000Base-T";
17988 
17989 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17990 			    "(WireSpeed[%d], EEE[%d])\n",
17991 			    tg3_phy_string(tp), ethtype,
17992 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17993 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17994 	}
17995 
17996 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17997 		    (dev->features & NETIF_F_RXCSUM) != 0,
17998 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17999 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18000 		    tg3_flag(tp, ENABLE_ASF) != 0,
18001 		    tg3_flag(tp, TSO_CAPABLE) != 0);
18002 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18003 		    tp->dma_rwctrl,
18004 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18005 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18006 
18007 	pci_save_state(pdev);
18008 
18009 	return 0;
18010 
18011 err_out_apeunmap:
18012 	if (tp->aperegs) {
18013 		iounmap(tp->aperegs);
18014 		tp->aperegs = NULL;
18015 	}
18016 
18017 err_out_iounmap:
18018 	if (tp->regs) {
18019 		iounmap(tp->regs);
18020 		tp->regs = NULL;
18021 	}
18022 
18023 err_out_free_dev:
18024 	free_netdev(dev);
18025 
18026 err_out_free_res:
18027 	pci_release_regions(pdev);
18028 
18029 err_out_disable_pdev:
18030 	if (pci_is_enabled(pdev))
18031 		pci_disable_device(pdev);
18032 	return err;
18033 }
18034 
18035 static void tg3_remove_one(struct pci_dev *pdev)
18036 {
18037 	struct net_device *dev = pci_get_drvdata(pdev);
18038 
18039 	if (dev) {
18040 		struct tg3 *tp = netdev_priv(dev);
18041 
18042 		tg3_ptp_fini(tp);
18043 
18044 		release_firmware(tp->fw);
18045 
18046 		tg3_reset_task_cancel(tp);
18047 
18048 		if (tg3_flag(tp, USE_PHYLIB)) {
18049 			tg3_phy_fini(tp);
18050 			tg3_mdio_fini(tp);
18051 		}
18052 
18053 		unregister_netdev(dev);
18054 		if (tp->aperegs) {
18055 			iounmap(tp->aperegs);
18056 			tp->aperegs = NULL;
18057 		}
18058 		if (tp->regs) {
18059 			iounmap(tp->regs);
18060 			tp->regs = NULL;
18061 		}
18062 		free_netdev(dev);
18063 		pci_release_regions(pdev);
18064 		pci_disable_device(pdev);
18065 	}
18066 }
18067 
18068 #ifdef CONFIG_PM_SLEEP
18069 static int tg3_suspend(struct device *device)
18070 {
18071 	struct pci_dev *pdev = to_pci_dev(device);
18072 	struct net_device *dev = pci_get_drvdata(pdev);
18073 	struct tg3 *tp = netdev_priv(dev);
18074 	int err = 0;
18075 
18076 	rtnl_lock();
18077 
18078 	if (!netif_running(dev))
18079 		goto unlock;
18080 
18081 	tg3_reset_task_cancel(tp);
18082 	tg3_phy_stop(tp);
18083 	tg3_netif_stop(tp);
18084 
18085 	tg3_timer_stop(tp);
18086 
18087 	tg3_full_lock(tp, 1);
18088 	tg3_disable_ints(tp);
18089 	tg3_full_unlock(tp);
18090 
18091 	netif_device_detach(dev);
18092 
18093 	tg3_full_lock(tp, 0);
18094 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18095 	tg3_flag_clear(tp, INIT_COMPLETE);
18096 	tg3_full_unlock(tp);
18097 
18098 	err = tg3_power_down_prepare(tp);
18099 	if (err) {
18100 		int err2;
18101 
18102 		tg3_full_lock(tp, 0);
18103 
18104 		tg3_flag_set(tp, INIT_COMPLETE);
18105 		err2 = tg3_restart_hw(tp, true);
18106 		if (err2)
18107 			goto out;
18108 
18109 		tg3_timer_start(tp);
18110 
18111 		netif_device_attach(dev);
18112 		tg3_netif_start(tp);
18113 
18114 out:
18115 		tg3_full_unlock(tp);
18116 
18117 		if (!err2)
18118 			tg3_phy_start(tp);
18119 	}
18120 
18121 unlock:
18122 	rtnl_unlock();
18123 	return err;
18124 }
18125 
18126 static int tg3_resume(struct device *device)
18127 {
18128 	struct pci_dev *pdev = to_pci_dev(device);
18129 	struct net_device *dev = pci_get_drvdata(pdev);
18130 	struct tg3 *tp = netdev_priv(dev);
18131 	int err = 0;
18132 
18133 	rtnl_lock();
18134 
18135 	if (!netif_running(dev))
18136 		goto unlock;
18137 
18138 	netif_device_attach(dev);
18139 
18140 	tg3_full_lock(tp, 0);
18141 
18142 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18143 
18144 	tg3_flag_set(tp, INIT_COMPLETE);
18145 	err = tg3_restart_hw(tp,
18146 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18147 	if (err)
18148 		goto out;
18149 
18150 	tg3_timer_start(tp);
18151 
18152 	tg3_netif_start(tp);
18153 
18154 out:
18155 	tg3_full_unlock(tp);
18156 
18157 	if (!err)
18158 		tg3_phy_start(tp);
18159 
18160 unlock:
18161 	rtnl_unlock();
18162 	return err;
18163 }
18164 #endif /* CONFIG_PM_SLEEP */
18165 
18166 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18167 
18168 static void tg3_shutdown(struct pci_dev *pdev)
18169 {
18170 	struct net_device *dev = pci_get_drvdata(pdev);
18171 	struct tg3 *tp = netdev_priv(dev);
18172 
18173 	rtnl_lock();
18174 	netif_device_detach(dev);
18175 
18176 	if (netif_running(dev))
18177 		dev_close(dev);
18178 
18179 	if (system_state == SYSTEM_POWER_OFF)
18180 		tg3_power_down(tp);
18181 
18182 	rtnl_unlock();
18183 }
18184 
18185 /**
18186  * tg3_io_error_detected - called when PCI error is detected
18187  * @pdev: Pointer to PCI device
18188  * @state: The current pci connection state
18189  *
18190  * This function is called after a PCI bus error affecting
18191  * this device has been detected.
18192  */
18193 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18194 					      pci_channel_state_t state)
18195 {
18196 	struct net_device *netdev = pci_get_drvdata(pdev);
18197 	struct tg3 *tp = netdev_priv(netdev);
18198 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18199 
18200 	netdev_info(netdev, "PCI I/O error detected\n");
18201 
18202 	rtnl_lock();
18203 
18204 	/* We probably don't have netdev yet */
18205 	if (!netdev || !netif_running(netdev))
18206 		goto done;
18207 
18208 	/* We needn't recover from permanent error */
18209 	if (state == pci_channel_io_frozen)
18210 		tp->pcierr_recovery = true;
18211 
18212 	tg3_phy_stop(tp);
18213 
18214 	tg3_netif_stop(tp);
18215 
18216 	tg3_timer_stop(tp);
18217 
18218 	/* Want to make sure that the reset task doesn't run */
18219 	tg3_reset_task_cancel(tp);
18220 
18221 	netif_device_detach(netdev);
18222 
18223 	/* Clean up software state, even if MMIO is blocked */
18224 	tg3_full_lock(tp, 0);
18225 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18226 	tg3_full_unlock(tp);
18227 
18228 done:
18229 	if (state == pci_channel_io_perm_failure) {
18230 		if (netdev) {
18231 			tg3_napi_enable(tp);
18232 			dev_close(netdev);
18233 		}
18234 		err = PCI_ERS_RESULT_DISCONNECT;
18235 	} else {
18236 		pci_disable_device(pdev);
18237 	}
18238 
18239 	rtnl_unlock();
18240 
18241 	return err;
18242 }
18243 
18244 /**
18245  * tg3_io_slot_reset - called after the pci bus has been reset.
18246  * @pdev: Pointer to PCI device
18247  *
18248  * Restart the card from scratch, as if from a cold-boot.
18249  * At this point, the card has exprienced a hard reset,
18250  * followed by fixups by BIOS, and has its config space
18251  * set up identically to what it was at cold boot.
18252  */
18253 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18254 {
18255 	struct net_device *netdev = pci_get_drvdata(pdev);
18256 	struct tg3 *tp = netdev_priv(netdev);
18257 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18258 	int err;
18259 
18260 	rtnl_lock();
18261 
18262 	if (pci_enable_device(pdev)) {
18263 		dev_err(&pdev->dev,
18264 			"Cannot re-enable PCI device after reset.\n");
18265 		goto done;
18266 	}
18267 
18268 	pci_set_master(pdev);
18269 	pci_restore_state(pdev);
18270 	pci_save_state(pdev);
18271 
18272 	if (!netdev || !netif_running(netdev)) {
18273 		rc = PCI_ERS_RESULT_RECOVERED;
18274 		goto done;
18275 	}
18276 
18277 	err = tg3_power_up(tp);
18278 	if (err)
18279 		goto done;
18280 
18281 	rc = PCI_ERS_RESULT_RECOVERED;
18282 
18283 done:
18284 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18285 		tg3_napi_enable(tp);
18286 		dev_close(netdev);
18287 	}
18288 	rtnl_unlock();
18289 
18290 	return rc;
18291 }
18292 
18293 /**
18294  * tg3_io_resume - called when traffic can start flowing again.
18295  * @pdev: Pointer to PCI device
18296  *
18297  * This callback is called when the error recovery driver tells
18298  * us that its OK to resume normal operation.
18299  */
18300 static void tg3_io_resume(struct pci_dev *pdev)
18301 {
18302 	struct net_device *netdev = pci_get_drvdata(pdev);
18303 	struct tg3 *tp = netdev_priv(netdev);
18304 	int err;
18305 
18306 	rtnl_lock();
18307 
18308 	if (!netdev || !netif_running(netdev))
18309 		goto done;
18310 
18311 	tg3_full_lock(tp, 0);
18312 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18313 	tg3_flag_set(tp, INIT_COMPLETE);
18314 	err = tg3_restart_hw(tp, true);
18315 	if (err) {
18316 		tg3_full_unlock(tp);
18317 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18318 		goto done;
18319 	}
18320 
18321 	netif_device_attach(netdev);
18322 
18323 	tg3_timer_start(tp);
18324 
18325 	tg3_netif_start(tp);
18326 
18327 	tg3_full_unlock(tp);
18328 
18329 	tg3_phy_start(tp);
18330 
18331 done:
18332 	tp->pcierr_recovery = false;
18333 	rtnl_unlock();
18334 }
18335 
18336 static const struct pci_error_handlers tg3_err_handler = {
18337 	.error_detected	= tg3_io_error_detected,
18338 	.slot_reset	= tg3_io_slot_reset,
18339 	.resume		= tg3_io_resume
18340 };
18341 
18342 static struct pci_driver tg3_driver = {
18343 	.name		= DRV_MODULE_NAME,
18344 	.id_table	= tg3_pci_tbl,
18345 	.probe		= tg3_init_one,
18346 	.remove		= tg3_remove_one,
18347 	.err_handler	= &tg3_err_handler,
18348 	.driver.pm	= &tg3_pm_ops,
18349 	.shutdown	= tg3_shutdown,
18350 };
18351 
18352 module_pci_driver(tg3_driver);
18353