/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
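
/* Example (illustrative): these accessors paste the short flag name onto
 * the TG3_FLAG_ prefix, so
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *
 * expands to an atomic test_bit(TG3_FLAG_ENABLE_APE, tp->tg3_flags) on
 * the flag bitmap.
 */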

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			135
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Nov 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
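
/* Example: because TG3_TX_RING_SIZE is a power of two, NEXT_TX() wraps
 * with a mask instead of a divide:
 *
 *	NEXT_TX(511) == (512 & 511) == 0
 *
 * which equals (511 + 1) % TG3_TX_RING_SIZE.
 */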

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
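
/* Usage sketch (assumes the standard module parameter syntax): tg3_debug
 * is a NETIF_MSG_* bitmap, so e.g. "modprobe tg3 tg3_debug=0x3" enables
 * only NETIF_MSG_DRV (0x1) and NETIF_MSG_PROBE (0x2), while the default
 * of -1 selects TG3_DEF_MSG_ENABLE above.
 */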

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
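
/* Note: a PCI_DEVICE(vend, dev) entry wildcards the subsystem IDs
 * (PCI_ANY_ID), while the PCI_DEVICE_SUB() entries above match a
 * specific subsystem vendor/device, which is how, for example, the
 * Lenovo 5787M board is flagged 10/100-only without affecting other
 * 5787M boards.
 */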

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

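/* Write a register and immediately read it back; on PCI the read back
 * forces the posted write out to the device before we proceed.
 */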
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
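
/* Example usage: tw32_f() flushes a posted write immediately, while
 * tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) additionally enforces a
 * 40 usec settle time around registers that are unsafe to read back
 * right away (see _tw32_flush() above).
 */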

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
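
/* tg3_write_mem()/tg3_read_mem() provide word access to NIC SRAM through
 * the PCI memory window, e.g. the firmware command mailbox:
 *
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
 *
 * as done in tg3_ump_link_report() below.
 */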

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
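		/* fall through */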
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
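		/* fall through */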
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
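
/* Typical usage (see __tg3_readphy() below): bracket the MI register
 * sequence with
 *
 *	tg3_ape_lock(tp, tp->phy_ape_lock);
 *	...
 *	tg3_ape_unlock(tp, tp->phy_ape_lock);
 *
 * so the driver and the APE firmware do not race for the PHY.
 */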

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}
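
/* Note: unlike tg3_ape_event_lock(), the helper above returns nonzero on
 * timeout and zero on success; callers treat any nonzero value as
 * failure.
 */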

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}
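
/* Example (illustrative): callers use the wrapper with an MII register
 * constant and treat a nonzero return as -EBUSY, e.g.
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMSR, &val))
 *		... use val ...
 */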

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
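
/* The helper above and its read counterpart below implement indirect
 * clause-45 MMD access through the clause-22 MII_TG3_MMD_CTRL and
 * MII_TG3_MMD_ADDRESS registers: select the devad, write the register
 * address, switch the control register to no-increment data mode, then
 * transfer the data word.
 */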

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
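	/* Convert the remaining wait to 8 usec chunks to match udelay(8). */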
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1662 
1663 /* tp->lock is held. */
1664 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1665 {
1666 	u32 reg, val;
1667 
1668 	val = 0;
1669 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1670 		val = reg << 16;
1671 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1672 		val |= (reg & 0xffff);
1673 	*data++ = val;
1674 
1675 	val = 0;
1676 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1677 		val = reg << 16;
1678 	if (!tg3_readphy(tp, MII_LPA, &reg))
1679 		val |= (reg & 0xffff);
1680 	*data++ = val;
1681 
1682 	val = 0;
1683 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1684 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1685 			val = reg << 16;
1686 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1687 			val |= (reg & 0xffff);
1688 	}
1689 	*data++ = val;
1690 
1691 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1692 		val = reg << 16;
1693 	else
1694 		val = 0;
1695 	*data++ = val;
1696 }
1697 
1698 /* tp->lock is held. */
1699 static void tg3_ump_link_report(struct tg3 *tp)
1700 {
1701 	u32 data[4];
1702 
1703 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1704 		return;
1705 
1706 	tg3_phy_gather_ump_data(tp, data);
1707 
1708 	tg3_wait_for_event_ack(tp);
1709 
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1711 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1712 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1713 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1714 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1715 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1716 
1717 	tg3_generate_fw_event(tp);
1718 }
1719 
1720 /* tp->lock is held. */
1721 static void tg3_stop_fw(struct tg3 *tp)
1722 {
1723 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1724 		/* Wait for RX cpu to ACK the previous event. */
1725 		tg3_wait_for_event_ack(tp);
1726 
1727 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1728 
1729 		tg3_generate_fw_event(tp);
1730 
1731 		/* Wait for RX cpu to ACK this event. */
1732 		tg3_wait_for_event_ack(tp);
1733 	}
1734 }
1735 
1736 /* tp->lock is held. */
1737 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1738 {
1739 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1740 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1741 
1742 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1743 		switch (kind) {
1744 		case RESET_KIND_INIT:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_START);
1747 			break;
1748 
1749 		case RESET_KIND_SHUTDOWN:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_UNLOAD);
1752 			break;
1753 
1754 		case RESET_KIND_SUSPEND:
1755 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1756 				      DRV_STATE_SUSPEND);
1757 			break;
1758 
1759 		default:
1760 			break;
1761 		}
1762 	}
1763 }
1764 
1765 /* tp->lock is held. */
1766 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1767 {
1768 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1769 		switch (kind) {
1770 		case RESET_KIND_INIT:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_START_DONE);
1773 			break;
1774 
1775 		case RESET_KIND_SHUTDOWN:
1776 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 				      DRV_STATE_UNLOAD_DONE);
1778 			break;
1779 
1780 		default:
1781 			break;
1782 		}
1783 	}
1784 }
1785 
1786 /* tp->lock is held. */
1787 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1788 {
1789 	if (tg3_flag(tp, ENABLE_ASF)) {
1790 		switch (kind) {
1791 		case RESET_KIND_INIT:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_START);
1794 			break;
1795 
1796 		case RESET_KIND_SHUTDOWN:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_UNLOAD);
1799 			break;
1800 
1801 		case RESET_KIND_SUSPEND:
1802 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803 				      DRV_STATE_SUSPEND);
1804 			break;
1805 
1806 		default:
1807 			break;
1808 		}
1809 	}
1810 }
1811 
1812 static int tg3_poll_fw(struct tg3 *tp)
1813 {
1814 	int i;
1815 	u32 val;
1816 
1817 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1818 		return 0;
1819 
1820 	if (tg3_flag(tp, IS_SSB_CORE)) {
1821 		/* We don't use firmware. */
1822 		return 0;
1823 	}
1824 
1825 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1826 		/* Wait up to 20ms for init done. */
1827 		for (i = 0; i < 200; i++) {
1828 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1829 				return 0;
1830 			if (pci_channel_offline(tp->pdev))
1831 				return -ENODEV;
1832 
1833 			udelay(100);
1834 		}
1835 		return -ENODEV;
1836 	}
1837 
1838 	/* Wait for firmware initialization to complete. */
1839 	for (i = 0; i < 100000; i++) {
1840 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1841 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1842 			break;
1843 		if (pci_channel_offline(tp->pdev)) {
1844 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1845 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1846 				netdev_info(tp->dev, "No firmware running\n");
1847 			}
1848 
1849 			break;
1850 		}
1851 
1852 		udelay(10);
1853 	}
1854 
1855 	/* Chip might not be fitted with firmware.  Some Sun onboard
1856 	 * parts are configured like that.  So don't signal the timeout
1857 	 * of the above loop as an error, but do report the lack of
1858 	 * running firmware once.
1859 	 */
1860 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1861 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1862 
1863 		netdev_info(tp->dev, "No firmware running\n");
1864 	}
1865 
1866 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1867 		/* The 57765 A0 needs a little more
1868 		 * time to do some important work.
1869 		 */
1870 		mdelay(10);
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static void tg3_link_report(struct tg3 *tp)
1877 {
1878 	if (!netif_carrier_ok(tp->dev)) {
1879 		netif_info(tp, link, tp->dev, "Link is down\n");
1880 		tg3_ump_link_report(tp);
1881 	} else if (netif_msg_link(tp)) {
1882 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1883 			    (tp->link_config.active_speed == SPEED_1000 ?
1884 			     1000 :
1885 			     (tp->link_config.active_speed == SPEED_100 ?
1886 			      100 : 10)),
1887 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1888 			     "full" : "half"));
1889 
1890 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1891 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1892 			    "on" : "off",
1893 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1894 			    "on" : "off");
1895 
1896 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1897 			netdev_info(tp->dev, "EEE is %s\n",
1898 				    tp->setlpicnt ? "enabled" : "disabled");
1899 
1900 		tg3_ump_link_report(tp);
1901 	}
1902 
1903 	tp->link_up = netif_carrier_ok(tp->dev);
1904 }
1905 
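/* Translate copper (1000BASE-T) pause advertisement bits into
 * FLOW_CTRL_* flags: PAUSE alone means symmetric pause, PAUSE plus
 * ASYM means receive-only, and ASYM alone means transmit-only.
 */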
1906 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1907 {
1908 	u32 flowctrl = 0;
1909 
1910 	if (adv & ADVERTISE_PAUSE_CAP) {
1911 		flowctrl |= FLOW_CTRL_RX;
1912 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1913 			flowctrl |= FLOW_CTRL_TX;
1914 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1915 		flowctrl |= FLOW_CTRL_TX;
1916 
1917 	return flowctrl;
1918 }
1919 
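/* Build the 1000BASE-X pause advertisement word for the requested
 * FLOW_CTRL_* combination; the inverse of the decode helper below.
 */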
1920 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1921 {
1922 	u16 miireg;
1923 
1924 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1925 		miireg = ADVERTISE_1000XPAUSE;
1926 	else if (flow_ctrl & FLOW_CTRL_TX)
1927 		miireg = ADVERTISE_1000XPSE_ASYM;
1928 	else if (flow_ctrl & FLOW_CTRL_RX)
1929 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1930 	else
1931 		miireg = 0;
1932 
1933 	return miireg;
1934 }
1935 
1936 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1937 {
1938 	u32 flowctrl = 0;
1939 
1940 	if (adv & ADVERTISE_1000XPAUSE) {
1941 		flowctrl |= FLOW_CTRL_RX;
1942 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1943 			flowctrl |= FLOW_CTRL_TX;
1944 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1945 		flowctrl |= FLOW_CTRL_TX;
1946 
1947 	return flowctrl;
1948 }
1949 
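/* Resolve the pause configuration negotiated on a 1000BASE-X link
 * from the local and remote advertisements, following the usual
 * symmetric/asymmetric pause resolution rules.
 */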
1950 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1951 {
1952 	u8 cap = 0;
1953 
1954 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1955 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1956 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1957 		if (lcladv & ADVERTISE_1000XPAUSE)
1958 			cap = FLOW_CTRL_RX;
1959 		if (rmtadv & ADVERTISE_1000XPAUSE)
1960 			cap = FLOW_CTRL_TX;
1961 	}
1962 
1963 	return cap;
1964 }
1965 
1966 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1967 {
1968 	u8 autoneg;
1969 	u8 flowctrl = 0;
1970 	u32 old_rx_mode = tp->rx_mode;
1971 	u32 old_tx_mode = tp->tx_mode;
1972 
1973 	if (tg3_flag(tp, USE_PHYLIB))
1974 		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1975 	else
1976 		autoneg = tp->link_config.autoneg;
1977 
1978 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1979 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1980 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1981 		else
1982 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1983 	} else
1984 		flowctrl = tp->link_config.flowctrl;
1985 
1986 	tp->link_config.active_flowctrl = flowctrl;
1987 
1988 	if (flowctrl & FLOW_CTRL_RX)
1989 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1990 	else
1991 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1992 
1993 	if (old_rx_mode != tp->rx_mode)
1994 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1995 
1996 	if (flowctrl & FLOW_CTRL_TX)
1997 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1998 	else
1999 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2000 
2001 	if (old_tx_mode != tp->tx_mode)
2002 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2003 }
2004 
2005 static void tg3_adjust_link(struct net_device *dev)
2006 {
2007 	u8 oldflowctrl, linkmesg = 0;
2008 	u32 mac_mode, lcl_adv, rmt_adv;
2009 	struct tg3 *tp = netdev_priv(dev);
2010 	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2011 
2012 	spin_lock_bh(&tp->lock);
2013 
2014 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2015 				    MAC_MODE_HALF_DUPLEX);
2016 
2017 	oldflowctrl = tp->link_config.active_flowctrl;
2018 
2019 	if (phydev->link) {
2020 		lcl_adv = 0;
2021 		rmt_adv = 0;
2022 
2023 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 		else if (phydev->speed == SPEED_1000 ||
2026 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2027 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 		else
2029 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 
2031 		if (phydev->duplex == DUPLEX_HALF)
2032 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2033 		else {
2034 			lcl_adv = mii_advertise_flowctrl(
2035 				  tp->link_config.flowctrl);
2036 
2037 			if (phydev->pause)
2038 				rmt_adv = LPA_PAUSE_CAP;
2039 			if (phydev->asym_pause)
2040 				rmt_adv |= LPA_PAUSE_ASYM;
2041 		}
2042 
2043 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2044 	} else
2045 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2046 
2047 	if (mac_mode != tp->mac_mode) {
2048 		tp->mac_mode = mac_mode;
2049 		tw32_f(MAC_MODE, tp->mac_mode);
2050 		udelay(40);
2051 	}
2052 
2053 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2054 		if (phydev->speed == SPEED_10)
2055 			tw32(MAC_MI_STAT,
2056 			     MAC_MI_STAT_10MBPS_MODE |
2057 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058 		else
2059 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2060 	}
2061 
2062 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 	else
2068 		tw32(MAC_TX_LENGTHS,
2069 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2071 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 
2073 	if (phydev->link != tp->old_link ||
2074 	    phydev->speed != tp->link_config.active_speed ||
2075 	    phydev->duplex != tp->link_config.active_duplex ||
2076 	    oldflowctrl != tp->link_config.active_flowctrl)
2077 		linkmesg = 1;
2078 
2079 	tp->old_link = phydev->link;
2080 	tp->link_config.active_speed = phydev->speed;
2081 	tp->link_config.active_duplex = phydev->duplex;
2082 
2083 	spin_unlock_bh(&tp->lock);
2084 
2085 	if (linkmesg)
2086 		tg3_link_report(tp);
2087 }
2088 
2089 static int tg3_phy_init(struct tg3 *tp)
2090 {
2091 	struct phy_device *phydev;
2092 
2093 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2094 		return 0;
2095 
2096 	/* Bring the PHY back to a known state. */
2097 	tg3_bmcr_reset(tp);
2098 
2099 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2100 
2101 	/* Attach the MAC to the PHY. */
2102 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2103 			     tg3_adjust_link, phydev->interface);
2104 	if (IS_ERR(phydev)) {
2105 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2106 		return PTR_ERR(phydev);
2107 	}
2108 
2109 	/* Mask with MAC supported features. */
2110 	switch (phydev->interface) {
2111 	case PHY_INTERFACE_MODE_GMII:
2112 	case PHY_INTERFACE_MODE_RGMII:
2113 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2114 			phydev->supported &= (PHY_GBIT_FEATURES |
2115 					      SUPPORTED_Pause |
2116 					      SUPPORTED_Asym_Pause);
2117 			break;
2118 		}
2119 		/* fallthru */
2120 	case PHY_INTERFACE_MODE_MII:
2121 		phydev->supported &= (PHY_BASIC_FEATURES |
2122 				      SUPPORTED_Pause |
2123 				      SUPPORTED_Asym_Pause);
2124 		break;
2125 	default:
2126 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2127 		return -EINVAL;
2128 	}
2129 
2130 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2131 
2132 	phydev->advertising = phydev->supported;
2133 
2134 	return 0;
2135 }
2136 
2137 static void tg3_phy_start(struct tg3 *tp)
2138 {
2139 	struct phy_device *phydev;
2140 
2141 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142 		return;
2143 
2144 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2145 
2146 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148 		phydev->speed = tp->link_config.speed;
2149 		phydev->duplex = tp->link_config.duplex;
2150 		phydev->autoneg = tp->link_config.autoneg;
2151 		phydev->advertising = tp->link_config.advertising;
2152 	}
2153 
2154 	phy_start(phydev);
2155 
2156 	phy_start_aneg(phydev);
2157 }
2158 
2159 static void tg3_phy_stop(struct tg3 *tp)
2160 {
2161 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162 		return;
2163 
2164 	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2165 }
2166 
2167 static void tg3_phy_fini(struct tg3 *tp)
2168 {
2169 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2171 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2172 	}
2173 }
2174 
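/* Enable external loopback through the AUXCTL shadow register; the
 * 5401 cannot do read-modify-write there, so it gets a fixed
 * full-word write instead.
 */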
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2176 {
2177 	int err;
2178 	u32 val;
2179 
2180 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181 		return 0;
2182 
2183 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184 		/* Cannot do read-modify-write on 5401 */
2185 		err = tg3_phy_auxctl_write(tp,
2186 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188 					   0x4c20);
2189 		goto done;
2190 	}
2191 
2192 	err = tg3_phy_auxctl_read(tp,
2193 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194 	if (err)
2195 		return err;
2196 
2197 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198 	err = tg3_phy_auxctl_write(tp,
2199 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2200 
2201 done:
2202 	return err;
2203 }
2204 
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2206 {
2207 	u32 phytest;
2208 
2209 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210 		u32 phy;
2211 
2212 		tg3_writephy(tp, MII_TG3_FET_TEST,
2213 			     phytest | MII_TG3_FET_SHADOW_EN);
2214 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215 			if (enable)
2216 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 			else
2218 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2220 		}
2221 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2222 	}
2223 }
2224 
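/* Enable or disable the PHY's auto power-down (APD) feature, via the
 * FET shadow registers on FET-style PHYs and the MISC shadow
 * registers elsewhere.
 */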
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2226 {
2227 	u32 reg;
2228 
2229 	if (!tg3_flag(tp, 5705_PLUS) ||
2230 	    (tg3_flag(tp, 5717_PLUS) &&
2231 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232 		return;
2233 
2234 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235 		tg3_phy_fet_toggle_apd(tp, enable);
2236 		return;
2237 	}
2238 
2239 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2242 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2243 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2245 
2246 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2247 
2249 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250 	if (enable)
2251 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2252 
2253 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2254 }
2255 
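/* Toggle automatic MDI/MDI-X crossover on copper PHYs; serdes
 * interfaces have no MDI pairs, so they are skipped.
 */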
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2257 {
2258 	u32 phy;
2259 
2260 	if (!tg3_flag(tp, 5705_PLUS) ||
2261 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262 		return;
2263 
2264 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265 		u32 ephy;
2266 
2267 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2269 
2270 			tg3_writephy(tp, MII_TG3_FET_TEST,
2271 				     ephy | MII_TG3_FET_SHADOW_EN);
2272 			if (!tg3_readphy(tp, reg, &phy)) {
2273 				if (enable)
2274 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 				else
2276 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277 				tg3_writephy(tp, reg, phy);
2278 			}
2279 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2280 		}
2281 	} else {
2282 		int ret;
2283 
2284 		ret = tg3_phy_auxctl_read(tp,
2285 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286 		if (!ret) {
2287 			if (enable)
2288 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 			else
2290 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291 			tg3_phy_auxctl_write(tp,
2292 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2293 		}
2294 	}
2295 }
2296 
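/* Enable the PHY's ethernet@wirespeed feature, which lets a link
 * come up at a reduced speed when the cabling cannot sustain the
 * full rate.
 */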
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2298 {
2299 	int ret;
2300 	u32 val;
2301 
2302 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303 		return;
2304 
2305 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306 	if (!ret)
2307 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2309 }
2310 
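/* Program PHY DSP coefficients from the one-time-programmable (OTP)
 * values saved in tp->phy_otp.
 */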
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 {
2313 	u32 otp, phy;
2314 
2315 	if (!tp->phy_otp)
2316 		return;
2317 
2318 	otp = tp->phy_otp;
2319 
2320 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321 		return;
2322 
2323 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2326 
2327 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2330 
2331 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2334 
2335 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2337 
2338 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2340 
2341 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2344 
2345 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2346 }
2347 
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2349 {
2350 	u32 val;
2351 	struct ethtool_eee *dest = &tp->eee;
2352 
2353 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354 		return;
2355 
2356 	if (eee)
2357 		dest = eee;
2358 
2359 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360 		return;
2361 
2362 	/* Pull eee_active */
2363 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2365 		dest->eee_active = 1;
2366 	else
2367 		dest->eee_active = 0;
2368 
2369 	/* Pull lp advertised settings */
2370 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371 		return;
2372 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 
2374 	/* Pull advertised and eee_enabled settings */
2375 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376 		return;
2377 	dest->eee_enabled = !!val;
2378 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2379 
2380 	/* Pull tx_lpi_enabled */
2381 	val = tr32(TG3_CPMU_EEE_MODE);
2382 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2383 
2384 	/* Pull lpi timer value */
2385 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2386 }
2387 
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2389 {
2390 	u32 val;
2391 
2392 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393 		return;
2394 
2395 	tp->setlpicnt = 0;
2396 
2397 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398 	    current_link_up &&
2399 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2400 	    (tp->link_config.active_speed == SPEED_100 ||
2401 	     tp->link_config.active_speed == SPEED_1000)) {
2402 		u32 eeectl;
2403 
2404 		if (tp->link_config.active_speed == SPEED_1000)
2405 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406 		else
2407 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2408 
2409 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2410 
2411 		tg3_eee_pull_config(tp, NULL);
2412 		if (tp->eee.eee_active)
2413 			tp->setlpicnt = 2;
2414 	}
2415 
2416 	if (!tp->setlpicnt) {
2417 		if (current_link_up &&
2418 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2421 		}
2422 
2423 		val = tr32(TG3_CPMU_EEE_MODE);
2424 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2425 	}
2426 }
2427 
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2429 {
2430 	u32 val;
2431 
2432 	if (tp->link_config.active_speed == SPEED_1000 &&
2433 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435 	     tg3_flag(tp, 57765_CLASS)) &&
2436 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437 		val = MII_TG3_DSP_TAP26_ALNOKO |
2438 		      MII_TG3_DSP_TAP26_RMRXSTO;
2439 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2441 	}
2442 
2443 	val = tr32(TG3_CPMU_EEE_MODE);
2444 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2445 }
2446 
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2448 {
2449 	int limit = 100;
2450 
2451 	while (limit--) {
2452 		u32 tmp32;
2453 
2454 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455 			if ((tmp32 & 0x1000) == 0)
2456 				break;
2457 		}
2458 	}
2459 	if (limit < 0)
2460 		return -EBUSY;
2461 
2462 	return 0;
2463 }
2464 
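/* Write known test patterns into all four DSP channels and read them
 * back; on a mismatch or macro timeout, flag *resetp so the caller
 * resets the PHY and retries. Part of the 5703/4/5 reset workaround.
 */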
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2466 {
2467 	static const u32 test_pat[4][6] = {
2468 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2472 	};
2473 	int chan;
2474 
2475 	for (chan = 0; chan < 4; chan++) {
2476 		int i;
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 			     (chan * 0x2000) | 0x0200);
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2481 
2482 		for (i = 0; i < 6; i++)
2483 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484 				     test_pat[chan][i]);
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493 			     (chan * 0x2000) | 0x0200);
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501 		if (tg3_wait_macro_done(tp)) {
2502 			*resetp = 1;
2503 			return -EBUSY;
2504 		}
2505 
2506 		for (i = 0; i < 6; i += 2) {
2507 			u32 low, high;
2508 
2509 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511 			    tg3_wait_macro_done(tp)) {
2512 				*resetp = 1;
2513 				return -EBUSY;
2514 			}
2515 			low &= 0x7fff;
2516 			high &= 0x000f;
2517 			if (low != test_pat[chan][i] ||
2518 			    high != test_pat[chan][i+1]) {
2519 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2522 
2523 				return -EBUSY;
2524 			}
2525 		}
2526 	}
2527 
2528 	return 0;
2529 }
2530 
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2532 {
2533 	int chan;
2534 
2535 	for (chan = 0; chan < 4; chan++) {
2536 		int i;
2537 
2538 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539 			     (chan * 0x2000) | 0x0200);
2540 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541 		for (i = 0; i < 6; i++)
2542 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2543 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544 		if (tg3_wait_macro_done(tp))
2545 			return -EBUSY;
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2552 {
2553 	u32 reg32, phy9_orig;
2554 	int retries, do_phy_reset, err;
2555 
2556 	retries = 10;
2557 	do_phy_reset = 1;
2558 	do {
2559 		if (do_phy_reset) {
2560 			err = tg3_bmcr_reset(tp);
2561 			if (err)
2562 				return err;
2563 			do_phy_reset = 0;
2564 		}
2565 
2566 		/* Disable transmitter and interrupt.  */
2567 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568 			continue;
2569 
2570 		reg32 |= 0x3000;
2571 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2572 
2573 		/* Set full-duplex, 1000 mbps.  */
2574 		tg3_writephy(tp, MII_BMCR,
2575 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2576 
2577 		/* Set to master mode.  */
2578 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579 			continue;
2580 
2581 		tg3_writephy(tp, MII_CTRL1000,
2582 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2583 
2584 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585 		if (err)
2586 			return err;
2587 
2588 		/* Block the PHY control access.  */
2589 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2590 
2591 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592 		if (!err)
2593 			break;
2594 	} while (--retries);
2595 
2596 	err = tg3_phy_reset_chanpat(tp);
2597 	if (err)
2598 		return err;
2599 
2600 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2601 
2602 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2604 
2605 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2606 
2607 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2608 
2609 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2610 		reg32 &= ~0x3000;
2611 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 	} else if (!err)
2613 		err = -EBUSY;
2614 
2615 	return err;
2616 }
2617 
2618 static void tg3_carrier_off(struct tg3 *tp)
2619 {
2620 	netif_carrier_off(tp->dev);
2621 	tp->link_up = false;
2622 }
2623 
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2625 {
2626 	if (tg3_flag(tp, ENABLE_ASF))
2627 		netdev_warn(tp->dev,
2628 			    "Management side-band traffic will be interrupted during phy settings change\n");
2629 }
2630 
2631 /* Reset the tigon3 PHY and reapply the chip-specific
2632  * workarounds and DSP fixups that a PHY reset clears.
2633  */
2634 static int tg3_phy_reset(struct tg3 *tp)
2635 {
2636 	u32 val, cpmuctrl;
2637 	int err;
2638 
2639 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640 		val = tr32(GRC_MISC_CFG);
2641 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2642 		udelay(40);
2643 	}
2644 	err  = tg3_readphy(tp, MII_BMSR, &val);
2645 	err |= tg3_readphy(tp, MII_BMSR, &val);
2646 	if (err != 0)
2647 		return -EBUSY;
2648 
2649 	if (netif_running(tp->dev) && tp->link_up) {
2650 		netif_carrier_off(tp->dev);
2651 		tg3_link_report(tp);
2652 	}
2653 
2654 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2657 		err = tg3_phy_reset_5703_4_5(tp);
2658 		if (err)
2659 			return err;
2660 		goto out;
2661 	}
2662 
2663 	cpmuctrl = 0;
2664 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2667 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2668 			tw32(TG3_CPMU_CTRL,
2669 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2670 	}
2671 
2672 	err = tg3_bmcr_reset(tp);
2673 	if (err)
2674 		return err;
2675 
2676 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2679 
2680 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2681 	}
2682 
2683 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2688 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2689 			udelay(40);
2690 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2691 		}
2692 	}
2693 
2694 	if (tg3_flag(tp, 5717_PLUS) &&
2695 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2696 		return 0;
2697 
2698 	tg3_phy_apply_otp(tp);
2699 
2700 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701 		tg3_phy_toggle_apd(tp, true);
2702 	else
2703 		tg3_phy_toggle_apd(tp, false);
2704 
2705 out:
2706 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2710 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716 	}
2717 
2718 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2721 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2722 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2724 		}
2725 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730 				tg3_writephy(tp, MII_TG3_TEST1,
2731 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2732 			} else
2733 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2734 
2735 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2736 		}
2737 	}
2738 
2739 	/* Set Extended packet length bit (bit 14) on all chips that
2740 	 * support jumbo frames. */
2741 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742 		/* Cannot do read-modify-write on 5401 */
2743 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745 		/* Set bit 14 with read-modify-write to preserve other bits */
2746 		err = tg3_phy_auxctl_read(tp,
2747 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2748 		if (!err)
2749 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2751 	}
2752 
2753 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754 	 * jumbo frames transmission.
2755 	 */
2756 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2760 	}
2761 
2762 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763 		/* adjust output voltage */
2764 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2765 	}
2766 
2767 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2769 
2770 	tg3_phy_toggle_automdix(tp, true);
2771 	tg3_phy_set_wirespeed(tp);
2772 	return 0;
2773 }
2774 
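/* Each PCI function owns a 4-bit field in the shared GPIO message
 * word; the ALL_* masks replicate one flag across all four
 * function slots.
 */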
2775 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2777 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2778 					  TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2784 
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2790 
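/* Update this function's 4-bit field in the shared driver-status
 * word and return the combined status of all functions, shifted
 * down by TG3_APE_GPIO_MSG_SHIFT.
 */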
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2792 {
2793 	u32 status, shift;
2794 
2795 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2797 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2798 	else
2799 		status = tr32(TG3_CPMU_DRV_STATUS);
2800 
2801 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2803 	status |= (newstat << shift);
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2807 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2808 	else
2809 		tw32(TG3_CPMU_DRV_STATUS, status);
2810 
2811 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2812 }
2813 
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2815 {
2816 	if (!tg3_flag(tp, IS_NIC))
2817 		return 0;
2818 
2819 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2822 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823 			return -EIO;
2824 
2825 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2826 
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 
2830 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831 	} else {
2832 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 	}
2835 
2836 	return 0;
2837 }
2838 
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2840 {
2841 	u32 grc_local_ctrl;
2842 
2843 	if (!tg3_flag(tp, IS_NIC) ||
2844 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2846 		return;
2847 
2848 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2849 
2850 	tw32_wait_f(GRC_LOCAL_CTRL,
2851 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2853 
2854 	tw32_wait_f(GRC_LOCAL_CTRL,
2855 		    grc_local_ctrl,
2856 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 
2858 	tw32_wait_f(GRC_LOCAL_CTRL,
2859 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 }
2862 
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2864 {
2865 	if (!tg3_flag(tp, IS_NIC))
2866 		return;
2867 
2868 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2870 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871 			    (GRC_LCLCTRL_GPIO_OE0 |
2872 			     GRC_LCLCTRL_GPIO_OE1 |
2873 			     GRC_LCLCTRL_GPIO_OE2 |
2874 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2875 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2876 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2877 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881 				     GRC_LCLCTRL_GPIO_OE1 |
2882 				     GRC_LCLCTRL_GPIO_OE2 |
2883 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2885 				     tp->grc_local_ctrl;
2886 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 
2889 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 
2893 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 	} else {
2897 		u32 no_gpio2;
2898 		u32 grc_local_ctrl = 0;
2899 
2900 		/* Workaround to prevent drawing too much current. */
2901 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2904 				    grc_local_ctrl,
2905 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 		}
2907 
2908 		/* On 5753 and variants, GPIO2 cannot be used. */
2909 		no_gpio2 = tp->nic_sram_data_cfg &
2910 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2911 
2912 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913 				  GRC_LCLCTRL_GPIO_OE1 |
2914 				  GRC_LCLCTRL_GPIO_OE2 |
2915 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2916 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2917 		if (no_gpio2) {
2918 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2920 		}
2921 		tw32_wait_f(GRC_LOCAL_CTRL,
2922 			    tp->grc_local_ctrl | grc_local_ctrl,
2923 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2924 
2925 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2926 
2927 		tw32_wait_f(GRC_LOCAL_CTRL,
2928 			    tp->grc_local_ctrl | grc_local_ctrl,
2929 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 
2931 		if (!no_gpio2) {
2932 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933 			tw32_wait_f(GRC_LOCAL_CTRL,
2934 				    tp->grc_local_ctrl | grc_local_ctrl,
2935 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2936 		}
2937 	}
2938 }
2939 
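/* Decide between Vaux and Vmain on 5717-class chips: publish this
 * function's power request in the shared status word, defer to any
 * other function whose driver is present, and otherwise switch the
 * power source based on the combined requests.
 */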
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2941 {
2942 	u32 msg = 0;
2943 
2944 	/* Serialize power state transitions */
2945 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2946 		return;
2947 
2948 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949 		msg = TG3_GPIO_MSG_NEED_VAUX;
2950 
2951 	msg = tg3_set_function_status(tp, msg);
2952 
2953 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954 		goto done;
2955 
2956 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957 		tg3_pwrsrc_switch_to_vaux(tp);
2958 	else
2959 		tg3_pwrsrc_die_with_vmain(tp);
2960 
2961 done:
2962 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2963 }
2964 
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2966 {
2967 	bool need_vaux = false;
2968 
2969 	/* The GPIOs do something completely different on 57765. */
2970 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2971 		return;
2972 
2973 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2976 		tg3_frob_aux_power_5717(tp, include_wol ?
2977 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2978 		return;
2979 	}
2980 
2981 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982 		struct net_device *dev_peer;
2983 
2984 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2985 
2986 		/* remove_one() may have been run on the peer. */
2987 		if (dev_peer) {
2988 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2989 
2990 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2991 				return;
2992 
2993 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994 			    tg3_flag(tp_peer, ENABLE_ASF))
2995 				need_vaux = true;
2996 		}
2997 	}
2998 
2999 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000 	    tg3_flag(tp, ENABLE_ASF))
3001 		need_vaux = true;
3002 
3003 	if (need_vaux)
3004 		tg3_pwrsrc_switch_to_vaux(tp);
3005 	else
3006 		tg3_pwrsrc_die_with_vmain(tp);
3007 }
3008 
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3010 {
3011 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012 		return 1;
3013 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014 		if (speed != SPEED_10)
3015 			return 1;
3016 	} else if (speed == SPEED_10)
3017 		return 1;
3018 
3019 	return 0;
3020 }
3021 
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3023 {
3024 	switch (tg3_asic_rev(tp)) {
3025 	case ASIC_REV_5700:
3026 	case ASIC_REV_5704:
3027 		return true;
3028 	case ASIC_REV_5780:
3029 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3030 			return true;
3031 		return false;
3032 	case ASIC_REV_5717:
3033 		if (!tp->pci_fn)
3034 			return true;
3035 		return false;
3036 	case ASIC_REV_5719:
3037 	case ASIC_REV_5720:
3038 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3039 		    !tp->pci_fn)
3040 			return true;
3041 		return false;
3042 	}
3043 
3044 	return false;
3045 }
3046 
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3048 {
3049 	switch (tg3_asic_rev(tp)) {
3050 	case ASIC_REV_5719:
3051 	case ASIC_REV_5720:
3052 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3053 		    !tp->pci_fn)
3054 			return true;
3055 		return false;
3056 	}
3057 
3058 	return false;
3059 }
3060 
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3062 {
3063 	u32 val;
3064 
3065 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3066 		return;
3067 
3068 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3072 
3073 			sg_dig_ctrl |=
3074 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3077 		}
3078 		return;
3079 	}
3080 
3081 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3082 		tg3_bmcr_reset(tp);
3083 		val = tr32(GRC_MISC_CFG);
3084 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3085 		udelay(40);
3086 		return;
3087 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3088 		u32 phytest;
3089 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3090 			u32 phy;
3091 
3092 			tg3_writephy(tp, MII_ADVERTISE, 0);
3093 			tg3_writephy(tp, MII_BMCR,
3094 				     BMCR_ANENABLE | BMCR_ANRESTART);
3095 
3096 			tg3_writephy(tp, MII_TG3_FET_TEST,
3097 				     phytest | MII_TG3_FET_SHADOW_EN);
3098 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3100 				tg3_writephy(tp,
3101 					     MII_TG3_FET_SHDW_AUXMODE4,
3102 					     phy);
3103 			}
3104 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3105 		}
3106 		return;
3107 	} else if (do_low_power) {
3108 		if (!tg3_phy_led_bug(tp))
3109 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3111 
3112 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3115 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3116 	}
3117 
3118 	/* The PHY should not be powered down on some chips because
3119 	 * of bugs.
3120 	 */
3121 	if (tg3_phy_power_bug(tp))
3122 		return;
3123 
3124 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3130 	}
3131 
3132 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3133 }
3134 
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3137 {
3138 	if (tg3_flag(tp, NVRAM)) {
3139 		int i;
3140 
3141 		if (tp->nvram_lock_cnt == 0) {
3142 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143 			for (i = 0; i < 8000; i++) {
3144 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 					break;
3146 				udelay(20);
3147 			}
3148 			if (i == 8000) {
3149 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 				return -ENODEV;
3151 			}
3152 		}
3153 		tp->nvram_lock_cnt++;
3154 	}
3155 	return 0;
3156 }
3157 
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3160 {
3161 	if (tg3_flag(tp, NVRAM)) {
3162 		if (tp->nvram_lock_cnt > 0)
3163 			tp->nvram_lock_cnt--;
3164 		if (tp->nvram_lock_cnt == 0)
3165 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3166 	}
3167 }
3168 
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3171 {
3172 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 		u32 nvaccess = tr32(NVRAM_ACCESS);
3174 
3175 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3176 	}
3177 }
3178 
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3181 {
3182 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 		u32 nvaccess = tr32(NVRAM_ACCESS);
3184 
3185 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3186 	}
3187 }
3188 
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190 					u32 offset, u32 *val)
3191 {
3192 	u32 tmp;
3193 	int i;
3194 
3195 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3196 		return -EINVAL;
3197 
3198 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199 					EEPROM_ADDR_DEVID_MASK |
3200 					EEPROM_ADDR_READ);
3201 	tw32(GRC_EEPROM_ADDR,
3202 	     tmp |
3203 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205 	      EEPROM_ADDR_ADDR_MASK) |
3206 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3207 
3208 	for (i = 0; i < 1000; i++) {
3209 		tmp = tr32(GRC_EEPROM_ADDR);
3210 
3211 		if (tmp & EEPROM_ADDR_COMPLETE)
3212 			break;
3213 		msleep(1);
3214 	}
3215 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3216 		return -EBUSY;
3217 
3218 	tmp = tr32(GRC_EEPROM_DATA);
3219 
3220 	/*
3221 	 * The data will always be opposite the native endian
3222 	 * format.  Perform a blind byteswap to compensate.
3223 	 */
3224 	*val = swab32(tmp);
3225 
3226 	return 0;
3227 }
3228 
3229 #define NVRAM_CMD_TIMEOUT 10000
3230 
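/* Kick off an NVRAM command and poll for completion, giving the
 * controller up to NVRAM_CMD_TIMEOUT * 10 usec (100 ms) to assert
 * NVRAM_CMD_DONE.
 */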
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 {
3233 	int i;
3234 
3235 	tw32(NVRAM_CMD, nvram_cmd);
3236 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3237 		udelay(10);
3238 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239 			udelay(10);
3240 			break;
3241 		}
3242 	}
3243 
3244 	if (i == NVRAM_CMD_TIMEOUT)
3245 		return -EBUSY;
3246 
3247 	return 0;
3248 }
3249 
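/* Translate a linear NVRAM offset into the page/offset form used by
 * buffered Atmel AT45DB0x1B flashes, whose page size is not a power
 * of two, so the page index has to be shifted into place instead of
 * simply masked.
 */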
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3251 {
3252 	if (tg3_flag(tp, NVRAM) &&
3253 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3254 	    tg3_flag(tp, FLASH) &&
3255 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3257 
3258 		addr = ((addr / tp->nvram_pagesize) <<
3259 			ATMEL_AT45DB0X1B_PAGE_POS) +
3260 		       (addr % tp->nvram_pagesize);
3261 
3262 	return addr;
3263 }
3264 
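/* Inverse of tg3_nvram_phys_addr(): fold a page/offset address back
 * into a linear NVRAM offset.
 */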
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3266 {
3267 	if (tg3_flag(tp, NVRAM) &&
3268 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3269 	    tg3_flag(tp, FLASH) &&
3270 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3272 
3273 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274 			tp->nvram_pagesize) +
3275 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3276 
3277 	return addr;
3278 }
3279 
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281  * the byteswapping settings for all other register accesses.
3282  * tg3 devices are BE devices, so on a BE machine, the data
3283  * returned will be exactly as it is seen in NVRAM.  On a LE
3284  * machine, the 32-bit value will be byteswapped.
3285  */
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3287 {
3288 	int ret;
3289 
3290 	if (!tg3_flag(tp, NVRAM))
3291 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3292 
3293 	offset = tg3_nvram_phys_addr(tp, offset);
3294 
3295 	if (offset > NVRAM_ADDR_MSK)
3296 		return -EINVAL;
3297 
3298 	ret = tg3_nvram_lock(tp);
3299 	if (ret)
3300 		return ret;
3301 
3302 	tg3_enable_nvram_access(tp);
3303 
3304 	tw32(NVRAM_ADDR, offset);
3305 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3307 
3308 	if (ret == 0)
3309 		*val = tr32(NVRAM_RDDATA);
3310 
3311 	tg3_disable_nvram_access(tp);
3312 
3313 	tg3_nvram_unlock(tp);
3314 
3315 	return ret;
3316 }
3317 
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3320 {
3321 	u32 v;
3322 	int res = tg3_nvram_read(tp, offset, &v);
3323 	if (!res)
3324 		*val = cpu_to_be32(v);
3325 	return res;
3326 }
3327 
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329 				    u32 offset, u32 len, u8 *buf)
3330 {
3331 	int i, j, rc = 0;
3332 	u32 val;
3333 
3334 	for (i = 0; i < len; i += 4) {
3335 		u32 addr;
3336 		__be32 data;
3337 
3338 		addr = offset + i;
3339 
3340 		memcpy(&data, buf + i, 4);
3341 
3342 		/*
3343 		 * The SEEPROM interface expects the data to always be opposite
3344 		 * the native endian format.  We accomplish this by reversing
3345 		 * all the operations that would have been performed on the
3346 		 * data from a call to tg3_nvram_read_be32().
3347 		 */
3348 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3349 
3350 		val = tr32(GRC_EEPROM_ADDR);
3351 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3352 
3353 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3354 			EEPROM_ADDR_READ);
3355 		tw32(GRC_EEPROM_ADDR, val |
3356 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3357 			(addr & EEPROM_ADDR_ADDR_MASK) |
3358 			EEPROM_ADDR_START |
3359 			EEPROM_ADDR_WRITE);
3360 
3361 		for (j = 0; j < 1000; j++) {
3362 			val = tr32(GRC_EEPROM_ADDR);
3363 
3364 			if (val & EEPROM_ADDR_COMPLETE)
3365 				break;
3366 			msleep(1);
3367 		}
3368 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3369 			rc = -EBUSY;
3370 			break;
3371 		}
3372 	}
3373 
3374 	return rc;
3375 }
3376 
3377 /* offset and length are dword aligned */
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3379 		u8 *buf)
3380 {
3381 	int ret = 0;
3382 	u32 pagesize = tp->nvram_pagesize;
3383 	u32 pagemask = pagesize - 1;
3384 	u32 nvram_cmd;
3385 	u8 *tmp;
3386 
3387 	tmp = kmalloc(pagesize, GFP_KERNEL);
3388 	if (tmp == NULL)
3389 		return -ENOMEM;
3390 
3391 	while (len) {
3392 		int j;
3393 		u32 phy_addr, page_off, size;
3394 
3395 		phy_addr = offset & ~pagemask;
3396 
3397 		for (j = 0; j < pagesize; j += 4) {
3398 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399 						  (__be32 *) (tmp + j));
3400 			if (ret)
3401 				break;
3402 		}
3403 		if (ret)
3404 			break;
3405 
3406 		page_off = offset & pagemask;
3407 		size = pagesize;
3408 		if (len < size)
3409 			size = len;
3410 
3411 		len -= size;
3412 
3413 		memcpy(tmp + page_off, buf, size);
3414 
3415 		offset = offset + (pagesize - page_off);
3416 
3417 		tg3_enable_nvram_access(tp);
3418 
3419 		/*
3420 		 * Before we can erase the flash page, we need
3421 		 * to issue a special "write enable" command.
3422 		 */
3423 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3424 
3425 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426 			break;
3427 
3428 		/* Erase the target page */
3429 		tw32(NVRAM_ADDR, phy_addr);
3430 
3431 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3433 
3434 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 			break;
3436 
3437 		/* Issue another write enable to start the write. */
3438 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3439 
3440 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441 			break;
3442 
3443 		for (j = 0; j < pagesize; j += 4) {
3444 			__be32 data;
3445 
3446 			data = *((__be32 *) (tmp + j));
3447 
3448 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3449 
3450 			tw32(NVRAM_ADDR, phy_addr + j);
3451 
3452 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3453 				NVRAM_CMD_WR;
3454 
3455 			if (j == 0)
3456 				nvram_cmd |= NVRAM_CMD_FIRST;
3457 			else if (j == (pagesize - 4))
3458 				nvram_cmd |= NVRAM_CMD_LAST;
3459 
3460 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 			if (ret)
3462 				break;
3463 		}
3464 		if (ret)
3465 			break;
3466 	}
3467 
3468 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 
3471 	kfree(tmp);
3472 
3473 	return ret;
3474 }
3475 
3476 /* offset and length are dword aligned */
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478 		u8 *buf)
3479 {
3480 	int i, ret = 0;
3481 
3482 	for (i = 0; i < len; i += 4, offset += 4) {
3483 		u32 page_off, phy_addr, nvram_cmd;
3484 		__be32 data;
3485 
3486 		memcpy(&data, buf + i, 4);
3487 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3488 
3489 		page_off = offset % tp->nvram_pagesize;
3490 
3491 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3492 
3493 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3494 
3495 		if (page_off == 0 || i == 0)
3496 			nvram_cmd |= NVRAM_CMD_FIRST;
3497 		if (page_off == (tp->nvram_pagesize - 4))
3498 			nvram_cmd |= NVRAM_CMD_LAST;
3499 
3500 		if (i == (len - 4))
3501 			nvram_cmd |= NVRAM_CMD_LAST;
3502 
3503 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504 		    !tg3_flag(tp, FLASH) ||
3505 		    !tg3_flag(tp, 57765_PLUS))
3506 			tw32(NVRAM_ADDR, phy_addr);
3507 
3508 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509 		    !tg3_flag(tp, 5755_PLUS) &&
3510 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3511 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3512 			u32 cmd;
3513 
3514 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515 			ret = tg3_nvram_exec_cmd(tp, cmd);
3516 			if (ret)
3517 				break;
3518 		}
3519 		if (!tg3_flag(tp, FLASH)) {
3520 			/* We always do complete word writes to eeprom. */
3521 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3522 		}
3523 
3524 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525 		if (ret)
3526 			break;
3527 	}
3528 	return ret;
3529 }
3530 
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3533 {
3534 	int ret;
3535 
3536 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3537 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3539 		udelay(40);
3540 	}
3541 
3542 	if (!tg3_flag(tp, NVRAM)) {
3543 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3544 	} else {
3545 		u32 grc_mode;
3546 
3547 		ret = tg3_nvram_lock(tp);
3548 		if (ret)
3549 			return ret;
3550 
3551 		tg3_enable_nvram_access(tp);
3552 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553 			tw32(NVRAM_WRITE1, 0x406);
3554 
3555 		grc_mode = tr32(GRC_MODE);
3556 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3557 
3558 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3560 				buf);
3561 		} else {
3562 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3563 				buf);
3564 		}
3565 
3566 		grc_mode = tr32(GRC_MODE);
3567 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3568 
3569 		tg3_disable_nvram_access(tp);
3570 		tg3_nvram_unlock(tp);
3571 	}
3572 
3573 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 		udelay(40);
3576 	}
3577 
3578 	return ret;
3579 }
3580 
3581 #define RX_CPU_SCRATCH_BASE	0x30000
3582 #define RX_CPU_SCRATCH_SIZE	0x04000
3583 #define TX_CPU_SCRATCH_BASE	0x34000
3584 #define TX_CPU_SCRATCH_SIZE	0x04000
3585 
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3588 {
3589 	int i;
3590 	const int iters = 10000;
3591 
3592 	for (i = 0; i < iters; i++) {
3593 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3595 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3596 			break;
3597 		if (pci_channel_offline(tp->pdev))
3598 			return -EBUSY;
3599 	}
3600 
3601 	return (i == iters) ? -EBUSY : 0;
3602 }
3603 
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3606 {
3607 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3608 
3609 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3611 	udelay(10);
3612 
3613 	return rc;
3614 }
3615 
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3618 {
3619 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3620 }
3621 
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3624 {
3625 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3627 }
3628 
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3631 {
3632 	tg3_resume_cpu(tp, RX_CPU_BASE);
3633 }
3634 
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3637 {
3638 	int rc;
3639 
3640 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3641 
3642 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3644 
3645 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3646 		return 0;
3647 	}
3648 	if (cpu_base == RX_CPU_BASE) {
3649 		rc = tg3_rxcpu_pause(tp);
3650 	} else {
3651 		/*
3652 		 * There is only an Rx CPU for the 5750 derivative in the
3653 		 * BCM4785.
3654 		 */
3655 		if (tg3_flag(tp, IS_SSB_CORE))
3656 			return 0;
3657 
3658 		rc = tg3_txcpu_pause(tp);
3659 	}
3660 
3661 	if (rc) {
3662 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3664 		return -ENODEV;
3665 	}
3666 
3667 	/* Clear firmware's nvram arbitration. */
3668 	if (tg3_flag(tp, NVRAM))
3669 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3670 	return 0;
3671 }
3672 
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674 			   const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676 	int fw_len;
3677 
3678 	/* Non-fragmented firmware has one firmware header followed by a
3679 	 * contiguous chunk of data to be written. The length field in that
3680 	 * header is not the length of data to be written but the complete
3681 	 * length of the bss. The data length is determined based on
3682 	 * tp->fw->size minus headers.
3683 	 *
3684 	 * Fragmented firmware has a main header followed by multiple
3685 	 * fragments. Each fragment is identical to non-fragmented firmware
3686 	 * with a firmware header followed by a contiguous chunk of data. In
3687 	 * the main header, the length field is unused and set to 0xffffffff.
3688 	 * In each fragment header the length is the entire size of that
3689 	 * fragment, i.e. fragment data + header length. The data length is
3690 	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3691 	 */
3692 	if (tp->fw_len == 0xffffffff)
3693 		fw_len = be32_to_cpu(fw_hdr->len);
3694 	else
3695 		fw_len = tp->fw->size;
3696 
3697 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3698 }
3699 
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702 				 u32 cpu_scratch_base, int cpu_scratch_size,
3703 				 const struct tg3_firmware_hdr *fw_hdr)
3704 {
3705 	int err, i;
3706 	void (*write_op)(struct tg3 *, u32, u32);
3707 	int total_len = tp->fw->size;
3708 
3709 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3710 		netdev_err(tp->dev,
3711 			   "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3712 			   __func__);
3713 		return -EINVAL;
3714 	}
3715 
3716 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717 		write_op = tg3_write_mem;
3718 	else
3719 		write_op = tg3_write_indirect_reg32;
3720 
3721 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722 		/* It is possible that bootcode is still loading at this point.
3723 		 * Get the nvram lock first before halting the cpu.
3724 		 */
3725 		int lock_err = tg3_nvram_lock(tp);
3726 		err = tg3_halt_cpu(tp, cpu_base);
3727 		if (!lock_err)
3728 			tg3_nvram_unlock(tp);
3729 		if (err)
3730 			goto out;
3731 
3732 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733 			write_op(tp, cpu_scratch_base + i, 0);
3734 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3735 		tw32(cpu_base + CPU_MODE,
3736 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3737 	} else {
3738 		/* Subtract additional main header for fragmented firmware and
3739 		 * advance to the first fragment.
3740 		 */
3741 		total_len -= TG3_FW_HDR_LEN;
3742 		fw_hdr++;
3743 	}
3744 
3745 	do {
3746 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3747 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748 			write_op(tp, cpu_scratch_base +
3749 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3750 				     (i * sizeof(u32)),
3751 				 be32_to_cpu(fw_data[i]));
3752 
3753 		total_len -= be32_to_cpu(fw_hdr->len);
3754 
3755 		/* Advance to next fragment */
3756 		fw_hdr = (struct tg3_firmware_hdr *)
3757 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758 	} while (total_len > 0);
3759 
3760 	err = 0;
3761 
3762 out:
3763 	return err;
3764 }
3765 
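/* Worked example of the fragment walk above (illustrative): a fragmented
 * image is laid out as
 *
 *	main hdr | frag1 hdr | frag1 data | frag2 hdr | frag2 data | ...
 *
 * with each fragment header's len field covering header plus data.
 * Starting from total_len = tp->fw->size - TG3_FW_HDR_LEN (the main
 * header), each iteration writes (len - TG3_FW_HDR_LEN) / sizeof(u32)
 * words and then subtracts len, so total_len reaches zero exactly when
 * the last fragment has been consumed.
 */
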
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3768 {
3769 	int i;
3770 	const int iters = 5;
3771 
3772 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 	tw32_f(cpu_base + CPU_PC, pc);
3774 
3775 	for (i = 0; i < iters; i++) {
3776 		if (tr32(cpu_base + CPU_PC) == pc)
3777 			break;
3778 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3780 		tw32_f(cpu_base + CPU_PC, pc);
3781 		udelay(1000);
3782 	}
3783 
3784 	return (i == iters) ? -EBUSY : 0;
3785 }
3786 
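/* The retry loop above gives the CPU five attempts, roughly 5 * 1 ms
 * with the udelay(1000) between tries, to latch the new PC before the
 * caller sees -EBUSY.
 */
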
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3789 {
3790 	const struct tg3_firmware_hdr *fw_hdr;
3791 	int err;
3792 
3793 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3794 
3795 	/* Firmware blob starts with version numbers, followed by
3796 	 * start address and length. We are setting complete length:
3797 	 * length = end_address_of_bss - start_address_of_text.
3798 	 * The remainder is the blob to be loaded contiguously
3799 	 * from the start address. */
3800 
3801 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803 				    fw_hdr);
3804 	if (err)
3805 		return err;
3806 
3807 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809 				    fw_hdr);
3810 	if (err)
3811 		return err;
3812 
3813 	/* Now startup only the RX cpu. */
3814 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815 				       be32_to_cpu(fw_hdr->base_addr));
3816 	if (err) {
3817 		netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3818 			   "should be %08x\n", __func__,
3819 			   tr32(RX_CPU_BASE + CPU_PC),
3820 			   be32_to_cpu(fw_hdr->base_addr));
3821 		return -ENODEV;
3822 	}
3823 
3824 	tg3_rxcpu_resume(tp);
3825 
3826 	return 0;
3827 }
3828 
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3830 {
3831 	const int iters = 1000;
3832 	int i;
3833 	u32 val;
3834 
3835 	/* Wait for boot code to complete initialization and enter service
3836 	 * loop. It is then safe to download service patches.
3837 	 */
3838 	for (i = 0; i < iters; i++) {
3839 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 			break;
3841 
3842 		udelay(10);
3843 	}
3844 
3845 	if (i == iters) {
3846 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3847 		return -EBUSY;
3848 	}
3849 
3850 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3851 	if (val & 0xff) {
3852 		netdev_warn(tp->dev,
3853 			    "Other patches exist. Not downloading EEE patch\n");
3854 		return -EEXIST;
3855 	}
3856 
3857 	return 0;
3858 }
3859 
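/* The poll in tg3_validate_rxcpu_state() budgets roughly
 * 1000 iterations * 10 us = 10 ms for the boot code to reach its
 * service loop before giving up with -EBUSY.
 */
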
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3862 {
3863 	struct tg3_firmware_hdr *fw_hdr;
3864 
3865 	if (!tg3_flag(tp, NO_NVRAM))
3866 		return;
3867 
3868 	if (tg3_validate_rxcpu_state(tp))
3869 		return;
3870 
3871 	if (!tp->fw)
3872 		return;
3873 
3874 	/* This firmware blob has a different format from older firmware
3875 	 * releases, as described below. The main difference is that the data
3876 	 * is fragmented and must be written to non-contiguous locations.
3877 	 *
3878 	 * In the beginning we have a firmware header identical to other
3879 	 * firmware which consists of version, base addr and length. The length
3880 	 * here is unused and set to 0xffffffff.
3881 	 *
3882 	 * This is followed by a series of firmware fragments which are
3883 	 * individually identical to previous firmware, i.e. a firmware
3884 	 * header followed by the data for that fragment. The version
3885 	 * field of the individual fragment header is unused.
3886 	 */
3887 
3888 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3890 		return;
3891 
3892 	if (tg3_rxcpu_pause(tp))
3893 		return;
3894 
3895 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3897 
3898 	tg3_rxcpu_resume(tp);
3899 }
3900 
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3903 {
3904 	const struct tg3_firmware_hdr *fw_hdr;
3905 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3906 	int err;
3907 
3908 	if (!tg3_flag(tp, FW_TSO))
3909 		return 0;
3910 
3911 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3912 
3913 	/* Firmware blob starts with version numbers, followed by
3914 	 * start address and length. We are setting complete length:
3915 	 * length = end_address_of_bss - start_address_of_text.
3916 	 * The remainder is the blob to be loaded contiguously
3917 	 * from the start address. */
3918 
3919 	cpu_scratch_size = tp->fw_len;
3920 
3921 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922 		cpu_base = RX_CPU_BASE;
3923 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3924 	} else {
3925 		cpu_base = TX_CPU_BASE;
3926 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3928 	}
3929 
3930 	err = tg3_load_firmware_cpu(tp, cpu_base,
3931 				    cpu_scratch_base, cpu_scratch_size,
3932 				    fw_hdr);
3933 	if (err)
3934 		return err;
3935 
3936 	/* Now startup the cpu. */
3937 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938 				       be32_to_cpu(fw_hdr->base_addr));
3939 	if (err) {
3940 		netdev_err(tp->dev,
3941 			   "%s failed to set CPU PC: is %08x, should be %08x\n",
3942 			   __func__, tr32(cpu_base + CPU_PC),
3943 			   be32_to_cpu(fw_hdr->base_addr));
3944 		return -ENODEV;
3945 	}
3946 
3947 	tg3_resume_cpu(tp, cpu_base);
3948 	return 0;
3949 }
3950 
3951 
3952 /* tp->lock is held. */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3954 {
3955 	u32 addr_high, addr_low;
3956 	int i;
3957 
3958 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3959 		     tp->dev->dev_addr[1]);
3960 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3961 		    (tp->dev->dev_addr[3] << 16) |
3962 		    (tp->dev->dev_addr[4] <<  8) |
3963 		    (tp->dev->dev_addr[5] <<  0));
3964 	for (i = 0; i < 4; i++) {
3965 		if (i == 1 && skip_mac_1)
3966 			continue;
3967 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3968 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3969 	}
3970 
3971 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3972 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3973 		for (i = 0; i < 12; i++) {
3974 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3975 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3976 		}
3977 	}
3978 
3979 	addr_high = (tp->dev->dev_addr[0] +
3980 		     tp->dev->dev_addr[1] +
3981 		     tp->dev->dev_addr[2] +
3982 		     tp->dev->dev_addr[3] +
3983 		     tp->dev->dev_addr[4] +
3984 		     tp->dev->dev_addr[5]) &
3985 		TX_BACKOFF_SEED_MASK;
3986 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3987 }
3988 
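/* Worked example (illustrative): for dev_addr = 00:10:18:01:02:03 the
 * packing above yields
 *
 *	addr_high = (0x00 << 8) | 0x10 = 0x00000010
 *	addr_low  = (0x18 << 24) | (0x01 << 16) | (0x02 << 8) | 0x03
 *		  = 0x18010203
 *
 * i.e. the 6-byte MAC is split 2/4 across each HIGH/LOW register pair,
 * and the TX backoff seed is simply the byte sum masked with
 * TX_BACKOFF_SEED_MASK.
 */
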
3989 static void tg3_enable_register_access(struct tg3 *tp)
3990 {
3991 	/*
3992 	 * Make sure register accesses (indirect or otherwise) will function
3993 	 * correctly.
3994 	 */
3995 	pci_write_config_dword(tp->pdev,
3996 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3997 }
3998 
3999 static int tg3_power_up(struct tg3 *tp)
4000 {
4001 	int err;
4002 
4003 	tg3_enable_register_access(tp);
4004 
4005 	err = pci_set_power_state(tp->pdev, PCI_D0);
4006 	if (!err) {
4007 		/* Switch out of Vaux if it is a NIC */
4008 		tg3_pwrsrc_switch_to_vmain(tp);
4009 	} else {
4010 		netdev_err(tp->dev, "Transition to D0 failed\n");
4011 	}
4012 
4013 	return err;
4014 }
4015 
4016 static int tg3_setup_phy(struct tg3 *, bool);
4017 
4018 static int tg3_power_down_prepare(struct tg3 *tp)
4019 {
4020 	u32 misc_host_ctrl;
4021 	bool device_should_wake, do_low_power;
4022 
4023 	tg3_enable_register_access(tp);
4024 
4025 	/* Restore the CLKREQ setting. */
4026 	if (tg3_flag(tp, CLKREQ_BUG))
4027 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4028 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4029 
4030 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4031 	tw32(TG3PCI_MISC_HOST_CTRL,
4032 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4033 
4034 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4035 			     tg3_flag(tp, WOL_ENABLE);
4036 
4037 	if (tg3_flag(tp, USE_PHYLIB)) {
4038 		do_low_power = false;
4039 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4040 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4041 			struct phy_device *phydev;
4042 			u32 phyid, advertising;
4043 
4044 			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4045 
4046 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4047 
4048 			tp->link_config.speed = phydev->speed;
4049 			tp->link_config.duplex = phydev->duplex;
4050 			tp->link_config.autoneg = phydev->autoneg;
4051 			tp->link_config.advertising = phydev->advertising;
4052 
4053 			advertising = ADVERTISED_TP |
4054 				      ADVERTISED_Pause |
4055 				      ADVERTISED_Autoneg |
4056 				      ADVERTISED_10baseT_Half;
4057 
4058 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4059 				if (tg3_flag(tp, WOL_SPEED_100MB))
4060 					advertising |=
4061 						ADVERTISED_100baseT_Half |
4062 						ADVERTISED_100baseT_Full |
4063 						ADVERTISED_10baseT_Full;
4064 				else
4065 					advertising |= ADVERTISED_10baseT_Full;
4066 			}
4067 
4068 			phydev->advertising = advertising;
4069 
4070 			phy_start_aneg(phydev);
4071 
4072 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073 			if (phyid != PHY_ID_BCMAC131) {
4074 				phyid &= PHY_BCM_OUI_MASK;
4075 				if (phyid == PHY_BCM_OUI_1 ||
4076 				    phyid == PHY_BCM_OUI_2 ||
4077 				    phyid == PHY_BCM_OUI_3)
4078 					do_low_power = true;
4079 			}
4080 		}
4081 	} else {
4082 		do_low_power = true;
4083 
4084 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4086 
4087 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088 			tg3_setup_phy(tp, false);
4089 	}
4090 
4091 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4092 		u32 val;
4093 
4094 		val = tr32(GRC_VCPU_EXT_CTRL);
4095 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4097 		int i;
4098 		u32 val;
4099 
4100 		for (i = 0; i < 200; i++) {
4101 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4103 				break;
4104 			msleep(1);
4105 		}
4106 	}
4107 	if (tg3_flag(tp, WOL_CAP))
4108 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109 						     WOL_DRV_STATE_SHUTDOWN |
4110 						     WOL_DRV_WOL |
4111 						     WOL_SET_MAGIC_PKT);
4112 
4113 	if (device_should_wake) {
4114 		u32 mac_mode;
4115 
4116 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117 			if (do_low_power &&
4118 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119 				tg3_phy_auxctl_write(tp,
4120 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4122 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4124 				udelay(40);
4125 			}
4126 
4127 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4129 			else if (tp->phy_flags &
4130 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131 				if (tp->link_config.active_speed == SPEED_1000)
4132 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4133 				else
4134 					mac_mode = MAC_MODE_PORT_MODE_MII;
4135 			} else
4136 				mac_mode = MAC_MODE_PORT_MODE_MII;
4137 
4138 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141 					     SPEED_100 : SPEED_10;
4142 				if (tg3_5700_link_polarity(tp, speed))
4143 					mac_mode |= MAC_MODE_LINK_POLARITY;
4144 				else
4145 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4146 			}
4147 		} else {
4148 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4149 		}
4150 
4151 		if (!tg3_flag(tp, 5750_PLUS))
4152 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4153 
4154 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4158 
4159 		if (tg3_flag(tp, ENABLE_APE))
4160 			mac_mode |= MAC_MODE_APE_TX_EN |
4161 				    MAC_MODE_APE_RX_EN |
4162 				    MAC_MODE_TDE_ENABLE;
4163 
4164 		tw32_f(MAC_MODE, mac_mode);
4165 		udelay(100);
4166 
4167 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4168 		udelay(10);
4169 	}
4170 
4171 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4174 		u32 base_val;
4175 
4176 		base_val = tp->pci_clock_ctrl;
4177 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178 			     CLOCK_CTRL_TXCLK_DISABLE);
4179 
4180 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182 	} else if (tg3_flag(tp, 5780_CLASS) ||
4183 		   tg3_flag(tp, CPMU_PRESENT) ||
4184 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4185 		/* do nothing */
4186 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187 		u32 newbits1, newbits2;
4188 
4189 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 				    CLOCK_CTRL_TXCLK_DISABLE |
4193 				    CLOCK_CTRL_ALTCLK);
4194 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195 		} else if (tg3_flag(tp, 5705_PLUS)) {
4196 			newbits1 = CLOCK_CTRL_625_CORE;
4197 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198 		} else {
4199 			newbits1 = CLOCK_CTRL_ALTCLK;
4200 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4201 		}
4202 
4203 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4204 			    40);
4205 
4206 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4207 			    40);
4208 
4209 		if (!tg3_flag(tp, 5705_PLUS)) {
4210 			u32 newbits3;
4211 
4212 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 					    CLOCK_CTRL_TXCLK_DISABLE |
4216 					    CLOCK_CTRL_44MHZ_CORE);
4217 			} else {
4218 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4219 			}
4220 
4221 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222 				    tp->pci_clock_ctrl | newbits3, 40);
4223 		}
4224 	}
4225 
4226 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227 		tg3_power_down_phy(tp, do_low_power);
4228 
4229 	tg3_frob_aux_power(tp, true);
4230 
4231 	/* Workaround for unstable PLL clock */
4232 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235 		u32 val = tr32(0x7d00);
4236 
4237 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238 		tw32(0x7d00, val);
4239 		if (!tg3_flag(tp, ENABLE_ASF)) {
4240 			int err;
4241 
4242 			err = tg3_nvram_lock(tp);
4243 			tg3_halt_cpu(tp, RX_CPU_BASE);
4244 			if (!err)
4245 				tg3_nvram_unlock(tp);
4246 		}
4247 	}
4248 
4249 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4250 
4251 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4252 
4253 	return 0;
4254 }
4255 
4256 static void tg3_power_down(struct tg3 *tp)
4257 {
4258 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259 	pci_set_power_state(tp->pdev, PCI_D3hot);
4260 }
4261 
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4263 {
4264 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265 	case MII_TG3_AUX_STAT_10HALF:
4266 		*speed = SPEED_10;
4267 		*duplex = DUPLEX_HALF;
4268 		break;
4269 
4270 	case MII_TG3_AUX_STAT_10FULL:
4271 		*speed = SPEED_10;
4272 		*duplex = DUPLEX_FULL;
4273 		break;
4274 
4275 	case MII_TG3_AUX_STAT_100HALF:
4276 		*speed = SPEED_100;
4277 		*duplex = DUPLEX_HALF;
4278 		break;
4279 
4280 	case MII_TG3_AUX_STAT_100FULL:
4281 		*speed = SPEED_100;
4282 		*duplex = DUPLEX_FULL;
4283 		break;
4284 
4285 	case MII_TG3_AUX_STAT_1000HALF:
4286 		*speed = SPEED_1000;
4287 		*duplex = DUPLEX_HALF;
4288 		break;
4289 
4290 	case MII_TG3_AUX_STAT_1000FULL:
4291 		*speed = SPEED_1000;
4292 		*duplex = DUPLEX_FULL;
4293 		break;
4294 
4295 	default:
4296 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298 				 SPEED_10;
4299 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4300 				  DUPLEX_HALF;
4301 			break;
4302 		}
4303 		*speed = SPEED_UNKNOWN;
4304 		*duplex = DUPLEX_UNKNOWN;
4305 		break;
4306 	}
4307 }
4308 
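/* Usage sketch (illustrative; mirrors the copper link polling in
 * tg3_setup_copper_phy() below):
 *
 *	u32 aux_stat;
 *	u16 speed;
 *	u8 duplex;
 *
 *	if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat))
 *		tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
 */
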
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4310 {
4311 	int err = 0;
4312 	u32 val, new_adv;
4313 
4314 	new_adv = ADVERTISE_CSMA;
4315 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316 	new_adv |= mii_advertise_flowctrl(flowctrl);
4317 
4318 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4319 	if (err)
4320 		goto done;
4321 
4322 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4324 
4325 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4328 
4329 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4330 		if (err)
4331 			goto done;
4332 	}
4333 
4334 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4335 		goto done;
4336 
4337 	tw32(TG3_CPMU_EEE_MODE,
4338 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4339 
4340 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4341 	if (!err) {
4342 		u32 err2;
4343 
4344 		val = 0;
4345 		/* Advertise 100-BaseTX EEE ability */
4346 		if (advertise & ADVERTISED_100baseT_Full)
4347 			val |= MDIO_AN_EEE_ADV_100TX;
4348 		/* Advertise 1000-BaseT EEE ability */
4349 		if (advertise & ADVERTISED_1000baseT_Full)
4350 			val |= MDIO_AN_EEE_ADV_1000T;
4351 
4352 		if (!tp->eee.eee_enabled) {
4353 			val = 0;
4354 			tp->eee.advertised = 0;
4355 		} else {
4356 			tp->eee.advertised = advertise &
4357 					     (ADVERTISED_100baseT_Full |
4358 					      ADVERTISED_1000baseT_Full);
4359 		}
4360 
4361 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4362 		if (err)
4363 			val = 0;
4364 
4365 		switch (tg3_asic_rev(tp)) {
4366 		case ASIC_REV_5717:
4367 		case ASIC_REV_57765:
4368 		case ASIC_REV_57766:
4369 		case ASIC_REV_5719:
4370 			/* If we advertised any eee advertisements above... */
4371 			/* If we advertised any EEE modes above... */
4372 				val = MII_TG3_DSP_TAP26_ALNOKO |
4373 				      MII_TG3_DSP_TAP26_RMRXSTO |
4374 				      MII_TG3_DSP_TAP26_OPCSINPT;
4375 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4376 			/* Fall through */
4377 		case ASIC_REV_5720:
4378 		case ASIC_REV_5762:
4379 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381 						 MII_TG3_DSP_CH34TP2_HIBW01);
4382 		}
4383 
4384 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4385 		if (!err)
4386 			err = err2;
4387 	}
4388 
4389 done:
4390 	return err;
4391 }
4392 
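/* Worked example (illustrative): advertising ADVERTISED_100baseT_Full with
 * symmetric pause (FLOW_CTRL_TX | FLOW_CTRL_RX) builds MII_ADVERTISE as
 *
 *	ADVERTISE_CSMA | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP
 *
 * via ethtool_adv_to_mii_adv_t() and mii_advertise_flowctrl() above; the
 * asymmetric-pause bit is XORed away when both directions are requested.
 */
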
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4394 {
4395 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4397 		u32 adv, fc;
4398 
4399 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 			adv = ADVERTISED_10baseT_Half |
4402 			      ADVERTISED_10baseT_Full;
4403 			if (tg3_flag(tp, WOL_SPEED_100MB))
4404 				adv |= ADVERTISED_100baseT_Half |
4405 				       ADVERTISED_100baseT_Full;
4406 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4407 				if (!(tp->phy_flags &
4408 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4409 					adv |= ADVERTISED_1000baseT_Half;
4410 				adv |= ADVERTISED_1000baseT_Full;
4411 			}
4412 
4413 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4414 		} else {
4415 			adv = tp->link_config.advertising;
4416 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4417 				adv &= ~(ADVERTISED_1000baseT_Half |
4418 					 ADVERTISED_1000baseT_Full);
4419 
4420 			fc = tp->link_config.flowctrl;
4421 		}
4422 
4423 		tg3_phy_autoneg_cfg(tp, adv, fc);
4424 
4425 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4426 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4427 			/* Normally during power down we want to autonegotiate
4428 			 * the lowest possible speed for WOL. However, to avoid
4429 			 * link flap, we leave it untouched.
4430 			 */
4431 			return;
4432 		}
4433 
4434 		tg3_writephy(tp, MII_BMCR,
4435 			     BMCR_ANENABLE | BMCR_ANRESTART);
4436 	} else {
4437 		int i;
4438 		u32 bmcr, orig_bmcr;
4439 
4440 		tp->link_config.active_speed = tp->link_config.speed;
4441 		tp->link_config.active_duplex = tp->link_config.duplex;
4442 
4443 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4444 			/* With autoneg disabled, the 5714/5715 links up only
4445 			 * when the advertisement register has the configured
4446 			 * speed enabled.
4447 			 */
4448 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4449 		}
4450 
4451 		bmcr = 0;
4452 		switch (tp->link_config.speed) {
4453 		default:
4454 		case SPEED_10:
4455 			break;
4456 
4457 		case SPEED_100:
4458 			bmcr |= BMCR_SPEED100;
4459 			break;
4460 
4461 		case SPEED_1000:
4462 			bmcr |= BMCR_SPEED1000;
4463 			break;
4464 		}
4465 
4466 		if (tp->link_config.duplex == DUPLEX_FULL)
4467 			bmcr |= BMCR_FULLDPLX;
4468 
4469 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4470 		    (bmcr != orig_bmcr)) {
4471 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4472 			for (i = 0; i < 1500; i++) {
4473 				u32 tmp;
4474 
4475 				udelay(10);
4476 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4477 				    tg3_readphy(tp, MII_BMSR, &tmp))
4478 					continue;
4479 				if (!(tmp & BMSR_LSTATUS)) {
4480 					udelay(40);
4481 					break;
4482 				}
4483 			}
4484 			tg3_writephy(tp, MII_BMCR, bmcr);
4485 			udelay(40);
4486 		}
4487 	}
4488 }
4489 
4490 static int tg3_phy_pull_config(struct tg3 *tp)
4491 {
4492 	int err;
4493 	u32 val;
4494 
4495 	err = tg3_readphy(tp, MII_BMCR, &val);
4496 	if (err)
4497 		goto done;
4498 
4499 	if (!(val & BMCR_ANENABLE)) {
4500 		tp->link_config.autoneg = AUTONEG_DISABLE;
4501 		tp->link_config.advertising = 0;
4502 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4503 
4504 		err = -EIO;
4505 
4506 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4507 		case 0:
4508 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4509 				goto done;
4510 
4511 			tp->link_config.speed = SPEED_10;
4512 			break;
4513 		case BMCR_SPEED100:
4514 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4515 				goto done;
4516 
4517 			tp->link_config.speed = SPEED_100;
4518 			break;
4519 		case BMCR_SPEED1000:
4520 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4521 				tp->link_config.speed = SPEED_1000;
4522 				break;
4523 			}
4524 			/* Fall through */
4525 		default:
4526 			goto done;
4527 		}
4528 
4529 		if (val & BMCR_FULLDPLX)
4530 			tp->link_config.duplex = DUPLEX_FULL;
4531 		else
4532 			tp->link_config.duplex = DUPLEX_HALF;
4533 
4534 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4535 
4536 		err = 0;
4537 		goto done;
4538 	}
4539 
4540 	tp->link_config.autoneg = AUTONEG_ENABLE;
4541 	tp->link_config.advertising = ADVERTISED_Autoneg;
4542 	tg3_flag_set(tp, PAUSE_AUTONEG);
4543 
4544 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4545 		u32 adv;
4546 
4547 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4548 		if (err)
4549 			goto done;
4550 
4551 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4552 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4553 
4554 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4555 	} else {
4556 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4557 	}
4558 
4559 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4560 		u32 adv;
4561 
4562 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4563 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4564 			if (err)
4565 				goto done;
4566 
4567 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4568 		} else {
4569 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4570 			if (err)
4571 				goto done;
4572 
4573 			adv = tg3_decode_flowctrl_1000X(val);
4574 			tp->link_config.flowctrl = adv;
4575 
4576 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4577 			adv = mii_adv_to_ethtool_adv_x(val);
4578 		}
4579 
4580 		tp->link_config.advertising |= adv;
4581 	}
4582 
4583 done:
4584 	return err;
4585 }
4586 
4587 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4588 {
4589 	int err;
4590 
4591 	/* Turn off tap power management. */
4592 	/* Set Extended packet length bit */
4593 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4594 
4595 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4596 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4597 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4598 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4599 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4600 
4601 	udelay(40);
4602 
4603 	return err;
4604 }
4605 
4606 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4607 {
4608 	struct ethtool_eee eee;
4609 
4610 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4611 		return true;
4612 
4613 	tg3_eee_pull_config(tp, &eee);
4614 
4615 	if (tp->eee.eee_enabled) {
4616 		if (tp->eee.advertised != eee.advertised ||
4617 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4618 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4619 			return false;
4620 	} else {
4621 		/* EEE is disabled but we're advertising */
4622 		if (eee.advertised)
4623 			return false;
4624 	}
4625 
4626 	return true;
4627 }
4628 
4629 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4630 {
4631 	u32 advmsk, tgtadv, advertising;
4632 
4633 	advertising = tp->link_config.advertising;
4634 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4635 
4636 	advmsk = ADVERTISE_ALL;
4637 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4638 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4639 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4640 	}
4641 
4642 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4643 		return false;
4644 
4645 	if ((*lcladv & advmsk) != tgtadv)
4646 		return false;
4647 
4648 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4649 		u32 tg3_ctrl;
4650 
4651 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4652 
4653 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4654 			return false;
4655 
4656 		if (tgtadv &&
4657 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4658 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4659 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4660 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4661 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4662 		} else {
4663 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4664 		}
4665 
4666 		if (tg3_ctrl != tgtadv)
4667 			return false;
4668 	}
4669 
4670 	return true;
4671 }
4672 
4673 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4674 {
4675 	u32 lpeth = 0;
4676 
4677 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4678 		u32 val;
4679 
4680 		if (tg3_readphy(tp, MII_STAT1000, &val))
4681 			return false;
4682 
4683 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4684 	}
4685 
4686 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4687 		return false;
4688 
4689 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4690 	tp->link_config.rmt_adv = lpeth;
4691 
4692 	return true;
4693 }
4694 
4695 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4696 {
4697 	if (curr_link_up != tp->link_up) {
4698 		if (curr_link_up) {
4699 			netif_carrier_on(tp->dev);
4700 		} else {
4701 			netif_carrier_off(tp->dev);
4702 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4703 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4704 		}
4705 
4706 		tg3_link_report(tp);
4707 		return true;
4708 	}
4709 
4710 	return false;
4711 }
4712 
4713 static void tg3_clear_mac_status(struct tg3 *tp)
4714 {
4715 	tw32(MAC_EVENT, 0);
4716 
4717 	tw32_f(MAC_STATUS,
4718 	       MAC_STATUS_SYNC_CHANGED |
4719 	       MAC_STATUS_CFG_CHANGED |
4720 	       MAC_STATUS_MI_COMPLETION |
4721 	       MAC_STATUS_LNKSTATE_CHANGED);
4722 	udelay(40);
4723 }
4724 
4725 static void tg3_setup_eee(struct tg3 *tp)
4726 {
4727 	u32 val;
4728 
4729 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4730 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4731 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4732 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4733 
4734 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4735 
4736 	tw32_f(TG3_CPMU_EEE_CTRL,
4737 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4738 
4739 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4740 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4741 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4742 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4743 
4744 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4745 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4746 
4747 	if (tg3_flag(tp, ENABLE_APE))
4748 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4749 
4750 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4751 
4752 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4753 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4754 	       (tp->eee.tx_lpi_timer & 0xffff));
4755 
4756 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4757 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4758 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4759 }
4760 
4761 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4762 {
4763 	bool current_link_up;
4764 	u32 bmsr, val;
4765 	u32 lcl_adv, rmt_adv;
4766 	u16 current_speed;
4767 	u8 current_duplex;
4768 	int i, err;
4769 
4770 	tg3_clear_mac_status(tp);
4771 
4772 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4773 		tw32_f(MAC_MI_MODE,
4774 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4775 		udelay(80);
4776 	}
4777 
4778 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4779 
4780 	/* Some third-party PHYs need to be reset on link going
4781 	 * down.
4782 	 */
4783 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4784 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4785 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4786 	    tp->link_up) {
4787 		tg3_readphy(tp, MII_BMSR, &bmsr);
4788 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4789 		    !(bmsr & BMSR_LSTATUS))
4790 			force_reset = true;
4791 	}
4792 	if (force_reset)
4793 		tg3_phy_reset(tp);
4794 
4795 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4796 		tg3_readphy(tp, MII_BMSR, &bmsr);
4797 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4798 		    !tg3_flag(tp, INIT_COMPLETE))
4799 			bmsr = 0;
4800 
4801 		if (!(bmsr & BMSR_LSTATUS)) {
4802 			err = tg3_init_5401phy_dsp(tp);
4803 			if (err)
4804 				return err;
4805 
4806 			tg3_readphy(tp, MII_BMSR, &bmsr);
4807 			for (i = 0; i < 1000; i++) {
4808 				udelay(10);
4809 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4810 				    (bmsr & BMSR_LSTATUS)) {
4811 					udelay(40);
4812 					break;
4813 				}
4814 			}
4815 
4816 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4817 			    TG3_PHY_REV_BCM5401_B0 &&
4818 			    !(bmsr & BMSR_LSTATUS) &&
4819 			    tp->link_config.active_speed == SPEED_1000) {
4820 				err = tg3_phy_reset(tp);
4821 				if (!err)
4822 					err = tg3_init_5401phy_dsp(tp);
4823 				if (err)
4824 					return err;
4825 			}
4826 		}
4827 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4828 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4829 		/* 5701 {A0,B0} CRC bug workaround */
4830 		tg3_writephy(tp, 0x15, 0x0a75);
4831 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4832 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4833 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834 	}
4835 
4836 	/* Clear pending interrupts... */
4837 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4838 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4839 
4840 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4841 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4842 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4843 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4844 
4845 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4846 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4847 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4848 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4849 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4850 		else
4851 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4852 	}
4853 
4854 	current_link_up = false;
4855 	current_speed = SPEED_UNKNOWN;
4856 	current_duplex = DUPLEX_UNKNOWN;
4857 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4858 	tp->link_config.rmt_adv = 0;
4859 
4860 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4861 		err = tg3_phy_auxctl_read(tp,
4862 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4863 					  &val);
4864 		if (!err && !(val & (1 << 10))) {
4865 			tg3_phy_auxctl_write(tp,
4866 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4867 					     val | (1 << 10));
4868 			goto relink;
4869 		}
4870 	}
4871 
4872 	bmsr = 0;
4873 	for (i = 0; i < 100; i++) {
4874 		tg3_readphy(tp, MII_BMSR, &bmsr);
4875 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4876 		    (bmsr & BMSR_LSTATUS))
4877 			break;
4878 		udelay(40);
4879 	}
4880 
4881 	if (bmsr & BMSR_LSTATUS) {
4882 		u32 aux_stat, bmcr;
4883 
4884 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4885 		for (i = 0; i < 2000; i++) {
4886 			udelay(10);
4887 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4888 			    aux_stat)
4889 				break;
4890 		}
4891 
4892 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4893 					     &current_speed,
4894 					     &current_duplex);
4895 
4896 		bmcr = 0;
4897 		for (i = 0; i < 200; i++) {
4898 			tg3_readphy(tp, MII_BMCR, &bmcr);
4899 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4900 				continue;
4901 			if (bmcr && bmcr != 0x7fff)
4902 				break;
4903 			udelay(10);
4904 		}
4905 
4906 		lcl_adv = 0;
4907 		rmt_adv = 0;
4908 
4909 		tp->link_config.active_speed = current_speed;
4910 		tp->link_config.active_duplex = current_duplex;
4911 
4912 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4913 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4914 
4915 			if ((bmcr & BMCR_ANENABLE) &&
4916 			    eee_config_ok &&
4917 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4918 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4919 				current_link_up = true;
4920 
4921 			/* Changes to EEE settings take effect only after a phy
4922 			 * reset.  If we have skipped a reset due to Link Flap
4923 			 * Avoidance being enabled, do it now.
4924 			 */
4925 			if (!eee_config_ok &&
4926 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4927 			    !force_reset) {
4928 				tg3_setup_eee(tp);
4929 				tg3_phy_reset(tp);
4930 			}
4931 		} else {
4932 			if (!(bmcr & BMCR_ANENABLE) &&
4933 			    tp->link_config.speed == current_speed &&
4934 			    tp->link_config.duplex == current_duplex) {
4935 				current_link_up = true;
4936 			}
4937 		}
4938 
4939 		if (current_link_up &&
4940 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4941 			u32 reg, bit;
4942 
4943 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4944 				reg = MII_TG3_FET_GEN_STAT;
4945 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4946 			} else {
4947 				reg = MII_TG3_EXT_STAT;
4948 				bit = MII_TG3_EXT_STAT_MDIX;
4949 			}
4950 
4951 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4952 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4953 
4954 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4955 		}
4956 	}
4957 
4958 relink:
4959 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4960 		tg3_phy_copper_begin(tp);
4961 
4962 		if (tg3_flag(tp, ROBOSWITCH)) {
4963 			current_link_up = true;
4964 			/* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4965 			current_speed = SPEED_1000;
4966 			current_duplex = DUPLEX_FULL;
4967 			tp->link_config.active_speed = current_speed;
4968 			tp->link_config.active_duplex = current_duplex;
4969 		}
4970 
4971 		tg3_readphy(tp, MII_BMSR, &bmsr);
4972 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4973 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4974 			current_link_up = true;
4975 	}
4976 
4977 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4978 	if (current_link_up) {
4979 		if (tp->link_config.active_speed == SPEED_100 ||
4980 		    tp->link_config.active_speed == SPEED_10)
4981 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4982 		else
4983 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4984 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4985 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4986 	else
4987 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4988 
4989 	/* In order for the 5750 core in the BCM4785 chip to work properly
4990 	 * in RGMII mode, the LED Control Register must be set up.
4991 	 */
4992 	if (tg3_flag(tp, RGMII_MODE)) {
4993 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4994 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4995 
4996 		if (tp->link_config.active_speed == SPEED_10)
4997 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4998 		else if (tp->link_config.active_speed == SPEED_100)
4999 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000 				     LED_CTRL_100MBPS_ON);
5001 		else if (tp->link_config.active_speed == SPEED_1000)
5002 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 				     LED_CTRL_1000MBPS_ON);
5004 
5005 		tw32(MAC_LED_CTRL, led_ctrl);
5006 		udelay(40);
5007 	}
5008 
5009 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5010 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5011 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5012 
5013 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5014 		if (current_link_up &&
5015 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5016 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5017 		else
5018 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5019 	}
5020 
5021 	/* ??? Without this setting Netgear GA302T PHY does not
5022 	 * ??? send/receive packets...
5023 	 */
5024 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5025 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5026 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5027 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5028 		udelay(80);
5029 	}
5030 
5031 	tw32_f(MAC_MODE, tp->mac_mode);
5032 	udelay(40);
5033 
5034 	tg3_phy_eee_adjust(tp, current_link_up);
5035 
5036 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5037 		/* Polled via timer. */
5038 		tw32_f(MAC_EVENT, 0);
5039 	} else {
5040 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5041 	}
5042 	udelay(40);
5043 
5044 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5045 	    current_link_up &&
5046 	    tp->link_config.active_speed == SPEED_1000 &&
5047 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5048 		udelay(120);
5049 		tw32_f(MAC_STATUS,
5050 		     (MAC_STATUS_SYNC_CHANGED |
5051 		      MAC_STATUS_CFG_CHANGED));
5052 		udelay(40);
5053 		tg3_write_mem(tp,
5054 			      NIC_SRAM_FIRMWARE_MBOX,
5055 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5056 	}
5057 
5058 	/* Prevent send BD corruption. */
5059 	if (tg3_flag(tp, CLKREQ_BUG)) {
5060 		if (tp->link_config.active_speed == SPEED_100 ||
5061 		    tp->link_config.active_speed == SPEED_10)
5062 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5063 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5064 		else
5065 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5066 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5067 	}
5068 
5069 	tg3_test_and_report_link_chg(tp, current_link_up);
5070 
5071 	return 0;
5072 }
5073 
5074 struct tg3_fiber_aneginfo {
5075 	int state;
5076 #define ANEG_STATE_UNKNOWN		0
5077 #define ANEG_STATE_AN_ENABLE		1
5078 #define ANEG_STATE_RESTART_INIT		2
5079 #define ANEG_STATE_RESTART		3
5080 #define ANEG_STATE_DISABLE_LINK_OK	4
5081 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5082 #define ANEG_STATE_ABILITY_DETECT	6
5083 #define ANEG_STATE_ACK_DETECT_INIT	7
5084 #define ANEG_STATE_ACK_DETECT		8
5085 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5086 #define ANEG_STATE_COMPLETE_ACK		10
5087 #define ANEG_STATE_IDLE_DETECT_INIT	11
5088 #define ANEG_STATE_IDLE_DETECT		12
5089 #define ANEG_STATE_LINK_OK		13
5090 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5091 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5092 
5093 	u32 flags;
5094 #define MR_AN_ENABLE		0x00000001
5095 #define MR_RESTART_AN		0x00000002
5096 #define MR_AN_COMPLETE		0x00000004
5097 #define MR_PAGE_RX		0x00000008
5098 #define MR_NP_LOADED		0x00000010
5099 #define MR_TOGGLE_TX		0x00000020
5100 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5101 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5102 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5103 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5104 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5105 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5106 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5107 #define MR_TOGGLE_RX		0x00002000
5108 #define MR_NP_RX		0x00004000
5109 
5110 #define MR_LINK_OK		0x80000000
5111 
5112 	unsigned long link_time, cur_time;
5113 
5114 	u32 ability_match_cfg;
5115 	int ability_match_count;
5116 
5117 	char ability_match, idle_match, ack_match;
5118 
5119 	u32 txconfig, rxconfig;
5120 #define ANEG_CFG_NP		0x00000080
5121 #define ANEG_CFG_ACK		0x00000040
5122 #define ANEG_CFG_RF2		0x00000020
5123 #define ANEG_CFG_RF1		0x00000010
5124 #define ANEG_CFG_PS2		0x00000001
5125 #define ANEG_CFG_PS1		0x00008000
5126 #define ANEG_CFG_HD		0x00004000
5127 #define ANEG_CFG_FD		0x00002000
5128 #define ANEG_CFG_INVAL		0x00001f06
5129 
5130 };
5131 #define ANEG_OK		0
5132 #define ANEG_DONE	1
5133 #define ANEG_TIMER_ENAB	2
5134 #define ANEG_FAILED	-1
5135 
5136 #define ANEG_STATE_SETTLE_TIME	10000
5137 
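/* The software autoneg machine below, happy path (a sketch):
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT
 *	  -> ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT
 *	  -> COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT
 *	  -> IDLE_DETECT -> LINK_OK
 *
 * Seeing an ability match with an all-zero rx config in ACK_DETECT,
 * COMPLETE_ACK or IDLE_DETECT drops the machine back to AN_ENABLE and
 * restarts the negotiation.
 */
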
5138 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5139 				   struct tg3_fiber_aneginfo *ap)
5140 {
5141 	u16 flowctrl;
5142 	unsigned long delta;
5143 	u32 rx_cfg_reg;
5144 	int ret;
5145 
5146 	if (ap->state == ANEG_STATE_UNKNOWN) {
5147 		ap->rxconfig = 0;
5148 		ap->link_time = 0;
5149 		ap->cur_time = 0;
5150 		ap->ability_match_cfg = 0;
5151 		ap->ability_match_count = 0;
5152 		ap->ability_match = 0;
5153 		ap->idle_match = 0;
5154 		ap->ack_match = 0;
5155 	}
5156 	ap->cur_time++;
5157 
5158 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5159 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5160 
5161 		if (rx_cfg_reg != ap->ability_match_cfg) {
5162 			ap->ability_match_cfg = rx_cfg_reg;
5163 			ap->ability_match = 0;
5164 			ap->ability_match_count = 0;
5165 		} else {
5166 			if (++ap->ability_match_count > 1) {
5167 				ap->ability_match = 1;
5168 				ap->ability_match_cfg = rx_cfg_reg;
5169 			}
5170 		}
5171 		if (rx_cfg_reg & ANEG_CFG_ACK)
5172 			ap->ack_match = 1;
5173 		else
5174 			ap->ack_match = 0;
5175 
5176 		ap->idle_match = 0;
5177 	} else {
5178 		ap->idle_match = 1;
5179 		ap->ability_match_cfg = 0;
5180 		ap->ability_match_count = 0;
5181 		ap->ability_match = 0;
5182 		ap->ack_match = 0;
5183 
5184 		rx_cfg_reg = 0;
5185 	}
5186 
5187 	ap->rxconfig = rx_cfg_reg;
5188 	ret = ANEG_OK;
5189 
5190 	switch (ap->state) {
5191 	case ANEG_STATE_UNKNOWN:
5192 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5193 			ap->state = ANEG_STATE_AN_ENABLE;
5194 
5195 		/* fall through */
5196 	case ANEG_STATE_AN_ENABLE:
5197 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5198 		if (ap->flags & MR_AN_ENABLE) {
5199 			ap->link_time = 0;
5200 			ap->cur_time = 0;
5201 			ap->ability_match_cfg = 0;
5202 			ap->ability_match_count = 0;
5203 			ap->ability_match = 0;
5204 			ap->idle_match = 0;
5205 			ap->ack_match = 0;
5206 
5207 			ap->state = ANEG_STATE_RESTART_INIT;
5208 		} else {
5209 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5210 		}
5211 		break;
5212 
5213 	case ANEG_STATE_RESTART_INIT:
5214 		ap->link_time = ap->cur_time;
5215 		ap->flags &= ~(MR_NP_LOADED);
5216 		ap->txconfig = 0;
5217 		tw32(MAC_TX_AUTO_NEG, 0);
5218 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5219 		tw32_f(MAC_MODE, tp->mac_mode);
5220 		udelay(40);
5221 
5222 		ret = ANEG_TIMER_ENAB;
5223 		ap->state = ANEG_STATE_RESTART;
5224 
5225 		/* fall through */
5226 	case ANEG_STATE_RESTART:
5227 		delta = ap->cur_time - ap->link_time;
5228 		if (delta > ANEG_STATE_SETTLE_TIME)
5229 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5230 		else
5231 			ret = ANEG_TIMER_ENAB;
5232 		break;
5233 
5234 	case ANEG_STATE_DISABLE_LINK_OK:
5235 		ret = ANEG_DONE;
5236 		break;
5237 
5238 	case ANEG_STATE_ABILITY_DETECT_INIT:
5239 		ap->flags &= ~(MR_TOGGLE_TX);
5240 		ap->txconfig = ANEG_CFG_FD;
5241 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5242 		if (flowctrl & ADVERTISE_1000XPAUSE)
5243 			ap->txconfig |= ANEG_CFG_PS1;
5244 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5245 			ap->txconfig |= ANEG_CFG_PS2;
5246 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5247 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5248 		tw32_f(MAC_MODE, tp->mac_mode);
5249 		udelay(40);
5250 
5251 		ap->state = ANEG_STATE_ABILITY_DETECT;
5252 		break;
5253 
5254 	case ANEG_STATE_ABILITY_DETECT:
5255 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5256 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5257 		break;
5258 
5259 	case ANEG_STATE_ACK_DETECT_INIT:
5260 		ap->txconfig |= ANEG_CFG_ACK;
5261 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5262 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5263 		tw32_f(MAC_MODE, tp->mac_mode);
5264 		udelay(40);
5265 
5266 		ap->state = ANEG_STATE_ACK_DETECT;
5267 
5268 		/* fall through */
5269 	case ANEG_STATE_ACK_DETECT:
5270 		if (ap->ack_match != 0) {
5271 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5272 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5273 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5274 			} else {
5275 				ap->state = ANEG_STATE_AN_ENABLE;
5276 			}
5277 		} else if (ap->ability_match != 0 &&
5278 			   ap->rxconfig == 0) {
5279 			ap->state = ANEG_STATE_AN_ENABLE;
5280 		}
5281 		break;
5282 
5283 	case ANEG_STATE_COMPLETE_ACK_INIT:
5284 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5285 			ret = ANEG_FAILED;
5286 			break;
5287 		}
5288 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5289 			       MR_LP_ADV_HALF_DUPLEX |
5290 			       MR_LP_ADV_SYM_PAUSE |
5291 			       MR_LP_ADV_ASYM_PAUSE |
5292 			       MR_LP_ADV_REMOTE_FAULT1 |
5293 			       MR_LP_ADV_REMOTE_FAULT2 |
5294 			       MR_LP_ADV_NEXT_PAGE |
5295 			       MR_TOGGLE_RX |
5296 			       MR_NP_RX);
5297 		if (ap->rxconfig & ANEG_CFG_FD)
5298 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5299 		if (ap->rxconfig & ANEG_CFG_HD)
5300 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5301 		if (ap->rxconfig & ANEG_CFG_PS1)
5302 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5303 		if (ap->rxconfig & ANEG_CFG_PS2)
5304 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5305 		if (ap->rxconfig & ANEG_CFG_RF1)
5306 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5307 		if (ap->rxconfig & ANEG_CFG_RF2)
5308 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5309 		if (ap->rxconfig & ANEG_CFG_NP)
5310 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5311 
5312 		ap->link_time = ap->cur_time;
5313 
5314 		ap->flags ^= (MR_TOGGLE_TX);
5315 		if (ap->rxconfig & 0x0008)
5316 			ap->flags |= MR_TOGGLE_RX;
5317 		if (ap->rxconfig & ANEG_CFG_NP)
5318 			ap->flags |= MR_NP_RX;
5319 		ap->flags |= MR_PAGE_RX;
5320 
5321 		ap->state = ANEG_STATE_COMPLETE_ACK;
5322 		ret = ANEG_TIMER_ENAB;
5323 		break;
5324 
5325 	case ANEG_STATE_COMPLETE_ACK:
5326 		if (ap->ability_match != 0 &&
5327 		    ap->rxconfig == 0) {
5328 			ap->state = ANEG_STATE_AN_ENABLE;
5329 			break;
5330 		}
5331 		delta = ap->cur_time - ap->link_time;
5332 		if (delta > ANEG_STATE_SETTLE_TIME) {
5333 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5334 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5335 			} else {
5336 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5337 				    !(ap->flags & MR_NP_RX)) {
5338 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5339 				} else {
5340 					ret = ANEG_FAILED;
5341 				}
5342 			}
5343 		}
5344 		break;
5345 
5346 	case ANEG_STATE_IDLE_DETECT_INIT:
5347 		ap->link_time = ap->cur_time;
5348 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5349 		tw32_f(MAC_MODE, tp->mac_mode);
5350 		udelay(40);
5351 
5352 		ap->state = ANEG_STATE_IDLE_DETECT;
5353 		ret = ANEG_TIMER_ENAB;
5354 		break;
5355 
5356 	case ANEG_STATE_IDLE_DETECT:
5357 		if (ap->ability_match != 0 &&
5358 		    ap->rxconfig == 0) {
5359 			ap->state = ANEG_STATE_AN_ENABLE;
5360 			break;
5361 		}
5362 		delta = ap->cur_time - ap->link_time;
5363 		if (delta > ANEG_STATE_SETTLE_TIME) {
5364 			/* XXX another gem from the Broadcom driver :( */
5365 			ap->state = ANEG_STATE_LINK_OK;
5366 		}
5367 		break;
5368 
5369 	case ANEG_STATE_LINK_OK:
5370 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5371 		ret = ANEG_DONE;
5372 		break;
5373 
5374 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5375 		/* ??? unimplemented */
5376 		break;
5377 
5378 	case ANEG_STATE_NEXT_PAGE_WAIT:
5379 		/* ??? unimplemented */
5380 		break;
5381 
5382 	default:
5383 		ret = ANEG_FAILED;
5384 		break;
5385 	}
5386 
5387 	return ret;
5388 }
5389 
5390 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5391 {
5392 	int res = 0;
5393 	struct tg3_fiber_aneginfo aninfo;
5394 	int status = ANEG_FAILED;
5395 	unsigned int tick;
5396 	u32 tmp;
5397 
5398 	tw32_f(MAC_TX_AUTO_NEG, 0);
5399 
5400 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5401 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5402 	udelay(40);
5403 
5404 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5405 	udelay(40);
5406 
5407 	memset(&aninfo, 0, sizeof(aninfo));
5408 	aninfo.flags |= MR_AN_ENABLE;
5409 	aninfo.state = ANEG_STATE_UNKNOWN;
5410 	aninfo.cur_time = 0;
5411 	tick = 0;
5412 	while (++tick < 195000) {
5413 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5414 		if (status == ANEG_DONE || status == ANEG_FAILED)
5415 			break;
5416 
5417 		udelay(1);
5418 	}
5419 
5420 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5421 	tw32_f(MAC_MODE, tp->mac_mode);
5422 	udelay(40);
5423 
5424 	*txflags = aninfo.txconfig;
5425 	*rxflags = aninfo.flags;
5426 
5427 	if (status == ANEG_DONE &&
5428 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5429 			     MR_LP_ADV_FULL_DUPLEX)))
5430 		res = 1;
5431 
5432 	return res;
5433 }
5434 
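/* fiber_autoneg() is driven from tg3_setup_fiber_by_hand() further below;
 * each pass of its ~195000-tick loop advances the state machine one step
 * with a 1 us delay, bounding a full negotiation attempt to roughly
 * 200 ms.
 */
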
5435 static void tg3_init_bcm8002(struct tg3 *tp)
5436 {
5437 	u32 mac_status = tr32(MAC_STATUS);
5438 	int i;
5439 
5440 	/* Reset when initializing the first time or when we have a link. */
5441 	if (tg3_flag(tp, INIT_COMPLETE) &&
5442 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5443 		return;
5444 
5445 	/* Set PLL lock range. */
5446 	tg3_writephy(tp, 0x16, 0x8007);
5447 
5448 	/* SW reset */
5449 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5450 
5451 	/* Wait for reset to complete. */
5452 	/* XXX schedule_timeout() ... */
5453 	for (i = 0; i < 500; i++)
5454 		udelay(10);
5455 
5456 	/* Config mode; select PMA/Ch 1 regs. */
5457 	tg3_writephy(tp, 0x10, 0x8411);
5458 
5459 	/* Enable auto-lock and comdet, select txclk for tx. */
5460 	tg3_writephy(tp, 0x11, 0x0a10);
5461 
5462 	tg3_writephy(tp, 0x18, 0x00a0);
5463 	tg3_writephy(tp, 0x16, 0x41ff);
5464 
5465 	/* Assert and deassert POR. */
5466 	tg3_writephy(tp, 0x13, 0x0400);
5467 	udelay(40);
5468 	tg3_writephy(tp, 0x13, 0x0000);
5469 
5470 	tg3_writephy(tp, 0x11, 0x0a50);
5471 	udelay(40);
5472 	tg3_writephy(tp, 0x11, 0x0a10);
5473 
5474 	/* Wait for signal to stabilize */
5475 	/* XXX schedule_timeout() ... */
5476 	for (i = 0; i < 15000; i++)
5477 		udelay(10);
5478 
5479 	/* Deselect the channel register so we can read the PHYID
5480 	 * later.
5481 	 */
5482 	tg3_writephy(tp, 0x10, 0x8011);
5483 }
5484 
5485 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5486 {
5487 	u16 flowctrl;
5488 	bool current_link_up;
5489 	u32 sg_dig_ctrl, sg_dig_status;
5490 	u32 serdes_cfg, expected_sg_dig_ctrl;
5491 	int workaround, port_a;
5492 
5493 	serdes_cfg = 0;
5494 	expected_sg_dig_ctrl = 0;
5495 	workaround = 0;
5496 	port_a = 1;
5497 	current_link_up = false;
5498 
5499 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5500 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5501 		workaround = 1;
5502 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5503 			port_a = 0;
5504 
5505 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5506 		/* preserve bits 20-23 for voltage regulator */
5507 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5508 	}
5509 
5510 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5511 
5512 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5513 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5514 			if (workaround) {
5515 				u32 val = serdes_cfg;
5516 
5517 				if (port_a)
5518 					val |= 0xc010000;
5519 				else
5520 					val |= 0x4010000;
5521 				tw32_f(MAC_SERDES_CFG, val);
5522 			}
5523 
5524 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5525 		}
5526 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5527 			tg3_setup_flow_control(tp, 0, 0);
5528 			current_link_up = true;
5529 		}
5530 		goto out;
5531 	}
5532 
5533 	/* Want auto-negotiation.  */
5534 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5535 
5536 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5537 	if (flowctrl & ADVERTISE_1000XPAUSE)
5538 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5539 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5540 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5541 
5542 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5543 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5544 		    tp->serdes_counter &&
5545 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5546 				    MAC_STATUS_RCVD_CFG)) ==
5547 		     MAC_STATUS_PCS_SYNCED)) {
5548 			tp->serdes_counter--;
5549 			current_link_up = true;
5550 			goto out;
5551 		}
5552 restart_autoneg:
5553 		if (workaround)
5554 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5555 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5556 		udelay(5);
5557 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5558 
5559 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5560 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5561 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5562 				 MAC_STATUS_SIGNAL_DET)) {
5563 		sg_dig_status = tr32(SG_DIG_STATUS);
5564 		mac_status = tr32(MAC_STATUS);
5565 
5566 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5567 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5568 			u32 local_adv = 0, remote_adv = 0;
5569 
5570 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5571 				local_adv |= ADVERTISE_1000XPAUSE;
5572 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5573 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5574 
5575 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5576 				remote_adv |= LPA_1000XPAUSE;
5577 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5578 				remote_adv |= LPA_1000XPAUSE_ASYM;
5579 
5580 			tp->link_config.rmt_adv =
5581 					   mii_adv_to_ethtool_adv_x(remote_adv);
5582 
5583 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5584 			current_link_up = true;
5585 			tp->serdes_counter = 0;
5586 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5587 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5588 			if (tp->serdes_counter)
5589 				tp->serdes_counter--;
5590 			else {
5591 				if (workaround) {
5592 					u32 val = serdes_cfg;
5593 
5594 					if (port_a)
5595 						val |= 0xc010000;
5596 					else
5597 						val |= 0x4010000;
5598 
5599 					tw32_f(MAC_SERDES_CFG, val);
5600 				}
5601 
5602 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5603 				udelay(40);
5604 
5605 				/* Link parallel detection: link is up only
5606 				 * if we have PCS_SYNC and are not receiving
5607 				 * config code words. */
5608 				mac_status = tr32(MAC_STATUS);
5609 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5610 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5611 					tg3_setup_flow_control(tp, 0, 0);
5612 					current_link_up = true;
5613 					tp->phy_flags |=
5614 						TG3_PHYFLG_PARALLEL_DETECT;
5615 					tp->serdes_counter =
5616 						SERDES_PARALLEL_DET_TIMEOUT;
5617 				} else
5618 					goto restart_autoneg;
5619 			}
5620 		}
5621 	} else {
5622 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5623 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5624 	}
5625 
5626 out:
5627 	return current_link_up;
5628 }
5629 
5630 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5631 {
5632 	bool current_link_up = false;
5633 
5634 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5635 		goto out;
5636 
5637 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5638 		u32 txflags, rxflags;
5639 		int i;
5640 
5641 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5642 			u32 local_adv = 0, remote_adv = 0;
5643 
5644 			if (txflags & ANEG_CFG_PS1)
5645 				local_adv |= ADVERTISE_1000XPAUSE;
5646 			if (txflags & ANEG_CFG_PS2)
5647 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5648 
5649 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5650 				remote_adv |= LPA_1000XPAUSE;
5651 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5652 				remote_adv |= LPA_1000XPAUSE_ASYM;
5653 
5654 			tp->link_config.rmt_adv =
5655 					   mii_adv_to_ethtool_adv_x(remote_adv);
5656 
5657 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5658 
5659 			current_link_up = true;
5660 		}
5661 		for (i = 0; i < 30; i++) {
5662 			udelay(20);
5663 			tw32_f(MAC_STATUS,
5664 			       (MAC_STATUS_SYNC_CHANGED |
5665 				MAC_STATUS_CFG_CHANGED));
5666 			udelay(40);
5667 			if ((tr32(MAC_STATUS) &
5668 			     (MAC_STATUS_SYNC_CHANGED |
5669 			      MAC_STATUS_CFG_CHANGED)) == 0)
5670 				break;
5671 		}
5672 
5673 		mac_status = tr32(MAC_STATUS);
5674 		if (!current_link_up &&
5675 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5676 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5677 			current_link_up = true;
5678 	} else {
5679 		tg3_setup_flow_control(tp, 0, 0);
5680 
5681 		/* Forcing 1000FD link up. */
5682 		current_link_up = true;
5683 
5684 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5685 		udelay(40);
5686 
5687 		tw32_f(MAC_MODE, tp->mac_mode);
5688 		udelay(40);
5689 	}
5690 
5691 out:
5692 	return current_link_up;
5693 }
5694 
5695 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5696 {
5697 	u32 orig_pause_cfg;
5698 	u16 orig_active_speed;
5699 	u8 orig_active_duplex;
5700 	u32 mac_status;
5701 	bool current_link_up;
5702 	int i;
5703 
5704 	orig_pause_cfg = tp->link_config.active_flowctrl;
5705 	orig_active_speed = tp->link_config.active_speed;
5706 	orig_active_duplex = tp->link_config.active_duplex;
5707 
5708 	if (!tg3_flag(tp, HW_AUTONEG) &&
5709 	    tp->link_up &&
5710 	    tg3_flag(tp, INIT_COMPLETE)) {
5711 		mac_status = tr32(MAC_STATUS);
5712 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5713 			       MAC_STATUS_SIGNAL_DET |
5714 			       MAC_STATUS_CFG_CHANGED |
5715 			       MAC_STATUS_RCVD_CFG);
5716 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5717 				   MAC_STATUS_SIGNAL_DET)) {
5718 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5719 					    MAC_STATUS_CFG_CHANGED));
5720 			return 0;
5721 		}
5722 	}
5723 
5724 	tw32_f(MAC_TX_AUTO_NEG, 0);
5725 
5726 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5727 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5728 	tw32_f(MAC_MODE, tp->mac_mode);
5729 	udelay(40);
5730 
5731 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5732 		tg3_init_bcm8002(tp);
5733 
5734 	/* Enable link change events even when polling the serdes.  */
5735 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5736 	udelay(40);
5737 
5738 	current_link_up = false;
5739 	tp->link_config.rmt_adv = 0;
5740 	mac_status = tr32(MAC_STATUS);
5741 
5742 	if (tg3_flag(tp, HW_AUTONEG))
5743 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5744 	else
5745 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5746 
5747 	tp->napi[0].hw_status->status =
5748 		(SD_STATUS_UPDATED |
5749 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5750 
5751 	for (i = 0; i < 100; i++) {
5752 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5753 				    MAC_STATUS_CFG_CHANGED));
5754 		udelay(5);
5755 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5756 					 MAC_STATUS_CFG_CHANGED |
5757 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5758 			break;
5759 	}
5760 
5761 	mac_status = tr32(MAC_STATUS);
5762 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5763 		current_link_up = false;
5764 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5765 		    tp->serdes_counter == 0) {
5766 			tw32_f(MAC_MODE, (tp->mac_mode |
5767 					  MAC_MODE_SEND_CONFIGS));
5768 			udelay(1);
5769 			tw32_f(MAC_MODE, tp->mac_mode);
5770 		}
5771 	}
5772 
5773 	if (current_link_up) {
5774 		tp->link_config.active_speed = SPEED_1000;
5775 		tp->link_config.active_duplex = DUPLEX_FULL;
5776 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5777 				    LED_CTRL_LNKLED_OVERRIDE |
5778 				    LED_CTRL_1000MBPS_ON));
5779 	} else {
5780 		tp->link_config.active_speed = SPEED_UNKNOWN;
5781 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5782 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5783 				    LED_CTRL_LNKLED_OVERRIDE |
5784 				    LED_CTRL_TRAFFIC_OVERRIDE));
5785 	}
5786 
5787 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5788 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5789 		if (orig_pause_cfg != now_pause_cfg ||
5790 		    orig_active_speed != tp->link_config.active_speed ||
5791 		    orig_active_duplex != tp->link_config.active_duplex)
5792 			tg3_link_report(tp);
5793 	}
5794 
5795 	return 0;
5796 }
5797 
5798 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5799 {
5800 	int err = 0;
5801 	u32 bmsr, bmcr;
5802 	u16 current_speed = SPEED_UNKNOWN;
5803 	u8 current_duplex = DUPLEX_UNKNOWN;
5804 	bool current_link_up = false;
5805 	u32 local_adv, remote_adv, sgsr;
5806 
5807 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5808 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5809 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5810 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5811 
5812 		if (force_reset)
5813 			tg3_phy_reset(tp);
5814 
5815 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5816 
5817 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5818 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5819 		} else {
5820 			current_link_up = true;
5821 			if (sgsr & SERDES_TG3_SPEED_1000) {
5822 				current_speed = SPEED_1000;
5823 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5825 				current_speed = SPEED_100;
5826 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 			} else {
5828 				current_speed = SPEED_10;
5829 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830 			}
5831 
5832 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5833 				current_duplex = DUPLEX_FULL;
5834 			else
5835 				current_duplex = DUPLEX_HALF;
5836 		}
5837 
5838 		tw32_f(MAC_MODE, tp->mac_mode);
5839 		udelay(40);
5840 
5841 		tg3_clear_mac_status(tp);
5842 
5843 		goto fiber_setup_done;
5844 	}
5845 
5846 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 	tw32_f(MAC_MODE, tp->mac_mode);
5848 	udelay(40);
5849 
5850 	tg3_clear_mac_status(tp);
5851 
5852 	if (force_reset)
5853 		tg3_phy_reset(tp);
5854 
5855 	tp->link_config.rmt_adv = 0;
5856 
5857 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5860 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5861 			bmsr |= BMSR_LSTATUS;
5862 		else
5863 			bmsr &= ~BMSR_LSTATUS;
5864 	}
5865 
5866 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5867 
5868 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5869 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5870 		/* do nothing, just check for link up at the end */
5871 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5872 		u32 adv, newadv;
5873 
5874 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5875 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5876 				 ADVERTISE_1000XPAUSE |
5877 				 ADVERTISE_1000XPSE_ASYM |
5878 				 ADVERTISE_SLCT);
5879 
5880 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5881 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5882 
5883 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5884 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5885 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5886 			tg3_writephy(tp, MII_BMCR, bmcr);
5887 
5888 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5889 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5890 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5891 
5892 			return err;
5893 		}
5894 	} else {
5895 		u32 new_bmcr;
5896 
5897 		bmcr &= ~BMCR_SPEED1000;
5898 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5899 
5900 		if (tp->link_config.duplex == DUPLEX_FULL)
5901 			new_bmcr |= BMCR_FULLDPLX;
5902 
5903 		if (new_bmcr != bmcr) {
5904 			/* BMCR_SPEED1000 is a reserved bit that needs
5905 			 * to be set on write.
5906 			 */
5907 			new_bmcr |= BMCR_SPEED1000;
5908 
5909 			/* Force a linkdown */
5910 			if (tp->link_up) {
5911 				u32 adv;
5912 
5913 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5914 				adv &= ~(ADVERTISE_1000XFULL |
5915 					 ADVERTISE_1000XHALF |
5916 					 ADVERTISE_SLCT);
5917 				tg3_writephy(tp, MII_ADVERTISE, adv);
5918 				tg3_writephy(tp, MII_BMCR, bmcr |
5919 							   BMCR_ANRESTART |
5920 							   BMCR_ANENABLE);
5921 				udelay(10);
5922 				tg3_carrier_off(tp);
5923 			}
5924 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5925 			bmcr = new_bmcr;
5926 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5929 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5930 					bmsr |= BMSR_LSTATUS;
5931 				else
5932 					bmsr &= ~BMSR_LSTATUS;
5933 			}
5934 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5935 		}
5936 	}
5937 
5938 	if (bmsr & BMSR_LSTATUS) {
5939 		current_speed = SPEED_1000;
5940 		current_link_up = true;
5941 		if (bmcr & BMCR_FULLDPLX)
5942 			current_duplex = DUPLEX_FULL;
5943 		else
5944 			current_duplex = DUPLEX_HALF;
5945 
5946 		local_adv = 0;
5947 		remote_adv = 0;
5948 
5949 		if (bmcr & BMCR_ANENABLE) {
5950 			u32 common;
5951 
5952 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5953 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5954 			common = local_adv & remote_adv;
5955 			if (common & (ADVERTISE_1000XHALF |
5956 				      ADVERTISE_1000XFULL)) {
5957 				if (common & ADVERTISE_1000XFULL)
5958 					current_duplex = DUPLEX_FULL;
5959 				else
5960 					current_duplex = DUPLEX_HALF;
5961 
5962 				tp->link_config.rmt_adv =
5963 					   mii_adv_to_ethtool_adv_x(remote_adv);
5964 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5965 				/* Link is up via parallel detect */
5966 			} else {
5967 				current_link_up = false;
5968 			}
5969 		}
5970 	}
5971 
5972 fiber_setup_done:
5973 	if (current_link_up && current_duplex == DUPLEX_FULL)
5974 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5975 
5976 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5977 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5978 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5979 
5980 	tw32_f(MAC_MODE, tp->mac_mode);
5981 	udelay(40);
5982 
5983 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5984 
5985 	tp->link_config.active_speed = current_speed;
5986 	tp->link_config.active_duplex = current_duplex;
5987 
5988 	tg3_test_and_report_link_chg(tp, current_link_up);
5989 	return err;
5990 }
5991 
5992 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5993 {
5994 	if (tp->serdes_counter) {
5995 		/* Give autoneg time to complete. */
5996 		tp->serdes_counter--;
5997 		return;
5998 	}
5999 
6000 	if (!tp->link_up &&
6001 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002 		u32 bmcr;
6003 
6004 		tg3_readphy(tp, MII_BMCR, &bmcr);
6005 		if (bmcr & BMCR_ANENABLE) {
6006 			u32 phy1, phy2;
6007 
6008 			/* Select shadow register 0x1f */
6009 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6010 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6011 
6012 			/* Select expansion interrupt status register */
6013 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6014 					 MII_TG3_DSP_EXP1_INT_STAT);
6015 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017 
6018 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6019 				/* We have signal detect and are not
6020 				 * receiving config code words; link is up
6021 				 * via parallel detection.
6022 				 */
6023 
6024 				bmcr &= ~BMCR_ANENABLE;
6025 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6026 				tg3_writephy(tp, MII_BMCR, bmcr);
6027 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6028 			}
6029 		}
6030 	} else if (tp->link_up &&
6031 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6032 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033 		u32 phy2;
6034 
6035 		/* Select expansion interrupt status register */
6036 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 				 MII_TG3_DSP_EXP1_INT_STAT);
6038 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 		if (phy2 & 0x20) {
6040 			u32 bmcr;
6041 
6042 			/* Config code words received, turn on autoneg. */
6043 			tg3_readphy(tp, MII_BMCR, &bmcr);
6044 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6045 
6046 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6047 
6048 		}
6049 	}
6050 }
6051 
6052 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6053 {
6054 	u32 val;
6055 	int err;
6056 
6057 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6058 		err = tg3_setup_fiber_phy(tp, force_reset);
6059 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6060 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6061 	else
6062 		err = tg3_setup_copper_phy(tp, force_reset);
6063 
6064 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6065 		u32 scale;
6066 
6067 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6068 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6069 			scale = 65;
6070 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6071 			scale = 6;
6072 		else
6073 			scale = 12;
6074 
6075 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6076 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6077 		tw32(GRC_MISC_CFG, val);
6078 	}
6079 
6080 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6081 	      (6 << TX_LENGTHS_IPG_SHIFT);
6082 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6083 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6084 		val |= tr32(MAC_TX_LENGTHS) &
6085 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6086 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6087 
6088 	if (tp->link_config.active_speed == SPEED_1000 &&
6089 	    tp->link_config.active_duplex == DUPLEX_HALF)
6090 		tw32(MAC_TX_LENGTHS, val |
6091 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 	else
6093 		tw32(MAC_TX_LENGTHS, val |
6094 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6095 
6096 	if (!tg3_flag(tp, 5705_PLUS)) {
6097 		if (tp->link_up) {
6098 			tw32(HOSTCC_STAT_COAL_TICKS,
6099 			     tp->coal.stats_block_coalesce_usecs);
6100 		} else {
6101 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6102 		}
6103 	}
6104 
6105 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6106 		val = tr32(PCIE_PWR_MGMT_THRESH);
6107 		if (!tp->link_up)
6108 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109 			      tp->pwrmgmt_thresh;
6110 		else
6111 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6112 		tw32(PCIE_PWR_MGMT_THRESH, val);
6113 	}
6114 
6115 	return err;
6116 }
6117 
6118 /* tp->lock must be held */
6119 static u64 tg3_refclk_read(struct tg3 *tp)
6120 {
6121 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6122 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6123 }
6124 
6125 /* tp->lock must be held */
6126 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6127 {
6128 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6129 
6130 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6131 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6132 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6133 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6134 }
6135 
6136 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6137 static inline void tg3_full_unlock(struct tg3 *tp);
6138 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6139 {
6140 	struct tg3 *tp = netdev_priv(dev);
6141 
6142 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6143 				SOF_TIMESTAMPING_RX_SOFTWARE |
6144 				SOF_TIMESTAMPING_SOFTWARE;
6145 
6146 	if (tg3_flag(tp, PTP_CAPABLE)) {
6147 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6148 					SOF_TIMESTAMPING_RX_HARDWARE |
6149 					SOF_TIMESTAMPING_RAW_HARDWARE;
6150 	}
6151 
6152 	if (tp->ptp_clock)
6153 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6154 	else
6155 		info->phc_index = -1;
6156 
6157 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6158 
6159 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6160 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6161 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6162 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6163 	return 0;
6164 }
6165 
6166 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6167 {
6168 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6169 	bool neg_adj = false;
6170 	u32 correction = 0;
6171 
6172 	if (ppb < 0) {
6173 		neg_adj = true;
6174 		ppb = -ppb;
6175 	}
6176 
6177 	/* Frequency adjustment is performed in hardware with a 24-bit
6178 	 * accumulator and a programmable correction value.  On each clock,
6179 	 * the correction value is added to the accumulator and, when it
6180 	 * overflows, the time counter is incremented/decremented.
6181 	 *
6182 	 * So the conversion from ppb to the correction value is
6183 	 *		ppb * (1 << 24) / 1000000000
6184 	 */
6185 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6186 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6187 
6188 	tg3_full_lock(tp, 0);
6189 
6190 	if (correction)
6191 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6192 		     TG3_EAV_REF_CLK_CORRECT_EN |
6193 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6194 	else
6195 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6196 
6197 	tg3_full_unlock(tp);
6198 
6199 	return 0;
6200 }
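
/* Editor's note: an illustrative sketch (not driver code) of the
 * ppb-to-correction conversion described above.  A 100 ppm (100000 ppb)
 * adjustment maps to
 *
 *	100000 * (1 << 24) / 1000000000 = 1677 (0x68d)
 *
 * so 1677 is added to the 24-bit accumulator on every clock, and each
 * accumulator overflow steps the time counter by one.
 */
#if 0
static u32 ppb_to_correction(s32 ppb)
{
	if (ppb < 0)
		ppb = -ppb;
	return div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
	       TG3_EAV_REF_CLK_CORRECT_MASK;
}
#endif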
6201 
6202 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6203 {
6204 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6205 
6206 	tg3_full_lock(tp, 0);
6207 	tp->ptp_adjust += delta;
6208 	tg3_full_unlock(tp);
6209 
6210 	return 0;
6211 }
6212 
6213 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6214 {
6215 	u64 ns;
6216 	u32 remainder;
6217 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6218 
6219 	tg3_full_lock(tp, 0);
6220 	ns = tg3_refclk_read(tp);
6221 	ns += tp->ptp_adjust;
6222 	tg3_full_unlock(tp);
6223 
6224 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6225 	ts->tv_nsec = remainder;
6226 
6227 	return 0;
6228 }
6229 
6230 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6231 			   const struct timespec *ts)
6232 {
6233 	u64 ns;
6234 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235 
6236 	ns = timespec_to_ns(ts);
6237 
6238 	tg3_full_lock(tp, 0);
6239 	tg3_refclk_write(tp, ns);
6240 	tp->ptp_adjust = 0;
6241 	tg3_full_unlock(tp);
6242 
6243 	return 0;
6244 }
6245 
6246 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6247 			  struct ptp_clock_request *rq, int on)
6248 {
6249 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6250 	u32 clock_ctl;
6251 	int rval = 0;
6252 
6253 	switch (rq->type) {
6254 	case PTP_CLK_REQ_PEROUT:
6255 		if (rq->perout.index != 0)
6256 			return -EINVAL;
6257 
6258 		tg3_full_lock(tp, 0);
6259 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6260 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6261 
6262 		if (on) {
6263 			u64 nsec;
6264 
6265 			nsec = rq->perout.start.sec * 1000000000ULL +
6266 			       rq->perout.start.nsec;
6267 
6268 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6269 				netdev_warn(tp->dev,
6270 					    "Device supports only a one-shot timesync output, period must be 0\n");
6271 				rval = -EINVAL;
6272 				goto err_out;
6273 			}
6274 
6275 			if (nsec & (1ULL << 63)) {
6276 				netdev_warn(tp->dev,
6277 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6278 				rval = -EINVAL;
6279 				goto err_out;
6280 			}
6281 
6282 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6283 			tw32(TG3_EAV_WATCHDOG0_MSB,
6284 			     TG3_EAV_WATCHDOG0_EN |
6285 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6286 
6287 			tw32(TG3_EAV_REF_CLCK_CTL,
6288 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6289 		} else {
6290 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6291 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6292 		}
6293 
6294 err_out:
6295 		tg3_full_unlock(tp);
6296 		return rval;
6297 
6298 	default:
6299 		break;
6300 	}
6301 
6302 	return -EOPNOTSUPP;
6303 }
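
/* Editor's sketch (not driver code): requesting the one-shot timesync
 * output handled above through the PTP API.  Field values are
 * illustrative; only index 0 is accepted and the period must be 0.
 * Assumes a struct tg3 *tp is in scope.
 */
#if 0
	struct ptp_clock_request rq = {
		.type = PTP_CLK_REQ_PEROUT,
		.perout = {
			.index  = 0,
			.start  = { .sec = 1700000000, .nsec = 0 },
			.period = { .sec = 0, .nsec = 0 }, /* one-shot only */
		},
	};

	tg3_ptp_enable(&tp->ptp_info, &rq, 1);
#endif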
6304 
6305 static const struct ptp_clock_info tg3_ptp_caps = {
6306 	.owner		= THIS_MODULE,
6307 	.name		= "tg3 clock",
6308 	.max_adj	= 250000000,
6309 	.n_alarm	= 0,
6310 	.n_ext_ts	= 0,
6311 	.n_per_out	= 1,
6312 	.pps		= 0,
6313 	.adjfreq	= tg3_ptp_adjfreq,
6314 	.adjtime	= tg3_ptp_adjtime,
6315 	.gettime	= tg3_ptp_gettime,
6316 	.settime	= tg3_ptp_settime,
6317 	.enable		= tg3_ptp_enable,
6318 };
6319 
6320 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6321 				     struct skb_shared_hwtstamps *timestamp)
6322 {
6323 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6324 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6325 					   tp->ptp_adjust);
6326 }
6327 
6328 /* tp->lock must be held */
6329 static void tg3_ptp_init(struct tg3 *tp)
6330 {
6331 	if (!tg3_flag(tp, PTP_CAPABLE))
6332 		return;
6333 
6334 	/* Initialize the hardware clock to the system time. */
6335 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6336 	tp->ptp_adjust = 0;
6337 	tp->ptp_info = tg3_ptp_caps;
6338 }
6339 
6340 /* tp->lock must be held */
6341 static void tg3_ptp_resume(struct tg3 *tp)
6342 {
6343 	if (!tg3_flag(tp, PTP_CAPABLE))
6344 		return;
6345 
6346 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6347 	tp->ptp_adjust = 0;
6348 }
6349 
6350 static void tg3_ptp_fini(struct tg3 *tp)
6351 {
6352 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6353 		return;
6354 
6355 	ptp_clock_unregister(tp->ptp_clock);
6356 	tp->ptp_clock = NULL;
6357 	tp->ptp_adjust = 0;
6358 }
6359 
6360 static inline int tg3_irq_sync(struct tg3 *tp)
6361 {
6362 	return tp->irq_sync;
6363 }
6364 
6365 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6366 {
6367 	int i;
6368 
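	/* Offset dst by the register offset so the dump buffer mirrors
	 * the register layout: the words read from [off, off + len)
	 * land at the same byte offsets within the buffer.
	 */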
6369 	dst = (u32 *)((u8 *)dst + off);
6370 	for (i = 0; i < len; i += sizeof(u32))
6371 		*dst++ = tr32(off + i);
6372 }
6373 
6374 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6375 {
6376 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6377 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6378 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6379 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6380 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6381 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6382 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6383 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6384 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6385 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6386 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6387 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6388 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6389 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6390 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6391 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6392 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6393 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6394 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6395 
6396 	if (tg3_flag(tp, SUPPORT_MSIX))
6397 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6398 
6399 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6400 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6401 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6402 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6403 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6404 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6405 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6406 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6407 
6408 	if (!tg3_flag(tp, 5705_PLUS)) {
6409 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6410 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6411 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6412 	}
6413 
6414 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6415 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6416 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6417 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6418 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6419 
6420 	if (tg3_flag(tp, NVRAM))
6421 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6422 }
6423 
6424 static void tg3_dump_state(struct tg3 *tp)
6425 {
6426 	int i;
6427 	u32 *regs;
6428 
6429 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6430 	if (!regs)
6431 		return;
6432 
6433 	if (tg3_flag(tp, PCI_EXPRESS)) {
6434 		/* Read up to but not including private PCI registers */
6435 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6436 			regs[i / sizeof(u32)] = tr32(i);
6437 	} else
6438 		tg3_dump_legacy_regs(tp, regs);
6439 
6440 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6441 		if (!regs[i + 0] && !regs[i + 1] &&
6442 		    !regs[i + 2] && !regs[i + 3])
6443 			continue;
6444 
6445 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6446 			   i * 4,
6447 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6448 	}
6449 
6450 	kfree(regs);
6451 
6452 	for (i = 0; i < tp->irq_cnt; i++) {
6453 		struct tg3_napi *tnapi = &tp->napi[i];
6454 
6455 		/* SW status block */
6456 		netdev_err(tp->dev,
6457 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6458 			   i,
6459 			   tnapi->hw_status->status,
6460 			   tnapi->hw_status->status_tag,
6461 			   tnapi->hw_status->rx_jumbo_consumer,
6462 			   tnapi->hw_status->rx_consumer,
6463 			   tnapi->hw_status->rx_mini_consumer,
6464 			   tnapi->hw_status->idx[0].rx_producer,
6465 			   tnapi->hw_status->idx[0].tx_consumer);
6466 
6467 		netdev_err(tp->dev,
6468 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6469 			   i,
6470 			   tnapi->last_tag, tnapi->last_irq_tag,
6471 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6472 			   tnapi->rx_rcb_ptr,
6473 			   tnapi->prodring.rx_std_prod_idx,
6474 			   tnapi->prodring.rx_std_cons_idx,
6475 			   tnapi->prodring.rx_jmb_prod_idx,
6476 			   tnapi->prodring.rx_jmb_cons_idx);
6477 	}
6478 }
6479 
6480 /* This is called whenever we suspect that the system chipset is re-
6481  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6482  * is bogus tx completions. We try to recover by setting the
6483  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6484  * in the workqueue.
6485  */
6486 static void tg3_tx_recover(struct tg3 *tp)
6487 {
6488 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6489 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6490 
6491 	netdev_warn(tp->dev,
6492 		    "The system may be re-ordering memory-mapped I/O "
6493 		    "cycles to the network device, attempting to recover. "
6494 		    "Please report the problem to the driver maintainer "
6495 		    "and include system chipset information.\n");
6496 
6497 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6498 }
6499 
6500 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6501 {
6502 	/* Tell compiler to fetch tx indices from memory. */
6503 	barrier();
6504 	return tnapi->tx_pending -
6505 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6506 }
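
/* Editor's note: an illustrative sketch (not driver code) of the
 * free-slot arithmetic above.  Because the ring size is a power of two,
 * the masked difference stays correct even after the 32-bit indices
 * wrap.
 */
#if 0
static u32 tx_avail_example(void)
{
	u32 tx_pending = 511, tx_prod = 5, tx_cons = 510;

	/* (5 - 510) wraps to 0xfffffe07; masked with 511 that is 7
	 * descriptors in flight, leaving 511 - 7 = 504 slots free.
	 */
	return tx_pending - ((tx_prod - tx_cons) & (512 - 1));
}
#endif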
6507 
6508 /* Tigon3 never reports partial packet sends.  So we do not
6509  * need special logic to handle SKBs that have not had all
6510  * of their frags sent yet, like SunGEM does.
6511  */
6512 static void tg3_tx(struct tg3_napi *tnapi)
6513 {
6514 	struct tg3 *tp = tnapi->tp;
6515 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6516 	u32 sw_idx = tnapi->tx_cons;
6517 	struct netdev_queue *txq;
6518 	int index = tnapi - tp->napi;
6519 	unsigned int pkts_compl = 0, bytes_compl = 0;
6520 
6521 	if (tg3_flag(tp, ENABLE_TSS))
6522 		index--;
6523 
6524 	txq = netdev_get_tx_queue(tp->dev, index);
6525 
6526 	while (sw_idx != hw_idx) {
6527 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6528 		struct sk_buff *skb = ri->skb;
6529 		int i, tx_bug = 0;
6530 
6531 		if (unlikely(skb == NULL)) {
6532 			tg3_tx_recover(tp);
6533 			return;
6534 		}
6535 
6536 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6537 			struct skb_shared_hwtstamps timestamp;
6538 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6539 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6540 
6541 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6542 
6543 			skb_tstamp_tx(skb, &timestamp);
6544 		}
6545 
6546 		pci_unmap_single(tp->pdev,
6547 				 dma_unmap_addr(ri, mapping),
6548 				 skb_headlen(skb),
6549 				 PCI_DMA_TODEVICE);
6550 
6551 		ri->skb = NULL;
6552 
6553 		while (ri->fragmented) {
6554 			ri->fragmented = false;
6555 			sw_idx = NEXT_TX(sw_idx);
6556 			ri = &tnapi->tx_buffers[sw_idx];
6557 		}
6558 
6559 		sw_idx = NEXT_TX(sw_idx);
6560 
6561 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6562 			ri = &tnapi->tx_buffers[sw_idx];
6563 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6564 				tx_bug = 1;
6565 
6566 			pci_unmap_page(tp->pdev,
6567 				       dma_unmap_addr(ri, mapping),
6568 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6569 				       PCI_DMA_TODEVICE);
6570 
6571 			while (ri->fragmented) {
6572 				ri->fragmented = false;
6573 				sw_idx = NEXT_TX(sw_idx);
6574 				ri = &tnapi->tx_buffers[sw_idx];
6575 			}
6576 
6577 			sw_idx = NEXT_TX(sw_idx);
6578 		}
6579 
6580 		pkts_compl++;
6581 		bytes_compl += skb->len;
6582 
6583 		dev_kfree_skb(skb);
6584 
6585 		if (unlikely(tx_bug)) {
6586 			tg3_tx_recover(tp);
6587 			return;
6588 		}
6589 	}
6590 
6591 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6592 
6593 	tnapi->tx_cons = sw_idx;
6594 
6595 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6596 	 * before checking for netif_queue_stopped().  Without the
6597 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6598 	 * will miss it and cause the queue to be stopped forever.
6599 	 */
6600 	smp_mb();
6601 
6602 	if (unlikely(netif_tx_queue_stopped(txq) &&
6603 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6604 		__netif_tx_lock(txq, smp_processor_id());
6605 		if (netif_tx_queue_stopped(txq) &&
6606 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6607 			netif_tx_wake_queue(txq);
6608 		__netif_tx_unlock(txq);
6609 	}
6610 }
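
/* Editor's sketch (not driver code): the generic producer-side
 * stop/re-check pattern that the smp_mb() above pairs with.  The real
 * counterpart lives in tg3_start_xmit(); stop_thresh and wake_thresh
 * are illustrative placeholders.
 */
#if 0
	if (unlikely(tg3_tx_avail(tnapi) <= stop_thresh)) {
		netif_tx_stop_queue(txq);

		/* Pairs with the smp_mb() in tg3_tx(): re-check after
		 * stopping, in case the consumer freed slots before it
		 * could observe the stopped queue.
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > wake_thresh)
			netif_tx_wake_queue(txq);
	}
#endif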
6611 
6612 static void tg3_frag_free(bool is_frag, void *data)
6613 {
6614 	if (is_frag)
6615 		put_page(virt_to_head_page(data));
6616 	else
6617 		kfree(data);
6618 }
6619 
6620 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6621 {
6622 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6623 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6624 
6625 	if (!ri->data)
6626 		return;
6627 
6628 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6629 			 map_sz, PCI_DMA_FROMDEVICE);
6630 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6631 	ri->data = NULL;
6632 }
6633 
6634 
6635 /* Returns size of skb allocated or < 0 on error.
6636  *
6637  * We only need to fill in the address because the other members
6638  * of the RX descriptor are invariant, see tg3_init_rings.
6639  *
6640  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6641  * posting buffers we only dirty the first cache line of the RX
6642  * descriptor (containing the address).  Whereas for the RX status
6643  * buffers the cpu only reads the last cacheline of the RX descriptor
6644  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6645  */
6646 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6647 			     u32 opaque_key, u32 dest_idx_unmasked,
6648 			     unsigned int *frag_size)
6649 {
6650 	struct tg3_rx_buffer_desc *desc;
6651 	struct ring_info *map;
6652 	u8 *data;
6653 	dma_addr_t mapping;
6654 	int skb_size, data_size, dest_idx;
6655 
6656 	switch (opaque_key) {
6657 	case RXD_OPAQUE_RING_STD:
6658 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6659 		desc = &tpr->rx_std[dest_idx];
6660 		map = &tpr->rx_std_buffers[dest_idx];
6661 		data_size = tp->rx_pkt_map_sz;
6662 		break;
6663 
6664 	case RXD_OPAQUE_RING_JUMBO:
6665 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6666 		desc = &tpr->rx_jmb[dest_idx].std;
6667 		map = &tpr->rx_jmb_buffers[dest_idx];
6668 		data_size = TG3_RX_JMB_MAP_SZ;
6669 		break;
6670 
6671 	default:
6672 		return -EINVAL;
6673 	}
6674 
6675 	/* Do not overwrite any of the map or rp information
6676 	 * until we are sure we can commit to a new buffer.
6677 	 *
6678 	 * Callers depend upon this behavior and assume that
6679 	 * we leave everything unchanged if we fail.
6680 	 */
6681 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6682 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6683 	if (skb_size <= PAGE_SIZE) {
6684 		data = netdev_alloc_frag(skb_size);
6685 		*frag_size = skb_size;
6686 	} else {
6687 		data = kmalloc(skb_size, GFP_ATOMIC);
6688 		*frag_size = 0;
6689 	}
6690 	if (!data)
6691 		return -ENOMEM;
6692 
6693 	mapping = pci_map_single(tp->pdev,
6694 				 data + TG3_RX_OFFSET(tp),
6695 				 data_size,
6696 				 PCI_DMA_FROMDEVICE);
6697 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6698 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6699 		return -EIO;
6700 	}
6701 
6702 	map->data = data;
6703 	dma_unmap_addr_set(map, mapping, mapping);
6704 
6705 	desc->addr_hi = ((u64)mapping >> 32);
6706 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6707 
6708 	return data_size;
6709 }
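
/* Editor's sketch (not driver code): the allocation-strategy choice
 * made above, shown in isolation.  A padded buffer that fits in one
 * page comes from the per-cpu page-fragment allocator; anything larger
 * falls back to kmalloc().
 */
#if 0
static void *rx_data_alloc_example(unsigned int data_size,
				   unsigned int rx_offset)
{
	unsigned int skb_size = SKB_DATA_ALIGN(data_size + rx_offset) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (skb_size <= PAGE_SIZE)
		return netdev_alloc_frag(skb_size);
	return kmalloc(skb_size, GFP_ATOMIC);
}
#endif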
6710 
6711 /* We only need to move over in the address because the other
6712  * members of the RX descriptor are invariant.  See notes above
6713  * tg3_alloc_rx_data for full details.
6714  */
6715 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6716 			   struct tg3_rx_prodring_set *dpr,
6717 			   u32 opaque_key, int src_idx,
6718 			   u32 dest_idx_unmasked)
6719 {
6720 	struct tg3 *tp = tnapi->tp;
6721 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6722 	struct ring_info *src_map, *dest_map;
6723 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6724 	int dest_idx;
6725 
6726 	switch (opaque_key) {
6727 	case RXD_OPAQUE_RING_STD:
6728 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6729 		dest_desc = &dpr->rx_std[dest_idx];
6730 		dest_map = &dpr->rx_std_buffers[dest_idx];
6731 		src_desc = &spr->rx_std[src_idx];
6732 		src_map = &spr->rx_std_buffers[src_idx];
6733 		break;
6734 
6735 	case RXD_OPAQUE_RING_JUMBO:
6736 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6737 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6738 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6739 		src_desc = &spr->rx_jmb[src_idx].std;
6740 		src_map = &spr->rx_jmb_buffers[src_idx];
6741 		break;
6742 
6743 	default:
6744 		return;
6745 	}
6746 
6747 	dest_map->data = src_map->data;
6748 	dma_unmap_addr_set(dest_map, mapping,
6749 			   dma_unmap_addr(src_map, mapping));
6750 	dest_desc->addr_hi = src_desc->addr_hi;
6751 	dest_desc->addr_lo = src_desc->addr_lo;
6752 
6753 	/* Ensure that the update to the skb happens after the physical
6754 	 * addresses have been transferred to the new BD location.
6755 	 */
6756 	smp_wmb();
6757 
6758 	src_map->data = NULL;
6759 }
6760 
6761 /* The RX ring scheme is composed of multiple rings which post fresh
6762  * buffers to the chip, and one special ring the chip uses to report
6763  * status back to the host.
6764  *
6765  * The special ring reports the status of received packets to the
6766  * host.  The chip does not write into the original descriptor the
6767  * RX buffer was obtained from.  The chip simply takes the original
6768  * descriptor as provided by the host, updates the status and length
6769  * field, then writes this into the next status ring entry.
6770  *
6771  * Each ring the host uses to post buffers to the chip is described
6772  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6773  * it is first placed into the on-chip ram.  When the packet's length
6774  * is known, it walks down the TG3_BDINFO entries to select the ring.
6775  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
6776  * whose MAXLEN covers the new packet's length is chosen.
6777  *
6778  * The "separate ring for rx status" scheme may sound odd, but it makes
6779  * sense from a cache coherency perspective.  If only the host writes
6780  * to the buffer post rings, and only the chip writes to the rx status
6781  * rings, then cache lines never move beyond shared-modified state.
6782  * If both the host and chip were to write into the same ring, cache line
6783  * eviction could occur since both entities want it in an exclusive state.
6784  */
6785 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6786 {
6787 	struct tg3 *tp = tnapi->tp;
6788 	u32 work_mask, rx_std_posted = 0;
6789 	u32 std_prod_idx, jmb_prod_idx;
6790 	u32 sw_idx = tnapi->rx_rcb_ptr;
6791 	u16 hw_idx;
6792 	int received;
6793 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6794 
6795 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6796 	/*
6797 	 * We need to order the read of hw_idx and the read of
6798 	 * the opaque cookie.
6799 	 */
6800 	rmb();
6801 	work_mask = 0;
6802 	received = 0;
6803 	std_prod_idx = tpr->rx_std_prod_idx;
6804 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6805 	while (sw_idx != hw_idx && budget > 0) {
6806 		struct ring_info *ri;
6807 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6808 		unsigned int len;
6809 		struct sk_buff *skb;
6810 		dma_addr_t dma_addr;
6811 		u32 opaque_key, desc_idx, *post_ptr;
6812 		u8 *data;
6813 		u64 tstamp = 0;
6814 
6815 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6816 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6817 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6818 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6819 			dma_addr = dma_unmap_addr(ri, mapping);
6820 			data = ri->data;
6821 			post_ptr = &std_prod_idx;
6822 			rx_std_posted++;
6823 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6824 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6825 			dma_addr = dma_unmap_addr(ri, mapping);
6826 			data = ri->data;
6827 			post_ptr = &jmb_prod_idx;
6828 		} else
6829 			goto next_pkt_nopost;
6830 
6831 		work_mask |= opaque_key;
6832 
6833 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6834 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6835 		drop_it:
6836 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6837 				       desc_idx, *post_ptr);
6838 		drop_it_no_recycle:
6839 			/* Other statistics are kept track of by the card. */
6840 			tp->rx_dropped++;
6841 			goto next_pkt;
6842 		}
6843 
6844 		prefetch(data + TG3_RX_OFFSET(tp));
6845 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6846 		      ETH_FCS_LEN;
6847 
6848 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6849 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6850 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6851 		     RXD_FLAG_PTPSTAT_PTPV2) {
6852 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6853 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6854 		}
6855 
6856 		if (len > TG3_RX_COPY_THRESH(tp)) {
6857 			int skb_size;
6858 			unsigned int frag_size;
6859 
6860 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6861 						    *post_ptr, &frag_size);
6862 			if (skb_size < 0)
6863 				goto drop_it;
6864 
6865 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6866 					 PCI_DMA_FROMDEVICE);
6867 
6868 			/* Ensure that the update to the data happens
6869 			 * after the usage of the old DMA mapping.
6870 			 */
6871 			smp_wmb();
6872 
6873 			ri->data = NULL;
6874 
6875 			skb = build_skb(data, frag_size);
6876 			if (!skb) {
6877 				tg3_frag_free(frag_size != 0, data);
6878 				goto drop_it_no_recycle;
6879 			}
6880 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6881 		} else {
6882 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6883 				       desc_idx, *post_ptr);
6884 
6885 			skb = netdev_alloc_skb(tp->dev,
6886 					       len + TG3_RAW_IP_ALIGN);
6887 			if (skb == NULL)
6888 				goto drop_it_no_recycle;
6889 
6890 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6891 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6892 			memcpy(skb->data,
6893 			       data + TG3_RX_OFFSET(tp),
6894 			       len);
6895 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6896 		}
6897 
6898 		skb_put(skb, len);
6899 		if (tstamp)
6900 			tg3_hwclock_to_timestamp(tp, tstamp,
6901 						 skb_hwtstamps(skb));
6902 
6903 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6904 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6905 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6906 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6907 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6908 		else
6909 			skb_checksum_none_assert(skb);
6910 
6911 		skb->protocol = eth_type_trans(skb, tp->dev);
6912 
6913 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6914 		    skb->protocol != htons(ETH_P_8021Q)) {
6915 			dev_kfree_skb(skb);
6916 			goto drop_it_no_recycle;
6917 		}
6918 
6919 		if (desc->type_flags & RXD_FLAG_VLAN &&
6920 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6921 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6922 					       desc->err_vlan & RXD_VLAN_MASK);
6923 
6924 		napi_gro_receive(&tnapi->napi, skb);
6925 
6926 		received++;
6927 		budget--;
6928 
6929 next_pkt:
6930 		(*post_ptr)++;
6931 
6932 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6933 			tpr->rx_std_prod_idx = std_prod_idx &
6934 					       tp->rx_std_ring_mask;
6935 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6936 				     tpr->rx_std_prod_idx);
6937 			work_mask &= ~RXD_OPAQUE_RING_STD;
6938 			rx_std_posted = 0;
6939 		}
6940 next_pkt_nopost:
6941 		sw_idx++;
6942 		sw_idx &= tp->rx_ret_ring_mask;
6943 
6944 		/* Refresh hw_idx to see if there is new work */
6945 		if (sw_idx == hw_idx) {
6946 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6947 			rmb();
6948 		}
6949 	}
6950 
6951 	/* ACK the status ring. */
6952 	tnapi->rx_rcb_ptr = sw_idx;
6953 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6954 
6955 	/* Refill RX ring(s). */
6956 	if (!tg3_flag(tp, ENABLE_RSS)) {
6957 		/* Sync BD data before updating mailbox */
6958 		wmb();
6959 
6960 		if (work_mask & RXD_OPAQUE_RING_STD) {
6961 			tpr->rx_std_prod_idx = std_prod_idx &
6962 					       tp->rx_std_ring_mask;
6963 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6964 				     tpr->rx_std_prod_idx);
6965 		}
6966 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6967 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6968 					       tp->rx_jmb_ring_mask;
6969 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6970 				     tpr->rx_jmb_prod_idx);
6971 		}
6972 		mmiowb();
6973 	} else if (work_mask) {
6974 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6975 		 * updated before the producer indices can be updated.
6976 		 */
6977 		smp_wmb();
6978 
6979 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6980 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6981 
6982 		if (tnapi != &tp->napi[1]) {
6983 			tp->rx_refill = true;
6984 			napi_schedule(&tp->napi[1].napi);
6985 		}
6986 	}
6987 
6988 	return received;
6989 }
6990 
6991 static void tg3_poll_link(struct tg3 *tp)
6992 {
6993 	/* handle link change and other phy events */
6994 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6995 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6996 
6997 		if (sblk->status & SD_STATUS_LINK_CHG) {
6998 			sblk->status = SD_STATUS_UPDATED |
6999 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7000 			spin_lock(&tp->lock);
7001 			if (tg3_flag(tp, USE_PHYLIB)) {
7002 				tw32_f(MAC_STATUS,
7003 				     (MAC_STATUS_SYNC_CHANGED |
7004 				      MAC_STATUS_CFG_CHANGED |
7005 				      MAC_STATUS_MI_COMPLETION |
7006 				      MAC_STATUS_LNKSTATE_CHANGED));
7007 				udelay(40);
7008 			} else
7009 				tg3_setup_phy(tp, false);
7010 			spin_unlock(&tp->lock);
7011 		}
7012 	}
7013 }
7014 
7015 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7016 				struct tg3_rx_prodring_set *dpr,
7017 				struct tg3_rx_prodring_set *spr)
7018 {
7019 	u32 si, di, cpycnt, src_prod_idx;
7020 	int i, err = 0;
7021 
7022 	while (1) {
7023 		src_prod_idx = spr->rx_std_prod_idx;
7024 
7025 		/* Make sure updates to the rx_std_buffers[] entries and the
7026 		 * standard producer index are seen in the correct order.
7027 		 */
7028 		smp_rmb();
7029 
7030 		if (spr->rx_std_cons_idx == src_prod_idx)
7031 			break;
7032 
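		/* Copy count for this pass: a straight run up to the
		 * producer, or up to the end of the ring if the producer
		 * index has wrapped around past the consumer.
		 */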
7033 		if (spr->rx_std_cons_idx < src_prod_idx)
7034 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7035 		else
7036 			cpycnt = tp->rx_std_ring_mask + 1 -
7037 				 spr->rx_std_cons_idx;
7038 
7039 		cpycnt = min(cpycnt,
7040 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7041 
7042 		si = spr->rx_std_cons_idx;
7043 		di = dpr->rx_std_prod_idx;
7044 
7045 		for (i = di; i < di + cpycnt; i++) {
7046 			if (dpr->rx_std_buffers[i].data) {
7047 				cpycnt = i - di;
7048 				err = -ENOSPC;
7049 				break;
7050 			}
7051 		}
7052 
7053 		if (!cpycnt)
7054 			break;
7055 
7056 		/* Ensure that updates to the rx_std_buffers ring and the
7057 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7058 		 * ordered correctly WRT the skb check above.
7059 		 */
7060 		smp_rmb();
7061 
7062 		memcpy(&dpr->rx_std_buffers[di],
7063 		       &spr->rx_std_buffers[si],
7064 		       cpycnt * sizeof(struct ring_info));
7065 
7066 		for (i = 0; i < cpycnt; i++, di++, si++) {
7067 			struct tg3_rx_buffer_desc *sbd, *dbd;
7068 			sbd = &spr->rx_std[si];
7069 			dbd = &dpr->rx_std[di];
7070 			dbd->addr_hi = sbd->addr_hi;
7071 			dbd->addr_lo = sbd->addr_lo;
7072 		}
7073 
7074 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7075 				       tp->rx_std_ring_mask;
7076 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7077 				       tp->rx_std_ring_mask;
7078 	}
7079 
7080 	while (1) {
7081 		src_prod_idx = spr->rx_jmb_prod_idx;
7082 
7083 		/* Make sure updates to the rx_jmb_buffers[] entries and
7084 		 * the jumbo producer index are seen in the correct order.
7085 		 */
7086 		smp_rmb();
7087 
7088 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7089 			break;
7090 
7091 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7092 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7093 		else
7094 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7095 				 spr->rx_jmb_cons_idx;
7096 
7097 		cpycnt = min(cpycnt,
7098 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7099 
7100 		si = spr->rx_jmb_cons_idx;
7101 		di = dpr->rx_jmb_prod_idx;
7102 
7103 		for (i = di; i < di + cpycnt; i++) {
7104 			if (dpr->rx_jmb_buffers[i].data) {
7105 				cpycnt = i - di;
7106 				err = -ENOSPC;
7107 				break;
7108 			}
7109 		}
7110 
7111 		if (!cpycnt)
7112 			break;
7113 
7114 		/* Ensure that updates to the rx_jmb_buffers ring and the
7115 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7116 		 * ordered correctly WRT the skb check above.
7117 		 */
7118 		smp_rmb();
7119 
7120 		memcpy(&dpr->rx_jmb_buffers[di],
7121 		       &spr->rx_jmb_buffers[si],
7122 		       cpycnt * sizeof(struct ring_info));
7123 
7124 		for (i = 0; i < cpycnt; i++, di++, si++) {
7125 			struct tg3_rx_buffer_desc *sbd, *dbd;
7126 			sbd = &spr->rx_jmb[si].std;
7127 			dbd = &dpr->rx_jmb[di].std;
7128 			dbd->addr_hi = sbd->addr_hi;
7129 			dbd->addr_lo = sbd->addr_lo;
7130 		}
7131 
7132 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7133 				       tp->rx_jmb_ring_mask;
7134 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7135 				       tp->rx_jmb_ring_mask;
7136 	}
7137 
7138 	return err;
7139 }
7140 
7141 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7142 {
7143 	struct tg3 *tp = tnapi->tp;
7144 
7145 	/* run TX completion thread */
7146 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7147 		tg3_tx(tnapi);
7148 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7149 			return work_done;
7150 	}
7151 
7152 	if (!tnapi->rx_rcb_prod_idx)
7153 		return work_done;
7154 
7155 	/* run RX thread, within the bounds set by NAPI.
7156 	 * All RX "locking" is done by ensuring outside
7157 	 * code synchronizes with tg3->napi.poll()
7158 	 */
7159 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7160 		work_done += tg3_rx(tnapi, budget - work_done);
7161 
7162 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7163 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7164 		int i, err = 0;
7165 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7166 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7167 
7168 		tp->rx_refill = false;
7169 		for (i = 1; i <= tp->rxq_cnt; i++)
7170 			err |= tg3_rx_prodring_xfer(tp, dpr,
7171 						    &tp->napi[i].prodring);
7172 
7173 		wmb();
7174 
7175 		if (std_prod_idx != dpr->rx_std_prod_idx)
7176 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7177 				     dpr->rx_std_prod_idx);
7178 
7179 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7180 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7181 				     dpr->rx_jmb_prod_idx);
7182 
7183 		mmiowb();
7184 
7185 		if (err)
7186 			tw32_f(HOSTCC_MODE, tp->coal_now);
7187 	}
7188 
7189 	return work_done;
7190 }
7191 
7192 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7193 {
7194 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7195 		schedule_work(&tp->reset_task);
7196 }
7197 
7198 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7199 {
7200 	cancel_work_sync(&tp->reset_task);
7201 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7202 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7203 }
7204 
7205 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7206 {
7207 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7208 	struct tg3 *tp = tnapi->tp;
7209 	int work_done = 0;
7210 	struct tg3_hw_status *sblk = tnapi->hw_status;
7211 
7212 	while (1) {
7213 		work_done = tg3_poll_work(tnapi, work_done, budget);
7214 
7215 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7216 			goto tx_recovery;
7217 
7218 		if (unlikely(work_done >= budget))
7219 			break;
7220 
7221 		/* tp->last_tag is used in tg3_int_reenable() below
7222 		 * to tell the hw how much work has been processed,
7223 		 * so we must read it before checking for more work.
7224 		 */
7225 		tnapi->last_tag = sblk->status_tag;
7226 		tnapi->last_irq_tag = tnapi->last_tag;
7227 		rmb();
7228 
7229 		/* check for RX/TX work to do */
7230 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7231 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7232 
7233 			/* This test is not race free, but looping again
7234 			 * reduces the number of interrupts.
7235 			 */
7236 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7237 				continue;
7238 
7239 			napi_complete(napi);
7240 			/* Reenable interrupts. */
7241 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7242 
7243 			/* This test here is synchronized by napi_schedule()
7244 			 * and napi_complete() to close the race condition.
7245 			 */
7246 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7247 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7248 						  HOSTCC_MODE_ENABLE |
7249 						  tnapi->coal_now);
7250 			}
7251 			mmiowb();
7252 			break;
7253 		}
7254 	}
7255 
7256 	return work_done;
7257 
7258 tx_recovery:
7259 	/* work_done is guaranteed to be less than budget. */
7260 	napi_complete(napi);
7261 	tg3_reset_task_schedule(tp);
7262 	return work_done;
7263 }
7264 
7265 static void tg3_process_error(struct tg3 *tp)
7266 {
7267 	u32 val;
7268 	bool real_error = false;
7269 
7270 	if (tg3_flag(tp, ERROR_PROCESSED))
7271 		return;
7272 
7273 	/* Check Flow Attention register */
7274 	val = tr32(HOSTCC_FLOW_ATTN);
7275 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7276 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7277 		real_error = true;
7278 	}
7279 
7280 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7281 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7282 		real_error = true;
7283 	}
7284 
7285 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7286 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7287 		real_error = true;
7288 	}
7289 
7290 	if (!real_error)
7291 		return;
7292 
7293 	tg3_dump_state(tp);
7294 
7295 	tg3_flag_set(tp, ERROR_PROCESSED);
7296 	tg3_reset_task_schedule(tp);
7297 }
7298 
7299 static int tg3_poll(struct napi_struct *napi, int budget)
7300 {
7301 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7302 	struct tg3 *tp = tnapi->tp;
7303 	int work_done = 0;
7304 	struct tg3_hw_status *sblk = tnapi->hw_status;
7305 
7306 	while (1) {
7307 		if (sblk->status & SD_STATUS_ERROR)
7308 			tg3_process_error(tp);
7309 
7310 		tg3_poll_link(tp);
7311 
7312 		work_done = tg3_poll_work(tnapi, work_done, budget);
7313 
7314 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7315 			goto tx_recovery;
7316 
7317 		if (unlikely(work_done >= budget))
7318 			break;
7319 
7320 		if (tg3_flag(tp, TAGGED_STATUS)) {
7321 			/* tp->last_tag is used in tg3_int_reenable() below
7322 			 * to tell the hw how much work has been processed,
7323 			 * so we must read it before checking for more work.
7324 			 */
7325 			tnapi->last_tag = sblk->status_tag;
7326 			tnapi->last_irq_tag = tnapi->last_tag;
7327 			rmb();
7328 		} else
7329 			sblk->status &= ~SD_STATUS_UPDATED;
7330 
7331 		if (likely(!tg3_has_work(tnapi))) {
7332 			napi_complete(napi);
7333 			tg3_int_reenable(tnapi);
7334 			break;
7335 		}
7336 	}
7337 
7338 	return work_done;
7339 
7340 tx_recovery:
7341 	/* work_done is guaranteed to be less than budget. */
7342 	napi_complete(napi);
7343 	tg3_reset_task_schedule(tp);
7344 	return work_done;
7345 }
7346 
7347 static void tg3_napi_disable(struct tg3 *tp)
7348 {
7349 	int i;
7350 
7351 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7352 		napi_disable(&tp->napi[i].napi);
7353 }
7354 
7355 static void tg3_napi_enable(struct tg3 *tp)
7356 {
7357 	int i;
7358 
7359 	for (i = 0; i < tp->irq_cnt; i++)
7360 		napi_enable(&tp->napi[i].napi);
7361 }
7362 
7363 static void tg3_napi_init(struct tg3 *tp)
7364 {
7365 	int i;
7366 
7367 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7368 	for (i = 1; i < tp->irq_cnt; i++)
7369 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7370 }
7371 
7372 static void tg3_napi_fini(struct tg3 *tp)
7373 {
7374 	int i;
7375 
7376 	for (i = 0; i < tp->irq_cnt; i++)
7377 		netif_napi_del(&tp->napi[i].napi);
7378 }
7379 
7380 static inline void tg3_netif_stop(struct tg3 *tp)
7381 {
7382 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7383 	tg3_napi_disable(tp);
7384 	netif_carrier_off(tp->dev);
7385 	netif_tx_disable(tp->dev);
7386 }
7387 
7388 /* tp->lock must be held */
7389 static inline void tg3_netif_start(struct tg3 *tp)
7390 {
7391 	tg3_ptp_resume(tp);
7392 
7393 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7394 	 * appropriate so long as all callers are assured to
7395 	 * have free tx slots (such as after tg3_init_hw)
7396 	 */
7397 	netif_tx_wake_all_queues(tp->dev);
7398 
7399 	if (tp->link_up)
7400 		netif_carrier_on(tp->dev);
7401 
7402 	tg3_napi_enable(tp);
7403 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7404 	tg3_enable_ints(tp);
7405 }
7406 
7407 static void tg3_irq_quiesce(struct tg3 *tp)
7408 {
7409 	int i;
7410 
7411 	BUG_ON(tp->irq_sync);
7412 
7413 	tp->irq_sync = 1;
7414 	smp_mb();
7415 
7416 	for (i = 0; i < tp->irq_cnt; i++)
7417 		synchronize_irq(tp->napi[i].irq_vec);
7418 }
7419 
7420 /* Fully shut down all tg3 driver activity elsewhere in the system.
7421  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
7422  * with as well.  Most of the time this is not necessary, except when
7423  * shutting down the device.
7424  */
7425 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7426 {
7427 	spin_lock_bh(&tp->lock);
7428 	if (irq_sync)
7429 		tg3_irq_quiesce(tp);
7430 }
7431 
7432 static inline void tg3_full_unlock(struct tg3 *tp)
7433 {
7434 	spin_unlock_bh(&tp->lock);
7435 }
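
/* Editor's sketch (not driver code): typical use of the full lock
 * around device reconfiguration, requesting IRQ synchronization when
 * in-flight handlers must have completed before proceeding.
 */
#if 0
	tg3_full_lock(tp, 1);	/* irq_sync != 0: also quiesce IRQs */
	/* ... reconfigure or shut down the device ... */
	tg3_full_unlock(tp);
#endif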
7436 
7437 /* One-shot MSI handler - Chip automatically disables interrupt
7438  * after sending MSI so driver doesn't have to do it.
7439  */
7440 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7441 {
7442 	struct tg3_napi *tnapi = dev_id;
7443 	struct tg3 *tp = tnapi->tp;
7444 
7445 	prefetch(tnapi->hw_status);
7446 	if (tnapi->rx_rcb)
7447 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7448 
7449 	if (likely(!tg3_irq_sync(tp)))
7450 		napi_schedule(&tnapi->napi);
7451 
7452 	return IRQ_HANDLED;
7453 }
7454 
7455 /* MSI ISR - No need to check for interrupt sharing and no need to
7456  * flush status block and interrupt mailbox. PCI ordering rules
7457  * guarantee that MSI will arrive after the status block.
7458  */
7459 static irqreturn_t tg3_msi(int irq, void *dev_id)
7460 {
7461 	struct tg3_napi *tnapi = dev_id;
7462 	struct tg3 *tp = tnapi->tp;
7463 
7464 	prefetch(tnapi->hw_status);
7465 	if (tnapi->rx_rcb)
7466 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7467 	/*
7468 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7469 	 * chip-internal interrupt pending events.
7470 	 * Writing a non-zero value to intr-mbox-0 additionally tells the
7471 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7472 	 * event coalescing.
7473 	 */
7474 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7475 	if (likely(!tg3_irq_sync(tp)))
7476 		napi_schedule(&tnapi->napi);
7477 
7478 	return IRQ_RETVAL(1);
7479 }
7480 
7481 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7482 {
7483 	struct tg3_napi *tnapi = dev_id;
7484 	struct tg3 *tp = tnapi->tp;
7485 	struct tg3_hw_status *sblk = tnapi->hw_status;
7486 	unsigned int handled = 1;
7487 
7488 	/* In INTx mode, the interrupt can arrive at the CPU before the
7489 	 * status block posted just prior to it has reached host memory.
7490 	 * Reading the PCI State register will confirm whether the
7491 	 * interrupt is ours and will flush the status block.
7492 	 */
7493 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7494 		if (tg3_flag(tp, CHIP_RESETTING) ||
7495 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7496 			handled = 0;
7497 			goto out;
7498 		}
7499 	}
7500 
7501 	/*
7502 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7503 	 * chip-internal interrupt pending events.
7504 	 * Writing a non-zero value to intr-mbox-0 additionally tells the
7505 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7506 	 * event coalescing.
7507 	 *
7508 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7509 	 * spurious interrupts.  The flush impacts performance but
7510 	 * excessive spurious interrupts can be worse in some cases.
7511 	 */
7512 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7513 	if (tg3_irq_sync(tp))
7514 		goto out;
7515 	sblk->status &= ~SD_STATUS_UPDATED;
7516 	if (likely(tg3_has_work(tnapi))) {
7517 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7518 		napi_schedule(&tnapi->napi);
7519 	} else {
7520 		/* No work, shared interrupt perhaps?  re-enable
7521 		 * interrupts, and flush that PCI write
7522 		 */
7523 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7524 			       0x00000000);
7525 	}
7526 out:
7527 	return IRQ_RETVAL(handled);
7528 }
7529 
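/* tg3_interrupt_tagged() below is the tagged-status variant of the
 * handler above: instead of testing SD_STATUS_UPDATED it compares the
 * status block's tag with the last tag the driver acknowledged, which
 * is how chips using tagged status coalesce updates without losing
 * events.
 */
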
7530 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7531 {
7532 	struct tg3_napi *tnapi = dev_id;
7533 	struct tg3 *tp = tnapi->tp;
7534 	struct tg3_hw_status *sblk = tnapi->hw_status;
7535 	unsigned int handled = 1;
7536 
7537 	/* In INTx mode, the interrupt can arrive at the CPU before the
7538 	 * status block posted just prior to it has reached host memory.
7539 	 * Reading the PCI State register will confirm whether the
7540 	 * interrupt is ours and will flush the status block.
7541 	 */
7542 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7543 		if (tg3_flag(tp, CHIP_RESETTING) ||
7544 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7545 			handled = 0;
7546 			goto out;
7547 		}
7548 	}
7549 
7550 	/*
7551 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7552 	 * chip-internal interrupt pending events.
7553 	 * Writing a non-zero value to intr-mbox-0 additionally tells the
7554 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7555 	 * event coalescing.
7556 	 *
7557 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7558 	 * spurious interrupts.  The flush impacts performance but
7559 	 * excessive spurious interrupts can be worse in some cases.
7560 	 */
7561 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7562 
7563 	/*
7564 	 * In a shared interrupt configuration, sometimes other devices'
7565 	 * interrupts will scream.  We record the current status tag here
7566 	 * so that the above check can report that the screaming interrupts
7567 	 * are unhandled.  Eventually they will be silenced.
7568 	 */
7569 	tnapi->last_irq_tag = sblk->status_tag;
7570 
7571 	if (tg3_irq_sync(tp))
7572 		goto out;
7573 
7574 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7575 
7576 	napi_schedule(&tnapi->napi);
7577 
7578 out:
7579 	return IRQ_RETVAL(handled);
7580 }
7581 
7582 /* ISR for interrupt test */
7583 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7584 {
7585 	struct tg3_napi *tnapi = dev_id;
7586 	struct tg3 *tp = tnapi->tp;
7587 	struct tg3_hw_status *sblk = tnapi->hw_status;
7588 
7589 	if ((sblk->status & SD_STATUS_UPDATED) ||
7590 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7591 		tg3_disable_ints(tp);
7592 		return IRQ_RETVAL(1);
7593 	}
7594 	return IRQ_RETVAL(0);
7595 }
7596 
7597 #ifdef CONFIG_NET_POLL_CONTROLLER
7598 static void tg3_poll_controller(struct net_device *dev)
7599 {
7600 	int i;
7601 	struct tg3 *tp = netdev_priv(dev);
7602 
7603 	if (tg3_irq_sync(tp))
7604 		return;
7605 
7606 	for (i = 0; i < tp->irq_cnt; i++)
7607 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7608 }
7609 #endif
7610 
7611 static void tg3_tx_timeout(struct net_device *dev)
7612 {
7613 	struct tg3 *tp = netdev_priv(dev);
7614 
7615 	if (netif_msg_tx_err(tp)) {
7616 		netdev_err(dev, "transmit timed out, resetting\n");
7617 		tg3_dump_state(tp);
7618 	}
7619 
7620 	tg3_reset_task_schedule(tp);
7621 }
7622 
7623 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7624 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7625 {
7626 	u32 base = (u32) mapping & 0xffffffff;
7627 
7628 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7629 }
7630 
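/* Worked example for tg3_4g_overflow_test() (illustrative numbers):
 * a buffer mapped at 0xffffff00 with len = 248 gives
 * base + len + 8 = 0x1_0000_0000, which wraps to 0 in 32-bit
 * arithmetic, so the test reports a 4GB-boundary crossing.  The same
 * buffer mapped 64KB lower stays below the boundary and passes.
 */
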
7631 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7632  * of any 4GB boundaries: 4G, 8G, etc.
7633  */
7634 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7635 					   u32 len, u32 mss)
7636 {
7637 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7638 		u32 base = (u32) mapping & 0xffffffff;
7639 
7640 		return ((base + len + (mss & 0x3fff)) < base);
7641 	}
7642 	return 0;
7643 }
7644 
7645 /* Test for DMA addresses > 40-bit */
7646 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647 					  int len)
7648 {
7649 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7650 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7651 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7652 	return 0;
7653 #else
7654 	return 0;
7655 #endif
7656 }
7657 
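/* Illustrative numbers for the 40-bit test above, assuming the
 * 40BIT_DMA_BUG flag is set: DMA_BIT_MASK(40) is 0xff_ffff_ffff, so a
 * mapping at 0xff_ffff_ff80 with len = 0x100 ends at 0x100_0000_0080
 * and trips the workaround, while the same buffer mapped at
 * 0xff_ffff_fe00 fits below the 40-bit limit and does not.
 */
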
7658 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7659 				 dma_addr_t mapping, u32 len, u32 flags,
7660 				 u32 mss, u32 vlan)
7661 {
7662 	txbd->addr_hi = ((u64) mapping >> 32);
7663 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7664 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7665 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7666 }
7667 
7668 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7669 			    dma_addr_t map, u32 len, u32 flags,
7670 			    u32 mss, u32 vlan)
7671 {
7672 	struct tg3 *tp = tnapi->tp;
7673 	bool hwbug = false;
7674 
7675 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7676 		hwbug = true;
7677 
7678 	if (tg3_4g_overflow_test(map, len))
7679 		hwbug = true;
7680 
7681 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7682 		hwbug = true;
7683 
7684 	if (tg3_40bit_overflow_test(tp, map, len))
7685 		hwbug = true;
7686 
7687 	if (tp->dma_limit) {
7688 		u32 prvidx = *entry;
7689 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7690 		while (len > tp->dma_limit && *budget) {
7691 			u32 frag_len = tp->dma_limit;
7692 			len -= tp->dma_limit;
7693 
7694 			/* Avoid the 8-byte DMA problem */
7695 			if (len <= 8) {
7696 				len += tp->dma_limit / 2;
7697 				frag_len = tp->dma_limit / 2;
7698 			}
7699 
7700 			tnapi->tx_buffers[*entry].fragmented = true;
7701 
7702 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7703 				      frag_len, tmp_flag, mss, vlan);
7704 			*budget -= 1;
7705 			prvidx = *entry;
7706 			*entry = NEXT_TX(*entry);
7707 
7708 			map += frag_len;
7709 		}
7710 
7711 		if (len) {
7712 			if (*budget) {
7713 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7714 					      len, flags, mss, vlan);
7715 				*budget -= 1;
7716 				*entry = NEXT_TX(*entry);
7717 			} else {
7718 				hwbug = true;
7719 				tnapi->tx_buffers[prvidx].fragmented = false;
7720 			}
7721 		}
7722 	} else {
7723 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7724 			      len, flags, mss, vlan);
7725 		*entry = NEXT_TX(*entry);
7726 	}
7727 
7728 	return hwbug;
7729 }
7730 
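/* Splitting example for tg3_tx_frag_set() above (illustrative,
 * assuming tp->dma_limit = 4096): a 4100-byte fragment would leave a
 * 4-byte tail that trips the 8-byte rule, so the loop instead emits a
 * 2048-byte BD followed by a 2052-byte BD.  Each loop-emitted BD is
 * flagged ->fragmented so that tg3_tx_skb_unmap() below can walk past
 * the extra descriptors while unmapping the buffer only once.
 */
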
7731 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7732 {
7733 	int i;
7734 	struct sk_buff *skb;
7735 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7736 
7737 	skb = txb->skb;
7738 	txb->skb = NULL;
7739 
7740 	pci_unmap_single(tnapi->tp->pdev,
7741 			 dma_unmap_addr(txb, mapping),
7742 			 skb_headlen(skb),
7743 			 PCI_DMA_TODEVICE);
7744 
7745 	while (txb->fragmented) {
7746 		txb->fragmented = false;
7747 		entry = NEXT_TX(entry);
7748 		txb = &tnapi->tx_buffers[entry];
7749 	}
7750 
7751 	for (i = 0; i <= last; i++) {
7752 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7753 
7754 		entry = NEXT_TX(entry);
7755 		txb = &tnapi->tx_buffers[entry];
7756 
7757 		pci_unmap_page(tnapi->tp->pdev,
7758 			       dma_unmap_addr(txb, mapping),
7759 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7760 
7761 		while (txb->fragmented) {
7762 			txb->fragmented = false;
7763 			entry = NEXT_TX(entry);
7764 			txb = &tnapi->tx_buffers[entry];
7765 		}
7766 	}
7767 }
7768 
7769 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7770 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7771 				       struct sk_buff **pskb,
7772 				       u32 *entry, u32 *budget,
7773 				       u32 base_flags, u32 mss, u32 vlan)
7774 {
7775 	struct tg3 *tp = tnapi->tp;
7776 	struct sk_buff *new_skb, *skb = *pskb;
7777 	dma_addr_t new_addr = 0;
7778 	int ret = 0;
7779 
7780 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7781 		new_skb = skb_copy(skb, GFP_ATOMIC);
7782 	else {
7783 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7784 
7785 		new_skb = skb_copy_expand(skb,
7786 					  skb_headroom(skb) + more_headroom,
7787 					  skb_tailroom(skb), GFP_ATOMIC);
7788 	}
7789 
7790 	if (!new_skb) {
7791 		ret = -1;
7792 	} else {
7793 		/* New SKB is guaranteed to be linear. */
7794 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7795 					  PCI_DMA_TODEVICE);
7796 		/* Make sure the mapping succeeded */
7797 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7798 			dev_kfree_skb(new_skb);
7799 			ret = -1;
7800 		} else {
7801 			u32 save_entry = *entry;
7802 
7803 			base_flags |= TXD_FLAG_END;
7804 
7805 			tnapi->tx_buffers[*entry].skb = new_skb;
7806 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7807 					   mapping, new_addr);
7808 
7809 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7810 					    new_skb->len, base_flags,
7811 					    mss, vlan)) {
7812 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7813 				dev_kfree_skb(new_skb);
7814 				ret = -1;
7815 			}
7816 		}
7817 	}
7818 
7819 	dev_kfree_skb(skb);
7820 	*pskb = new_skb;
7821 	return ret;
7822 }
7823 
7824 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7825 
7826 /* Use GSO to work around a rare TSO bug that may be triggered when the
7827  * TSO header is greater than 80 bytes.
7828  */
7829 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7830 {
7831 	struct sk_buff *segs, *nskb;
7832 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7833 
7834 	/* Estimate the number of fragments in the worst case */
7835 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7836 		netif_stop_queue(tp->dev);
7837 
7838 		/* netif_tx_stop_queue() must be done before checking
7839 		 * the tx index in tg3_tx_avail() below, because in
7840 		 * tg3_tx(), we update tx index before checking for
7841 		 * netif_tx_queue_stopped().
7842 		 */
7843 		smp_mb();
7844 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7845 			return NETDEV_TX_BUSY;
7846 
7847 		netif_wake_queue(tp->dev);
7848 	}
7849 
7850 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7851 	if (IS_ERR(segs))
7852 		goto tg3_tso_bug_end;
7853 
7854 	do {
7855 		nskb = segs;
7856 		segs = segs->next;
7857 		nskb->next = NULL;
7858 		tg3_start_xmit(nskb, tp->dev);
7859 	} while (segs);
7860 
7861 tg3_tso_bug_end:
7862 	dev_kfree_skb(skb);
7863 
7864 	return NETDEV_TX_OK;
7865 }
7866 
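/* The gso_segs * 3 estimate in tg3_tso_bug() is a worst-case guess at
 * descriptor usage, roughly one BD for each segment's headers plus a
 * couple for its payload; it is a heuristic safety margin, not a
 * hardware-defined limit.
 */
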
7867 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7868  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7869  */
7870 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7871 {
7872 	struct tg3 *tp = netdev_priv(dev);
7873 	u32 len, entry, base_flags, mss, vlan = 0;
7874 	u32 budget;
7875 	int i = -1, would_hit_hwbug;
7876 	dma_addr_t mapping;
7877 	struct tg3_napi *tnapi;
7878 	struct netdev_queue *txq;
7879 	unsigned int last;
7880 
7881 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7882 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7883 	if (tg3_flag(tp, ENABLE_TSS))
7884 		tnapi++;
7885 
7886 	budget = tg3_tx_avail(tnapi);
7887 
7888 	/* We are running in BH disabled context with netif_tx_lock
7889 	 * and TX reclaim runs via tp->napi.poll inside of a software
7890 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7891 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7892 	 */
7893 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7894 		if (!netif_tx_queue_stopped(txq)) {
7895 			netif_tx_stop_queue(txq);
7896 
7897 			/* This is a hard error, log it. */
7898 			netdev_err(dev,
7899 				   "BUG! Tx Ring full when queue awake!\n");
7900 		}
7901 		return NETDEV_TX_BUSY;
7902 	}
7903 
7904 	entry = tnapi->tx_prod;
7905 	base_flags = 0;
7906 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7907 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7908 
7909 	mss = skb_shinfo(skb)->gso_size;
7910 	if (mss) {
7911 		struct iphdr *iph;
7912 		u32 tcp_opt_len, hdr_len;
7913 
7914 		if (skb_header_cloned(skb) &&
7915 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7916 			goto drop;
7917 
7918 		iph = ip_hdr(skb);
7919 		tcp_opt_len = tcp_optlen(skb);
7920 
7921 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7922 
7923 		if (!skb_is_gso_v6(skb)) {
7924 			iph->check = 0;
7925 			iph->tot_len = htons(mss + hdr_len);
7926 		}
7927 
7928 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7929 		    tg3_flag(tp, TSO_BUG))
7930 			return tg3_tso_bug(tp, skb);
7931 
7932 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7933 			       TXD_FLAG_CPU_POST_DMA);
7934 
7935 		if (tg3_flag(tp, HW_TSO_1) ||
7936 		    tg3_flag(tp, HW_TSO_2) ||
7937 		    tg3_flag(tp, HW_TSO_3)) {
7938 			tcp_hdr(skb)->check = 0;
7939 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7940 		} else
7941 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7942 								 iph->daddr, 0,
7943 								 IPPROTO_TCP,
7944 								 0);
7945 
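		/* HW_TSO_3 parts carry the IP + TCP header length in spare
		 * descriptor bits: hdr_len bits 2-3 are packed into mss
		 * bits 14-15, bit 4 into flag bit 4, and bits 5-9 into
		 * flag bits 10-14, as encoded below.
		 */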
7946 		if (tg3_flag(tp, HW_TSO_3)) {
7947 			mss |= (hdr_len & 0xc) << 12;
7948 			if (hdr_len & 0x10)
7949 				base_flags |= 0x00000010;
7950 			base_flags |= (hdr_len & 0x3e0) << 5;
7951 		} else if (tg3_flag(tp, HW_TSO_2))
7952 			mss |= hdr_len << 9;
7953 		else if (tg3_flag(tp, HW_TSO_1) ||
7954 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7955 			if (tcp_opt_len || iph->ihl > 5) {
7956 				int tsflags;
7957 
7958 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7959 				mss |= (tsflags << 11);
7960 			}
7961 		} else {
7962 			if (tcp_opt_len || iph->ihl > 5) {
7963 				int tsflags;
7964 
7965 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7966 				base_flags |= tsflags << 12;
7967 			}
7968 		}
7969 	}
7970 
7971 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7972 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7973 		base_flags |= TXD_FLAG_JMB_PKT;
7974 
7975 	if (vlan_tx_tag_present(skb)) {
7976 		base_flags |= TXD_FLAG_VLAN;
7977 		vlan = vlan_tx_tag_get(skb);
7978 	}
7979 
7980 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7981 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7982 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7983 		base_flags |= TXD_FLAG_HWTSTAMP;
7984 	}
7985 
7986 	len = skb_headlen(skb);
7987 
7988 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7989 	if (pci_dma_mapping_error(tp->pdev, mapping))
7990 		goto drop;
7991 
7992 
7993 	tnapi->tx_buffers[entry].skb = skb;
7994 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7995 
7996 	would_hit_hwbug = 0;
7997 
7998 	if (tg3_flag(tp, 5701_DMA_BUG))
7999 		would_hit_hwbug = 1;
8000 
8001 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8002 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8003 			    mss, vlan)) {
8004 		would_hit_hwbug = 1;
8005 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8006 		u32 tmp_mss = mss;
8007 
8008 		if (!tg3_flag(tp, HW_TSO_1) &&
8009 		    !tg3_flag(tp, HW_TSO_2) &&
8010 		    !tg3_flag(tp, HW_TSO_3))
8011 			tmp_mss = 0;
8012 
8013 		/* Now loop through additional data
8014 		 * fragments, and queue them.
8015 		 */
8016 		last = skb_shinfo(skb)->nr_frags - 1;
8017 		for (i = 0; i <= last; i++) {
8018 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8019 
8020 			len = skb_frag_size(frag);
8021 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8022 						   len, DMA_TO_DEVICE);
8023 
8024 			tnapi->tx_buffers[entry].skb = NULL;
8025 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8026 					   mapping);
8027 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8028 				goto dma_error;
8029 
8030 			if (!budget ||
8031 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8032 					    len, base_flags |
8033 					    ((i == last) ? TXD_FLAG_END : 0),
8034 					    tmp_mss, vlan)) {
8035 				would_hit_hwbug = 1;
8036 				break;
8037 			}
8038 		}
8039 	}
8040 
8041 	if (would_hit_hwbug) {
8042 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8043 
8044 		/* If the workaround fails due to memory/mapping
8045 		 * failure, silently drop this packet.
8046 		 */
8047 		entry = tnapi->tx_prod;
8048 		budget = tg3_tx_avail(tnapi);
8049 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8050 						base_flags, mss, vlan))
8051 			goto drop_nofree;
8052 	}
8053 
8054 	skb_tx_timestamp(skb);
8055 	netdev_tx_sent_queue(txq, skb->len);
8056 
8057 	/* Sync BD data before updating mailbox */
8058 	wmb();
8059 
8060 	/* Packets are ready, update Tx producer idx local and on card. */
8061 	tw32_tx_mbox(tnapi->prodmbox, entry);
8062 
8063 	tnapi->tx_prod = entry;
8064 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8065 		netif_tx_stop_queue(txq);
8066 
8067 		/* netif_tx_stop_queue() must be done before checking
8068 		 * the tx index in tg3_tx_avail() below, because in
8069 		 * tg3_tx(), we update tx index before checking for
8070 		 * netif_tx_queue_stopped().
8071 		 */
8072 		smp_mb();
8073 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8074 			netif_tx_wake_queue(txq);
8075 	}
8076 
8077 	mmiowb();
8078 	return NETDEV_TX_OK;
8079 
8080 dma_error:
8081 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8082 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8083 drop:
8084 	dev_kfree_skb(skb);
8085 drop_nofree:
8086 	tp->tx_dropped++;
8087 	return NETDEV_TX_OK;
8088 }
8089 
8090 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8091 {
8092 	if (enable) {
8093 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8094 				  MAC_MODE_PORT_MODE_MASK);
8095 
8096 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8097 
8098 		if (!tg3_flag(tp, 5705_PLUS))
8099 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8100 
8101 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8102 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8103 		else
8104 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8105 	} else {
8106 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8107 
8108 		if (tg3_flag(tp, 5705_PLUS) ||
8109 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8110 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8111 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8112 	}
8113 
8114 	tw32(MAC_MODE, tp->mac_mode);
8115 	udelay(40);
8116 }
8117 
8118 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8119 {
8120 	u32 val, bmcr, mac_mode, ptest = 0;
8121 
8122 	tg3_phy_toggle_apd(tp, false);
8123 	tg3_phy_toggle_automdix(tp, false);
8124 
8125 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8126 		return -EIO;
8127 
8128 	bmcr = BMCR_FULLDPLX;
8129 	switch (speed) {
8130 	case SPEED_10:
8131 		break;
8132 	case SPEED_100:
8133 		bmcr |= BMCR_SPEED100;
8134 		break;
8135 	case SPEED_1000:
8136 	default:
8137 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8138 			speed = SPEED_100;
8139 			bmcr |= BMCR_SPEED100;
8140 		} else {
8141 			speed = SPEED_1000;
8142 			bmcr |= BMCR_SPEED1000;
8143 		}
8144 	}
8145 
8146 	if (extlpbk) {
8147 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8148 			tg3_readphy(tp, MII_CTRL1000, &val);
8149 			val |= CTL1000_AS_MASTER |
8150 			       CTL1000_ENABLE_MASTER;
8151 			tg3_writephy(tp, MII_CTRL1000, val);
8152 		} else {
8153 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8154 				MII_TG3_FET_PTEST_TRIM_2;
8155 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8156 		}
8157 	} else
8158 		bmcr |= BMCR_LOOPBACK;
8159 
8160 	tg3_writephy(tp, MII_BMCR, bmcr);
8161 
8162 	/* The write needs to be flushed for the FETs */
8163 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8164 		tg3_readphy(tp, MII_BMCR, &bmcr);
8165 
8166 	udelay(40);
8167 
8168 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8169 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8170 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8171 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8172 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8173 
8174 		/* The write needs to be flushed for the AC131 */
8175 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8176 	}
8177 
8178 	/* Reset to prevent intermittently losing the first rx packet */
8179 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8180 	    tg3_flag(tp, 5780_CLASS)) {
8181 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8182 		udelay(10);
8183 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8184 	}
8185 
8186 	mac_mode = tp->mac_mode &
8187 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8188 	if (speed == SPEED_1000)
8189 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8190 	else
8191 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8192 
8193 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8194 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8195 
8196 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8197 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8198 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8199 			mac_mode |= MAC_MODE_LINK_POLARITY;
8200 
8201 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8202 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8203 	}
8204 
8205 	tw32(MAC_MODE, mac_mode);
8206 	udelay(40);
8207 
8208 	return 0;
8209 }
8210 
8211 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8212 {
8213 	struct tg3 *tp = netdev_priv(dev);
8214 
8215 	if (features & NETIF_F_LOOPBACK) {
8216 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8217 			return;
8218 
8219 		spin_lock_bh(&tp->lock);
8220 		tg3_mac_loopback(tp, true);
8221 		netif_carrier_on(tp->dev);
8222 		spin_unlock_bh(&tp->lock);
8223 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8224 	} else {
8225 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8226 			return;
8227 
8228 		spin_lock_bh(&tp->lock);
8229 		tg3_mac_loopback(tp, false);
8230 		/* Force link status check */
8231 		tg3_setup_phy(tp, true);
8232 		spin_unlock_bh(&tp->lock);
8233 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8234 	}
8235 }
8236 
8237 static netdev_features_t tg3_fix_features(struct net_device *dev,
8238 	netdev_features_t features)
8239 {
8240 	struct tg3 *tp = netdev_priv(dev);
8241 
8242 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8243 		features &= ~NETIF_F_ALL_TSO;
8244 
8245 	return features;
8246 }
8247 
8248 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8249 {
8250 	netdev_features_t changed = dev->features ^ features;
8251 
8252 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8253 		tg3_set_loopback(dev, features);
8254 
8255 	return 0;
8256 }
8257 
8258 static void tg3_rx_prodring_free(struct tg3 *tp,
8259 				 struct tg3_rx_prodring_set *tpr)
8260 {
8261 	int i;
8262 
8263 	if (tpr != &tp->napi[0].prodring) {
8264 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8265 		     i = (i + 1) & tp->rx_std_ring_mask)
8266 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8267 					tp->rx_pkt_map_sz);
8268 
8269 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8270 			for (i = tpr->rx_jmb_cons_idx;
8271 			     i != tpr->rx_jmb_prod_idx;
8272 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8273 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8274 						TG3_RX_JMB_MAP_SZ);
8275 			}
8276 		}
8277 
8278 		return;
8279 	}
8280 
8281 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8282 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8283 				tp->rx_pkt_map_sz);
8284 
8285 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8286 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8287 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8288 					TG3_RX_JMB_MAP_SZ);
8289 	}
8290 }
8291 
8292 /* Initialize rx rings for packet processing.
8293  *
8294  * The chip has been shut down and the driver detached from
8295  * the networking stack, so no interrupts or new tx packets will
8296  * end up in the driver.  tp->{tx,}lock are held and thus
8297  * we may not sleep.
8298  */
8299 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8300 				 struct tg3_rx_prodring_set *tpr)
8301 {
8302 	u32 i, rx_pkt_dma_sz;
8303 
8304 	tpr->rx_std_cons_idx = 0;
8305 	tpr->rx_std_prod_idx = 0;
8306 	tpr->rx_jmb_cons_idx = 0;
8307 	tpr->rx_jmb_prod_idx = 0;
8308 
8309 	if (tpr != &tp->napi[0].prodring) {
8310 		memset(&tpr->rx_std_buffers[0], 0,
8311 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8312 		if (tpr->rx_jmb_buffers)
8313 			memset(&tpr->rx_jmb_buffers[0], 0,
8314 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8315 		goto done;
8316 	}
8317 
8318 	/* Zero out all descriptors. */
8319 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8320 
8321 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8322 	if (tg3_flag(tp, 5780_CLASS) &&
8323 	    tp->dev->mtu > ETH_DATA_LEN)
8324 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8325 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8326 
8327 	/* Initialize invariants of the rings; we only set this
8328 	 * stuff once.  This works because the card does not
8329 	 * write into the rx buffer posting rings.
8330 	 */
8331 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8332 		struct tg3_rx_buffer_desc *rxd;
8333 
8334 		rxd = &tpr->rx_std[i];
8335 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8336 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8337 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8338 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8339 	}
8340 
8341 	/* Now allocate fresh SKBs for each rx ring. */
8342 	for (i = 0; i < tp->rx_pending; i++) {
8343 		unsigned int frag_size;
8344 
8345 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8346 				      &frag_size) < 0) {
8347 			netdev_warn(tp->dev,
8348 				    "Using a smaller RX standard ring. Only "
8349 				    "%d out of %d buffers were allocated "
8350 				    "successfully\n", i, tp->rx_pending);
8351 			if (i == 0)
8352 				goto initfail;
8353 			tp->rx_pending = i;
8354 			break;
8355 		}
8356 	}
8357 
8358 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8359 		goto done;
8360 
8361 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8362 
8363 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8364 		goto done;
8365 
8366 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8367 		struct tg3_rx_buffer_desc *rxd;
8368 
8369 		rxd = &tpr->rx_jmb[i].std;
8370 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8371 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8372 				  RXD_FLAG_JUMBO;
8373 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8374 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8375 	}
8376 
8377 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8378 		unsigned int frag_size;
8379 
8380 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8381 				      &frag_size) < 0) {
8382 			netdev_warn(tp->dev,
8383 				    "Using a smaller RX jumbo ring. Only %d "
8384 				    "out of %d buffers were allocated "
8385 				    "successfully\n", i, tp->rx_jumbo_pending);
8386 			if (i == 0)
8387 				goto initfail;
8388 			tp->rx_jumbo_pending = i;
8389 			break;
8390 		}
8391 	}
8392 
8393 done:
8394 	return 0;
8395 
8396 initfail:
8397 	tg3_rx_prodring_free(tp, tpr);
8398 	return -ENOMEM;
8399 }
8400 
8401 static void tg3_rx_prodring_fini(struct tg3 *tp,
8402 				 struct tg3_rx_prodring_set *tpr)
8403 {
8404 	kfree(tpr->rx_std_buffers);
8405 	tpr->rx_std_buffers = NULL;
8406 	kfree(tpr->rx_jmb_buffers);
8407 	tpr->rx_jmb_buffers = NULL;
8408 	if (tpr->rx_std) {
8409 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8410 				  tpr->rx_std, tpr->rx_std_mapping);
8411 		tpr->rx_std = NULL;
8412 	}
8413 	if (tpr->rx_jmb) {
8414 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8415 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8416 		tpr->rx_jmb = NULL;
8417 	}
8418 }
8419 
8420 static int tg3_rx_prodring_init(struct tg3 *tp,
8421 				struct tg3_rx_prodring_set *tpr)
8422 {
8423 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8424 				      GFP_KERNEL);
8425 	if (!tpr->rx_std_buffers)
8426 		return -ENOMEM;
8427 
8428 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8429 					 TG3_RX_STD_RING_BYTES(tp),
8430 					 &tpr->rx_std_mapping,
8431 					 GFP_KERNEL);
8432 	if (!tpr->rx_std)
8433 		goto err_out;
8434 
8435 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8436 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8437 					      GFP_KERNEL);
8438 		if (!tpr->rx_jmb_buffers)
8439 			goto err_out;
8440 
8441 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8442 						 TG3_RX_JMB_RING_BYTES(tp),
8443 						 &tpr->rx_jmb_mapping,
8444 						 GFP_KERNEL);
8445 		if (!tpr->rx_jmb)
8446 			goto err_out;
8447 	}
8448 
8449 	return 0;
8450 
8451 err_out:
8452 	tg3_rx_prodring_fini(tp, tpr);
8453 	return -ENOMEM;
8454 }
8455 
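/* How the rx prodring helpers above pair up over a device's lifetime
 * (a sketch of the open/close paths later in this file):
 *
 *	tg3_rx_prodring_init()	- allocate descriptor and bookkeeping
 *				  memory
 *	tg3_rx_prodring_alloc()	- post fresh rx buffers to the rings
 *	tg3_rx_prodring_free()	- reclaim the posted rx buffers
 *	tg3_rx_prodring_fini()	- release descriptor and bookkeeping
 *				  memory
 */
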
8456 /* Free up pending packets in all rx/tx rings.
8457  *
8458  * The chip has been shut down and the driver detached from
8459  * the networking stack, so no interrupts or new tx packets will
8460  * end up in the driver.  tp->{tx,}lock is not held and we are not
8461  * in an interrupt context and thus may sleep.
8462  */
8463 static void tg3_free_rings(struct tg3 *tp)
8464 {
8465 	int i, j;
8466 
8467 	for (j = 0; j < tp->irq_cnt; j++) {
8468 		struct tg3_napi *tnapi = &tp->napi[j];
8469 
8470 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8471 
8472 		if (!tnapi->tx_buffers)
8473 			continue;
8474 
8475 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8476 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8477 
8478 			if (!skb)
8479 				continue;
8480 
8481 			tg3_tx_skb_unmap(tnapi, i,
8482 					 skb_shinfo(skb)->nr_frags - 1);
8483 
8484 			dev_kfree_skb_any(skb);
8485 		}
8486 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8487 	}
8488 }
8489 
8490 /* Initialize tx/rx rings for packet processing.
8491  *
8492  * The chip has been shut down and the driver detached from
8493  * the networking stack, so no interrupts or new tx packets will
8494  * end up in the driver.  tp->{tx,}lock are held and thus
8495  * we may not sleep.
8496  */
8497 static int tg3_init_rings(struct tg3 *tp)
8498 {
8499 	int i;
8500 
8501 	/* Free up all the SKBs. */
8502 	tg3_free_rings(tp);
8503 
8504 	for (i = 0; i < tp->irq_cnt; i++) {
8505 		struct tg3_napi *tnapi = &tp->napi[i];
8506 
8507 		tnapi->last_tag = 0;
8508 		tnapi->last_irq_tag = 0;
8509 		tnapi->hw_status->status = 0;
8510 		tnapi->hw_status->status_tag = 0;
8511 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8512 
8513 		tnapi->tx_prod = 0;
8514 		tnapi->tx_cons = 0;
8515 		if (tnapi->tx_ring)
8516 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8517 
8518 		tnapi->rx_rcb_ptr = 0;
8519 		if (tnapi->rx_rcb)
8520 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8521 
8522 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8523 			tg3_free_rings(tp);
8524 			return -ENOMEM;
8525 		}
8526 	}
8527 
8528 	return 0;
8529 }
8530 
8531 static void tg3_mem_tx_release(struct tg3 *tp)
8532 {
8533 	int i;
8534 
8535 	for (i = 0; i < tp->irq_max; i++) {
8536 		struct tg3_napi *tnapi = &tp->napi[i];
8537 
8538 		if (tnapi->tx_ring) {
8539 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8540 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8541 			tnapi->tx_ring = NULL;
8542 		}
8543 
8544 		kfree(tnapi->tx_buffers);
8545 		tnapi->tx_buffers = NULL;
8546 	}
8547 }
8548 
8549 static int tg3_mem_tx_acquire(struct tg3 *tp)
8550 {
8551 	int i;
8552 	struct tg3_napi *tnapi = &tp->napi[0];
8553 
8554 	/* If multivector TSS is enabled, vector 0 does not handle
8555 	 * tx interrupts.  Don't allocate any resources for it.
8556 	 */
8557 	if (tg3_flag(tp, ENABLE_TSS))
8558 		tnapi++;
8559 
8560 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8561 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8562 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8563 		if (!tnapi->tx_buffers)
8564 			goto err_out;
8565 
8566 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8567 						    TG3_TX_RING_BYTES,
8568 						    &tnapi->tx_desc_mapping,
8569 						    GFP_KERNEL);
8570 		if (!tnapi->tx_ring)
8571 			goto err_out;
8572 	}
8573 
8574 	return 0;
8575 
8576 err_out:
8577 	tg3_mem_tx_release(tp);
8578 	return -ENOMEM;
8579 }
8580 
8581 static void tg3_mem_rx_release(struct tg3 *tp)
8582 {
8583 	int i;
8584 
8585 	for (i = 0; i < tp->irq_max; i++) {
8586 		struct tg3_napi *tnapi = &tp->napi[i];
8587 
8588 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8589 
8590 		if (!tnapi->rx_rcb)
8591 			continue;
8592 
8593 		dma_free_coherent(&tp->pdev->dev,
8594 				  TG3_RX_RCB_RING_BYTES(tp),
8595 				  tnapi->rx_rcb,
8596 				  tnapi->rx_rcb_mapping);
8597 		tnapi->rx_rcb = NULL;
8598 	}
8599 }
8600 
8601 static int tg3_mem_rx_acquire(struct tg3 *tp)
8602 {
8603 	unsigned int i, limit;
8604 
8605 	limit = tp->rxq_cnt;
8606 
8607 	/* If RSS is enabled, we need a (dummy) producer ring
8608 	 * set on vector zero.  This is the true hw prodring.
8609 	 */
8610 	if (tg3_flag(tp, ENABLE_RSS))
8611 		limit++;
8612 
8613 	for (i = 0; i < limit; i++) {
8614 		struct tg3_napi *tnapi = &tp->napi[i];
8615 
8616 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8617 			goto err_out;
8618 
8619 		/* If multivector RSS is enabled, vector 0
8620 		 * does not handle rx or tx interrupts.
8621 		 * Don't allocate any resources for it.
8622 		 */
8623 		if (!i && tg3_flag(tp, ENABLE_RSS))
8624 			continue;
8625 
8626 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8627 						    TG3_RX_RCB_RING_BYTES(tp),
8628 						    &tnapi->rx_rcb_mapping,
8629 						    GFP_KERNEL);
8630 		if (!tnapi->rx_rcb)
8631 			goto err_out;
8632 	}
8633 
8634 	return 0;
8635 
8636 err_out:
8637 	tg3_mem_rx_release(tp);
8638 	return -ENOMEM;
8639 }
8640 
8641 /*
8642  * Must not be invoked with interrupt sources disabled and
8643  * the hardware shut down.
8644  */
8645 static void tg3_free_consistent(struct tg3 *tp)
8646 {
8647 	int i;
8648 
8649 	for (i = 0; i < tp->irq_cnt; i++) {
8650 		struct tg3_napi *tnapi = &tp->napi[i];
8651 
8652 		if (tnapi->hw_status) {
8653 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8654 					  tnapi->hw_status,
8655 					  tnapi->status_mapping);
8656 			tnapi->hw_status = NULL;
8657 		}
8658 	}
8659 
8660 	tg3_mem_rx_release(tp);
8661 	tg3_mem_tx_release(tp);
8662 
8663 	if (tp->hw_stats) {
8664 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8665 				  tp->hw_stats, tp->stats_mapping);
8666 		tp->hw_stats = NULL;
8667 	}
8668 }
8669 
8670 /*
8671  * Must not be invoked with interrupt sources disabled and
8672  * the hardware shut down.  Can sleep.
8673  */
8674 static int tg3_alloc_consistent(struct tg3 *tp)
8675 {
8676 	int i;
8677 
8678 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8679 					   sizeof(struct tg3_hw_stats),
8680 					   &tp->stats_mapping, GFP_KERNEL);
8681 	if (!tp->hw_stats)
8682 		goto err_out;
8683 
8684 	for (i = 0; i < tp->irq_cnt; i++) {
8685 		struct tg3_napi *tnapi = &tp->napi[i];
8686 		struct tg3_hw_status *sblk;
8687 
8688 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8689 						       TG3_HW_STATUS_SIZE,
8690 						       &tnapi->status_mapping,
8691 						       GFP_KERNEL);
8692 		if (!tnapi->hw_status)
8693 			goto err_out;
8694 
8695 		sblk = tnapi->hw_status;
8696 
8697 		if (tg3_flag(tp, ENABLE_RSS)) {
8698 			u16 *prodptr = NULL;
8699 
8700 			/*
8701 			 * When RSS is enabled, the status block format changes
8702 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8703 			 * and "rx_mini_consumer" members get mapped to the
8704 			 * other three rx return ring producer indexes.
8705 			 */
8706 			switch (i) {
8707 			case 1:
8708 				prodptr = &sblk->idx[0].rx_producer;
8709 				break;
8710 			case 2:
8711 				prodptr = &sblk->rx_jumbo_consumer;
8712 				break;
8713 			case 3:
8714 				prodptr = &sblk->reserved;
8715 				break;
8716 			case 4:
8717 				prodptr = &sblk->rx_mini_consumer;
8718 				break;
8719 			}
8720 			tnapi->rx_rcb_prod_idx = prodptr;
8721 		} else {
8722 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8723 		}
8724 	}
8725 
8726 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8727 		goto err_out;
8728 
8729 	return 0;
8730 
8731 err_out:
8732 	tg3_free_consistent(tp);
8733 	return -ENOMEM;
8734 }
8735 
8736 #define MAX_WAIT_CNT 1000
8737 
8738 /* To stop a block, clear the enable bit and poll till it
8739  * clears.  tp->lock is held.
8740  */
8741 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8742 {
8743 	unsigned int i;
8744 	u32 val;
8745 
8746 	if (tg3_flag(tp, 5705_PLUS)) {
8747 		switch (ofs) {
8748 		case RCVLSC_MODE:
8749 		case DMAC_MODE:
8750 		case MBFREE_MODE:
8751 		case BUFMGR_MODE:
8752 		case MEMARB_MODE:
8753 			/* We can't enable/disable these bits of the
8754 			 * 5705/5750; just say success.
8755 			 */
8756 			return 0;
8757 
8758 		default:
8759 			break;
8760 		}
8761 	}
8762 
8763 	val = tr32(ofs);
8764 	val &= ~enable_bit;
8765 	tw32_f(ofs, val);
8766 
8767 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8768 		if (pci_channel_offline(tp->pdev)) {
8769 			dev_err(&tp->pdev->dev,
8770 				"tg3_stop_block device offline, "
8771 				"ofs=%lx enable_bit=%x\n",
8772 				ofs, enable_bit);
8773 			return -ENODEV;
8774 		}
8775 
8776 		udelay(100);
8777 		val = tr32(ofs);
8778 		if ((val & enable_bit) == 0)
8779 			break;
8780 	}
8781 
8782 	if (i == MAX_WAIT_CNT && !silent) {
8783 		dev_err(&tp->pdev->dev,
8784 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8785 			ofs, enable_bit);
8786 		return -ENODEV;
8787 	}
8788 
8789 	return 0;
8790 }
8791 
8792 /* tp->lock is held. */
8793 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8794 {
8795 	int i, err;
8796 
8797 	tg3_disable_ints(tp);
8798 
8799 	if (pci_channel_offline(tp->pdev)) {
8800 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8801 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8802 		err = -ENODEV;
8803 		goto err_no_dev;
8804 	}
8805 
8806 	tp->rx_mode &= ~RX_MODE_ENABLE;
8807 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8808 	udelay(10);
8809 
8810 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8811 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8812 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8813 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8814 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8815 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8816 
8817 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8818 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8819 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8820 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8821 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8822 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8823 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8824 
8825 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8826 	tw32_f(MAC_MODE, tp->mac_mode);
8827 	udelay(40);
8828 
8829 	tp->tx_mode &= ~TX_MODE_ENABLE;
8830 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8831 
8832 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8833 		udelay(100);
8834 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8835 			break;
8836 	}
8837 	if (i >= MAX_WAIT_CNT) {
8838 		dev_err(&tp->pdev->dev,
8839 			"%s timed out, TX_MODE_ENABLE will not clear "
8840 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8841 		err |= -ENODEV;
8842 	}
8843 
8844 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8845 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8846 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8847 
8848 	tw32(FTQ_RESET, 0xffffffff);
8849 	tw32(FTQ_RESET, 0x00000000);
8850 
8851 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8852 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8853 
8854 err_no_dev:
8855 	for (i = 0; i < tp->irq_cnt; i++) {
8856 		struct tg3_napi *tnapi = &tp->napi[i];
8857 		if (tnapi->hw_status)
8858 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8859 	}
8860 
8861 	return err;
8862 }
8863 
8864 /* Save PCI command register before chip reset */
8865 static void tg3_save_pci_state(struct tg3 *tp)
8866 {
8867 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8868 }
8869 
8870 /* Restore PCI state after chip reset */
8871 static void tg3_restore_pci_state(struct tg3 *tp)
8872 {
8873 	u32 val;
8874 
8875 	/* Re-enable indirect register accesses. */
8876 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8877 			       tp->misc_host_ctrl);
8878 
8879 	/* Set MAX PCI retry to zero. */
8880 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8881 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8882 	    tg3_flag(tp, PCIX_MODE))
8883 		val |= PCISTATE_RETRY_SAME_DMA;
8884 	/* Allow reads and writes to the APE register and memory space. */
8885 	if (tg3_flag(tp, ENABLE_APE))
8886 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8887 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8888 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8889 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8890 
8891 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8892 
8893 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8894 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8895 				      tp->pci_cacheline_sz);
8896 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8897 				      tp->pci_lat_timer);
8898 	}
8899 
8900 	/* Make sure PCI-X relaxed ordering bit is clear. */
8901 	if (tg3_flag(tp, PCIX_MODE)) {
8902 		u16 pcix_cmd;
8903 
8904 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8905 				     &pcix_cmd);
8906 		pcix_cmd &= ~PCI_X_CMD_ERO;
8907 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8908 				      pcix_cmd);
8909 	}
8910 
8911 	if (tg3_flag(tp, 5780_CLASS)) {
8912 
8913 		/* Chip reset on 5780 will reset MSI enable bit,
8914 		 * so we need to restore it.
8915 		 */
8916 		if (tg3_flag(tp, USING_MSI)) {
8917 			u16 ctrl;
8918 
8919 			pci_read_config_word(tp->pdev,
8920 					     tp->msi_cap + PCI_MSI_FLAGS,
8921 					     &ctrl);
8922 			pci_write_config_word(tp->pdev,
8923 					      tp->msi_cap + PCI_MSI_FLAGS,
8924 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8925 			val = tr32(MSGINT_MODE);
8926 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8927 		}
8928 	}
8929 }
8930 
8931 /* tp->lock is held. */
8932 static int tg3_chip_reset(struct tg3 *tp)
8933 {
8934 	u32 val;
8935 	void (*write_op)(struct tg3 *, u32, u32);
8936 	int i, err;
8937 
8938 	if (!pci_device_is_present(tp->pdev))
8939 		return -ENODEV;
8940 
8941 	tg3_nvram_lock(tp);
8942 
8943 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8944 
8945 	/* No matching tg3_nvram_unlock() after this because
8946 	 * chip reset below will undo the nvram lock.
8947 	 */
8948 	tp->nvram_lock_cnt = 0;
8949 
8950 	/* GRC_MISC_CFG core clock reset will clear the memory
8951 	 * enable bit in PCI register 4 and the MSI enable bit
8952 	 * on some chips, so we save relevant registers here.
8953 	 */
8954 	tg3_save_pci_state(tp);
8955 
8956 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8957 	    tg3_flag(tp, 5755_PLUS))
8958 		tw32(GRC_FASTBOOT_PC, 0);
8959 
8960 	/*
8961 	 * We must avoid the readl() that normally takes place.
8962 	 * It locks machines, causes machine checks, and other
8963 	 * fun things.  So, temporarily disable the 5701
8964 	 * hardware workaround while we do the reset.
8965 	 */
8966 	write_op = tp->write32;
8967 	if (write_op == tg3_write_flush_reg32)
8968 		tp->write32 = tg3_write32;
8969 
8970 	/* Prevent the irq handler from reading or writing PCI registers
8971 	 * during chip reset when the memory enable bit in the PCI command
8972 	 * register may be cleared.  The chip does not generate interrupt
8973 	 * at this time, but the irq handler may still be called due to irq
8974 	 * sharing or irqpoll.
8975 	 */
8976 	tg3_flag_set(tp, CHIP_RESETTING);
8977 	for (i = 0; i < tp->irq_cnt; i++) {
8978 		struct tg3_napi *tnapi = &tp->napi[i];
8979 		if (tnapi->hw_status) {
8980 			tnapi->hw_status->status = 0;
8981 			tnapi->hw_status->status_tag = 0;
8982 		}
8983 		tnapi->last_tag = 0;
8984 		tnapi->last_irq_tag = 0;
8985 	}
8986 	smp_mb();
8987 
8988 	for (i = 0; i < tp->irq_cnt; i++)
8989 		synchronize_irq(tp->napi[i].irq_vec);
8990 
8991 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8992 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8993 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8994 	}
8995 
8996 	/* do the reset */
8997 	val = GRC_MISC_CFG_CORECLK_RESET;
8998 
8999 	if (tg3_flag(tp, PCI_EXPRESS)) {
9000 		/* Force PCIe 1.0a mode */
9001 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9002 		    !tg3_flag(tp, 57765_PLUS) &&
9003 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9004 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9005 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9006 
9007 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9008 			tw32(GRC_MISC_CFG, (1 << 29));
9009 			val |= (1 << 29);
9010 		}
9011 	}
9012 
9013 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9014 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9015 		tw32(GRC_VCPU_EXT_CTRL,
9016 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9017 	}
9018 
9019 	/* Manage gphy power for all CPMU absent PCIe devices. */
9020 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9021 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9022 
9023 	tw32(GRC_MISC_CFG, val);
9024 
9025 	/* restore 5701 hardware bug workaround write method */
9026 	tp->write32 = write_op;
9027 
9028 	/* Unfortunately, we have to delay before the PCI read back.
9029 	 * Some 575X chips will not even respond to a PCI cfg access
9030 	 * when the reset command is given to the chip.
9031 	 *
9032 	 * How do these hardware designers expect things to work
9033 	 * properly if the PCI write is posted for a long period
9034 	 * of time?  It is always necessary to have some method by
9035 	 * which a register read back can occur to push the write
9036 	 * out which does the reset.
9037 	 *
9038 	 * For most tg3 variants the trick below was working.
9039 	 * Ho hum...
9040 	 */
9041 	udelay(120);
9042 
9043 	/* Flush PCI posted writes.  The normal MMIO registers
9044 	 * are inaccessible at this time so this is the only
9045 	 * way to do this reliably (actually, this is no longer
9046 	 * the case, see above).  I tried to use indirect
9047 	 * register read/write but this upset some 5701 variants.
9048 	 */
9049 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9050 
9051 	udelay(120);
9052 
9053 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9054 		u16 val16;
9055 
9056 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9057 			int j;
9058 			u32 cfg_val;
9059 
9060 			/* Wait for link training to complete.  */
9061 			for (j = 0; j < 5000; j++)
9062 				udelay(100);
9063 
9064 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9065 			pci_write_config_dword(tp->pdev, 0xc4,
9066 					       cfg_val | (1 << 15));
9067 		}
9068 
9069 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9070 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9071 		/*
9072 		 * Older PCIe devices only support the 128 byte
9073 		 * MPS setting.  Enforce the restriction.
9074 		 */
9075 		if (!tg3_flag(tp, CPMU_PRESENT))
9076 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9077 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9078 
9079 		/* Clear error status */
9080 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9081 				      PCI_EXP_DEVSTA_CED |
9082 				      PCI_EXP_DEVSTA_NFED |
9083 				      PCI_EXP_DEVSTA_FED |
9084 				      PCI_EXP_DEVSTA_URD);
9085 	}
9086 
9087 	tg3_restore_pci_state(tp);
9088 
9089 	tg3_flag_clear(tp, CHIP_RESETTING);
9090 	tg3_flag_clear(tp, ERROR_PROCESSED);
9091 
9092 	val = 0;
9093 	if (tg3_flag(tp, 5780_CLASS))
9094 		val = tr32(MEMARB_MODE);
9095 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9096 
9097 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9098 		tg3_stop_fw(tp);
9099 		tw32(0x5000, 0x400);
9100 	}
9101 
9102 	if (tg3_flag(tp, IS_SSB_CORE)) {
9103 		/*
9104 		 * BCM4785: In order to avoid repercussions from using the
9105 		 * potentially defective internal ROM, stop the Rx RISC CPU
9106 		 * (it is not needed).
9107 		 */
9108 		tg3_stop_fw(tp);
9109 		tg3_halt_cpu(tp, RX_CPU_BASE);
9110 	}
9111 
9112 	err = tg3_poll_fw(tp);
9113 	if (err)
9114 		return err;
9115 
9116 	tw32(GRC_MODE, tp->grc_mode);
9117 
9118 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9119 		val = tr32(0xc4);
9120 
9121 		tw32(0xc4, val | (1 << 15));
9122 	}
9123 
9124 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9125 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9126 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9127 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9128 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9129 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9130 	}
9131 
9132 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9133 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9134 		val = tp->mac_mode;
9135 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9136 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9137 		val = tp->mac_mode;
9138 	} else
9139 		val = 0;
9140 
9141 	tw32_f(MAC_MODE, val);
9142 	udelay(40);
9143 
9144 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9145 
9146 	tg3_mdio_start(tp);
9147 
9148 	if (tg3_flag(tp, PCI_EXPRESS) &&
9149 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9150 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9151 	    !tg3_flag(tp, 57765_PLUS)) {
9152 		val = tr32(0x7c00);
9153 
9154 		tw32(0x7c00, val | (1 << 25));
9155 	}
9156 
9157 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9158 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9159 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9160 	}
9161 
9162 	/* Reprobe ASF enable state.  */
9163 	tg3_flag_clear(tp, ENABLE_ASF);
9164 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9165 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9166 
9167 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9168 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9169 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9170 		u32 nic_cfg;
9171 
9172 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9173 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9174 			tg3_flag_set(tp, ENABLE_ASF);
9175 			tp->last_event_jiffies = jiffies;
9176 			if (tg3_flag(tp, 5750_PLUS))
9177 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9178 
9179 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9180 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9181 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9182 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9183 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9184 		}
9185 	}
9186 
9187 	return 0;
9188 }
9189 
9190 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9191 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9192 
9193 /* tp->lock is held. */
9194 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9195 {
9196 	int err;
9197 
9198 	tg3_stop_fw(tp);
9199 
9200 	tg3_write_sig_pre_reset(tp, kind);
9201 
9202 	tg3_abort_hw(tp, silent);
9203 	err = tg3_chip_reset(tp);
9204 
9205 	__tg3_set_mac_addr(tp, false);
9206 
9207 	tg3_write_sig_legacy(tp, kind);
9208 	tg3_write_sig_post_reset(tp, kind);
9209 
9210 	if (tp->hw_stats) {
9211 		/* Save the stats across chip resets... */
9212 		tg3_get_nstats(tp, &tp->net_stats_prev);
9213 		tg3_get_estats(tp, &tp->estats_prev);
9214 
9215 		/* And make sure the next sample is new data */
9216 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9217 	}
9218 
9219 	return err;
9220 }
9221 
9222 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9223 {
9224 	struct tg3 *tp = netdev_priv(dev);
9225 	struct sockaddr *addr = p;
9226 	int err = 0;
9227 	bool skip_mac_1 = false;
9228 
9229 	if (!is_valid_ether_addr(addr->sa_data))
9230 		return -EADDRNOTAVAIL;
9231 
9232 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9233 
9234 	if (!netif_running(dev))
9235 		return 0;
9236 
9237 	if (tg3_flag(tp, ENABLE_ASF)) {
9238 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9239 
9240 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9241 		addr0_low = tr32(MAC_ADDR_0_LOW);
9242 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9243 		addr1_low = tr32(MAC_ADDR_1_LOW);
9244 
9245 		/* Skip MAC addr 1 if ASF is using it. */
9246 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9247 		    !(addr1_high == 0 && addr1_low == 0))
9248 			skip_mac_1 = true;
9249 	}
9250 	spin_lock_bh(&tp->lock);
9251 	__tg3_set_mac_addr(tp, skip_mac_1);
9252 	spin_unlock_bh(&tp->lock);
9253 
9254 	return err;
9255 }
9256 
9257 /* tp->lock is held. */
9258 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9259 			   dma_addr_t mapping, u32 maxlen_flags,
9260 			   u32 nic_addr)
9261 {
9262 	tg3_write_mem(tp,
9263 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9264 		      ((u64) mapping >> 32));
9265 	tg3_write_mem(tp,
9266 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9267 		      ((u64) mapping & 0xffffffff));
9268 	tg3_write_mem(tp,
9269 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9270 		       maxlen_flags);
9271 
9272 	if (!tg3_flag(tp, 5705_PLUS))
9273 		tg3_write_mem(tp,
9274 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9275 			      nic_addr);
9276 }
9277 
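/* Example use of tg3_set_bdinfo(), mirroring tg3_tx_rcbs_init() later
 * in this file (shown only to make the argument order concrete):
 *
 *	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tnapi->tx_desc_mapping,
 *		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *		       NIC_SRAM_TX_BUFFER_DESC);
 *
 * i.e. the 64-bit host ring address split high/low, a maxlen/flags
 * word, and (on pre-5705 parts only) the ring's NIC-local address.
 */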
9278 
9279 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9280 {
9281 	int i = 0;
9282 
9283 	if (!tg3_flag(tp, ENABLE_TSS)) {
9284 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9285 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9286 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9287 	} else {
9288 		tw32(HOSTCC_TXCOL_TICKS, 0);
9289 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9290 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9291 
9292 		for (; i < tp->txq_cnt; i++) {
9293 			u32 reg;
9294 
9295 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9296 			tw32(reg, ec->tx_coalesce_usecs);
9297 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9298 			tw32(reg, ec->tx_max_coalesced_frames);
9299 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9300 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9301 		}
9302 	}
9303 
9304 	for (; i < tp->irq_max - 1; i++) {
9305 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9306 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9307 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9308 	}
9309 }
9310 
9311 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9312 {
9313 	int i = 0;
9314 	u32 limit = tp->rxq_cnt;
9315 
9316 	if (!tg3_flag(tp, ENABLE_RSS)) {
9317 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9318 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9319 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9320 		limit--;
9321 	} else {
9322 		tw32(HOSTCC_RXCOL_TICKS, 0);
9323 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9324 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9325 	}
9326 
9327 	for (; i < limit; i++) {
9328 		u32 reg;
9329 
9330 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9331 		tw32(reg, ec->rx_coalesce_usecs);
9332 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9333 		tw32(reg, ec->rx_max_coalesced_frames);
9334 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9335 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9336 	}
9337 
9338 	for (; i < tp->irq_max - 1; i++) {
9339 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9340 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9341 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9342 	}
9343 }
9344 
9345 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9346 {
9347 	tg3_coal_tx_init(tp, ec);
9348 	tg3_coal_rx_init(tp, ec);
9349 
9350 	if (!tg3_flag(tp, 5705_PLUS)) {
9351 		u32 val = ec->stats_block_coalesce_usecs;
9352 
9353 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9354 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9355 
9356 		if (!tp->link_up)
9357 			val = 0;
9358 
9359 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9360 	}
9361 }
9362 
9363 /* tp->lock is held. */
9364 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9365 {
9366 	u32 txrcb, limit;
9367 
9368 	/* Disable all transmit rings but the first. */
9369 	if (!tg3_flag(tp, 5705_PLUS))
9370 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9371 	else if (tg3_flag(tp, 5717_PLUS))
9372 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9373 	else if (tg3_flag(tp, 57765_CLASS) ||
9374 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9375 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9376 	else
9377 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9378 
9379 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9380 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9381 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9382 			      BDINFO_FLAGS_DISABLED);
9383 }
9384 
9385 /* tp->lock is held. */
9386 static void tg3_tx_rcbs_init(struct tg3 *tp)
9387 {
9388 	int i = 0;
9389 	u32 txrcb = NIC_SRAM_SEND_RCB;
9390 
9391 	if (tg3_flag(tp, ENABLE_TSS))
9392 		i++;
9393 
9394 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9395 		struct tg3_napi *tnapi = &tp->napi[i];
9396 
9397 		if (!tnapi->tx_ring)
9398 			continue;
9399 
9400 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9401 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9402 			       NIC_SRAM_TX_BUFFER_DESC);
9403 	}
9404 }
9405 
9406 /* tp->lock is held. */
9407 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9408 {
9409 	u32 rxrcb, limit;
9410 
9411 	/* Disable all receive return rings but the first. */
9412 	if (tg3_flag(tp, 5717_PLUS))
9413 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9414 	else if (!tg3_flag(tp, 5705_PLUS))
9415 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9416 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9417 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9418 		 tg3_flag(tp, 57765_CLASS))
9419 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9420 	else
9421 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9422 
9423 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9424 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9425 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9426 			      BDINFO_FLAGS_DISABLED);
9427 }
9428 
9429 /* tp->lock is held. */
9430 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9431 {
9432 	int i = 0;
9433 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9434 
9435 	if (tg3_flag(tp, ENABLE_RSS))
9436 		i++;
9437 
9438 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9439 		struct tg3_napi *tnapi = &tp->napi[i];
9440 
9441 		if (!tnapi->rx_rcb)
9442 			continue;
9443 
9444 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9445 			       (tp->rx_ret_ring_mask + 1) <<
9446 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9447 	}
9448 }
9449 
9450 /* tp->lock is held. */
9451 static void tg3_rings_reset(struct tg3 *tp)
9452 {
9453 	int i;
9454 	u32 stblk;
9455 	struct tg3_napi *tnapi = &tp->napi[0];
9456 
9457 	tg3_tx_rcbs_disable(tp);
9458 
9459 	tg3_rx_ret_rcbs_disable(tp);
9460 
9461 	/* Disable interrupts */
9462 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9463 	tp->napi[0].chk_msi_cnt = 0;
9464 	tp->napi[0].last_rx_cons = 0;
9465 	tp->napi[0].last_tx_cons = 0;
9466 
9467 	/* Zero mailbox registers. */
9468 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9469 		for (i = 1; i < tp->irq_max; i++) {
9470 			tp->napi[i].tx_prod = 0;
9471 			tp->napi[i].tx_cons = 0;
9472 			if (tg3_flag(tp, ENABLE_TSS))
9473 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9474 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9475 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9476 			tp->napi[i].chk_msi_cnt = 0;
9477 			tp->napi[i].last_rx_cons = 0;
9478 			tp->napi[i].last_tx_cons = 0;
9479 		}
9480 		if (!tg3_flag(tp, ENABLE_TSS))
9481 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9482 	} else {
9483 		tp->napi[0].tx_prod = 0;
9484 		tp->napi[0].tx_cons = 0;
9485 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9486 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9487 	}
9488 
9489 	/* Make sure the NIC-based send BD rings are disabled. */
9490 	if (!tg3_flag(tp, 5705_PLUS)) {
9491 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9492 		for (i = 0; i < 16; i++)
9493 			tw32_tx_mbox(mbox + i * 8, 0);
9494 	}
9495 
9496 	/* Clear status block in RAM. */
9497 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9498 
9499 	/* Set status block DMA address */
9500 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9501 	     ((u64) tnapi->status_mapping >> 32));
9502 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9503 	     ((u64) tnapi->status_mapping & 0xffffffff));
9504 
9505 	stblk = HOSTCC_STATBLCK_RING1;
9506 
9507 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9508 		u64 mapping = (u64)tnapi->status_mapping;
9509 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9510 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9511 		stblk += 8;
9512 
9513 		/* Clear status block in RAM. */
9514 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9515 	}
9516 
9517 	tg3_tx_rcbs_init(tp);
9518 	tg3_rx_ret_rcbs_init(tp);
9519 }
9520 
9521 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9522 {
9523 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9524 
9525 	if (!tg3_flag(tp, 5750_PLUS) ||
9526 	    tg3_flag(tp, 5780_CLASS) ||
9527 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9528 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9529 	    tg3_flag(tp, 57765_PLUS))
9530 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9531 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9532 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9533 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9534 	else
9535 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9536 
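	/* The standard-ring replenish threshold is the smaller of half the
	 * on-chip BD cache (capped at rx_std_max_post) and 1/8 of the
	 * configured ring size (at least one BD).
	 */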
9537 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9538 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9539 
9540 	val = min(nic_rep_thresh, host_rep_thresh);
9541 	tw32(RCVBDI_STD_THRESH, val);
9542 
9543 	if (tg3_flag(tp, 57765_PLUS))
9544 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9545 
9546 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9547 		return;
9548 
9549 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9550 
9551 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9552 
9553 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9554 	tw32(RCVBDI_JUMBO_THRESH, val);
9555 
9556 	if (tg3_flag(tp, 57765_PLUS))
9557 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9558 }
9559 
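/* Bit-reflected CRC-32 over 'len' bytes using the reflected 802.3
 * polynomial 0xedb88320, returning the complemented remainder; used
 * below to hash multicast addresses into the MAC hash registers.
 */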
9560 static inline u32 calc_crc(unsigned char *buf, int len)
9561 {
9562 	u32 reg;
9563 	u32 tmp;
9564 	int j, k;
9565 
9566 	reg = 0xffffffff;
9567 
9568 	for (j = 0; j < len; j++) {
9569 		reg ^= buf[j];
9570 
9571 		for (k = 0; k < 8; k++) {
9572 			tmp = reg & 0x01;
9573 
9574 			reg >>= 1;
9575 
9576 			if (tmp)
9577 				reg ^= 0xedb88320;
9578 		}
9579 	}
9580 
9581 	return ~reg;
9582 }
9583 
9584 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9585 {
9586 	/* accept or reject all multicast frames */
9587 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9588 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9589 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9590 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9591 }
9592 
9593 static void __tg3_set_rx_mode(struct net_device *dev)
9594 {
9595 	struct tg3 *tp = netdev_priv(dev);
9596 	u32 rx_mode;
9597 
9598 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9599 				  RX_MODE_KEEP_VLAN_TAG);
9600 
9601 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9602 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9603 	 * flag clear.
9604 	 */
9605 	if (!tg3_flag(tp, ENABLE_ASF))
9606 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9607 #endif
9608 
9609 	if (dev->flags & IFF_PROMISC) {
9610 		/* Promiscuous mode. */
9611 		rx_mode |= RX_MODE_PROMISC;
9612 	} else if (dev->flags & IFF_ALLMULTI) {
9613 		/* Accept all multicast. */
9614 		tg3_set_multi(tp, 1);
9615 	} else if (netdev_mc_empty(dev)) {
9616 		/* Reject all multicast. */
9617 		tg3_set_multi(tp, 0);
9618 	} else {
9619 		/* Accept one or more multicast(s). */
9620 		struct netdev_hw_addr *ha;
9621 		u32 mc_filter[4] = { 0, };
9622 		u32 regidx;
9623 		u32 bit;
9624 		u32 crc;
9625 
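		/* Hash each address into one of 128 bins: bits 6:5 of the
		 * 7-bit hash select one of the four 32-bit MAC_HASH
		 * registers, bits 4:0 select the bit within it.
		 */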
9626 		netdev_for_each_mc_addr(ha, dev) {
9627 			crc = calc_crc(ha->addr, ETH_ALEN);
9628 			bit = ~crc & 0x7f;
9629 			regidx = (bit & 0x60) >> 5;
9630 			bit &= 0x1f;
9631 			mc_filter[regidx] |= (1 << bit);
9632 		}
9633 
9634 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9635 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9636 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9637 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9638 	}
9639 
9640 	if (rx_mode != tp->rx_mode) {
9641 		tp->rx_mode = rx_mode;
9642 		tw32_f(MAC_RX_MODE, rx_mode);
9643 		udelay(10);
9644 	}
9645 }
9646 
9647 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9648 {
9649 	int i;
9650 
9651 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9652 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9653 }
9654 
9655 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9656 {
9657 	int i;
9658 
9659 	if (!tg3_flag(tp, SUPPORT_MSIX))
9660 		return;
9661 
9662 	if (tp->rxq_cnt == 1) {
9663 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9664 		return;
9665 	}
9666 
9667 	/* Validate the table against the current RX queue count */
9668 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9669 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9670 			break;
9671 	}
9672 
9673 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9674 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9675 }
9676 
9677 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9678 {
9679 	int i = 0;
9680 	u32 reg = MAC_RSS_INDIR_TBL_0;
9681 
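	/* Pack eight 4-bit queue indices into each 32-bit indirection
	 * register, first entry in the most significant nibble.
	 */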
9682 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9683 		u32 val = tp->rss_ind_tbl[i];
9684 		i++;
9685 		for (; i % 8; i++) {
9686 			val <<= 4;
9687 			val |= tp->rss_ind_tbl[i];
9688 		}
9689 		tw32(reg, val);
9690 		reg += 4;
9691 	}
9692 }
9693 
9694 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9695 {
9696 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9697 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9698 	else
9699 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9700 }
9701 
9702 /* tp->lock is held. */
9703 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9704 {
9705 	u32 val, rdmac_mode;
9706 	int i, err, limit;
9707 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9708 
9709 	tg3_disable_ints(tp);
9710 
9711 	tg3_stop_fw(tp);
9712 
9713 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9714 
9715 	if (tg3_flag(tp, INIT_COMPLETE))
9716 		tg3_abort_hw(tp, 1);
9717 
9718 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9719 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9720 		tg3_phy_pull_config(tp);
9721 		tg3_eee_pull_config(tp, NULL);
9722 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9723 	}
9724 
9725 	/* Enable MAC control of LPI */
9726 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9727 		tg3_setup_eee(tp);
9728 
9729 	if (reset_phy)
9730 		tg3_phy_reset(tp);
9731 
9732 	err = tg3_chip_reset(tp);
9733 	if (err)
9734 		return err;
9735 
9736 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9737 
9738 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9739 		val = tr32(TG3_CPMU_CTRL);
9740 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9741 		tw32(TG3_CPMU_CTRL, val);
9742 
9743 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9744 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9745 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9746 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9747 
9748 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9749 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9750 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9751 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9752 
9753 		val = tr32(TG3_CPMU_HST_ACC);
9754 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9755 		val |= CPMU_HST_ACC_MACCLK_6_25;
9756 		tw32(TG3_CPMU_HST_ACC, val);
9757 	}
9758 
9759 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9760 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9761 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9762 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9763 		tw32(PCIE_PWR_MGMT_THRESH, val);
9764 
9765 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9766 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9767 
9768 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9769 
9770 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9771 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9772 	}
9773 
9774 	if (tg3_flag(tp, L1PLLPD_EN)) {
9775 		u32 grc_mode = tr32(GRC_MODE);
9776 
9777 		/* Access the lower 1K of PL PCIE block registers. */
9778 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9779 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9780 
9781 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9782 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9783 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9784 
9785 		tw32(GRC_MODE, grc_mode);
9786 	}
9787 
9788 	if (tg3_flag(tp, 57765_CLASS)) {
9789 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9790 			u32 grc_mode = tr32(GRC_MODE);
9791 
9792 			/* Access the lower 1K of PL PCIE block registers. */
9793 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9794 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9795 
9796 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9797 				   TG3_PCIE_PL_LO_PHYCTL5);
9798 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9799 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9800 
9801 			tw32(GRC_MODE, grc_mode);
9802 		}
9803 
9804 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9805 			u32 grc_mode;
9806 
9807 			/* Fix transmit hangs */
9808 			val = tr32(TG3_CPMU_PADRNG_CTL);
9809 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9810 			tw32(TG3_CPMU_PADRNG_CTL, val);
9811 
9812 			grc_mode = tr32(GRC_MODE);
9813 
9814 			/* Access the lower 1K of DL PCIE block registers. */
9815 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9816 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9817 
9818 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9819 				   TG3_PCIE_DL_LO_FTSMAX);
9820 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9821 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9822 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9823 
9824 			tw32(GRC_MODE, grc_mode);
9825 		}
9826 
9827 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9828 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9829 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9830 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9831 	}
9832 
9833 	/* This works around an issue with Athlon chipsets on
9834 	 * B3 tigon3 silicon.  This bit has no effect on any
9835 	 * other revision.  But do not set this on PCI Express
9836 	 * chips and don't even touch the clocks if the CPMU is present.
9837 	 */
9838 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9839 		if (!tg3_flag(tp, PCI_EXPRESS))
9840 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9841 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9842 	}
9843 
9844 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9845 	    tg3_flag(tp, PCIX_MODE)) {
9846 		val = tr32(TG3PCI_PCISTATE);
9847 		val |= PCISTATE_RETRY_SAME_DMA;
9848 		tw32(TG3PCI_PCISTATE, val);
9849 	}
9850 
9851 	if (tg3_flag(tp, ENABLE_APE)) {
9852 		/* Allow reads and writes to the
9853 		 * APE register and memory space.
9854 		 */
9855 		val = tr32(TG3PCI_PCISTATE);
9856 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9857 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9858 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9859 		tw32(TG3PCI_PCISTATE, val);
9860 	}
9861 
9862 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9863 		/* Enable some hw fixes.  */
9864 		val = tr32(TG3PCI_MSI_DATA);
9865 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9866 		tw32(TG3PCI_MSI_DATA, val);
9867 	}
9868 
9869 	/* Descriptor ring init may access the NIC SRAM
9870 	 * area to set up the TX descriptors, so we can
9871 	 * only do this after the hardware has been
9872 	 * successfully reset.
9873 	 */
9874 	err = tg3_init_rings(tp);
9875 	if (err)
9876 		return err;
9877 
9878 	if (tg3_flag(tp, 57765_PLUS)) {
9879 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9880 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9881 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9882 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9883 		if (!tg3_flag(tp, 57765_CLASS) &&
9884 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9885 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9886 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9887 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9888 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9889 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9890 		/* This value is determined during the probe-time DMA
9891 		 * engine test, tg3_test_dma.
9892 		 */
9893 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9894 	}
9895 
9896 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9897 			  GRC_MODE_4X_NIC_SEND_RINGS |
9898 			  GRC_MODE_NO_TX_PHDR_CSUM |
9899 			  GRC_MODE_NO_RX_PHDR_CSUM);
9900 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9901 
9902 	/* Pseudo-header checksum is done by hardware logic and not
9903 	 * the offload processors, so make the chip do the pseudo-
9904 	 * header checksums on receive.  For transmit it is more
9905 	 * convenient to do the pseudo-header checksum in software
9906 	 * as Linux does that on transmit for us in all cases.
9907 	 */
9908 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9909 
9910 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9911 	if (tp->rxptpctl)
9912 		tw32(TG3_RX_PTP_CTL,
9913 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9914 
9915 	if (tg3_flag(tp, PTP_CAPABLE))
9916 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9917 
9918 	tw32(GRC_MODE, tp->grc_mode | val);
9919 
9920 	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
9921 	val = tr32(GRC_MISC_CFG);
9922 	val &= ~0xff;
9923 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9924 	tw32(GRC_MISC_CFG, val);
9925 
9926 	/* Initialize MBUF/DESC pool. */
9927 	if (tg3_flag(tp, 5750_PLUS)) {
9928 		/* Do nothing.  */
9929 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9930 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9931 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9932 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9933 		else
9934 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9935 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9936 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9937 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9938 		int fw_len;
9939 
9940 		fw_len = tp->fw_len;
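		/* Round the firmware length up to a 128-byte boundary. */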
9941 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9942 		tw32(BUFMGR_MB_POOL_ADDR,
9943 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9944 		tw32(BUFMGR_MB_POOL_SIZE,
9945 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9946 	}
9947 
9948 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9949 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9950 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9951 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9952 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9953 		tw32(BUFMGR_MB_HIGH_WATER,
9954 		     tp->bufmgr_config.mbuf_high_water);
9955 	} else {
9956 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9957 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9958 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9959 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9960 		tw32(BUFMGR_MB_HIGH_WATER,
9961 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9962 	}
9963 	tw32(BUFMGR_DMA_LOW_WATER,
9964 	     tp->bufmgr_config.dma_low_water);
9965 	tw32(BUFMGR_DMA_HIGH_WATER,
9966 	     tp->bufmgr_config.dma_high_water);
9967 
9968 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9969 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9970 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9971 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9972 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
9973 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9974 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9975 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9976 	tw32(BUFMGR_MODE, val);
9977 	for (i = 0; i < 2000; i++) {
9978 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9979 			break;
9980 		udelay(10);
9981 	}
9982 	if (i >= 2000) {
9983 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9984 		return -ENODEV;
9985 	}
9986 
9987 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9988 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9989 
9990 	tg3_setup_rxbd_thresholds(tp);
9991 
9992 	/* Initialize TG3_BDINFO's at:
9993 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9994 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9995 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9996 	 *
9997 	 * like so:
9998 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9999 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10000 	 *                              ring attribute flags
10001 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10002 	 *
10003 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10004 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10005 	 *
10006 	 * The size of each ring is fixed in the firmware, but the location is
10007 	 * configurable.
10008 	 */
10009 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10010 	     ((u64) tpr->rx_std_mapping >> 32));
10011 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10012 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10013 	if (!tg3_flag(tp, 5717_PLUS))
10014 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10015 		     NIC_SRAM_RX_BUFFER_DESC);
10016 
10017 	/* Disable the mini ring */
10018 	if (!tg3_flag(tp, 5705_PLUS))
10019 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10020 		     BDINFO_FLAGS_DISABLED);
10021 
10022 	/* Program the jumbo buffer descriptor ring control
10023 	 * blocks on those devices that have them.
10024 	 */
10025 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10026 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10027 
10028 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10029 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10030 			     ((u64) tpr->rx_jmb_mapping >> 32));
10031 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10032 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10033 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10034 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10035 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10036 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10037 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10038 			    tg3_flag(tp, 57765_CLASS) ||
10039 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10040 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10041 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10042 		} else {
10043 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10044 			     BDINFO_FLAGS_DISABLED);
10045 		}
10046 
10047 		if (tg3_flag(tp, 57765_PLUS)) {
10048 			val = TG3_RX_STD_RING_SIZE(tp);
10049 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10050 			val |= (TG3_RX_STD_DMA_SZ << 2);
10051 		} else
10052 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10053 	} else
10054 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10055 
10056 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10057 
10058 	tpr->rx_std_prod_idx = tp->rx_pending;
10059 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10060 
10061 	tpr->rx_jmb_prod_idx =
10062 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10063 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10064 
10065 	tg3_rings_reset(tp);
10066 
10067 	/* Initialize MAC address and backoff seed. */
10068 	__tg3_set_mac_addr(tp, false);
10069 
10070 	/* MTU + ethernet header + FCS + optional VLAN tag */
10071 	tw32(MAC_RX_MTU_SIZE,
10072 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10073 
10074 	/* The slot time is changed by tg3_setup_phy if we
10075 	 * run at gigabit with half duplex.
10076 	 */
10077 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10078 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10079 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10080 
10081 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10082 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10083 		val |= tr32(MAC_TX_LENGTHS) &
10084 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10085 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10086 
10087 	tw32(MAC_TX_LENGTHS, val);
10088 
10089 	/* Receive rules. */
10090 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10091 	tw32(RCVLPC_CONFIG, 0x0181);
10092 
10093 	/* Calculate the RDMAC_MODE setting early; we need it to
10094 	 * determine the RCVLPC_STATS_ENABLE mask.
10095 	 */
10096 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10097 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10098 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10099 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10100 		      RDMAC_MODE_LNGREAD_ENAB);
10101 
10102 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10103 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10104 
10105 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10106 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10107 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10108 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10109 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10110 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10111 
10112 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10113 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10114 		if (tg3_flag(tp, TSO_CAPABLE) &&
10115 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10116 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10117 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10118 			   !tg3_flag(tp, IS_5788)) {
10119 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10120 		}
10121 	}
10122 
10123 	if (tg3_flag(tp, PCI_EXPRESS))
10124 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10125 
10126 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10127 		tp->dma_limit = 0;
10128 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10129 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10130 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10131 		}
10132 	}
10133 
10134 	if (tg3_flag(tp, HW_TSO_1) ||
10135 	    tg3_flag(tp, HW_TSO_2) ||
10136 	    tg3_flag(tp, HW_TSO_3))
10137 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10138 
10139 	if (tg3_flag(tp, 57765_PLUS) ||
10140 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10141 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10142 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10143 
10144 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10145 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10146 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10147 
10148 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10149 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10150 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10151 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10152 	    tg3_flag(tp, 57765_PLUS)) {
10153 		u32 tgtreg;
10154 
10155 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10156 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10157 		else
10158 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10159 
10160 		val = tr32(tgtreg);
10161 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10162 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10163 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10164 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10165 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10166 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10167 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10168 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10169 		}
10170 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10171 	}
10172 
10173 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10174 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10175 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10176 		u32 tgtreg;
10177 
10178 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10179 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10180 		else
10181 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10182 
10183 		val = tr32(tgtreg);
10184 		tw32(tgtreg, val |
10185 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10186 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10187 	}
10188 
10189 	/* Receive/send statistics. */
10190 	if (tg3_flag(tp, 5750_PLUS)) {
10191 		val = tr32(RCVLPC_STATS_ENABLE);
10192 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10193 		tw32(RCVLPC_STATS_ENABLE, val);
10194 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10195 		   tg3_flag(tp, TSO_CAPABLE)) {
10196 		val = tr32(RCVLPC_STATS_ENABLE);
10197 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10198 		tw32(RCVLPC_STATS_ENABLE, val);
10199 	} else {
10200 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10201 	}
10202 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10203 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10204 	tw32(SNDDATAI_STATSCTRL,
10205 	     (SNDDATAI_SCTRL_ENABLE |
10206 	      SNDDATAI_SCTRL_FASTUPD));
10207 
10208 	/* Set up the host coalescing engine. */
10209 	tw32(HOSTCC_MODE, 0);
10210 	for (i = 0; i < 2000; i++) {
10211 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10212 			break;
10213 		udelay(10);
10214 	}
10215 
10216 	__tg3_set_coalesce(tp, &tp->coal);
10217 
10218 	if (!tg3_flag(tp, 5705_PLUS)) {
10219 		/* Status/statistics block address.  See tg3_timer,
10220 		 * the tg3_periodic_fetch_stats call there, and
10221 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10222 		 */
10223 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10224 		     ((u64) tp->stats_mapping >> 32));
10225 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10226 		     ((u64) tp->stats_mapping & 0xffffffff));
10227 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10228 
10229 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10230 
10231 		/* Clear statistics and status block memory areas */
10232 		for (i = NIC_SRAM_STATS_BLK;
10233 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10234 		     i += sizeof(u32)) {
10235 			tg3_write_mem(tp, i, 0);
10236 			udelay(40);
10237 		}
10238 	}
10239 
10240 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10241 
10242 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10243 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10244 	if (!tg3_flag(tp, 5705_PLUS))
10245 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10246 
10247 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10248 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10249 		/* reset to prevent losing 1st rx packet intermittently */
10250 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10251 		udelay(10);
10252 	}
10253 
10254 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10255 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10256 			MAC_MODE_FHDE_ENABLE;
10257 	if (tg3_flag(tp, ENABLE_APE))
10258 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10259 	if (!tg3_flag(tp, 5705_PLUS) &&
10260 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10261 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10262 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10263 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10264 	udelay(40);
10265 
10266 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10267 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10268 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10269 	 * whether used as inputs or outputs, are set by boot code after
10270 	 * reset.
10271 	 */
10272 	if (!tg3_flag(tp, IS_NIC)) {
10273 		u32 gpio_mask;
10274 
10275 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10276 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10277 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10278 
10279 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10280 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10281 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10282 
10283 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10284 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10285 
10286 		tp->grc_local_ctrl &= ~gpio_mask;
10287 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10288 
10289 		/* GPIO1 must be driven high for EEPROM write protect */
10290 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10291 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10292 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10293 	}
10294 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10295 	udelay(100);
10296 
10297 	if (tg3_flag(tp, USING_MSIX)) {
10298 		val = tr32(MSGINT_MODE);
10299 		val |= MSGINT_MODE_ENABLE;
10300 		if (tp->irq_cnt > 1)
10301 			val |= MSGINT_MODE_MULTIVEC_EN;
10302 		if (!tg3_flag(tp, 1SHOT_MSI))
10303 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10304 		tw32(MSGINT_MODE, val);
10305 	}
10306 
10307 	if (!tg3_flag(tp, 5705_PLUS)) {
10308 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10309 		udelay(40);
10310 	}
10311 
10312 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10313 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10314 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10315 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10316 	       WDMAC_MODE_LNGREAD_ENAB);
10317 
10318 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10319 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10320 		if (tg3_flag(tp, TSO_CAPABLE) &&
10321 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10322 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10323 			/* nothing */
10324 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10325 			   !tg3_flag(tp, IS_5788)) {
10326 			val |= WDMAC_MODE_RX_ACCEL;
10327 		}
10328 	}
10329 
10330 	/* Enable host coalescing bug fix */
10331 	if (tg3_flag(tp, 5755_PLUS))
10332 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10333 
10334 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10335 		val |= WDMAC_MODE_BURST_ALL_DATA;
10336 
10337 	tw32_f(WDMAC_MODE, val);
10338 	udelay(40);
10339 
10340 	if (tg3_flag(tp, PCIX_MODE)) {
10341 		u16 pcix_cmd;
10342 
10343 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10344 				     &pcix_cmd);
10345 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10346 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10347 			pcix_cmd |= PCI_X_CMD_READ_2K;
10348 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10349 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10350 			pcix_cmd |= PCI_X_CMD_READ_2K;
10351 		}
10352 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10353 				      pcix_cmd);
10354 	}
10355 
10356 	tw32_f(RDMAC_MODE, rdmac_mode);
10357 	udelay(40);
10358 
10359 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10360 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10361 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10362 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10363 				break;
10364 		}
10365 		if (i < TG3_NUM_RDMA_CHANNELS) {
10366 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10367 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10368 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10369 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10370 		}
10371 	}
10372 
10373 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10374 	if (!tg3_flag(tp, 5705_PLUS))
10375 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10376 
10377 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10378 		tw32(SNDDATAC_MODE,
10379 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10380 	else
10381 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10382 
10383 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10384 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10385 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10386 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10387 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10388 	tw32(RCVDBDI_MODE, val);
10389 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10390 	if (tg3_flag(tp, HW_TSO_1) ||
10391 	    tg3_flag(tp, HW_TSO_2) ||
10392 	    tg3_flag(tp, HW_TSO_3))
10393 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10394 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10395 	if (tg3_flag(tp, ENABLE_TSS))
10396 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10397 	tw32(SNDBDI_MODE, val);
10398 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10399 
10400 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10401 		err = tg3_load_5701_a0_firmware_fix(tp);
10402 		if (err)
10403 			return err;
10404 	}
10405 
10406 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10407 		/* Ignore any errors from the firmware download.  If the
10408 		 * download fails, the device will operate with EEE disabled.
10409 		 */
10410 		tg3_load_57766_firmware(tp);
10411 	}
10412 
10413 	if (tg3_flag(tp, TSO_CAPABLE)) {
10414 		err = tg3_load_tso_firmware(tp);
10415 		if (err)
10416 			return err;
10417 	}
10418 
10419 	tp->tx_mode = TX_MODE_ENABLE;
10420 
10421 	if (tg3_flag(tp, 5755_PLUS) ||
10422 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10423 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10424 
10425 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10426 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10427 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10428 		tp->tx_mode &= ~val;
10429 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10430 	}
10431 
10432 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10433 	udelay(100);
10434 
10435 	if (tg3_flag(tp, ENABLE_RSS)) {
10436 		tg3_rss_write_indir_tbl(tp);
10437 
10438 		/* Set up the fixed 40-byte "secret" RSS hash key. */
10439 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10440 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10441 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10442 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10443 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10444 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10445 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10446 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10447 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10448 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10449 	}
10450 
10451 	tp->rx_mode = RX_MODE_ENABLE;
10452 	if (tg3_flag(tp, 5755_PLUS))
10453 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10454 
10455 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10456 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10457 
10458 	if (tg3_flag(tp, ENABLE_RSS))
10459 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10460 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10461 			       RX_MODE_RSS_IPV6_HASH_EN |
10462 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10463 			       RX_MODE_RSS_IPV4_HASH_EN |
10464 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10465 
10466 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10467 	udelay(10);
10468 
10469 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10470 
10471 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10472 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10473 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10474 		udelay(10);
10475 	}
10476 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10477 	udelay(10);
10478 
10479 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10480 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10481 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10482 			/* Set the drive transmission level to 1.2V, but only
10483 			 * if the signal pre-emphasis bit is not set. */
10484 			val = tr32(MAC_SERDES_CFG);
10485 			val &= 0xfffff000;
10486 			val |= 0x880;
10487 			tw32(MAC_SERDES_CFG, val);
10488 		}
10489 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10490 			tw32(MAC_SERDES_CFG, 0x616000);
10491 	}
10492 
10493 	/* Prevent chip from dropping frames when flow control
10494 	 * is enabled.
10495 	 */
10496 	if (tg3_flag(tp, 57765_CLASS))
10497 		val = 1;
10498 	else
10499 		val = 2;
10500 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10501 
10502 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10503 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10504 		/* Use hardware link auto-negotiation */
10505 		tg3_flag_set(tp, HW_AUTONEG);
10506 	}
10507 
10508 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10509 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10510 		u32 tmp;
10511 
10512 		tmp = tr32(SERDES_RX_CTRL);
10513 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10514 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10515 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10516 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10517 	}
10518 
10519 	if (!tg3_flag(tp, USE_PHYLIB)) {
10520 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10521 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10522 
10523 		err = tg3_setup_phy(tp, false);
10524 		if (err)
10525 			return err;
10526 
10527 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10528 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10529 			u32 tmp;
10530 
10531 			/* Clear CRC stats. */
10532 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10533 				tg3_writephy(tp, MII_TG3_TEST1,
10534 					     tmp | MII_TG3_TEST1_CRC_EN);
10535 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10536 			}
10537 		}
10538 	}
10539 
10540 	__tg3_set_rx_mode(tp->dev);
10541 
10542 	/* Initialize receive rules. */
10543 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10544 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10545 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10546 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10547 
10548 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10549 		limit = 8;
10550 	else
10551 		limit = 16;
10552 	if (tg3_flag(tp, ENABLE_ASF))
10553 		limit -= 4;
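	/* Deliberate fall-through: starting at 'limit', each case clears
	 * one rule/value pair and cascades so that all rules from
	 * limit - 1 down to 4 are zeroed.
	 */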
10554 	switch (limit) {
10555 	case 16:
10556 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10557 	case 15:
10558 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10559 	case 14:
10560 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10561 	case 13:
10562 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10563 	case 12:
10564 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10565 	case 11:
10566 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10567 	case 10:
10568 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10569 	case 9:
10570 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10571 	case 8:
10572 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10573 	case 7:
10574 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10575 	case 6:
10576 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10577 	case 5:
10578 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10579 	case 4:
10580 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10581 	case 3:
10582 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10583 	case 2:
10584 	case 1:
10585 
10586 	default:
10587 		break;
10588 	}
10589 
10590 	if (tg3_flag(tp, ENABLE_APE))
10591 		/* Write our heartbeat update interval to APE. */
10592 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10593 				APE_HOST_HEARTBEAT_INT_DISABLE);
10594 
10595 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10596 
10597 	return 0;
10598 }
10599 
10600 /* Called at device open time to get the chip ready for
10601  * packet processing.  Invoked with tp->lock held.
10602  */
10603 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10604 {
10605 	/* Chip may have been just powered on. If so, the boot code may still
10606 	 * be running initialization. Wait for it to finish to avoid races in
10607 	 * accessing the hardware.
10608 	 */
10609 	tg3_enable_register_access(tp);
10610 	tg3_poll_fw(tp);
10611 
10612 	tg3_switch_clocks(tp);
10613 
10614 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10615 
10616 	return tg3_reset_hw(tp, reset_phy);
10617 }
10618 
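/* Read the TG3_SD_NUM_RECS sensor (OCIR) records out of the APE
 * scratchpad, zeroing any record without a valid signature or with
 * the ACTIVE flag clear.
 */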
10619 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10620 {
10621 	int i;
10622 
10623 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10624 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10625 
10626 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10627 		off += len;
10628 
10629 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10630 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10631 			memset(ocir, 0, TG3_OCIR_LEN);
10632 	}
10633 }
10634 
10635 /* sysfs attributes for hwmon */
10636 static ssize_t tg3_show_temp(struct device *dev,
10637 			     struct device_attribute *devattr, char *buf)
10638 {
10639 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10640 	struct tg3 *tp = dev_get_drvdata(dev);
10641 	u32 temperature;
10642 
10643 	spin_lock_bh(&tp->lock);
10644 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10645 				sizeof(temperature));
10646 	spin_unlock_bh(&tp->lock);
10647 	return sprintf(buf, "%u\n", temperature * 1000); /* hwmon wants millidegrees C */
10648 }
10649 
10650 
10651 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10652 			  TG3_TEMP_SENSOR_OFFSET);
10653 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10654 			  TG3_TEMP_CAUTION_OFFSET);
10655 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10656 			  TG3_TEMP_MAX_OFFSET);
10657 
10658 static struct attribute *tg3_attrs[] = {
10659 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10660 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10661 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10662 	NULL
10663 };
10664 ATTRIBUTE_GROUPS(tg3);
10665 
10666 static void tg3_hwmon_close(struct tg3 *tp)
10667 {
10668 	if (tp->hwmon_dev) {
10669 		hwmon_device_unregister(tp->hwmon_dev);
10670 		tp->hwmon_dev = NULL;
10671 	}
10672 }
10673 
10674 static void tg3_hwmon_open(struct tg3 *tp)
10675 {
10676 	int i;
10677 	u32 size = 0;
10678 	struct pci_dev *pdev = tp->pdev;
10679 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10680 
10681 	tg3_sd_scan_scratchpad(tp, ocirs);
10682 
10683 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10684 		if (!ocirs[i].src_data_length)
10685 			continue;
10686 
10687 		size += ocirs[i].src_hdr_length;
10688 		size += ocirs[i].src_data_length;
10689 	}
10690 
10691 	if (!size)
10692 		return;
10693 
10694 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10695 							  tp, tg3_groups);
10696 	if (IS_ERR(tp->hwmon_dev)) {
10697 		tp->hwmon_dev = NULL;
10698 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10699 	}
10700 }
10701 
10702 
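/* Fold a 32-bit hardware counter into a 64-bit (high/low) statistic,
 * bumping the high word whenever the low word wraps around.
 */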
10703 #define TG3_STAT_ADD32(PSTAT, REG) \
10704 do {	u32 __val = tr32(REG); \
10705 	(PSTAT)->low += __val; \
10706 	if ((PSTAT)->low < __val) \
10707 		(PSTAT)->high += 1; \
10708 } while (0)
10709 
10710 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10711 {
10712 	struct tg3_hw_stats *sp = tp->hw_stats;
10713 
10714 	if (!tp->link_up)
10715 		return;
10716 
10717 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10718 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10719 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10720 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10721 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10722 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10723 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10724 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10725 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10726 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10727 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10728 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10729 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10730 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10731 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10732 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10733 		u32 val;
10734 
10735 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10736 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10737 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10738 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10739 	}
10740 
10741 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10742 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10743 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10744 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10745 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10746 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10747 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10748 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10749 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10750 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10751 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10752 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10753 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10754 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10755 
10756 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10757 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10758 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10759 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10760 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10761 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10762 	} else {
10763 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10764 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10765 		if (val) {
10766 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10767 			sp->rx_discards.low += val;
10768 			if (sp->rx_discards.low < val)
10769 				sp->rx_discards.high += 1;
10770 		}
10771 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10772 	}
10773 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10774 }
10775 
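/* Work around lost MSIs: if a vector still has work pending and its
 * consumer indices have not moved since the previous timer tick, give
 * it one grace tick and then invoke the MSI handler directly.
 */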
10776 static void tg3_chk_missed_msi(struct tg3 *tp)
10777 {
10778 	u32 i;
10779 
10780 	for (i = 0; i < tp->irq_cnt; i++) {
10781 		struct tg3_napi *tnapi = &tp->napi[i];
10782 
10783 		if (tg3_has_work(tnapi)) {
10784 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10785 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10786 				if (tnapi->chk_msi_cnt < 1) {
10787 					tnapi->chk_msi_cnt++;
10788 					return;
10789 				}
10790 				tg3_msi(0, tnapi);
10791 			}
10792 		}
10793 		tnapi->chk_msi_cnt = 0;
10794 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10795 		tnapi->last_tx_cons = tnapi->tx_cons;
10796 	}
10797 }
10798 
10799 static void tg3_timer(unsigned long __opaque)
10800 {
10801 	struct tg3 *tp = (struct tg3 *) __opaque;
10802 
10803 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10804 		goto restart_timer;
10805 
10806 	spin_lock(&tp->lock);
10807 
10808 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10809 	    tg3_flag(tp, 57765_CLASS))
10810 		tg3_chk_missed_msi(tp);
10811 
10812 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10813 		/* BCM4785: Flush posted writes from GbE to host memory. */
10814 		tr32(HOSTCC_MODE);
10815 	}
10816 
10817 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10818 		/* All of this garbage is because, when using non-tagged
10819 		 * IRQ status, the mailbox/status_block protocol the chip
10820 		 * uses with the CPU is race prone.
10821 		 */
10822 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10823 			tw32(GRC_LOCAL_CTRL,
10824 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10825 		} else {
10826 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10827 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10828 		}
10829 
10830 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10831 			spin_unlock(&tp->lock);
10832 			tg3_reset_task_schedule(tp);
10833 			goto restart_timer;
10834 		}
10835 	}
10836 
10837 	/* This part only runs once per second. */
10838 	if (!--tp->timer_counter) {
10839 		if (tg3_flag(tp, 5705_PLUS))
10840 			tg3_periodic_fetch_stats(tp);
10841 
10842 		if (tp->setlpicnt && !--tp->setlpicnt)
10843 			tg3_phy_eee_enable(tp);
10844 
10845 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10846 			u32 mac_stat;
10847 			int phy_event;
10848 
10849 			mac_stat = tr32(MAC_STATUS);
10850 
10851 			phy_event = 0;
10852 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10853 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10854 					phy_event = 1;
10855 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10856 				phy_event = 1;
10857 
10858 			if (phy_event)
10859 				tg3_setup_phy(tp, false);
10860 		} else if (tg3_flag(tp, POLL_SERDES)) {
10861 			u32 mac_stat = tr32(MAC_STATUS);
10862 			int need_setup = 0;
10863 
10864 			if (tp->link_up &&
10865 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10866 				need_setup = 1;
10867 			}
10868 			if (!tp->link_up &&
10869 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10870 					 MAC_STATUS_SIGNAL_DET))) {
10871 				need_setup = 1;
10872 			}
10873 			if (need_setup) {
10874 				if (!tp->serdes_counter) {
10875 					tw32_f(MAC_MODE,
10876 					     (tp->mac_mode &
10877 					      ~MAC_MODE_PORT_MODE_MASK));
10878 					udelay(40);
10879 					tw32_f(MAC_MODE, tp->mac_mode);
10880 					udelay(40);
10881 				}
10882 				tg3_setup_phy(tp, false);
10883 			}
10884 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10885 			   tg3_flag(tp, 5780_CLASS)) {
10886 			tg3_serdes_parallel_detect(tp);
10887 		}
10888 
10889 		tp->timer_counter = tp->timer_multiplier;
10890 	}
10891 
10892 	/* Heartbeat is only sent once every 2 seconds.
10893 	 *
10894 	 * The heartbeat is to tell the ASF firmware that the host
10895 	 * driver is still alive.  In the event that the OS crashes,
10896 	 * ASF needs to reset the hardware to free up the FIFO space
10897 	 * that may be filled with rx packets destined for the host.
10898 	 * If the FIFO is full, ASF will no longer function properly.
10899 	 *
10900 	 * Unintended resets have been reported on real-time kernels
10901 	 * where the timer doesn't run on time.  Netpoll will also have
10902 	 * the same problem.
10903 	 *
10904 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10905 	 * to check the ring condition when the heartbeat is expiring
10906 	 * before doing the reset.  This will prevent most unintended
10907 	 * resets.
10908 	 */
10909 	if (!--tp->asf_counter) {
10910 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10911 			tg3_wait_for_event_ack(tp);
10912 
10913 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10914 				      FWCMD_NICDRV_ALIVE3);
10915 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10916 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10917 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10918 
10919 			tg3_generate_fw_event(tp);
10920 		}
10921 		tp->asf_counter = tp->asf_multiplier;
10922 	}
10923 
10924 	spin_unlock(&tp->lock);
10925 
10926 restart_timer:
10927 	tp->timer.expires = jiffies + tp->timer_offset;
10928 	add_timer(&tp->timer);
10929 }
10930 
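/* Tick at 1 Hz on chips with tagged status and no missed-MSI issues,
 * otherwise at 10 Hz.  timer_multiplier converts ticks back into the
 * once-per-second work in tg3_timer(), and asf_multiplier spaces the
 * ASF heartbeat TG3_FW_UPDATE_FREQ_SEC seconds apart.
 */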
10931 static void tg3_timer_init(struct tg3 *tp)
10932 {
10933 	if (tg3_flag(tp, TAGGED_STATUS) &&
10934 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10935 	    !tg3_flag(tp, 57765_CLASS))
10936 		tp->timer_offset = HZ;
10937 	else
10938 		tp->timer_offset = HZ / 10;
10939 
10940 	BUG_ON(tp->timer_offset > HZ);
10941 
10942 	tp->timer_multiplier = (HZ / tp->timer_offset);
10943 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10944 			     TG3_FW_UPDATE_FREQ_SEC;
10945 
10946 	init_timer(&tp->timer);
10947 	tp->timer.data = (unsigned long) tp;
10948 	tp->timer.function = tg3_timer;
10949 }
10950 
10951 static void tg3_timer_start(struct tg3 *tp)
10952 {
10953 	tp->asf_counter   = tp->asf_multiplier;
10954 	tp->timer_counter = tp->timer_multiplier;
10955 
10956 	tp->timer.expires = jiffies + tp->timer_offset;
10957 	add_timer(&tp->timer);
10958 }
10959 
10960 static void tg3_timer_stop(struct tg3 *tp)
10961 {
10962 	del_timer_sync(&tp->timer);
10963 }
10964 
10965 /* Restart hardware after configuration changes, self-test, etc.
10966  * Invoked with tp->lock held.
10967  */
10968 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10969 	__releases(tp->lock)
10970 	__acquires(tp->lock)
10971 {
10972 	int err;
10973 
10974 	err = tg3_init_hw(tp, reset_phy);
10975 	if (err) {
10976 		netdev_err(tp->dev,
10977 			   "Failed to re-initialize device, aborting\n");
10978 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10979 		tg3_full_unlock(tp);
10980 		tg3_timer_stop(tp);
10981 		tp->irq_sync = 0;
10982 		tg3_napi_enable(tp);
10983 		dev_close(tp->dev);
10984 		tg3_full_lock(tp, 0);
10985 	}
10986 	return err;
10987 }
10988 
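/* Process-context reset handler, scheduled through the reset_task work
 * when the driver decides the chip needs a full reinit (e.g. after a
 * TX timeout).  It does nothing if the device has been brought down in
 * the meantime.
 */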
10989 static void tg3_reset_task(struct work_struct *work)
10990 {
10991 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10992 	int err;
10993 
10994 	tg3_full_lock(tp, 0);
10995 
10996 	if (!netif_running(tp->dev)) {
10997 		tg3_flag_clear(tp, RESET_TASK_PENDING);
10998 		tg3_full_unlock(tp);
10999 		return;
11000 	}
11001 
11002 	tg3_full_unlock(tp);
11003 
11004 	tg3_phy_stop(tp);
11005 
11006 	tg3_netif_stop(tp);
11007 
11008 	tg3_full_lock(tp, 1);
11009 
11010 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11011 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11012 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11013 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11014 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11015 	}
11016 
11017 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11018 	err = tg3_init_hw(tp, true);
11019 	if (err)
11020 		goto out;
11021 
11022 	tg3_netif_start(tp);
11023 
11024 out:
11025 	tg3_full_unlock(tp);
11026 
11027 	if (!err)
11028 		tg3_phy_start(tp);
11029 
11030 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11031 }
11032 
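/* Install the handler for one interrupt vector.  MSI/MSI-X vectors get
 * a per-vector "-rx"/"-tx"/"-txrx" name and a non-shared handler;
 * legacy INTx uses a shared line, with the tagged-status variant when
 * the chip supports it.
 */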
11033 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11034 {
11035 	irq_handler_t fn;
11036 	unsigned long flags;
11037 	char *name;
11038 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11039 
11040 	if (tp->irq_cnt == 1)
11041 		name = tp->dev->name;
11042 	else {
11043 		name = &tnapi->irq_lbl[0];
11044 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11045 			snprintf(name, IFNAMSIZ,
11046 				 "%s-txrx-%d", tp->dev->name, irq_num);
11047 		else if (tnapi->tx_buffers)
11048 			snprintf(name, IFNAMSIZ,
11049 				 "%s-tx-%d", tp->dev->name, irq_num);
11050 		else if (tnapi->rx_rcb)
11051 			snprintf(name, IFNAMSIZ,
11052 				 "%s-rx-%d", tp->dev->name, irq_num);
11053 		else
11054 			snprintf(name, IFNAMSIZ,
11055 				 "%s-%d", tp->dev->name, irq_num);
11056 		name[IFNAMSIZ-1] = 0;
11057 	}
11058 
11059 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11060 		fn = tg3_msi;
11061 		if (tg3_flag(tp, 1SHOT_MSI))
11062 			fn = tg3_msi_1shot;
11063 		flags = 0;
11064 	} else {
11065 		fn = tg3_interrupt;
11066 		if (tg3_flag(tp, TAGGED_STATUS))
11067 			fn = tg3_interrupt_tagged;
11068 		flags = IRQF_SHARED;
11069 	}
11070 
11071 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11072 }
11073 
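/* Verify that the chip can actually deliver an interrupt: temporarily
 * install tg3_test_isr, kick host coalescing with coal_now to force an
 * interrupt, and poll the mailbox for up to ~50ms for evidence that it
 * arrived.
 */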
11074 static int tg3_test_interrupt(struct tg3 *tp)
11075 {
11076 	struct tg3_napi *tnapi = &tp->napi[0];
11077 	struct net_device *dev = tp->dev;
11078 	int err, i, intr_ok = 0;
11079 	u32 val;
11080 
11081 	if (!netif_running(dev))
11082 		return -ENODEV;
11083 
11084 	tg3_disable_ints(tp);
11085 
11086 	free_irq(tnapi->irq_vec, tnapi);
11087 
11088 	/*
11089 	 * Turn off MSI one shot mode.  Otherwise this test has no
11090 	 * observable way to know whether the interrupt was delivered.
11091 	 */
11092 	if (tg3_flag(tp, 57765_PLUS)) {
11093 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11094 		tw32(MSGINT_MODE, val);
11095 	}
11096 
11097 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11098 			  IRQF_SHARED, dev->name, tnapi);
11099 	if (err)
11100 		return err;
11101 
11102 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11103 	tg3_enable_ints(tp);
11104 
11105 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11106 	       tnapi->coal_now);
11107 
11108 	for (i = 0; i < 5; i++) {
11109 		u32 int_mbox, misc_host_ctrl;
11110 
11111 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11112 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11113 
11114 		if ((int_mbox != 0) ||
11115 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11116 			intr_ok = 1;
11117 			break;
11118 		}
11119 
11120 		if (tg3_flag(tp, 57765_PLUS) &&
11121 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11122 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11123 
11124 		msleep(10);
11125 	}
11126 
11127 	tg3_disable_ints(tp);
11128 
11129 	free_irq(tnapi->irq_vec, tnapi);
11130 
11131 	err = tg3_request_irq(tp, 0);
11132 
11133 	if (err)
11134 		return err;
11135 
11136 	if (intr_ok) {
11137 		/* Reenable MSI one shot mode. */
11138 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11139 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11140 			tw32(MSGINT_MODE, val);
11141 		}
11142 		return 0;
11143 	}
11144 
11145 	return -EIO;
11146 }
11147 
11148 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11149  * INTx mode is successfully restored.
11150  */
11151 static int tg3_test_msi(struct tg3 *tp)
11152 {
11153 	int err;
11154 	u16 pci_cmd;
11155 
11156 	if (!tg3_flag(tp, USING_MSI))
11157 		return 0;
11158 
11159 	/* Turn off SERR reporting in case MSI terminates with Master
11160 	 * Abort.
11161 	 */
11162 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11163 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11164 			      pci_cmd & ~PCI_COMMAND_SERR);
11165 
11166 	err = tg3_test_interrupt(tp);
11167 
11168 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11169 
11170 	if (!err)
11171 		return 0;
11172 
11173 	/* other failures */
11174 	if (err != -EIO)
11175 		return err;
11176 
11177 	/* MSI test failed, go back to INTx mode */
11178 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11179 		    "to INTx mode. Please report this failure to the PCI "
11180 		    "maintainer and include system chipset information\n");
11181 
11182 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11183 
11184 	pci_disable_msi(tp->pdev);
11185 
11186 	tg3_flag_clear(tp, USING_MSI);
11187 	tp->napi[0].irq_vec = tp->pdev->irq;
11188 
11189 	err = tg3_request_irq(tp, 0);
11190 	if (err)
11191 		return err;
11192 
11193 	/* Need to reset the chip because the MSI cycle may have terminated
11194 	 * with Master Abort.
11195 	 */
11196 	tg3_full_lock(tp, 1);
11197 
11198 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11199 	err = tg3_init_hw(tp, true);
11200 
11201 	tg3_full_unlock(tp);
11202 
11203 	if (err)
11204 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11205 
11206 	return err;
11207 }
11208 
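/* Pull tp->fw_needed in from userspace and sanity-check the length
 * advertised in the header against the blob we actually received.
 */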
11209 static int tg3_request_firmware(struct tg3 *tp)
11210 {
11211 	const struct tg3_firmware_hdr *fw_hdr;
11212 
11213 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11214 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11215 			   tp->fw_needed);
11216 		return -ENOENT;
11217 	}
11218 
11219 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11220 
11221 	/* Firmware blob starts with version numbers, followed by
11222 	 * start address and _full_ length including BSS sections
11223 	 * (which must be longer than the actual data, of course).
11224 	 */
11225 
11226 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11227 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11228 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11229 			   tp->fw_len, tp->fw_needed);
11230 		release_firmware(tp->fw);
11231 		tp->fw = NULL;
11232 		return -EINVAL;
11233 	}
11234 
11235 	/* We no longer need firmware; we have it. */
11236 	tp->fw_needed = NULL;
11237 	return 0;
11238 }
11239 
11240 static u32 tg3_irq_count(struct tg3 *tp)
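/* For example, with rxq_cnt = 4, txq_cnt = 1 and irq_max = 5 this
 * returns min(4 + 1, 5) = 5: four ring vectors plus the extra vector
 * that services only link interrupts.
 */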
11241 {
11242 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11243 
11244 	if (irq_cnt > 1) {
11245 		/* We want as many rx rings enabled as there are cpus.
11246 		 * In multiqueue MSI-X mode, the first MSI-X vector
11247 		 * only deals with link interrupts, etc., so we add
11248 		 * one to the number of vectors we are requesting.
11249 		 */
11250 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11251 	}
11252 
11253 	return irq_cnt;
11254 }
11255 
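/* Size the MSI-X request from the configured (or derived) queue counts.
 * If the PCI core can only grant fewer vectors, retry with that number
 * and shrink the rx/tx queue counts to fit.
 */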
11256 static bool tg3_enable_msix(struct tg3 *tp)
11257 {
11258 	int i, rc;
11259 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11260 
11261 	tp->txq_cnt = tp->txq_req;
11262 	tp->rxq_cnt = tp->rxq_req;
11263 	if (!tp->rxq_cnt)
11264 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11265 	if (tp->rxq_cnt > tp->rxq_max)
11266 		tp->rxq_cnt = tp->rxq_max;
11267 
11268 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11269 	 * scheduling of the TX rings can cause starvation of rings with
11270 	 * small packets when other rings have TSO or jumbo packets.
11271 	 */
11272 	if (!tp->txq_req)
11273 		tp->txq_cnt = 1;
11274 
11275 	tp->irq_cnt = tg3_irq_count(tp);
11276 
11277 	for (i = 0; i < tp->irq_max; i++) {
11278 		msix_ent[i].entry  = i;
11279 		msix_ent[i].vector = 0;
11280 	}
11281 
11282 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11283 	if (rc < 0) {
11284 		return false;
11285 	} else if (rc != 0) {
11286 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
11287 			return false;
11288 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11289 			      tp->irq_cnt, rc);
11290 		tp->irq_cnt = rc;
11291 		tp->rxq_cnt = max(rc - 1, 1);
11292 		if (tp->txq_cnt)
11293 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11294 	}
11295 
11296 	for (i = 0; i < tp->irq_max; i++)
11297 		tp->napi[i].irq_vec = msix_ent[i].vector;
11298 
11299 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11300 		pci_disable_msix(tp->pdev);
11301 		return false;
11302 	}
11303 
11304 	if (tp->irq_cnt == 1)
11305 		return true;
11306 
11307 	tg3_flag_set(tp, ENABLE_RSS);
11308 
11309 	if (tp->txq_cnt > 1)
11310 		tg3_flag_set(tp, ENABLE_TSS);
11311 
11312 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11313 
11314 	return true;
11315 }
11316 
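/* Interrupt setup policy: prefer MSI-X, then MSI, then legacy INTx,
 * and program MSGINT_MODE to match (multivector enable, one-shot
 * disable).  With a single vector everything collapses to one rx/tx
 * queue pair.
 */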
11317 static void tg3_ints_init(struct tg3 *tp)
11318 {
11319 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11320 	    !tg3_flag(tp, TAGGED_STATUS)) {
11321 		/* All MSI-supporting chips should support tagged
11322 		 * status.  Warn and fall back to INTx if not.
11323 		 */
11324 		netdev_warn(tp->dev,
11325 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11326 		goto defcfg;
11327 	}
11328 
11329 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11330 		tg3_flag_set(tp, USING_MSIX);
11331 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11332 		tg3_flag_set(tp, USING_MSI);
11333 
11334 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11335 		u32 msi_mode = tr32(MSGINT_MODE);
11336 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11337 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11338 		if (!tg3_flag(tp, 1SHOT_MSI))
11339 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11340 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11341 	}
11342 defcfg:
11343 	if (!tg3_flag(tp, USING_MSIX)) {
11344 		tp->irq_cnt = 1;
11345 		tp->napi[0].irq_vec = tp->pdev->irq;
11346 	}
11347 
11348 	if (tp->irq_cnt == 1) {
11349 		tp->txq_cnt = 1;
11350 		tp->rxq_cnt = 1;
11351 		netif_set_real_num_tx_queues(tp->dev, 1);
11352 		netif_set_real_num_rx_queues(tp->dev, 1);
11353 	}
11354 }
11355 
11356 static void tg3_ints_fini(struct tg3 *tp)
11357 {
11358 	if (tg3_flag(tp, USING_MSIX))
11359 		pci_disable_msix(tp->pdev);
11360 	else if (tg3_flag(tp, USING_MSI))
11361 		pci_disable_msi(tp->pdev);
11362 	tg3_flag_clear(tp, USING_MSI);
11363 	tg3_flag_clear(tp, USING_MSIX);
11364 	tg3_flag_clear(tp, ENABLE_RSS);
11365 	tg3_flag_clear(tp, ENABLE_TSS);
11366 }
11367 
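/* Bring-up path shared by open and reconfiguration: allocate NAPI
 * resources and per-vector IRQs, program the hardware, optionally
 * verify MSI delivery, then start the service timer and TX queues.
 * Mirrored by tg3_stop().
 */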
11368 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11369 		     bool init)
11370 {
11371 	struct net_device *dev = tp->dev;
11372 	int i, err;
11373 
11374 	/*
11375 	 * Set up interrupts first so we know how
11376 	 * many NAPI resources to allocate
11377 	 */
11378 	tg3_ints_init(tp);
11379 
11380 	tg3_rss_check_indir_tbl(tp);
11381 
11382 	/* The placement of this call is tied
11383 	 * to the setup and use of Host TX descriptors.
11384 	 */
11385 	err = tg3_alloc_consistent(tp);
11386 	if (err)
11387 		goto out_ints_fini;
11388 
11389 	tg3_napi_init(tp);
11390 
11391 	tg3_napi_enable(tp);
11392 
11393 	for (i = 0; i < tp->irq_cnt; i++) {
11394 		struct tg3_napi *tnapi = &tp->napi[i];
11395 		err = tg3_request_irq(tp, i);
11396 		if (err) {
11397 			for (i--; i >= 0; i--) {
11398 				tnapi = &tp->napi[i];
11399 				free_irq(tnapi->irq_vec, tnapi);
11400 			}
11401 			goto out_napi_fini;
11402 		}
11403 	}
11404 
11405 	tg3_full_lock(tp, 0);
11406 
11407 	if (init)
11408 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11409 
11410 	err = tg3_init_hw(tp, reset_phy);
11411 	if (err) {
11412 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11413 		tg3_free_rings(tp);
11414 	}
11415 
11416 	tg3_full_unlock(tp);
11417 
11418 	if (err)
11419 		goto out_free_irq;
11420 
11421 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11422 		err = tg3_test_msi(tp);
11423 
11424 		if (err) {
11425 			tg3_full_lock(tp, 0);
11426 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11427 			tg3_free_rings(tp);
11428 			tg3_full_unlock(tp);
11429 
11430 			goto out_napi_fini;
11431 		}
11432 
11433 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11434 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11435 
11436 			tw32(PCIE_TRANSACTION_CFG,
11437 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11438 		}
11439 	}
11440 
11441 	tg3_phy_start(tp);
11442 
11443 	tg3_hwmon_open(tp);
11444 
11445 	tg3_full_lock(tp, 0);
11446 
11447 	tg3_timer_start(tp);
11448 	tg3_flag_set(tp, INIT_COMPLETE);
11449 	tg3_enable_ints(tp);
11450 
11451 	if (init)
11452 		tg3_ptp_init(tp);
11453 	else
11454 		tg3_ptp_resume(tp);
11455 
11457 	tg3_full_unlock(tp);
11458 
11459 	netif_tx_start_all_queues(dev);
11460 
11461 	/*
11462 	 * Reset loopback feature if it was turned on while the device was down
11463 	 * to make sure that it's installed properly now.
11464 	 */
11465 	if (dev->features & NETIF_F_LOOPBACK)
11466 		tg3_set_loopback(dev, dev->features);
11467 
11468 	return 0;
11469 
11470 out_free_irq:
11471 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11472 		struct tg3_napi *tnapi = &tp->napi[i];
11473 		free_irq(tnapi->irq_vec, tnapi);
11474 	}
11475 
11476 out_napi_fini:
11477 	tg3_napi_disable(tp);
11478 	tg3_napi_fini(tp);
11479 	tg3_free_consistent(tp);
11480 
11481 out_ints_fini:
11482 	tg3_ints_fini(tp);
11483 
11484 	return err;
11485 }
11486 
11487 static void tg3_stop(struct tg3 *tp)
11488 {
11489 	int i;
11490 
11491 	tg3_reset_task_cancel(tp);
11492 	tg3_netif_stop(tp);
11493 
11494 	tg3_timer_stop(tp);
11495 
11496 	tg3_hwmon_close(tp);
11497 
11498 	tg3_phy_stop(tp);
11499 
11500 	tg3_full_lock(tp, 1);
11501 
11502 	tg3_disable_ints(tp);
11503 
11504 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11505 	tg3_free_rings(tp);
11506 	tg3_flag_clear(tp, INIT_COMPLETE);
11507 
11508 	tg3_full_unlock(tp);
11509 
11510 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11511 		struct tg3_napi *tnapi = &tp->napi[i];
11512 		free_irq(tnapi->irq_vec, tnapi);
11513 	}
11514 
11515 	tg3_ints_fini(tp);
11516 
11517 	tg3_napi_fini(tp);
11518 
11519 	tg3_free_consistent(tp);
11520 }
11521 
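/* .ndo_open handler.  Loads any firmware the chip still needs first,
 * downgrading EEE or TSO capability if the blob is unavailable, then
 * powers the chip up and runs the full tg3_start() bring-up.
 */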
11522 static int tg3_open(struct net_device *dev)
11523 {
11524 	struct tg3 *tp = netdev_priv(dev);
11525 	int err;
11526 
11527 	if (tp->fw_needed) {
11528 		err = tg3_request_firmware(tp);
11529 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11530 			if (err) {
11531 				netdev_warn(tp->dev, "EEE capability disabled\n");
11532 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11533 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11534 				netdev_warn(tp->dev, "EEE capability restored\n");
11535 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11536 			}
11537 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11538 			if (err)
11539 				return err;
11540 		} else if (err) {
11541 			netdev_warn(tp->dev, "TSO capability disabled\n");
11542 			tg3_flag_clear(tp, TSO_CAPABLE);
11543 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11544 			netdev_notice(tp->dev, "TSO capability restored\n");
11545 			tg3_flag_set(tp, TSO_CAPABLE);
11546 		}
11547 	}
11548 
11549 	tg3_carrier_off(tp);
11550 
11551 	err = tg3_power_up(tp);
11552 	if (err)
11553 		return err;
11554 
11555 	tg3_full_lock(tp, 0);
11556 
11557 	tg3_disable_ints(tp);
11558 	tg3_flag_clear(tp, INIT_COMPLETE);
11559 
11560 	tg3_full_unlock(tp);
11561 
11562 	err = tg3_start(tp,
11563 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11564 			true, true);
11565 	if (err) {
11566 		tg3_frob_aux_power(tp, false);
11567 		pci_set_power_state(tp->pdev, PCI_D3hot);
11568 	}
11569 
11570 	if (tg3_flag(tp, PTP_CAPABLE)) {
11571 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11572 						   &tp->pdev->dev);
11573 		if (IS_ERR(tp->ptp_clock))
11574 			tp->ptp_clock = NULL;
11575 	}
11576 
11577 	return err;
11578 }
11579 
11580 static int tg3_close(struct net_device *dev)
11581 {
11582 	struct tg3 *tp = netdev_priv(dev);
11583 
11584 	tg3_ptp_fini(tp);
11585 
11586 	tg3_stop(tp);
11587 
11588 	/* Clear stats across close / open calls */
11589 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11590 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11591 
11592 	if (pci_device_is_present(tp->pdev)) {
11593 		tg3_power_down_prepare(tp);
11594 
11595 		tg3_carrier_off(tp);
11596 	}
11597 	return 0;
11598 }
11599 
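/* Hardware counters are exported as {high,low} 32-bit register pairs;
 * fold one pair into a host u64.
 */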
11600 static inline u64 get_stat64(tg3_stat64_t *val)
11601 {
11602 	return ((u64)val->high << 32) | ((u64)val->low);
11603 }
11604 
11605 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11606 {
11607 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11608 
11609 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11610 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11611 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11612 		u32 val;
11613 
11614 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11615 			tg3_writephy(tp, MII_TG3_TEST1,
11616 				     val | MII_TG3_TEST1_CRC_EN);
11617 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11618 		} else
11619 			val = 0;
11620 
11621 		tp->phy_crc_errors += val;
11622 
11623 		return tp->phy_crc_errors;
11624 	}
11625 
11626 	return get_stat64(&hw_stats->rx_fcs_errors);
11627 }
11628 
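/* The chip's counters reset along with it, so each ethtool stat is the
 * current hardware reading added to the totals snapshotted into
 * estats_prev before the last chip reset.
 */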
11629 #define ESTAT_ADD(member) \
11630 	estats->member =	old_estats->member + \
11631 				get_stat64(&hw_stats->member)
11632 
11633 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11634 {
11635 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11636 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11637 
11638 	ESTAT_ADD(rx_octets);
11639 	ESTAT_ADD(rx_fragments);
11640 	ESTAT_ADD(rx_ucast_packets);
11641 	ESTAT_ADD(rx_mcast_packets);
11642 	ESTAT_ADD(rx_bcast_packets);
11643 	ESTAT_ADD(rx_fcs_errors);
11644 	ESTAT_ADD(rx_align_errors);
11645 	ESTAT_ADD(rx_xon_pause_rcvd);
11646 	ESTAT_ADD(rx_xoff_pause_rcvd);
11647 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11648 	ESTAT_ADD(rx_xoff_entered);
11649 	ESTAT_ADD(rx_frame_too_long_errors);
11650 	ESTAT_ADD(rx_jabbers);
11651 	ESTAT_ADD(rx_undersize_packets);
11652 	ESTAT_ADD(rx_in_length_errors);
11653 	ESTAT_ADD(rx_out_length_errors);
11654 	ESTAT_ADD(rx_64_or_less_octet_packets);
11655 	ESTAT_ADD(rx_65_to_127_octet_packets);
11656 	ESTAT_ADD(rx_128_to_255_octet_packets);
11657 	ESTAT_ADD(rx_256_to_511_octet_packets);
11658 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11659 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11660 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11661 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11662 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11663 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11664 
11665 	ESTAT_ADD(tx_octets);
11666 	ESTAT_ADD(tx_collisions);
11667 	ESTAT_ADD(tx_xon_sent);
11668 	ESTAT_ADD(tx_xoff_sent);
11669 	ESTAT_ADD(tx_flow_control);
11670 	ESTAT_ADD(tx_mac_errors);
11671 	ESTAT_ADD(tx_single_collisions);
11672 	ESTAT_ADD(tx_mult_collisions);
11673 	ESTAT_ADD(tx_deferred);
11674 	ESTAT_ADD(tx_excessive_collisions);
11675 	ESTAT_ADD(tx_late_collisions);
11676 	ESTAT_ADD(tx_collide_2times);
11677 	ESTAT_ADD(tx_collide_3times);
11678 	ESTAT_ADD(tx_collide_4times);
11679 	ESTAT_ADD(tx_collide_5times);
11680 	ESTAT_ADD(tx_collide_6times);
11681 	ESTAT_ADD(tx_collide_7times);
11682 	ESTAT_ADD(tx_collide_8times);
11683 	ESTAT_ADD(tx_collide_9times);
11684 	ESTAT_ADD(tx_collide_10times);
11685 	ESTAT_ADD(tx_collide_11times);
11686 	ESTAT_ADD(tx_collide_12times);
11687 	ESTAT_ADD(tx_collide_13times);
11688 	ESTAT_ADD(tx_collide_14times);
11689 	ESTAT_ADD(tx_collide_15times);
11690 	ESTAT_ADD(tx_ucast_packets);
11691 	ESTAT_ADD(tx_mcast_packets);
11692 	ESTAT_ADD(tx_bcast_packets);
11693 	ESTAT_ADD(tx_carrier_sense_errors);
11694 	ESTAT_ADD(tx_discards);
11695 	ESTAT_ADD(tx_errors);
11696 
11697 	ESTAT_ADD(dma_writeq_full);
11698 	ESTAT_ADD(dma_write_prioq_full);
11699 	ESTAT_ADD(rxbds_empty);
11700 	ESTAT_ADD(rx_discards);
11701 	ESTAT_ADD(rx_errors);
11702 	ESTAT_ADD(rx_threshold_hit);
11703 
11704 	ESTAT_ADD(dma_readq_full);
11705 	ESTAT_ADD(dma_read_prioq_full);
11706 	ESTAT_ADD(tx_comp_queue_full);
11707 
11708 	ESTAT_ADD(ring_set_send_prod_index);
11709 	ESTAT_ADD(ring_status_update);
11710 	ESTAT_ADD(nic_irqs);
11711 	ESTAT_ADD(nic_avoided_irqs);
11712 	ESTAT_ADD(nic_tx_threshold_hit);
11713 
11714 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11715 }
11716 
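/* Fill the standard rtnl_link_stats64 from the same hardware block,
 * folding several MAC error counters into the aggregate error fields.
 */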
11717 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11718 {
11719 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11720 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11721 
11722 	stats->rx_packets = old_stats->rx_packets +
11723 		get_stat64(&hw_stats->rx_ucast_packets) +
11724 		get_stat64(&hw_stats->rx_mcast_packets) +
11725 		get_stat64(&hw_stats->rx_bcast_packets);
11726 
11727 	stats->tx_packets = old_stats->tx_packets +
11728 		get_stat64(&hw_stats->tx_ucast_packets) +
11729 		get_stat64(&hw_stats->tx_mcast_packets) +
11730 		get_stat64(&hw_stats->tx_bcast_packets);
11731 
11732 	stats->rx_bytes = old_stats->rx_bytes +
11733 		get_stat64(&hw_stats->rx_octets);
11734 	stats->tx_bytes = old_stats->tx_bytes +
11735 		get_stat64(&hw_stats->tx_octets);
11736 
11737 	stats->rx_errors = old_stats->rx_errors +
11738 		get_stat64(&hw_stats->rx_errors);
11739 	stats->tx_errors = old_stats->tx_errors +
11740 		get_stat64(&hw_stats->tx_errors) +
11741 		get_stat64(&hw_stats->tx_mac_errors) +
11742 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11743 		get_stat64(&hw_stats->tx_discards);
11744 
11745 	stats->multicast = old_stats->multicast +
11746 		get_stat64(&hw_stats->rx_mcast_packets);
11747 	stats->collisions = old_stats->collisions +
11748 		get_stat64(&hw_stats->tx_collisions);
11749 
11750 	stats->rx_length_errors = old_stats->rx_length_errors +
11751 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11752 		get_stat64(&hw_stats->rx_undersize_packets);
11753 
11754 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11755 		get_stat64(&hw_stats->rx_align_errors);
11756 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11757 		get_stat64(&hw_stats->tx_discards);
11758 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11759 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11760 
11761 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11762 		tg3_calc_crc_errors(tp);
11763 
11764 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11765 		get_stat64(&hw_stats->rx_discards);
11766 
11767 	stats->rx_dropped = tp->rx_dropped;
11768 	stats->tx_dropped = tp->tx_dropped;
11769 }
11770 
11771 static int tg3_get_regs_len(struct net_device *dev)
11772 {
11773 	return TG3_REG_BLK_SIZE;
11774 }
11775 
11776 static void tg3_get_regs(struct net_device *dev,
11777 		struct ethtool_regs *regs, void *_p)
11778 {
11779 	struct tg3 *tp = netdev_priv(dev);
11780 
11781 	regs->version = 0;
11782 
11783 	memset(_p, 0, TG3_REG_BLK_SIZE);
11784 
11785 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11786 		return;
11787 
11788 	tg3_full_lock(tp, 0);
11789 
11790 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11791 
11792 	tg3_full_unlock(tp);
11793 }
11794 
11795 static int tg3_get_eeprom_len(struct net_device *dev)
11796 {
11797 	struct tg3 *tp = netdev_priv(dev);
11798 
11799 	return tp->nvram_size;
11800 }
11801 
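/* NVRAM is only addressable in aligned 32-bit words, so the read is
 * split into a leading partial word, the aligned middle, and a
 * trailing partial word.
 */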
11802 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11803 {
11804 	struct tg3 *tp = netdev_priv(dev);
11805 	int ret;
11806 	u8  *pd;
11807 	u32 i, offset, len, b_offset, b_count;
11808 	__be32 val;
11809 
11810 	if (tg3_flag(tp, NO_NVRAM))
11811 		return -EINVAL;
11812 
11813 	offset = eeprom->offset;
11814 	len = eeprom->len;
11815 	eeprom->len = 0;
11816 
11817 	eeprom->magic = TG3_EEPROM_MAGIC;
11818 
11819 	if (offset & 3) {
11820 		/* adjustments to start on required 4 byte boundary */
11821 		b_offset = offset & 3;
11822 		b_count = 4 - b_offset;
11823 		if (b_count > len) {
11824 			/* i.e. offset=1 len=2 */
11825 			b_count = len;
11826 		}
11827 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11828 		if (ret)
11829 			return ret;
11830 		memcpy(data, ((char *)&val) + b_offset, b_count);
11831 		len -= b_count;
11832 		offset += b_count;
11833 		eeprom->len += b_count;
11834 	}
11835 
11836 	/* read bytes up to the last 4 byte boundary */
11837 	pd = &data[eeprom->len];
11838 	for (i = 0; i < (len - (len & 3)); i += 4) {
11839 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11840 		if (ret) {
11841 			eeprom->len += i;
11842 			return ret;
11843 		}
11844 		memcpy(pd + i, &val, 4);
11845 	}
11846 	eeprom->len += i;
11847 
11848 	if (len & 3) {
11849 		/* read last bytes not ending on 4 byte boundary */
11850 		pd = &data[eeprom->len];
11851 		b_count = len & 3;
11852 		b_offset = offset + len - b_count;
11853 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11854 		if (ret)
11855 			return ret;
11856 		memcpy(pd, &val, b_count);
11857 		eeprom->len += b_count;
11858 	}
11859 	return 0;
11860 }
11861 
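/* Writes must likewise cover whole aligned words: read back the words
 * straddling the start and end of the request and merge the caller's
 * bytes into a bounce buffer before programming.
 */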
11862 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11863 {
11864 	struct tg3 *tp = netdev_priv(dev);
11865 	int ret;
11866 	u32 offset, len, b_offset, odd_len;
11867 	u8 *buf;
11868 	__be32 start, end;
11869 
11870 	if (tg3_flag(tp, NO_NVRAM) ||
11871 	    eeprom->magic != TG3_EEPROM_MAGIC)
11872 		return -EINVAL;
11873 
11874 	offset = eeprom->offset;
11875 	len = eeprom->len;
11876 
11877 	if ((b_offset = (offset & 3))) {
11878 		/* adjustments to start on required 4 byte boundary */
11879 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11880 		if (ret)
11881 			return ret;
11882 		len += b_offset;
11883 		offset &= ~3;
11884 		if (len < 4)
11885 			len = 4;
11886 	}
11887 
11888 	odd_len = 0;
11889 	if (len & 3) {
11890 		/* adjustments to end on required 4 byte boundary */
11891 		odd_len = 1;
11892 		len = (len + 3) & ~3;
11893 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11894 		if (ret)
11895 			return ret;
11896 	}
11897 
11898 	buf = data;
11899 	if (b_offset || odd_len) {
11900 		buf = kmalloc(len, GFP_KERNEL);
11901 		if (!buf)
11902 			return -ENOMEM;
11903 		if (b_offset)
11904 			memcpy(buf, &start, 4);
11905 		if (odd_len)
11906 			memcpy(buf+len-4, &end, 4);
11907 		memcpy(buf + b_offset, data, eeprom->len);
11908 	}
11909 
11910 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11911 
11912 	if (buf != data)
11913 		kfree(buf);
11914 
11915 	return ret;
11916 }
11917 
11918 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11919 {
11920 	struct tg3 *tp = netdev_priv(dev);
11921 
11922 	if (tg3_flag(tp, USE_PHYLIB)) {
11923 		struct phy_device *phydev;
11924 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11925 			return -EAGAIN;
11926 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11927 		return phy_ethtool_gset(phydev, cmd);
11928 	}
11929 
11930 	cmd->supported = (SUPPORTED_Autoneg);
11931 
11932 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11933 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11934 				   SUPPORTED_1000baseT_Full);
11935 
11936 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11937 		cmd->supported |= (SUPPORTED_100baseT_Half |
11938 				  SUPPORTED_100baseT_Full |
11939 				  SUPPORTED_10baseT_Half |
11940 				  SUPPORTED_10baseT_Full |
11941 				  SUPPORTED_TP);
11942 		cmd->port = PORT_TP;
11943 	} else {
11944 		cmd->supported |= SUPPORTED_FIBRE;
11945 		cmd->port = PORT_FIBRE;
11946 	}
11947 
11948 	cmd->advertising = tp->link_config.advertising;
11949 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11950 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11951 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11952 				cmd->advertising |= ADVERTISED_Pause;
11953 			} else {
11954 				cmd->advertising |= ADVERTISED_Pause |
11955 						    ADVERTISED_Asym_Pause;
11956 			}
11957 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11958 			cmd->advertising |= ADVERTISED_Asym_Pause;
11959 		}
11960 	}
11961 	if (netif_running(dev) && tp->link_up) {
11962 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11963 		cmd->duplex = tp->link_config.active_duplex;
11964 		cmd->lp_advertising = tp->link_config.rmt_adv;
11965 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11966 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11967 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11968 			else
11969 				cmd->eth_tp_mdix = ETH_TP_MDI;
11970 		}
11971 	} else {
11972 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11973 		cmd->duplex = DUPLEX_UNKNOWN;
11974 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11975 	}
11976 	cmd->phy_address = tp->phy_addr;
11977 	cmd->transceiver = XCVR_INTERNAL;
11978 	cmd->autoneg = tp->link_config.autoneg;
11979 	cmd->maxtxpkt = 0;
11980 	cmd->maxrxpkt = 0;
11981 	return 0;
11982 }
11983 
11984 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11985 {
11986 	struct tg3 *tp = netdev_priv(dev);
11987 	u32 speed = ethtool_cmd_speed(cmd);
11988 
11989 	if (tg3_flag(tp, USE_PHYLIB)) {
11990 		struct phy_device *phydev;
11991 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11992 			return -EAGAIN;
11993 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11994 		return phy_ethtool_sset(phydev, cmd);
11995 	}
11996 
11997 	if (cmd->autoneg != AUTONEG_ENABLE &&
11998 	    cmd->autoneg != AUTONEG_DISABLE)
11999 		return -EINVAL;
12000 
12001 	if (cmd->autoneg == AUTONEG_DISABLE &&
12002 	    cmd->duplex != DUPLEX_FULL &&
12003 	    cmd->duplex != DUPLEX_HALF)
12004 		return -EINVAL;
12005 
12006 	if (cmd->autoneg == AUTONEG_ENABLE) {
12007 		u32 mask = ADVERTISED_Autoneg |
12008 			   ADVERTISED_Pause |
12009 			   ADVERTISED_Asym_Pause;
12010 
12011 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12012 			mask |= ADVERTISED_1000baseT_Half |
12013 				ADVERTISED_1000baseT_Full;
12014 
12015 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12016 			mask |= ADVERTISED_100baseT_Half |
12017 				ADVERTISED_100baseT_Full |
12018 				ADVERTISED_10baseT_Half |
12019 				ADVERTISED_10baseT_Full |
12020 				ADVERTISED_TP;
12021 		else
12022 			mask |= ADVERTISED_FIBRE;
12023 
12024 		if (cmd->advertising & ~mask)
12025 			return -EINVAL;
12026 
12027 		mask &= (ADVERTISED_1000baseT_Half |
12028 			 ADVERTISED_1000baseT_Full |
12029 			 ADVERTISED_100baseT_Half |
12030 			 ADVERTISED_100baseT_Full |
12031 			 ADVERTISED_10baseT_Half |
12032 			 ADVERTISED_10baseT_Full);
12033 
12034 		cmd->advertising &= mask;
12035 	} else {
12036 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12037 			if (speed != SPEED_1000)
12038 				return -EINVAL;
12039 
12040 			if (cmd->duplex != DUPLEX_FULL)
12041 				return -EINVAL;
12042 		} else {
12043 			if (speed != SPEED_100 &&
12044 			    speed != SPEED_10)
12045 				return -EINVAL;
12046 		}
12047 	}
12048 
12049 	tg3_full_lock(tp, 0);
12050 
12051 	tp->link_config.autoneg = cmd->autoneg;
12052 	if (cmd->autoneg == AUTONEG_ENABLE) {
12053 		tp->link_config.advertising = (cmd->advertising |
12054 					      ADVERTISED_Autoneg);
12055 		tp->link_config.speed = SPEED_UNKNOWN;
12056 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12057 	} else {
12058 		tp->link_config.advertising = 0;
12059 		tp->link_config.speed = speed;
12060 		tp->link_config.duplex = cmd->duplex;
12061 	}
12062 
12063 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12064 
12065 	tg3_warn_mgmt_link_flap(tp);
12066 
12067 	if (netif_running(dev))
12068 		tg3_setup_phy(tp, true);
12069 
12070 	tg3_full_unlock(tp);
12071 
12072 	return 0;
12073 }
12074 
12075 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12076 {
12077 	struct tg3 *tp = netdev_priv(dev);
12078 
12079 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12080 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12081 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12082 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12083 }
12084 
12085 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12086 {
12087 	struct tg3 *tp = netdev_priv(dev);
12088 
12089 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12090 		wol->supported = WAKE_MAGIC;
12091 	else
12092 		wol->supported = 0;
12093 	wol->wolopts = 0;
12094 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12095 		wol->wolopts = WAKE_MAGIC;
12096 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12097 }
12098 
12099 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12100 {
12101 	struct tg3 *tp = netdev_priv(dev);
12102 	struct device *dp = &tp->pdev->dev;
12103 
12104 	if (wol->wolopts & ~WAKE_MAGIC)
12105 		return -EINVAL;
12106 	if ((wol->wolopts & WAKE_MAGIC) &&
12107 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12108 		return -EINVAL;
12109 
12110 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12111 
12112 	if (device_may_wakeup(dp))
12113 		tg3_flag_set(tp, WOL_ENABLE);
12114 	else
12115 		tg3_flag_clear(tp, WOL_ENABLE);
12116 
12117 	return 0;
12118 }
12119 
12120 static u32 tg3_get_msglevel(struct net_device *dev)
12121 {
12122 	struct tg3 *tp = netdev_priv(dev);
12123 	return tp->msg_enable;
12124 }
12125 
12126 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12127 {
12128 	struct tg3 *tp = netdev_priv(dev);
12129 	tp->msg_enable = value;
12130 }
12131 
12132 static int tg3_nway_reset(struct net_device *dev)
12133 {
12134 	struct tg3 *tp = netdev_priv(dev);
12135 	int r;
12136 
12137 	if (!netif_running(dev))
12138 		return -EAGAIN;
12139 
12140 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12141 		return -EINVAL;
12142 
12143 	tg3_warn_mgmt_link_flap(tp);
12144 
12145 	if (tg3_flag(tp, USE_PHYLIB)) {
12146 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12147 			return -EAGAIN;
12148 		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12149 	} else {
12150 		u32 bmcr;
12151 
12152 		spin_lock_bh(&tp->lock);
12153 		r = -EINVAL;
12154 		tg3_readphy(tp, MII_BMCR, &bmcr);
12155 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12156 		    ((bmcr & BMCR_ANENABLE) ||
12157 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12158 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12159 						   BMCR_ANENABLE);
12160 			r = 0;
12161 		}
12162 		spin_unlock_bh(&tp->lock);
12163 	}
12164 
12165 	return r;
12166 }
12167 
12168 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12169 {
12170 	struct tg3 *tp = netdev_priv(dev);
12171 
12172 	ering->rx_max_pending = tp->rx_std_ring_mask;
12173 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12174 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12175 	else
12176 		ering->rx_jumbo_max_pending = 0;
12177 
12178 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12179 
12180 	ering->rx_pending = tp->rx_pending;
12181 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12182 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12183 	else
12184 		ering->rx_jumbo_pending = 0;
12185 
12186 	ering->tx_pending = tp->napi[0].tx_pending;
12187 }
12188 
12189 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12190 {
12191 	struct tg3 *tp = netdev_priv(dev);
12192 	int i, irq_sync = 0, err = 0;
12193 
12194 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12195 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12196 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12197 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12198 	    (tg3_flag(tp, TSO_BUG) &&
12199 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12200 		return -EINVAL;
12201 
12202 	if (netif_running(dev)) {
12203 		tg3_phy_stop(tp);
12204 		tg3_netif_stop(tp);
12205 		irq_sync = 1;
12206 	}
12207 
12208 	tg3_full_lock(tp, irq_sync);
12209 
12210 	tp->rx_pending = ering->rx_pending;
12211 
12212 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12213 	    tp->rx_pending > 63)
12214 		tp->rx_pending = 63;
12215 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12216 
12217 	for (i = 0; i < tp->irq_max; i++)
12218 		tp->napi[i].tx_pending = ering->tx_pending;
12219 
12220 	if (netif_running(dev)) {
12221 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12222 		err = tg3_restart_hw(tp, false);
12223 		if (!err)
12224 			tg3_netif_start(tp);
12225 	}
12226 
12227 	tg3_full_unlock(tp);
12228 
12229 	if (irq_sync && !err)
12230 		tg3_phy_start(tp);
12231 
12232 	return err;
12233 }
12234 
12235 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12236 {
12237 	struct tg3 *tp = netdev_priv(dev);
12238 
12239 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12240 
12241 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12242 		epause->rx_pause = 1;
12243 	else
12244 		epause->rx_pause = 0;
12245 
12246 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12247 		epause->tx_pause = 1;
12248 	else
12249 		epause->tx_pause = 0;
12250 }
12251 
12252 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12253 {
12254 	struct tg3 *tp = netdev_priv(dev);
12255 	int err = 0;
12256 
12257 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12258 		tg3_warn_mgmt_link_flap(tp);
12259 
12260 	if (tg3_flag(tp, USE_PHYLIB)) {
12261 		u32 newadv;
12262 		struct phy_device *phydev;
12263 
12264 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12265 
12266 		if (!(phydev->supported & SUPPORTED_Pause) ||
12267 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12268 		     (epause->rx_pause != epause->tx_pause)))
12269 			return -EINVAL;
12270 
12271 		tp->link_config.flowctrl = 0;
12272 		if (epause->rx_pause) {
12273 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12274 
12275 			if (epause->tx_pause) {
12276 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12277 				newadv = ADVERTISED_Pause;
12278 			} else
12279 				newadv = ADVERTISED_Pause |
12280 					 ADVERTISED_Asym_Pause;
12281 		} else if (epause->tx_pause) {
12282 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12283 			newadv = ADVERTISED_Asym_Pause;
12284 		} else
12285 			newadv = 0;
12286 
12287 		if (epause->autoneg)
12288 			tg3_flag_set(tp, PAUSE_AUTONEG);
12289 		else
12290 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12291 
12292 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12293 			u32 oldadv = phydev->advertising &
12294 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12295 			if (oldadv != newadv) {
12296 				phydev->advertising &=
12297 					~(ADVERTISED_Pause |
12298 					  ADVERTISED_Asym_Pause);
12299 				phydev->advertising |= newadv;
12300 				if (phydev->autoneg) {
12301 					/*
12302 					 * Always renegotiate the link to
12303 					 * inform our link partner of our
12304 					 * flow control settings, even if the
12305 					 * flow control is forced.  Let
12306 					 * tg3_adjust_link() do the final
12307 					 * flow control setup.
12308 					 */
12309 					return phy_start_aneg(phydev);
12310 				}
12311 			}
12312 
12313 			if (!epause->autoneg)
12314 				tg3_setup_flow_control(tp, 0, 0);
12315 		} else {
12316 			tp->link_config.advertising &=
12317 					~(ADVERTISED_Pause |
12318 					  ADVERTISED_Asym_Pause);
12319 			tp->link_config.advertising |= newadv;
12320 		}
12321 	} else {
12322 		int irq_sync = 0;
12323 
12324 		if (netif_running(dev)) {
12325 			tg3_netif_stop(tp);
12326 			irq_sync = 1;
12327 		}
12328 
12329 		tg3_full_lock(tp, irq_sync);
12330 
12331 		if (epause->autoneg)
12332 			tg3_flag_set(tp, PAUSE_AUTONEG);
12333 		else
12334 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12335 		if (epause->rx_pause)
12336 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12337 		else
12338 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12339 		if (epause->tx_pause)
12340 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12341 		else
12342 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12343 
12344 		if (netif_running(dev)) {
12345 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12346 			err = tg3_restart_hw(tp, false);
12347 			if (!err)
12348 				tg3_netif_start(tp);
12349 		}
12350 
12351 		tg3_full_unlock(tp);
12352 	}
12353 
12354 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12355 
12356 	return err;
12357 }
12358 
12359 static int tg3_get_sset_count(struct net_device *dev, int sset)
12360 {
12361 	switch (sset) {
12362 	case ETH_SS_TEST:
12363 		return TG3_NUM_TEST;
12364 	case ETH_SS_STATS:
12365 		return TG3_NUM_STATS;
12366 	default:
12367 		return -EOPNOTSUPP;
12368 	}
12369 }
12370 
12371 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12372 			 u32 *rules __always_unused)
12373 {
12374 	struct tg3 *tp = netdev_priv(dev);
12375 
12376 	if (!tg3_flag(tp, SUPPORT_MSIX))
12377 		return -EOPNOTSUPP;
12378 
12379 	switch (info->cmd) {
12380 	case ETHTOOL_GRXRINGS:
12381 		if (netif_running(tp->dev))
12382 			info->data = tp->rxq_cnt;
12383 		else {
12384 			info->data = num_online_cpus();
12385 			if (info->data > TG3_RSS_MAX_NUM_QS)
12386 				info->data = TG3_RSS_MAX_NUM_QS;
12387 		}
12388 
12389 		/* The first interrupt vector only
12390 		 * handles link interrupts.
12391 		 */
12392 		info->data -= 1;
12393 		return 0;
12394 
12395 	default:
12396 		return -EOPNOTSUPP;
12397 	}
12398 }
12399 
12400 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12401 {
12402 	u32 size = 0;
12403 	struct tg3 *tp = netdev_priv(dev);
12404 
12405 	if (tg3_flag(tp, SUPPORT_MSIX))
12406 		size = TG3_RSS_INDIR_TBL_SIZE;
12407 
12408 	return size;
12409 }
12410 
12411 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12412 {
12413 	struct tg3 *tp = netdev_priv(dev);
12414 	int i;
12415 
12416 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12417 		indir[i] = tp->rss_ind_tbl[i];
12418 
12419 	return 0;
12420 }
12421 
12422 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12423 {
12424 	struct tg3 *tp = netdev_priv(dev);
12425 	size_t i;
12426 
12427 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12428 		tp->rss_ind_tbl[i] = indir[i];
12429 
12430 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12431 		return 0;
12432 
12433 	/* It is legal to write the indirection
12434 	 * table while the device is running.
12435 	 */
12436 	tg3_full_lock(tp, 0);
12437 	tg3_rss_write_indir_tbl(tp);
12438 	tg3_full_unlock(tp);
12439 
12440 	return 0;
12441 }
12442 
12443 static void tg3_get_channels(struct net_device *dev,
12444 			     struct ethtool_channels *channel)
12445 {
12446 	struct tg3 *tp = netdev_priv(dev);
12447 	u32 deflt_qs = netif_get_num_default_rss_queues();
12448 
12449 	channel->max_rx = tp->rxq_max;
12450 	channel->max_tx = tp->txq_max;
12451 
12452 	if (netif_running(dev)) {
12453 		channel->rx_count = tp->rxq_cnt;
12454 		channel->tx_count = tp->txq_cnt;
12455 	} else {
12456 		if (tp->rxq_req)
12457 			channel->rx_count = tp->rxq_req;
12458 		else
12459 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12460 
12461 		if (tp->txq_req)
12462 			channel->tx_count = tp->txq_req;
12463 		else
12464 			channel->tx_count = min(deflt_qs, tp->txq_max);
12465 	}
12466 }
12467 
12468 static int tg3_set_channels(struct net_device *dev,
12469 			    struct ethtool_channels *channel)
12470 {
12471 	struct tg3 *tp = netdev_priv(dev);
12472 
12473 	if (!tg3_flag(tp, SUPPORT_MSIX))
12474 		return -EOPNOTSUPP;
12475 
12476 	if (channel->rx_count > tp->rxq_max ||
12477 	    channel->tx_count > tp->txq_max)
12478 		return -EINVAL;
12479 
12480 	tp->rxq_req = channel->rx_count;
12481 	tp->txq_req = channel->tx_count;
12482 
12483 	if (!netif_running(dev))
12484 		return 0;
12485 
12486 	tg3_stop(tp);
12487 
12488 	tg3_carrier_off(tp);
12489 
12490 	tg3_start(tp, true, false, false);
12491 
12492 	return 0;
12493 }
12494 
12495 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12496 {
12497 	switch (stringset) {
12498 	case ETH_SS_STATS:
12499 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12500 		break;
12501 	case ETH_SS_TEST:
12502 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12503 		break;
12504 	default:
12505 		WARN_ON(1);	/* we need a WARN() */
12506 		break;
12507 	}
12508 }
12509 
12510 static int tg3_set_phys_id(struct net_device *dev,
12511 			    enum ethtool_phys_id_state state)
12512 {
12513 	struct tg3 *tp = netdev_priv(dev);
12514 
12515 	if (!netif_running(tp->dev))
12516 		return -EAGAIN;
12517 
12518 	switch (state) {
12519 	case ETHTOOL_ID_ACTIVE:
12520 		return 1;	/* cycle on/off once per second */
12521 
12522 	case ETHTOOL_ID_ON:
12523 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12524 		     LED_CTRL_1000MBPS_ON |
12525 		     LED_CTRL_100MBPS_ON |
12526 		     LED_CTRL_10MBPS_ON |
12527 		     LED_CTRL_TRAFFIC_OVERRIDE |
12528 		     LED_CTRL_TRAFFIC_BLINK |
12529 		     LED_CTRL_TRAFFIC_LED);
12530 		break;
12531 
12532 	case ETHTOOL_ID_OFF:
12533 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12534 		     LED_CTRL_TRAFFIC_OVERRIDE);
12535 		break;
12536 
12537 	case ETHTOOL_ID_INACTIVE:
12538 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12539 		break;
12540 	}
12541 
12542 	return 0;
12543 }
12544 
12545 static void tg3_get_ethtool_stats(struct net_device *dev,
12546 				   struct ethtool_stats *estats, u64 *tmp_stats)
12547 {
12548 	struct tg3 *tp = netdev_priv(dev);
12549 
12550 	if (tp->hw_stats)
12551 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12552 	else
12553 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12554 }
12555 
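/* Locate and copy out the VPD block: prefer an extended-VPD entry from
 * the NVRAM directory, fall back to the fixed TG3_NVM_VPD_OFF window,
 * and on parts without tg3-format NVRAM read it via PCI config space.
 */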
12556 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12557 {
12558 	int i;
12559 	__be32 *buf;
12560 	u32 offset = 0, len = 0;
12561 	u32 magic, val;
12562 
12563 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12564 		return NULL;
12565 
12566 	if (magic == TG3_EEPROM_MAGIC) {
12567 		for (offset = TG3_NVM_DIR_START;
12568 		     offset < TG3_NVM_DIR_END;
12569 		     offset += TG3_NVM_DIRENT_SIZE) {
12570 			if (tg3_nvram_read(tp, offset, &val))
12571 				return NULL;
12572 
12573 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12574 			    TG3_NVM_DIRTYPE_EXTVPD)
12575 				break;
12576 		}
12577 
12578 		if (offset != TG3_NVM_DIR_END) {
12579 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12580 			if (tg3_nvram_read(tp, offset + 4, &offset))
12581 				return NULL;
12582 
12583 			offset = tg3_nvram_logical_addr(tp, offset);
12584 		}
12585 	}
12586 
12587 	if (!offset || !len) {
12588 		offset = TG3_NVM_VPD_OFF;
12589 		len = TG3_NVM_VPD_LEN;
12590 	}
12591 
12592 	buf = kmalloc(len, GFP_KERNEL);
12593 	if (buf == NULL)
12594 		return NULL;
12595 
12596 	if (magic == TG3_EEPROM_MAGIC) {
12597 		for (i = 0; i < len; i += 4) {
12598 			/* The data is in little-endian format in NVRAM.
12599 			 * Use the big-endian read routines to preserve
12600 			 * the byte order as it exists in NVRAM.
12601 			 */
12602 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12603 				goto error;
12604 		}
12605 	} else {
12606 		u8 *ptr;
12607 		ssize_t cnt;
12608 		unsigned int pos = 0;
12609 
12610 		ptr = (u8 *)&buf[0];
12611 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12612 			cnt = pci_read_vpd(tp->pdev, pos,
12613 					   len - pos, ptr);
12614 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12615 				cnt = 0;
12616 			else if (cnt < 0)
12617 				goto error;
12618 		}
12619 		if (pos != len)
12620 			goto error;
12621 	}
12622 
12623 	*vpdlen = len;
12624 
12625 	return buf;
12626 
12627 error:
12628 	kfree(buf);
12629 	return NULL;
12630 }
12631 
12632 #define NVRAM_TEST_SIZE 0x100
12633 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12634 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12635 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12636 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12637 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12638 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12639 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12640 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12641 
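/* NVRAM self-test: derive the expected image size from the magic and
 * revision words, then verify the format-specific integrity data (a
 * byte sum for selfboot images, parity bits for the hardware selfboot
 * format, CRCs for legacy images) plus the VPD checksum keyword.
 */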
12642 static int tg3_test_nvram(struct tg3 *tp)
12643 {
12644 	u32 csum, magic, len;
12645 	__be32 *buf;
12646 	int i, j, k, err = 0, size;
12647 
12648 	if (tg3_flag(tp, NO_NVRAM))
12649 		return 0;
12650 
12651 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12652 		return -EIO;
12653 
12654 	if (magic == TG3_EEPROM_MAGIC)
12655 		size = NVRAM_TEST_SIZE;
12656 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12657 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12658 		    TG3_EEPROM_SB_FORMAT_1) {
12659 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12660 			case TG3_EEPROM_SB_REVISION_0:
12661 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12662 				break;
12663 			case TG3_EEPROM_SB_REVISION_2:
12664 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12665 				break;
12666 			case TG3_EEPROM_SB_REVISION_3:
12667 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12668 				break;
12669 			case TG3_EEPROM_SB_REVISION_4:
12670 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12671 				break;
12672 			case TG3_EEPROM_SB_REVISION_5:
12673 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12674 				break;
12675 			case TG3_EEPROM_SB_REVISION_6:
12676 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12677 				break;
12678 			default:
12679 				return -EIO;
12680 			}
12681 		} else
12682 			return 0;
12683 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12684 		size = NVRAM_SELFBOOT_HW_SIZE;
12685 	else
12686 		return -EIO;
12687 
12688 	buf = kmalloc(size, GFP_KERNEL);
12689 	if (buf == NULL)
12690 		return -ENOMEM;
12691 
12692 	err = -EIO;
12693 	for (i = 0, j = 0; i < size; i += 4, j++) {
12694 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12695 		if (err)
12696 			break;
12697 	}
12698 	if (i < size)
12699 		goto out;
12700 
12701 	/* Selfboot format */
12702 	magic = be32_to_cpu(buf[0]);
12703 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12704 	    TG3_EEPROM_MAGIC_FW) {
12705 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12706 
12707 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12708 		    TG3_EEPROM_SB_REVISION_2) {
12709 			/* For rev 2, the csum doesn't include the MBA. */
12710 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12711 				csum8 += buf8[i];
12712 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12713 				csum8 += buf8[i];
12714 		} else {
12715 			for (i = 0; i < size; i++)
12716 				csum8 += buf8[i];
12717 		}
12718 
12719 		if (csum8 == 0) {
12720 			err = 0;
12721 			goto out;
12722 		}
12723 
12724 		err = -EIO;
12725 		goto out;
12726 	}
12727 
12728 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12729 	    TG3_EEPROM_MAGIC_HW) {
12730 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12731 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12732 		u8 *buf8 = (u8 *) buf;
12733 
12734 		/* Separate the parity bits and the data bytes.  */
12735 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12736 			if ((i == 0) || (i == 8)) {
12737 				int l;
12738 				u8 msk;
12739 
12740 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12741 					parity[k++] = buf8[i] & msk;
12742 				i++;
12743 			} else if (i == 16) {
12744 				int l;
12745 				u8 msk;
12746 
12747 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12748 					parity[k++] = buf8[i] & msk;
12749 				i++;
12750 
12751 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12752 					parity[k++] = buf8[i] & msk;
12753 				i++;
12754 			}
12755 			data[j++] = buf8[i];
12756 		}
12757 
12758 		err = -EIO;
12759 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12760 			u8 hw8 = hweight8(data[i]);
12761 
12762 			if ((hw8 & 0x1) && parity[i])
12763 				goto out;
12764 			else if (!(hw8 & 0x1) && !parity[i])
12765 				goto out;
12766 		}
12767 		err = 0;
12768 		goto out;
12769 	}
12770 
12771 	err = -EIO;
12772 
12773 	/* Bootstrap checksum at offset 0x10 */
12774 	csum = calc_crc((unsigned char *) buf, 0x10);
12775 	if (csum != le32_to_cpu(buf[0x10/4]))
12776 		goto out;
12777 
12778 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12779 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12780 	if (csum != le32_to_cpu(buf[0xfc/4]))
12781 		goto out;
12782 
12783 	kfree(buf);
12784 
12785 	buf = tg3_vpd_readblock(tp, &len);
12786 	if (!buf)
12787 		return -ENOMEM;
12788 
12789 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12790 	if (i > 0) {
12791 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12792 		if (j < 0)
12793 			goto out;
12794 
12795 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12796 			goto out;
12797 
12798 		i += PCI_VPD_LRDT_TAG_SIZE;
12799 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12800 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12801 		if (j > 0) {
12802 			u8 csum8 = 0;
12803 
12804 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12805 
12806 			for (i = 0; i <= j; i++)
12807 				csum8 += ((u8 *)buf)[i];
12808 
12809 			if (csum8)
12810 				goto out;
12811 		}
12812 	}
12813 
12814 	err = 0;
12815 
12816 out:
12817 	kfree(buf);
12818 	return err;
12819 }
12820 
12821 #define TG3_SERDES_TIMEOUT_SEC	2
12822 #define TG3_COPPER_TIMEOUT_SEC	6
12823 
12824 static int tg3_test_link(struct tg3 *tp)
12825 {
12826 	int i, max;
12827 
12828 	if (!netif_running(tp->dev))
12829 		return -ENODEV;
12830 
12831 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12832 		max = TG3_SERDES_TIMEOUT_SEC;
12833 	else
12834 		max = TG3_COPPER_TIMEOUT_SEC;
12835 
12836 	for (i = 0; i < max; i++) {
12837 		if (tp->link_up)
12838 			return 0;
12839 
12840 		if (msleep_interruptible(1000))
12841 			break;
12842 	}
12843 
12844 	return -EIO;
12845 }
12846 
12847 /* Only test the commonly used registers */
12848 static int tg3_test_registers(struct tg3 *tp)
12849 {
12850 	int i, is_5705, is_5750;
12851 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12852 	static struct {
12853 		u16 offset;
12854 		u16 flags;
12855 #define TG3_FL_5705	0x1
12856 #define TG3_FL_NOT_5705	0x2
12857 #define TG3_FL_NOT_5788	0x4
12858 #define TG3_FL_NOT_5750	0x8
12859 		u32 read_mask;
12860 		u32 write_mask;
12861 	} reg_tbl[] = {
12862 		/* MAC Control Registers */
12863 		{ MAC_MODE, TG3_FL_NOT_5705,
12864 			0x00000000, 0x00ef6f8c },
12865 		{ MAC_MODE, TG3_FL_5705,
12866 			0x00000000, 0x01ef6b8c },
12867 		{ MAC_STATUS, TG3_FL_NOT_5705,
12868 			0x03800107, 0x00000000 },
12869 		{ MAC_STATUS, TG3_FL_5705,
12870 			0x03800100, 0x00000000 },
12871 		{ MAC_ADDR_0_HIGH, 0x0000,
12872 			0x00000000, 0x0000ffff },
12873 		{ MAC_ADDR_0_LOW, 0x0000,
12874 			0x00000000, 0xffffffff },
12875 		{ MAC_RX_MTU_SIZE, 0x0000,
12876 			0x00000000, 0x0000ffff },
12877 		{ MAC_TX_MODE, 0x0000,
12878 			0x00000000, 0x00000070 },
12879 		{ MAC_TX_LENGTHS, 0x0000,
12880 			0x00000000, 0x00003fff },
12881 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12882 			0x00000000, 0x000007fc },
12883 		{ MAC_RX_MODE, TG3_FL_5705,
12884 			0x00000000, 0x000007dc },
12885 		{ MAC_HASH_REG_0, 0x0000,
12886 			0x00000000, 0xffffffff },
12887 		{ MAC_HASH_REG_1, 0x0000,
12888 			0x00000000, 0xffffffff },
12889 		{ MAC_HASH_REG_2, 0x0000,
12890 			0x00000000, 0xffffffff },
12891 		{ MAC_HASH_REG_3, 0x0000,
12892 			0x00000000, 0xffffffff },
12893 
12894 		/* Receive Data and Receive BD Initiator Control Registers. */
12895 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12896 			0x00000000, 0xffffffff },
12897 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12898 			0x00000000, 0xffffffff },
12899 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12900 			0x00000000, 0x00000003 },
12901 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12902 			0x00000000, 0xffffffff },
12903 		{ RCVDBDI_STD_BD+0, 0x0000,
12904 			0x00000000, 0xffffffff },
12905 		{ RCVDBDI_STD_BD+4, 0x0000,
12906 			0x00000000, 0xffffffff },
12907 		{ RCVDBDI_STD_BD+8, 0x0000,
12908 			0x00000000, 0xffff0002 },
12909 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12910 			0x00000000, 0xffffffff },
12911 
12912 		/* Receive BD Initiator Control Registers. */
12913 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12914 			0x00000000, 0xffffffff },
12915 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12916 			0x00000000, 0x000003ff },
12917 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12918 			0x00000000, 0xffffffff },
12919 
12920 		/* Host Coalescing Control Registers. */
12921 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12922 			0x00000000, 0x00000004 },
12923 		{ HOSTCC_MODE, TG3_FL_5705,
12924 			0x00000000, 0x000000f6 },
12925 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12926 			0x00000000, 0xffffffff },
12927 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12928 			0x00000000, 0x000003ff },
12929 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12930 			0x00000000, 0xffffffff },
12931 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12932 			0x00000000, 0x000003ff },
12933 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12934 			0x00000000, 0xffffffff },
12935 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12936 			0x00000000, 0x000000ff },
12937 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12938 			0x00000000, 0xffffffff },
12939 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12940 			0x00000000, 0x000000ff },
12941 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12942 			0x00000000, 0xffffffff },
12943 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12944 			0x00000000, 0xffffffff },
12945 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12946 			0x00000000, 0xffffffff },
12947 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12948 			0x00000000, 0x000000ff },
12949 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12950 			0x00000000, 0xffffffff },
12951 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12952 			0x00000000, 0x000000ff },
12953 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12954 			0x00000000, 0xffffffff },
12955 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12956 			0x00000000, 0xffffffff },
12957 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12958 			0x00000000, 0xffffffff },
12959 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12960 			0x00000000, 0xffffffff },
12961 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12962 			0x00000000, 0xffffffff },
12963 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12964 			0xffffffff, 0x00000000 },
12965 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12966 			0xffffffff, 0x00000000 },
12967 
12968 		/* Buffer Manager Control Registers. */
12969 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12970 			0x00000000, 0x007fff80 },
12971 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12972 			0x00000000, 0x007fffff },
12973 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12974 			0x00000000, 0x0000003f },
12975 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12976 			0x00000000, 0x000001ff },
12977 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12978 			0x00000000, 0x000001ff },
12979 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12980 			0xffffffff, 0x00000000 },
12981 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12982 			0xffffffff, 0x00000000 },
12983 
12984 		/* Mailbox Registers */
12985 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12986 			0x00000000, 0x000001ff },
12987 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12988 			0x00000000, 0x000001ff },
12989 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12990 			0x00000000, 0x000007ff },
12991 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12992 			0x00000000, 0x000001ff },
12993 
12994 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
12995 	};
12996 
12997 	is_5705 = is_5750 = 0;
12998 	if (tg3_flag(tp, 5705_PLUS)) {
12999 		is_5705 = 1;
13000 		if (tg3_flag(tp, 5750_PLUS))
13001 			is_5750 = 1;
13002 	}
13003 
13004 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13005 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13006 			continue;
13007 
13008 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13009 			continue;
13010 
13011 		if (tg3_flag(tp, IS_5788) &&
13012 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13013 			continue;
13014 
13015 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13016 			continue;
13017 
13018 		offset = (u32) reg_tbl[i].offset;
13019 		read_mask = reg_tbl[i].read_mask;
13020 		write_mask = reg_tbl[i].write_mask;
13021 
13022 		/* Save the original register content */
13023 		save_val = tr32(offset);
13024 
13025 		/* Determine the read-only value. */
13026 		read_val = save_val & read_mask;
13027 
13028 		/* Write zero to the register, then make sure the read-only bits
13029 		 * are not changed and the read/write bits are all zeros.
13030 		 */
13031 		tw32(offset, 0);
13032 
13033 		val = tr32(offset);
13034 
13035 		/* Test the read-only and read/write bits. */
13036 		if (((val & read_mask) != read_val) || (val & write_mask))
13037 			goto out;
13038 
13039 		/* Write ones to all the bits defined by RdMask and WrMask, then
13040 		 * make sure the read-only bits are not changed and the
13041 		 * read/write bits are all ones.
13042 		 */
13043 		tw32(offset, read_mask | write_mask);
13044 
13045 		val = tr32(offset);
13046 
13047 		/* Test the read-only bits. */
13048 		if ((val & read_mask) != read_val)
13049 			goto out;
13050 
13051 		/* Test the read/write bits. */
13052 		if ((val & write_mask) != write_mask)
13053 			goto out;
13054 
13055 		tw32(offset, save_val);
13056 	}
13057 
13058 	return 0;
13059 
13060 out:
13061 	if (netif_msg_hw(tp))
13062 		netdev_err(tp->dev,
13063 			   "Register test failed at offset %x\n", offset);
13064 	tw32(offset, save_val);
13065 	return -EIO;
13066 }
13067 
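/* Write each test pattern across the given memory window in 4-byte
 * steps and verify the read-back.
 */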
13068 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13069 {
13070 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13071 	int i;
13072 	u32 j;
13073 
13074 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13075 		for (j = 0; j < len; j += 4) {
13076 			u32 val;
13077 
13078 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13079 			tg3_read_mem(tp, offset + j, &val);
13080 			if (val != test_pattern[i])
13081 				return -EIO;
13082 		}
13083 	}
13084 	return 0;
13085 }
13086 
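/* Pattern-test the internal SRAM ranges valid for this ASIC family. */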
13087 static int tg3_test_memory(struct tg3 *tp)
13088 {
13089 	static struct mem_entry {
13090 		u32 offset;
13091 		u32 len;
13092 	} mem_tbl_570x[] = {
13093 		{ 0x00000000, 0x00b50},
13094 		{ 0x00002000, 0x1c000},
13095 		{ 0xffffffff, 0x00000}
13096 	}, mem_tbl_5705[] = {
13097 		{ 0x00000100, 0x0000c},
13098 		{ 0x00000200, 0x00008},
13099 		{ 0x00004000, 0x00800},
13100 		{ 0x00006000, 0x01000},
13101 		{ 0x00008000, 0x02000},
13102 		{ 0x00010000, 0x0e000},
13103 		{ 0xffffffff, 0x00000}
13104 	}, mem_tbl_5755[] = {
13105 		{ 0x00000200, 0x00008},
13106 		{ 0x00004000, 0x00800},
13107 		{ 0x00006000, 0x00800},
13108 		{ 0x00008000, 0x02000},
13109 		{ 0x00010000, 0x0c000},
13110 		{ 0xffffffff, 0x00000}
13111 	}, mem_tbl_5906[] = {
13112 		{ 0x00000200, 0x00008},
13113 		{ 0x00004000, 0x00400},
13114 		{ 0x00006000, 0x00400},
13115 		{ 0x00008000, 0x01000},
13116 		{ 0x00010000, 0x01000},
13117 		{ 0xffffffff, 0x00000}
13118 	}, mem_tbl_5717[] = {
13119 		{ 0x00000200, 0x00008},
13120 		{ 0x00010000, 0x0a000},
13121 		{ 0x00020000, 0x13c00},
13122 		{ 0xffffffff, 0x00000}
13123 	}, mem_tbl_57765[] = {
13124 		{ 0x00000200, 0x00008},
13125 		{ 0x00004000, 0x00800},
13126 		{ 0x00006000, 0x09800},
13127 		{ 0x00010000, 0x0a000},
13128 		{ 0xffffffff, 0x00000}
13129 	};
13130 	struct mem_entry *mem_tbl;
13131 	int err = 0;
13132 	int i;
13133 
13134 	if (tg3_flag(tp, 5717_PLUS))
13135 		mem_tbl = mem_tbl_5717;
13136 	else if (tg3_flag(tp, 57765_CLASS) ||
13137 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13138 		mem_tbl = mem_tbl_57765;
13139 	else if (tg3_flag(tp, 5755_PLUS))
13140 		mem_tbl = mem_tbl_5755;
13141 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13142 		mem_tbl = mem_tbl_5906;
13143 	else if (tg3_flag(tp, 5705_PLUS))
13144 		mem_tbl = mem_tbl_5705;
13145 	else
13146 		mem_tbl = mem_tbl_570x;
13147 
13148 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13149 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13150 		if (err)
13151 			break;
13152 	}
13153 
13154 	return err;
13155 }
13156 
13157 #define TG3_TSO_MSS		500
13158 
13159 #define TG3_TSO_IP_HDR_LEN	20
13160 #define TG3_TSO_TCP_HDR_LEN	20
13161 #define TG3_TSO_TCP_OPT_LEN	12
13162 
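/* Canned Ethertype + IPv4 + TCP (with timestamp option) header used to
 * build the TSO loopback test frame.
 */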
13163 static const u8 tg3_tso_header[] = {
13164 0x08, 0x00,
13165 0x45, 0x00, 0x00, 0x00,
13166 0x00, 0x00, 0x40, 0x00,
13167 0x40, 0x06, 0x00, 0x00,
13168 0x0a, 0x00, 0x00, 0x01,
13169 0x0a, 0x00, 0x00, 0x02,
13170 0x0d, 0x00, 0xe0, 0x00,
13171 0x00, 0x00, 0x01, 0x00,
13172 0x00, 0x00, 0x02, 0x00,
13173 0x80, 0x10, 0x10, 0x00,
13174 0x14, 0x09, 0x00, 0x00,
13175 0x01, 0x01, 0x08, 0x0a,
13176 0x11, 0x11, 0x11, 0x11,
13177 0x11, 0x11, 0x11, 0x11,
13178 };
13179 
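/* Transmit one test frame (optionally TSO-segmented) through the chip
 * and verify that it arrives intact on the receive return ring.
 */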
13180 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13181 {
13182 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13183 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13184 	u32 budget;
13185 	struct sk_buff *skb;
13186 	u8 *tx_data, *rx_data;
13187 	dma_addr_t map;
13188 	int num_pkts, tx_len, rx_len, i, err;
13189 	struct tg3_rx_buffer_desc *desc;
13190 	struct tg3_napi *tnapi, *rnapi;
13191 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13192 
13193 	tnapi = &tp->napi[0];
13194 	rnapi = &tp->napi[0];
13195 	if (tp->irq_cnt > 1) {
13196 		if (tg3_flag(tp, ENABLE_RSS))
13197 			rnapi = &tp->napi[1];
13198 		if (tg3_flag(tp, ENABLE_TSS))
13199 			tnapi = &tp->napi[1];
13200 	}
13201 	coal_now = tnapi->coal_now | rnapi->coal_now;
13202 
13203 	err = -EIO;
13204 
13205 	tx_len = pktsz;
13206 	skb = netdev_alloc_skb(tp->dev, tx_len);
13207 	if (!skb)
13208 		return -ENOMEM;
13209 
13210 	tx_data = skb_put(skb, tx_len);
13211 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13212 	memset(tx_data + ETH_ALEN, 0x0, 8);
13213 
13214 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13215 
13216 	if (tso_loopback) {
13217 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13218 
13219 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13220 			      TG3_TSO_TCP_OPT_LEN;
13221 
13222 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13223 		       sizeof(tg3_tso_header));
13224 		mss = TG3_TSO_MSS;
13225 
13226 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13227 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13228 
13229 		/* Set the total length field in the IP header */
13230 		iph->tot_len = htons((u16)(mss + hdr_len));
13231 
13232 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13233 			      TXD_FLAG_CPU_POST_DMA);
13234 
13235 		if (tg3_flag(tp, HW_TSO_1) ||
13236 		    tg3_flag(tp, HW_TSO_2) ||
13237 		    tg3_flag(tp, HW_TSO_3)) {
13238 			struct tcphdr *th;
13239 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13240 			th = (struct tcphdr *)&tx_data[val];
13241 			th->check = 0;
13242 		} else
13243 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13244 
13245 		if (tg3_flag(tp, HW_TSO_3)) {
13246 			mss |= (hdr_len & 0xc) << 12;
13247 			if (hdr_len & 0x10)
13248 				base_flags |= 0x00000010;
13249 			base_flags |= (hdr_len & 0x3e0) << 5;
13250 		} else if (tg3_flag(tp, HW_TSO_2))
13251 			mss |= hdr_len << 9;
13252 		else if (tg3_flag(tp, HW_TSO_1) ||
13253 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13254 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13255 		} else {
13256 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13257 		}
13258 
13259 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13260 	} else {
13261 		num_pkts = 1;
13262 		data_off = ETH_HLEN;
13263 
13264 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13265 		    tx_len > VLAN_ETH_FRAME_LEN)
13266 			base_flags |= TXD_FLAG_JMB_PKT;
13267 	}
13268 
13269 	for (i = data_off; i < tx_len; i++)
13270 		tx_data[i] = (u8) (i & 0xff);
13271 
13272 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13273 	if (pci_dma_mapping_error(tp->pdev, map)) {
13274 		dev_kfree_skb(skb);
13275 		return -EIO;
13276 	}
13277 
13278 	val = tnapi->tx_prod;
13279 	tnapi->tx_buffers[val].skb = skb;
13280 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13281 
13282 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13283 	       rnapi->coal_now);
13284 
13285 	udelay(10);
13286 
13287 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13288 
13289 	budget = tg3_tx_avail(tnapi);
13290 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13291 			    base_flags | TXD_FLAG_END, mss, 0)) {
13292 		tnapi->tx_buffers[val].skb = NULL;
13293 		dev_kfree_skb(skb);
13294 		return -EIO;
13295 	}
13296 
13297 	tnapi->tx_prod++;
13298 
13299 	/* Sync BD data before updating mailbox */
13300 	wmb();
13301 
13302 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13303 	tr32_mailbox(tnapi->prodmbox);
13304 
13305 	udelay(10);
13306 
13307 	/* Poll for up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
13308 	for (i = 0; i < 35; i++) {
13309 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13310 		       coal_now);
13311 
13312 		udelay(10);
13313 
13314 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13315 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13316 		if ((tx_idx == tnapi->tx_prod) &&
13317 		    (rx_idx == (rx_start_idx + num_pkts)))
13318 			break;
13319 	}
13320 
13321 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13322 	dev_kfree_skb(skb);
13323 
13324 	if (tx_idx != tnapi->tx_prod)
13325 		goto out;
13326 
13327 	if (rx_idx != rx_start_idx + num_pkts)
13328 		goto out;
13329 
13330 	val = data_off;
13331 	while (rx_idx != rx_start_idx) {
13332 		desc = &rnapi->rx_rcb[rx_start_idx++];
13333 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13334 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13335 
13336 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13337 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13338 			goto out;
13339 
13340 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13341 			 - ETH_FCS_LEN;
13342 
13343 		if (!tso_loopback) {
13344 			if (rx_len != tx_len)
13345 				goto out;
13346 
13347 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13348 				if (opaque_key != RXD_OPAQUE_RING_STD)
13349 					goto out;
13350 			} else {
13351 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13352 					goto out;
13353 			}
13354 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13355 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13356 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13357 			goto out;
13358 		}
13359 
13360 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13361 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13362 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13363 					     mapping);
13364 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13365 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13366 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13367 					     mapping);
13368 		} else
13369 			goto out;
13370 
13371 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13372 					    PCI_DMA_FROMDEVICE);
13373 
13374 		rx_data += TG3_RX_OFFSET(tp);
13375 		for (i = data_off; i < rx_len; i++, val++) {
13376 			if (*(rx_data + i) != (u8) (val & 0xff))
13377 				goto out;
13378 		}
13379 	}
13380 
13381 	err = 0;
13382 
13383 	/* tg3_free_rings will unmap and free the rx_data */
13384 out:
13385 	return err;
13386 }
13387 
13388 #define TG3_STD_LOOPBACK_FAILED		1
13389 #define TG3_JMB_LOOPBACK_FAILED		2
13390 #define TG3_TSO_LOOPBACK_FAILED		4
13391 #define TG3_LOOPBACK_FAILED \
13392 	(TG3_STD_LOOPBACK_FAILED | \
13393 	 TG3_JMB_LOOPBACK_FAILED | \
13394 	 TG3_TSO_LOOPBACK_FAILED)
13395 
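/* Exercise the MAC, PHY and (optionally) external loopback paths,
 * recording per-mode failure bits in data[].
 */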
13396 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13397 {
13398 	int err = -EIO;
13399 	u32 eee_cap;
13400 	u32 jmb_pkt_sz = 9000;
13401 
13402 	if (tp->dma_limit)
13403 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13404 
13405 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13406 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13407 
13408 	if (!netif_running(tp->dev)) {
13409 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13410 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13411 		if (do_extlpbk)
13412 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13413 		goto done;
13414 	}
13415 
13416 	err = tg3_reset_hw(tp, true);
13417 	if (err) {
13418 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13419 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13420 		if (do_extlpbk)
13421 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13422 		goto done;
13423 	}
13424 
13425 	if (tg3_flag(tp, ENABLE_RSS)) {
13426 		int i;
13427 
13428 		/* Reroute all rx packets to the 1st queue */
13429 		for (i = MAC_RSS_INDIR_TBL_0;
13430 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13431 			tw32(i, 0x0);
13432 	}
13433 
13434 	/* HW erratum - MAC loopback fails in some cases on 5780.
13435 	 * Normal traffic and PHY loopback are not affected by this
13436 	 * erratum.  Also, the MAC loopback test is deprecated for
13437 	 * all newer ASIC revisions.
13438 	 */
13439 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13440 	    !tg3_flag(tp, CPMU_PRESENT)) {
13441 		tg3_mac_loopback(tp, true);
13442 
13443 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13444 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13445 
13446 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13447 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13448 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13449 
13450 		tg3_mac_loopback(tp, false);
13451 	}
13452 
13453 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13454 	    !tg3_flag(tp, USE_PHYLIB)) {
13455 		int i;
13456 
13457 		tg3_phy_lpbk_set(tp, 0, false);
13458 
13459 		/* Wait for link */
13460 		for (i = 0; i < 100; i++) {
13461 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13462 				break;
13463 			mdelay(1);
13464 		}
13465 
13466 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13467 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13468 		if (tg3_flag(tp, TSO_CAPABLE) &&
13469 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13470 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13471 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13472 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13473 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13474 
13475 		if (do_extlpbk) {
13476 			tg3_phy_lpbk_set(tp, 0, true);
13477 
13478 			/* All link indications report up, but the hardware
13479 			 * isn't really ready for about 20 msec.  Double it
13480 			 * to be sure.
13481 			 */
13482 			mdelay(40);
13483 
13484 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13485 				data[TG3_EXT_LOOPB_TEST] |=
13486 							TG3_STD_LOOPBACK_FAILED;
13487 			if (tg3_flag(tp, TSO_CAPABLE) &&
13488 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13489 				data[TG3_EXT_LOOPB_TEST] |=
13490 							TG3_TSO_LOOPBACK_FAILED;
13491 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13492 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13493 				data[TG3_EXT_LOOPB_TEST] |=
13494 							TG3_JMB_LOOPBACK_FAILED;
13495 		}
13496 
13497 		/* Re-enable gphy autopowerdown. */
13498 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13499 			tg3_phy_toggle_apd(tp, true);
13500 	}
13501 
13502 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13503 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13504 
13505 done:
13506 	tp->phy_flags |= eee_cap;
13507 
13508 	return err;
13509 }
13510 
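/* ethtool self-test entry point.  Offline mode halts the chip for the
 * register, memory, loopback and interrupt tests, then restarts it.
 */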
13511 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13512 			  u64 *data)
13513 {
13514 	struct tg3 *tp = netdev_priv(dev);
13515 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13516 
13517 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13518 		if (tg3_power_up(tp)) {
13519 			etest->flags |= ETH_TEST_FL_FAILED;
13520 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13521 			return;
13522 		}
13523 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13524 	}
13525 
13526 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13527 
13528 	if (tg3_test_nvram(tp) != 0) {
13529 		etest->flags |= ETH_TEST_FL_FAILED;
13530 		data[TG3_NVRAM_TEST] = 1;
13531 	}
13532 	if (!doextlpbk && tg3_test_link(tp)) {
13533 		etest->flags |= ETH_TEST_FL_FAILED;
13534 		data[TG3_LINK_TEST] = 1;
13535 	}
13536 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13537 		int err, err2 = 0, irq_sync = 0;
13538 
13539 		if (netif_running(dev)) {
13540 			tg3_phy_stop(tp);
13541 			tg3_netif_stop(tp);
13542 			irq_sync = 1;
13543 		}
13544 
13545 		tg3_full_lock(tp, irq_sync);
13546 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13547 		err = tg3_nvram_lock(tp);
13548 		tg3_halt_cpu(tp, RX_CPU_BASE);
13549 		if (!tg3_flag(tp, 5705_PLUS))
13550 			tg3_halt_cpu(tp, TX_CPU_BASE);
13551 		if (!err)
13552 			tg3_nvram_unlock(tp);
13553 
13554 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13555 			tg3_phy_reset(tp);
13556 
13557 		if (tg3_test_registers(tp) != 0) {
13558 			etest->flags |= ETH_TEST_FL_FAILED;
13559 			data[TG3_REGISTER_TEST] = 1;
13560 		}
13561 
13562 		if (tg3_test_memory(tp) != 0) {
13563 			etest->flags |= ETH_TEST_FL_FAILED;
13564 			data[TG3_MEMORY_TEST] = 1;
13565 		}
13566 
13567 		if (doextlpbk)
13568 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13569 
13570 		if (tg3_test_loopback(tp, data, doextlpbk))
13571 			etest->flags |= ETH_TEST_FL_FAILED;
13572 
13573 		tg3_full_unlock(tp);
13574 
13575 		if (tg3_test_interrupt(tp) != 0) {
13576 			etest->flags |= ETH_TEST_FL_FAILED;
13577 			data[TG3_INTERRUPT_TEST] = 1;
13578 		}
13579 
13580 		tg3_full_lock(tp, 0);
13581 
13582 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13583 		if (netif_running(dev)) {
13584 			tg3_flag_set(tp, INIT_COMPLETE);
13585 			err2 = tg3_restart_hw(tp, true);
13586 			if (!err2)
13587 				tg3_netif_start(tp);
13588 		}
13589 
13590 		tg3_full_unlock(tp);
13591 
13592 		if (irq_sync && !err2)
13593 			tg3_phy_start(tp);
13594 	}
13595 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13596 		tg3_power_down_prepare(tp);
13598 }
13599 
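/* SIOCSHWTSTAMP handler: program the RX PTP filter and the TX
 * timestamp enable from the user's hwtstamp_config.
 */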
13600 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13601 {
13602 	struct tg3 *tp = netdev_priv(dev);
13603 	struct hwtstamp_config stmpconf;
13604 
13605 	if (!tg3_flag(tp, PTP_CAPABLE))
13606 		return -EOPNOTSUPP;
13607 
13608 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13609 		return -EFAULT;
13610 
13611 	if (stmpconf.flags)
13612 		return -EINVAL;
13613 
13614 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13615 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13616 		return -ERANGE;
13617 
13618 	switch (stmpconf.rx_filter) {
13619 	case HWTSTAMP_FILTER_NONE:
13620 		tp->rxptpctl = 0;
13621 		break;
13622 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13623 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13624 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13625 		break;
13626 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13627 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13628 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13629 		break;
13630 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13631 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13632 			       TG3_RX_PTP_CTL_DELAY_REQ;
13633 		break;
13634 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13635 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13636 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13637 		break;
13638 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13639 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13640 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13641 		break;
13642 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13643 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13644 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13645 		break;
13646 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13647 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13648 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13649 		break;
13650 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13651 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13652 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13653 		break;
13654 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13655 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13656 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13657 		break;
13658 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13659 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13660 			       TG3_RX_PTP_CTL_DELAY_REQ;
13661 		break;
13662 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13663 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13664 			       TG3_RX_PTP_CTL_DELAY_REQ;
13665 		break;
13666 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13667 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13668 			       TG3_RX_PTP_CTL_DELAY_REQ;
13669 		break;
13670 	default:
13671 		return -ERANGE;
13672 	}
13673 
13674 	if (netif_running(dev) && tp->rxptpctl)
13675 		tw32(TG3_RX_PTP_CTL,
13676 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13677 
13678 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13679 		tg3_flag_set(tp, TX_TSTAMP_EN);
13680 	else
13681 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13682 
13683 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13684 		-EFAULT : 0;
13685 }
13686 
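/* SIOCGHWTSTAMP handler: translate the current rxptpctl setting back
 * into a hwtstamp_config for user space.
 */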
13687 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13688 {
13689 	struct tg3 *tp = netdev_priv(dev);
13690 	struct hwtstamp_config stmpconf;
13691 
13692 	if (!tg3_flag(tp, PTP_CAPABLE))
13693 		return -EOPNOTSUPP;
13694 
13695 	stmpconf.flags = 0;
13696 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13697 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13698 
13699 	switch (tp->rxptpctl) {
13700 	case 0:
13701 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13702 		break;
13703 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13704 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13705 		break;
13706 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13707 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13708 		break;
13709 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13710 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13711 		break;
13712 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13713 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13714 		break;
13715 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13716 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13717 		break;
13718 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13719 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13720 		break;
13721 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13722 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13723 		break;
13724 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13725 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13726 		break;
13727 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13728 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13729 		break;
13730 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13731 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13732 		break;
13733 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13734 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13735 		break;
13736 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13737 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13738 		break;
13739 	default:
13740 		WARN_ON_ONCE(1);
13741 		return -ERANGE;
13742 	}
13743 
13744 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13745 		-EFAULT : 0;
13746 }
13747 
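/* ndo_do_ioctl handler: MII register access (forwarded to phylib when
 * in use) plus hardware timestamp get/set.
 */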
13748 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13749 {
13750 	struct mii_ioctl_data *data = if_mii(ifr);
13751 	struct tg3 *tp = netdev_priv(dev);
13752 	int err;
13753 
13754 	if (tg3_flag(tp, USE_PHYLIB)) {
13755 		struct phy_device *phydev;
13756 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13757 			return -EAGAIN;
13758 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13759 		return phy_mii_ioctl(phydev, ifr, cmd);
13760 	}
13761 
13762 	switch (cmd) {
13763 	case SIOCGMIIPHY:
13764 		data->phy_id = tp->phy_addr;
13765 
13766 		/* fallthru */
13767 	case SIOCGMIIREG: {
13768 		u32 mii_regval;
13769 
13770 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13771 			break;			/* We have no PHY */
13772 
13773 		if (!netif_running(dev))
13774 			return -EAGAIN;
13775 
13776 		spin_lock_bh(&tp->lock);
13777 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13778 				    data->reg_num & 0x1f, &mii_regval);
13779 		spin_unlock_bh(&tp->lock);
13780 
13781 		data->val_out = mii_regval;
13782 
13783 		return err;
13784 	}
13785 
13786 	case SIOCSMIIREG:
13787 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13788 			break;			/* We have no PHY */
13789 
13790 		if (!netif_running(dev))
13791 			return -EAGAIN;
13792 
13793 		spin_lock_bh(&tp->lock);
13794 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13795 				     data->reg_num & 0x1f, data->val_in);
13796 		spin_unlock_bh(&tp->lock);
13797 
13798 		return err;
13799 
13800 	case SIOCSHWTSTAMP:
13801 		return tg3_hwtstamp_set(dev, ifr);
13802 
13803 	case SIOCGHWTSTAMP:
13804 		return tg3_hwtstamp_get(dev, ifr);
13805 
13806 	default:
13807 		/* do nothing */
13808 		break;
13809 	}
13810 	return -EOPNOTSUPP;
13811 }
13812 
13813 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13814 {
13815 	struct tg3 *tp = netdev_priv(dev);
13816 
13817 	memcpy(ec, &tp->coal, sizeof(*ec));
13818 	return 0;
13819 }
13820 
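/* Validate and apply ethtool coalescing parameters.  The irq and
 * stats-block knobs only exist on pre-5705 chips, so their limits
 * stay zero everywhere else.
 */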
13821 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13822 {
13823 	struct tg3 *tp = netdev_priv(dev);
13824 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13825 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13826 
13827 	if (!tg3_flag(tp, 5705_PLUS)) {
13828 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13829 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13830 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13831 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13832 	}
13833 
13834 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13835 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13836 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13837 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13838 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13839 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13840 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13841 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13842 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13843 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13844 		return -EINVAL;
13845 
13846 	/* No rx interrupts will be generated if both are zero */
13847 	if ((ec->rx_coalesce_usecs == 0) &&
13848 	    (ec->rx_max_coalesced_frames == 0))
13849 		return -EINVAL;
13850 
13851 	/* No tx interrupts will be generated if both are zero */
13852 	if ((ec->tx_coalesce_usecs == 0) &&
13853 	    (ec->tx_max_coalesced_frames == 0))
13854 		return -EINVAL;
13855 
13856 	/* Only copy relevant parameters, ignore all others. */
13857 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13858 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13859 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13860 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13861 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13862 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13863 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13864 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13865 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13866 
13867 	if (netif_running(dev)) {
13868 		tg3_full_lock(tp, 0);
13869 		__tg3_set_coalesce(tp, &tp->coal);
13870 		tg3_full_unlock(tp);
13871 	}
13872 	return 0;
13873 }
13874 
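/* ethtool set_eee handler.  Changing the EEE advertisement directly is
 * not supported; it must follow the autoneg advertisement.
 */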
13875 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13876 {
13877 	struct tg3 *tp = netdev_priv(dev);
13878 
13879 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13880 		netdev_warn(tp->dev, "Board does not support EEE!\n");
13881 		return -EOPNOTSUPP;
13882 	}
13883 
13884 	if (edata->advertised != tp->eee.advertised) {
13885 		netdev_warn(tp->dev,
13886 			    "Direct manipulation of EEE advertisement is not supported\n");
13887 		return -EINVAL;
13888 	}
13889 
13890 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13891 		netdev_warn(tp->dev,
13892 			    "Maximum supported Tx LPI timer is %#x usec\n",
13893 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13894 		return -EINVAL;
13895 	}
13896 
13897 	tp->eee = *edata;
13898 
13899 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13900 	tg3_warn_mgmt_link_flap(tp);
13901 
13902 	if (netif_running(tp->dev)) {
13903 		tg3_full_lock(tp, 0);
13904 		tg3_setup_eee(tp);
13905 		tg3_phy_reset(tp);
13906 		tg3_full_unlock(tp);
13907 	}
13908 
13909 	return 0;
13910 }
13911 
13912 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13913 {
13914 	struct tg3 *tp = netdev_priv(dev);
13915 
13916 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13917 		netdev_warn(tp->dev,
13918 			    "Board does not support EEE!\n");
13919 		return -EOPNOTSUPP;
13920 	}
13921 
13922 	*edata = tp->eee;
13923 	return 0;
13924 }
13925 
13926 static const struct ethtool_ops tg3_ethtool_ops = {
13927 	.get_settings		= tg3_get_settings,
13928 	.set_settings		= tg3_set_settings,
13929 	.get_drvinfo		= tg3_get_drvinfo,
13930 	.get_regs_len		= tg3_get_regs_len,
13931 	.get_regs		= tg3_get_regs,
13932 	.get_wol		= tg3_get_wol,
13933 	.set_wol		= tg3_set_wol,
13934 	.get_msglevel		= tg3_get_msglevel,
13935 	.set_msglevel		= tg3_set_msglevel,
13936 	.nway_reset		= tg3_nway_reset,
13937 	.get_link		= ethtool_op_get_link,
13938 	.get_eeprom_len		= tg3_get_eeprom_len,
13939 	.get_eeprom		= tg3_get_eeprom,
13940 	.set_eeprom		= tg3_set_eeprom,
13941 	.get_ringparam		= tg3_get_ringparam,
13942 	.set_ringparam		= tg3_set_ringparam,
13943 	.get_pauseparam		= tg3_get_pauseparam,
13944 	.set_pauseparam		= tg3_set_pauseparam,
13945 	.self_test		= tg3_self_test,
13946 	.get_strings		= tg3_get_strings,
13947 	.set_phys_id		= tg3_set_phys_id,
13948 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13949 	.get_coalesce		= tg3_get_coalesce,
13950 	.set_coalesce		= tg3_set_coalesce,
13951 	.get_sset_count		= tg3_get_sset_count,
13952 	.get_rxnfc		= tg3_get_rxnfc,
13953 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13954 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13955 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13956 	.get_channels		= tg3_get_channels,
13957 	.set_channels		= tg3_set_channels,
13958 	.get_ts_info		= tg3_get_ts_info,
13959 	.get_eee		= tg3_get_eee,
13960 	.set_eee		= tg3_set_eee,
13961 };
13962 
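/* Return 64-bit device statistics; once hw_stats is gone (device
 * closed), hand back the last saved snapshot.
 */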
13963 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13964 						struct rtnl_link_stats64 *stats)
13965 {
13966 	struct tg3 *tp = netdev_priv(dev);
13967 
13968 	spin_lock_bh(&tp->lock);
13969 	if (!tp->hw_stats) {
13970 		spin_unlock_bh(&tp->lock);
13971 		return &tp->net_stats_prev;
13972 	}
13973 
13974 	tg3_get_nstats(tp, stats);
13975 	spin_unlock_bh(&tp->lock);
13976 
13977 	return stats;
13978 }
13979 
13980 static void tg3_set_rx_mode(struct net_device *dev)
13981 {
13982 	struct tg3 *tp = netdev_priv(dev);
13983 
13984 	if (!netif_running(dev))
13985 		return;
13986 
13987 	tg3_full_lock(tp, 0);
13988 	__tg3_set_rx_mode(dev);
13989 	tg3_full_unlock(tp);
13990 }
13991 
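/* Apply a new MTU: 5780-class chips trade TSO capability for jumbo
 * frames, other chips toggle the dedicated jumbo ring instead.
 */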
13992 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13993 			       int new_mtu)
13994 {
13995 	dev->mtu = new_mtu;
13996 
13997 	if (new_mtu > ETH_DATA_LEN) {
13998 		if (tg3_flag(tp, 5780_CLASS)) {
13999 			netdev_update_features(dev);
14000 			tg3_flag_clear(tp, TSO_CAPABLE);
14001 		} else {
14002 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14003 		}
14004 	} else {
14005 		if (tg3_flag(tp, 5780_CLASS)) {
14006 			tg3_flag_set(tp, TSO_CAPABLE);
14007 			netdev_update_features(dev);
14008 		}
14009 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14010 	}
14011 }
14012 
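/* ndo_change_mtu handler.  If the device is running, the chip must be
 * halted and restarted so the rings can be resized.
 */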
14013 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14014 {
14015 	struct tg3 *tp = netdev_priv(dev);
14016 	int err;
14017 	bool reset_phy = false;
14018 
14019 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14020 		return -EINVAL;
14021 
14022 	if (!netif_running(dev)) {
14023 		/* We'll just catch it later when the
14024 		 * device is brought up.
14025 		 */
14026 		tg3_set_mtu(dev, tp, new_mtu);
14027 		return 0;
14028 	}
14029 
14030 	tg3_phy_stop(tp);
14031 
14032 	tg3_netif_stop(tp);
14033 
14034 	tg3_full_lock(tp, 1);
14035 
14036 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14037 
14038 	tg3_set_mtu(dev, tp, new_mtu);
14039 
14040 	/* Reset the PHY, otherwise the read DMA engine will be stuck in a
14041 	 * mode that limits all read DMA requests to 256 bytes.
14042 	 */
14043 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
14044 		reset_phy = true;
14045 
14046 	err = tg3_restart_hw(tp, reset_phy);
14047 
14048 	if (!err)
14049 		tg3_netif_start(tp);
14050 
14051 	tg3_full_unlock(tp);
14052 
14053 	if (!err)
14054 		tg3_phy_start(tp);
14055 
14056 	return err;
14057 }
14058 
14059 static const struct net_device_ops tg3_netdev_ops = {
14060 	.ndo_open		= tg3_open,
14061 	.ndo_stop		= tg3_close,
14062 	.ndo_start_xmit		= tg3_start_xmit,
14063 	.ndo_get_stats64	= tg3_get_stats64,
14064 	.ndo_validate_addr	= eth_validate_addr,
14065 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14066 	.ndo_set_mac_address	= tg3_set_mac_addr,
14067 	.ndo_do_ioctl		= tg3_ioctl,
14068 	.ndo_tx_timeout		= tg3_tx_timeout,
14069 	.ndo_change_mtu		= tg3_change_mtu,
14070 	.ndo_fix_features	= tg3_fix_features,
14071 	.ndo_set_features	= tg3_set_features,
14072 #ifdef CONFIG_NET_POLL_CONTROLLER
14073 	.ndo_poll_controller	= tg3_poll_controller,
14074 #endif
14075 };
14076 
14077 static void tg3_get_eeprom_size(struct tg3 *tp)
14078 {
14079 	u32 cursize, val, magic;
14080 
14081 	tp->nvram_size = EEPROM_CHIP_SIZE;
14082 
14083 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14084 		return;
14085 
14086 	if ((magic != TG3_EEPROM_MAGIC) &&
14087 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14088 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14089 		return;
14090 
14091 	/*
14092 	 * Size the chip by reading offsets at increasing powers of two.
14093 	 * When we encounter our validation signature, we know the addressing
14094 	 * has wrapped around, and thus have our chip size.
14095 	 */
14096 	cursize = 0x10;
14097 
14098 	while (cursize < tp->nvram_size) {
14099 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14100 			return;
14101 
14102 		if (val == magic)
14103 			break;
14104 
14105 		cursize <<= 1;
14106 	}
14107 
14108 	tp->nvram_size = cursize;
14109 }
14110 
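/* Determine the total NVRAM size: probe legacy/selfboot EEPROMs for
 * address wrap, else use the size word at offset 0xf0, defaulting to
 * 512KB if it is unset.
 */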
14111 static void tg3_get_nvram_size(struct tg3 *tp)
14112 {
14113 	u32 val;
14114 
14115 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14116 		return;
14117 
14118 	/* Selfboot format */
14119 	if (val != TG3_EEPROM_MAGIC) {
14120 		tg3_get_eeprom_size(tp);
14121 		return;
14122 	}
14123 
14124 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14125 		if (val != 0) {
14126 			/* This is confusing.  We want to operate on the
14127 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14128 			 * call will read from NVRAM and byteswap the data
14129 			 * according to the byteswapping settings for all
14130 			 * other register accesses.  This ensures the data we
14131 			 * want will always reside in the lower 16-bits.
14132 			 * However, the data in NVRAM is in LE format, which
14133 			 * means the data from the NVRAM read will always be
14134 			 * opposite the endianness of the CPU.  The 16-bit
14135 			 * byteswap then brings the data to CPU endianness.
14136 			 */
14137 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14138 			return;
14139 		}
14140 	}
14141 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14142 }
14143 
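/* Decode the NVRAM_CFG1 straps for 5750/5780-class and older chips. */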
14144 static void tg3_get_nvram_info(struct tg3 *tp)
14145 {
14146 	u32 nvcfg1;
14147 
14148 	nvcfg1 = tr32(NVRAM_CFG1);
14149 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14150 		tg3_flag_set(tp, FLASH);
14151 	} else {
14152 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14153 		tw32(NVRAM_CFG1, nvcfg1);
14154 	}
14155 
14156 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14157 	    tg3_flag(tp, 5780_CLASS)) {
14158 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14159 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14160 			tp->nvram_jedecnum = JEDEC_ATMEL;
14161 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14162 			tg3_flag_set(tp, NVRAM_BUFFERED);
14163 			break;
14164 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14165 			tp->nvram_jedecnum = JEDEC_ATMEL;
14166 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14167 			break;
14168 		case FLASH_VENDOR_ATMEL_EEPROM:
14169 			tp->nvram_jedecnum = JEDEC_ATMEL;
14170 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14171 			tg3_flag_set(tp, NVRAM_BUFFERED);
14172 			break;
14173 		case FLASH_VENDOR_ST:
14174 			tp->nvram_jedecnum = JEDEC_ST;
14175 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14176 			tg3_flag_set(tp, NVRAM_BUFFERED);
14177 			break;
14178 		case FLASH_VENDOR_SAIFUN:
14179 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14180 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14181 			break;
14182 		case FLASH_VENDOR_SST_SMALL:
14183 		case FLASH_VENDOR_SST_LARGE:
14184 			tp->nvram_jedecnum = JEDEC_SST;
14185 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14186 			break;
14187 		}
14188 	} else {
14189 		tp->nvram_jedecnum = JEDEC_ATMEL;
14190 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14191 		tg3_flag_set(tp, NVRAM_BUFFERED);
14192 	}
14193 }
14194 
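/* Decode the 5752+ flash page-size strap bits into a byte count. */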
14195 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14196 {
14197 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14198 	case FLASH_5752PAGE_SIZE_256:
14199 		tp->nvram_pagesize = 256;
14200 		break;
14201 	case FLASH_5752PAGE_SIZE_512:
14202 		tp->nvram_pagesize = 512;
14203 		break;
14204 	case FLASH_5752PAGE_SIZE_1K:
14205 		tp->nvram_pagesize = 1024;
14206 		break;
14207 	case FLASH_5752PAGE_SIZE_2K:
14208 		tp->nvram_pagesize = 2048;
14209 		break;
14210 	case FLASH_5752PAGE_SIZE_4K:
14211 		tp->nvram_pagesize = 4096;
14212 		break;
14213 	case FLASH_5752PAGE_SIZE_264:
14214 		tp->nvram_pagesize = 264;
14215 		break;
14216 	case FLASH_5752PAGE_SIZE_528:
14217 		tp->nvram_pagesize = 528;
14218 		break;
14219 	}
14220 }
14221 
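/* 5752 NVRAM strap decoding, including the TPM write-protect strap. */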
14222 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14223 {
14224 	u32 nvcfg1;
14225 
14226 	nvcfg1 = tr32(NVRAM_CFG1);
14227 
14228 	/* NVRAM protection for TPM */
14229 	if (nvcfg1 & (1 << 27))
14230 		tg3_flag_set(tp, PROTECTED_NVRAM);
14231 
14232 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14233 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14234 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14235 		tp->nvram_jedecnum = JEDEC_ATMEL;
14236 		tg3_flag_set(tp, NVRAM_BUFFERED);
14237 		break;
14238 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14239 		tp->nvram_jedecnum = JEDEC_ATMEL;
14240 		tg3_flag_set(tp, NVRAM_BUFFERED);
14241 		tg3_flag_set(tp, FLASH);
14242 		break;
14243 	case FLASH_5752VENDOR_ST_M45PE10:
14244 	case FLASH_5752VENDOR_ST_M45PE20:
14245 	case FLASH_5752VENDOR_ST_M45PE40:
14246 		tp->nvram_jedecnum = JEDEC_ST;
14247 		tg3_flag_set(tp, NVRAM_BUFFERED);
14248 		tg3_flag_set(tp, FLASH);
14249 		break;
14250 	}
14251 
14252 	if (tg3_flag(tp, FLASH)) {
14253 		tg3_nvram_get_pagesize(tp, nvcfg1);
14254 	} else {
14255 		/* For eeprom, set pagesize to maximum eeprom size */
14256 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14257 
14258 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14259 		tw32(NVRAM_CFG1, nvcfg1);
14260 	}
14261 }
14262 
14263 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14264 {
14265 	u32 nvcfg1, protect = 0;
14266 
14267 	nvcfg1 = tr32(NVRAM_CFG1);
14268 
14269 	/* NVRAM protection for TPM */
14270 	if (nvcfg1 & (1 << 27)) {
14271 		tg3_flag_set(tp, PROTECTED_NVRAM);
14272 		protect = 1;
14273 	}
14274 
14275 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14276 	switch (nvcfg1) {
14277 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14278 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14279 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14280 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14281 		tp->nvram_jedecnum = JEDEC_ATMEL;
14282 		tg3_flag_set(tp, NVRAM_BUFFERED);
14283 		tg3_flag_set(tp, FLASH);
14284 		tp->nvram_pagesize = 264;
14285 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14286 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14287 			tp->nvram_size = (protect ? 0x3e200 :
14288 					  TG3_NVRAM_SIZE_512KB);
14289 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14290 			tp->nvram_size = (protect ? 0x1f200 :
14291 					  TG3_NVRAM_SIZE_256KB);
14292 		else
14293 			tp->nvram_size = (protect ? 0x1f200 :
14294 					  TG3_NVRAM_SIZE_128KB);
14295 		break;
14296 	case FLASH_5752VENDOR_ST_M45PE10:
14297 	case FLASH_5752VENDOR_ST_M45PE20:
14298 	case FLASH_5752VENDOR_ST_M45PE40:
14299 		tp->nvram_jedecnum = JEDEC_ST;
14300 		tg3_flag_set(tp, NVRAM_BUFFERED);
14301 		tg3_flag_set(tp, FLASH);
14302 		tp->nvram_pagesize = 256;
14303 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14304 			tp->nvram_size = (protect ?
14305 					  TG3_NVRAM_SIZE_64KB :
14306 					  TG3_NVRAM_SIZE_128KB);
14307 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14308 			tp->nvram_size = (protect ?
14309 					  TG3_NVRAM_SIZE_64KB :
14310 					  TG3_NVRAM_SIZE_256KB);
14311 		else
14312 			tp->nvram_size = (protect ?
14313 					  TG3_NVRAM_SIZE_128KB :
14314 					  TG3_NVRAM_SIZE_512KB);
14315 		break;
14316 	}
14317 }
14318 
14319 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14320 {
14321 	u32 nvcfg1;
14322 
14323 	nvcfg1 = tr32(NVRAM_CFG1);
14324 
14325 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14326 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14327 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14328 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14329 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14330 		tp->nvram_jedecnum = JEDEC_ATMEL;
14331 		tg3_flag_set(tp, NVRAM_BUFFERED);
14332 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14333 
14334 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14335 		tw32(NVRAM_CFG1, nvcfg1);
14336 		break;
14337 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14338 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14339 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14340 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14341 		tp->nvram_jedecnum = JEDEC_ATMEL;
14342 		tg3_flag_set(tp, NVRAM_BUFFERED);
14343 		tg3_flag_set(tp, FLASH);
14344 		tp->nvram_pagesize = 264;
14345 		break;
14346 	case FLASH_5752VENDOR_ST_M45PE10:
14347 	case FLASH_5752VENDOR_ST_M45PE20:
14348 	case FLASH_5752VENDOR_ST_M45PE40:
14349 		tp->nvram_jedecnum = JEDEC_ST;
14350 		tg3_flag_set(tp, NVRAM_BUFFERED);
14351 		tg3_flag_set(tp, FLASH);
14352 		tp->nvram_pagesize = 256;
14353 		break;
14354 	}
14355 }
14356 
14357 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14358 {
14359 	u32 nvcfg1, protect = 0;
14360 
14361 	nvcfg1 = tr32(NVRAM_CFG1);
14362 
14363 	/* NVRAM protection for TPM */
14364 	if (nvcfg1 & (1 << 27)) {
14365 		tg3_flag_set(tp, PROTECTED_NVRAM);
14366 		protect = 1;
14367 	}
14368 
14369 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14370 	switch (nvcfg1) {
14371 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14372 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14373 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14374 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14375 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14376 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14377 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14378 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14379 		tp->nvram_jedecnum = JEDEC_ATMEL;
14380 		tg3_flag_set(tp, NVRAM_BUFFERED);
14381 		tg3_flag_set(tp, FLASH);
14382 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14383 		tp->nvram_pagesize = 256;
14384 		break;
14385 	case FLASH_5761VENDOR_ST_A_M45PE20:
14386 	case FLASH_5761VENDOR_ST_A_M45PE40:
14387 	case FLASH_5761VENDOR_ST_A_M45PE80:
14388 	case FLASH_5761VENDOR_ST_A_M45PE16:
14389 	case FLASH_5761VENDOR_ST_M_M45PE20:
14390 	case FLASH_5761VENDOR_ST_M_M45PE40:
14391 	case FLASH_5761VENDOR_ST_M_M45PE80:
14392 	case FLASH_5761VENDOR_ST_M_M45PE16:
14393 		tp->nvram_jedecnum = JEDEC_ST;
14394 		tg3_flag_set(tp, NVRAM_BUFFERED);
14395 		tg3_flag_set(tp, FLASH);
14396 		tp->nvram_pagesize = 256;
14397 		break;
14398 	}
14399 
14400 	if (protect) {
14401 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14402 	} else {
14403 		switch (nvcfg1) {
14404 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14405 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14406 		case FLASH_5761VENDOR_ST_A_M45PE16:
14407 		case FLASH_5761VENDOR_ST_M_M45PE16:
14408 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14409 			break;
14410 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14411 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14412 		case FLASH_5761VENDOR_ST_A_M45PE80:
14413 		case FLASH_5761VENDOR_ST_M_M45PE80:
14414 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14415 			break;
14416 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14417 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14418 		case FLASH_5761VENDOR_ST_A_M45PE40:
14419 		case FLASH_5761VENDOR_ST_M_M45PE40:
14420 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14421 			break;
14422 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14423 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14424 		case FLASH_5761VENDOR_ST_A_M45PE20:
14425 		case FLASH_5761VENDOR_ST_M_M45PE20:
14426 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14427 			break;
14428 		}
14429 	}
14430 }
14431 
14432 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14433 {
14434 	tp->nvram_jedecnum = JEDEC_ATMEL;
14435 	tg3_flag_set(tp, NVRAM_BUFFERED);
14436 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14437 }
14438 
14439 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14440 {
14441 	u32 nvcfg1;
14442 
14443 	nvcfg1 = tr32(NVRAM_CFG1);
14444 
14445 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14446 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14447 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14448 		tp->nvram_jedecnum = JEDEC_ATMEL;
14449 		tg3_flag_set(tp, NVRAM_BUFFERED);
14450 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14451 
14452 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14453 		tw32(NVRAM_CFG1, nvcfg1);
14454 		return;
14455 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14456 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14457 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14458 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14459 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14460 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14461 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14462 		tp->nvram_jedecnum = JEDEC_ATMEL;
14463 		tg3_flag_set(tp, NVRAM_BUFFERED);
14464 		tg3_flag_set(tp, FLASH);
14465 
14466 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14467 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14468 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14469 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14470 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14471 			break;
14472 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14473 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14474 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14475 			break;
14476 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14477 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14478 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14479 			break;
14480 		}
14481 		break;
14482 	case FLASH_5752VENDOR_ST_M45PE10:
14483 	case FLASH_5752VENDOR_ST_M45PE20:
14484 	case FLASH_5752VENDOR_ST_M45PE40:
14485 		tp->nvram_jedecnum = JEDEC_ST;
14486 		tg3_flag_set(tp, NVRAM_BUFFERED);
14487 		tg3_flag_set(tp, FLASH);
14488 
14489 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14490 		case FLASH_5752VENDOR_ST_M45PE10:
14491 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14492 			break;
14493 		case FLASH_5752VENDOR_ST_M45PE20:
14494 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14495 			break;
14496 		case FLASH_5752VENDOR_ST_M45PE40:
14497 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14498 			break;
14499 		}
14500 		break;
14501 	default:
14502 		tg3_flag_set(tp, NO_NVRAM);
14503 		return;
14504 	}
14505 
14506 	tg3_nvram_get_pagesize(tp, nvcfg1);
14507 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14508 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14509 }
14510 
14511 
14512 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14513 {
14514 	u32 nvcfg1;
14515 
14516 	nvcfg1 = tr32(NVRAM_CFG1);
14517 
14518 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14519 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14520 	case FLASH_5717VENDOR_MICRO_EEPROM:
14521 		tp->nvram_jedecnum = JEDEC_ATMEL;
14522 		tg3_flag_set(tp, NVRAM_BUFFERED);
14523 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14524 
14525 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14526 		tw32(NVRAM_CFG1, nvcfg1);
14527 		return;
14528 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14529 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14530 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14531 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14532 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14533 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14534 	case FLASH_5717VENDOR_ATMEL_45USPT:
14535 		tp->nvram_jedecnum = JEDEC_ATMEL;
14536 		tg3_flag_set(tp, NVRAM_BUFFERED);
14537 		tg3_flag_set(tp, FLASH);
14538 
14539 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14540 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14541 			/* Detect size with tg3_get_nvram_size() */
14542 			break;
14543 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14544 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14545 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14546 			break;
14547 		default:
14548 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14549 			break;
14550 		}
14551 		break;
14552 	case FLASH_5717VENDOR_ST_M_M25PE10:
14553 	case FLASH_5717VENDOR_ST_A_M25PE10:
14554 	case FLASH_5717VENDOR_ST_M_M45PE10:
14555 	case FLASH_5717VENDOR_ST_A_M45PE10:
14556 	case FLASH_5717VENDOR_ST_M_M25PE20:
14557 	case FLASH_5717VENDOR_ST_A_M25PE20:
14558 	case FLASH_5717VENDOR_ST_M_M45PE20:
14559 	case FLASH_5717VENDOR_ST_A_M45PE20:
14560 	case FLASH_5717VENDOR_ST_25USPT:
14561 	case FLASH_5717VENDOR_ST_45USPT:
14562 		tp->nvram_jedecnum = JEDEC_ST;
14563 		tg3_flag_set(tp, NVRAM_BUFFERED);
14564 		tg3_flag_set(tp, FLASH);
14565 
14566 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14567 		case FLASH_5717VENDOR_ST_M_M25PE20:
14568 		case FLASH_5717VENDOR_ST_M_M45PE20:
14569 			/* Detect size with tg3_get_nvram_size() */
14570 			break;
14571 		case FLASH_5717VENDOR_ST_A_M25PE20:
14572 		case FLASH_5717VENDOR_ST_A_M45PE20:
14573 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14574 			break;
14575 		default:
14576 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14577 			break;
14578 		}
14579 		break;
14580 	default:
14581 		tg3_flag_set(tp, NO_NVRAM);
14582 		return;
14583 	}
14584 
14585 	tg3_nvram_get_pagesize(tp, nvcfg1);
14586 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14587 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14588 }
14589 
14590 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14591 {
14592 	u32 nvcfg1, nvmpinstrp;
14593 
14594 	nvcfg1 = tr32(NVRAM_CFG1);
14595 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14596 
14597 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14598 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14599 			tg3_flag_set(tp, NO_NVRAM);
14600 			return;
14601 		}
14602 
14603 		switch (nvmpinstrp) {
14604 		case FLASH_5762_EEPROM_HD:
14605 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14606 			break;
14607 		case FLASH_5762_EEPROM_LD:
14608 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14609 			break;
14610 		case FLASH_5720VENDOR_M_ST_M45PE20:
14611 			/* This pinstrap supports multiple sizes, so force it
14612 			 * to read the actual size from location 0xf0.
14613 			 */
14614 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14615 			break;
14616 		}
14617 	}
14618 
14619 	switch (nvmpinstrp) {
14620 	case FLASH_5720_EEPROM_HD:
14621 	case FLASH_5720_EEPROM_LD:
14622 		tp->nvram_jedecnum = JEDEC_ATMEL;
14623 		tg3_flag_set(tp, NVRAM_BUFFERED);
14624 
14625 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14626 		tw32(NVRAM_CFG1, nvcfg1);
14627 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14628 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14629 		else
14630 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14631 		return;
14632 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14633 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14634 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14635 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14636 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14637 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14638 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14639 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14640 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14641 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14642 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14643 	case FLASH_5720VENDOR_ATMEL_45USPT:
14644 		tp->nvram_jedecnum = JEDEC_ATMEL;
14645 		tg3_flag_set(tp, NVRAM_BUFFERED);
14646 		tg3_flag_set(tp, FLASH);
14647 
14648 		switch (nvmpinstrp) {
14649 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14650 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14651 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14652 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14653 			break;
14654 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14655 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14656 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14657 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14658 			break;
14659 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14660 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14661 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14662 			break;
14663 		default:
14664 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14665 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14666 			break;
14667 		}
14668 		break;
14669 	case FLASH_5720VENDOR_M_ST_M25PE10:
14670 	case FLASH_5720VENDOR_M_ST_M45PE10:
14671 	case FLASH_5720VENDOR_A_ST_M25PE10:
14672 	case FLASH_5720VENDOR_A_ST_M45PE10:
14673 	case FLASH_5720VENDOR_M_ST_M25PE20:
14674 	case FLASH_5720VENDOR_M_ST_M45PE20:
14675 	case FLASH_5720VENDOR_A_ST_M25PE20:
14676 	case FLASH_5720VENDOR_A_ST_M45PE20:
14677 	case FLASH_5720VENDOR_M_ST_M25PE40:
14678 	case FLASH_5720VENDOR_M_ST_M45PE40:
14679 	case FLASH_5720VENDOR_A_ST_M25PE40:
14680 	case FLASH_5720VENDOR_A_ST_M45PE40:
14681 	case FLASH_5720VENDOR_M_ST_M25PE80:
14682 	case FLASH_5720VENDOR_M_ST_M45PE80:
14683 	case FLASH_5720VENDOR_A_ST_M25PE80:
14684 	case FLASH_5720VENDOR_A_ST_M45PE80:
14685 	case FLASH_5720VENDOR_ST_25USPT:
14686 	case FLASH_5720VENDOR_ST_45USPT:
14687 		tp->nvram_jedecnum = JEDEC_ST;
14688 		tg3_flag_set(tp, NVRAM_BUFFERED);
14689 		tg3_flag_set(tp, FLASH);
14690 
14691 		switch (nvmpinstrp) {
14692 		case FLASH_5720VENDOR_M_ST_M25PE20:
14693 		case FLASH_5720VENDOR_M_ST_M45PE20:
14694 		case FLASH_5720VENDOR_A_ST_M25PE20:
14695 		case FLASH_5720VENDOR_A_ST_M45PE20:
14696 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14697 			break;
14698 		case FLASH_5720VENDOR_M_ST_M25PE40:
14699 		case FLASH_5720VENDOR_M_ST_M45PE40:
14700 		case FLASH_5720VENDOR_A_ST_M25PE40:
14701 		case FLASH_5720VENDOR_A_ST_M45PE40:
14702 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14703 			break;
14704 		case FLASH_5720VENDOR_M_ST_M25PE80:
14705 		case FLASH_5720VENDOR_M_ST_M45PE80:
14706 		case FLASH_5720VENDOR_A_ST_M25PE80:
14707 		case FLASH_5720VENDOR_A_ST_M45PE80:
14708 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14709 			break;
14710 		default:
14711 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14712 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14713 			break;
14714 		}
14715 		break;
14716 	default:
14717 		tg3_flag_set(tp, NO_NVRAM);
14718 		return;
14719 	}
14720 
14721 	tg3_nvram_get_pagesize(tp, nvcfg1);
14722 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14723 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14724 
14725 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14726 		u32 val;
14727 
14728 		if (tg3_nvram_read(tp, 0, &val))
14729 			return;
14730 
14731 		if (val != TG3_EEPROM_MAGIC &&
14732 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14733 			tg3_flag_set(tp, NO_NVRAM);
14734 	}
14735 }
14736 
14737 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14738 static void tg3_nvram_init(struct tg3 *tp)
14739 {
14740 	if (tg3_flag(tp, IS_SSB_CORE)) {
14741 		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14742 		tg3_flag_clear(tp, NVRAM);
14743 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14744 		tg3_flag_set(tp, NO_NVRAM);
14745 		return;
14746 	}
14747 
14748 	tw32_f(GRC_EEPROM_ADDR,
14749 	     (EEPROM_ADDR_FSM_RESET |
14750 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14751 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14752 
14753 	msleep(1);
14754 
14755 	/* Enable seeprom accesses. */
14756 	tw32_f(GRC_LOCAL_CTRL,
14757 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14758 	udelay(100);
14759 
14760 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14761 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14762 		tg3_flag_set(tp, NVRAM);
14763 
14764 		if (tg3_nvram_lock(tp)) {
14765 			netdev_warn(tp->dev,
14766 				    "Cannot get nvram lock, %s failed\n",
14767 				    __func__);
14768 			return;
14769 		}
14770 		tg3_enable_nvram_access(tp);
14771 
14772 		tp->nvram_size = 0;
14773 
14774 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14775 			tg3_get_5752_nvram_info(tp);
14776 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14777 			tg3_get_5755_nvram_info(tp);
14778 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14779 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14780 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14781 			tg3_get_5787_nvram_info(tp);
14782 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14783 			tg3_get_5761_nvram_info(tp);
14784 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14785 			tg3_get_5906_nvram_info(tp);
14786 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14787 			 tg3_flag(tp, 57765_CLASS))
14788 			tg3_get_57780_nvram_info(tp);
14789 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14790 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14791 			tg3_get_5717_nvram_info(tp);
14792 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14793 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14794 			tg3_get_5720_nvram_info(tp);
14795 		else
14796 			tg3_get_nvram_info(tp);
14797 
14798 		if (tp->nvram_size == 0)
14799 			tg3_get_nvram_size(tp);
14800 
14801 		tg3_disable_nvram_access(tp);
14802 		tg3_nvram_unlock(tp);
14803 
14804 	} else {
14805 		tg3_flag_clear(tp, NVRAM);
14806 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14807 
14808 		tg3_get_eeprom_size(tp);
14809 	}
14810 }
14811 
14812 struct subsys_tbl_ent {
14813 	u16 subsys_vendor, subsys_devid;
14814 	u32 phy_id;
14815 };
14816 
14817 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14818 	/* Broadcom boards. */
14819 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14820 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14821 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14822 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14823 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14824 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14825 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14826 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14827 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14828 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14829 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14830 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14831 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14832 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14833 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14834 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14835 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14836 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14837 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14838 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14839 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14840 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14841 
14842 	/* 3com boards. */
14843 	{ TG3PCI_SUBVENDOR_ID_3COM,
14844 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14845 	{ TG3PCI_SUBVENDOR_ID_3COM,
14846 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14847 	{ TG3PCI_SUBVENDOR_ID_3COM,
14848 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14849 	{ TG3PCI_SUBVENDOR_ID_3COM,
14850 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14851 	{ TG3PCI_SUBVENDOR_ID_3COM,
14852 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14853 
14854 	/* DELL boards. */
14855 	{ TG3PCI_SUBVENDOR_ID_DELL,
14856 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14857 	{ TG3PCI_SUBVENDOR_ID_DELL,
14858 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14859 	{ TG3PCI_SUBVENDOR_ID_DELL,
14860 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14861 	{ TG3PCI_SUBVENDOR_ID_DELL,
14862 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14863 
14864 	/* Compaq boards. */
14865 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14866 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14867 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14868 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14869 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14870 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14871 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14872 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14873 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14874 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14875 
14876 	/* IBM boards. */
14877 	{ TG3PCI_SUBVENDOR_ID_IBM,
14878 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14879 };
14880 
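/* Match this device's PCI subsystem vendor and device IDs against
 * the board table above; returns NULL when no entry matches.
 */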
14881 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14882 {
14883 	int i;
14884 
14885 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14886 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14887 		     tp->pdev->subsystem_vendor) &&
14888 		    (subsys_id_to_phy_id[i].subsys_devid ==
14889 		     tp->pdev->subsystem_device))
14890 			return &subsys_id_to_phy_id[i];
14891 	}
14892 	return NULL;
14893 }
14894 
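/* Read the hardware configuration that the bootcode left in NIC
 * SRAM (or, on the 5906, in the VCPU config shadow): PHY ID, LED
 * mode, WOL, ASF/APE enables and the related PHY flags.
 */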
14895 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14896 {
14897 	u32 val;
14898 
14899 	tp->phy_id = TG3_PHY_ID_INVALID;
14900 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14901 
14902 	/* Assume an onboard, WOL-capable device by default.  */
14903 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14904 	tg3_flag_set(tp, WOL_CAP);
14905 
14906 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14907 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14908 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14909 			tg3_flag_set(tp, IS_NIC);
14910 		}
14911 		val = tr32(VCPU_CFGSHDW);
14912 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14913 			tg3_flag_set(tp, ASPM_WORKAROUND);
14914 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14915 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14916 			tg3_flag_set(tp, WOL_ENABLE);
14917 			device_set_wakeup_enable(&tp->pdev->dev, true);
14918 		}
14919 		goto done;
14920 	}
14921 
14922 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14923 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14924 		u32 nic_cfg, led_cfg;
14925 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
14926 		u32 nic_phy_id, ver, eeprom_phy_id;
14927 		int eeprom_phy_serdes = 0;
14928 
14929 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14930 		tp->nic_sram_data_cfg = nic_cfg;
14931 
14932 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14933 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14934 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14935 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14936 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14937 		    (ver > 0) && (ver < 0x100))
14938 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14939 
14940 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14941 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14942 
14943 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14944 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14945 		    tg3_asic_rev(tp) == ASIC_REV_5720)
14946 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
14947 
14948 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14949 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14950 			eeprom_phy_serdes = 1;
14951 
14952 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14953 		if (nic_phy_id != 0) {
14954 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14955 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14956 
14957 			eeprom_phy_id  = (id1 >> 16) << 10;
14958 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14959 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14960 		} else
14961 			eeprom_phy_id = 0;
14962 
14963 		tp->phy_id = eeprom_phy_id;
14964 		if (eeprom_phy_serdes) {
14965 			if (!tg3_flag(tp, 5705_PLUS))
14966 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14967 			else
14968 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14969 		}
14970 
14971 		if (tg3_flag(tp, 5750_PLUS))
14972 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14973 				    SHASTA_EXT_LED_MODE_MASK);
14974 		else
14975 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14976 
14977 		switch (led_cfg) {
14978 		default:
14979 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14980 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14981 			break;
14982 
14983 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14984 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14985 			break;
14986 
14987 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14988 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14989 
14990 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
14991 			 * read from some older 5700/5701 bootcode.
14992 			 */
14993 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14994 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14995 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14996 
14997 			break;
14998 
14999 		case SHASTA_EXT_LED_SHARED:
15000 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15001 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15002 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15003 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15004 						 LED_CTRL_MODE_PHY_2);
15005 
15006 			if (tg3_flag(tp, 5717_PLUS) ||
15007 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15008 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15009 						LED_CTRL_BLINK_RATE_MASK;
15010 
15011 			break;
15012 
15013 		case SHASTA_EXT_LED_MAC:
15014 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15015 			break;
15016 
15017 		case SHASTA_EXT_LED_COMBO:
15018 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15019 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15020 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15021 						 LED_CTRL_MODE_PHY_2);
15022 			break;
15023 
15024 		}
15025 
15026 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15027 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15028 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15029 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15030 
15031 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15032 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15033 
15034 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15035 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15036 			if ((tp->pdev->subsystem_vendor ==
15037 			     PCI_VENDOR_ID_ARIMA) &&
15038 			    (tp->pdev->subsystem_device == 0x205a ||
15039 			     tp->pdev->subsystem_device == 0x2063))
15040 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15041 		} else {
15042 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15043 			tg3_flag_set(tp, IS_NIC);
15044 		}
15045 
15046 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15047 			tg3_flag_set(tp, ENABLE_ASF);
15048 			if (tg3_flag(tp, 5750_PLUS))
15049 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15050 		}
15051 
15052 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15053 		    tg3_flag(tp, 5750_PLUS))
15054 			tg3_flag_set(tp, ENABLE_APE);
15055 
15056 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15057 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15058 			tg3_flag_clear(tp, WOL_CAP);
15059 
15060 		if (tg3_flag(tp, WOL_CAP) &&
15061 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15062 			tg3_flag_set(tp, WOL_ENABLE);
15063 			device_set_wakeup_enable(&tp->pdev->dev, true);
15064 		}
15065 
15066 		if (cfg2 & (1 << 17))
15067 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15068 
15069 		/* serdes signal pre-emphasis in register 0x590 is set
15070 		 * by the bootcode if bit 18 is set */
15071 		if (cfg2 & (1 << 18))
15072 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15073 
15074 		if ((tg3_flag(tp, 57765_PLUS) ||
15075 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15076 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15077 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15078 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15079 
15080 		if (tg3_flag(tp, PCI_EXPRESS)) {
15081 			u32 cfg3;
15082 
15083 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15084 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15085 			    !tg3_flag(tp, 57765_PLUS) &&
15086 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15087 				tg3_flag_set(tp, ASPM_WORKAROUND);
15088 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15089 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15090 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15091 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15092 		}
15093 
15094 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15095 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15096 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15097 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15098 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15099 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15100 
15101 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15102 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15103 	}
15104 done:
15105 	if (tg3_flag(tp, WOL_CAP))
15106 		device_set_wakeup_enable(&tp->pdev->dev,
15107 					 tg3_flag(tp, WOL_ENABLE));
15108 	else
15109 		device_set_wakeup_capable(&tp->pdev->dev, false);
15110 }
15111 
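/* Read one 32-bit word from the chip's OTP region through the APE.
 * Holds the NVRAM lock around the access and polls for up to 1 ms
 * for command completion.
 */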
15112 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15113 {
15114 	int i, err;
15115 	u32 val2, off = offset * 8;
15116 
15117 	err = tg3_nvram_lock(tp);
15118 	if (err)
15119 		return err;
15120 
15121 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15122 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15123 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15124 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15125 	udelay(10);
15126 
15127 	for (i = 0; i < 100; i++) {
15128 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15129 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15130 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15131 			break;
15132 		}
15133 		udelay(10);
15134 	}
15135 
15136 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15137 
15138 	tg3_nvram_unlock(tp);
15139 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15140 		return 0;
15141 
15142 	return -EBUSY;
15143 }
15144 
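/* Start an OTP controller command and poll OTP_STATUS for
 * completion; returns -EBUSY if the command never finishes.
 */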
15145 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15146 {
15147 	int i;
15148 	u32 val;
15149 
15150 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15151 	tw32(OTP_CTRL, cmd);
15152 
15153 	/* Wait for up to 1 ms for command to execute. */
15154 	for (i = 0; i < 100; i++) {
15155 		val = tr32(OTP_STATUS);
15156 		if (val & OTP_STATUS_CMD_DONE)
15157 			break;
15158 		udelay(10);
15159 	}
15160 
15161 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15162 }
15163 
15164 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15165  * configuration is a 32-bit value that straddles the alignment boundary.
15166  * We do two 32-bit reads and then shift and merge the results.
15167  */
15168 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15169 {
15170 	u32 bhalf_otp, thalf_otp;
15171 
15172 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15173 
15174 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15175 		return 0;
15176 
15177 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15178 
15179 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15180 		return 0;
15181 
15182 	thalf_otp = tr32(OTP_READ_DATA);
15183 
15184 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15185 
15186 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15187 		return 0;
15188 
15189 	bhalf_otp = tr32(OTP_READ_DATA);
15190 
15191 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15192 }
15193 
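/* Seed the link configuration: advertise every mode the PHY flags
 * permit and default to autonegotiation.
 */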
15194 static void tg3_phy_init_link_config(struct tg3 *tp)
15195 {
15196 	u32 adv = ADVERTISED_Autoneg;
15197 
15198 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15199 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15200 			adv |= ADVERTISED_1000baseT_Half;
15201 		adv |= ADVERTISED_1000baseT_Full;
15202 	}
15203 
15204 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15205 		adv |= ADVERTISED_100baseT_Half |
15206 		       ADVERTISED_100baseT_Full |
15207 		       ADVERTISED_10baseT_Half |
15208 		       ADVERTISED_10baseT_Full |
15209 		       ADVERTISED_TP;
15210 	else
15211 		adv |= ADVERTISED_FIBRE;
15212 
15213 	tp->link_config.advertising = adv;
15214 	tp->link_config.speed = SPEED_UNKNOWN;
15215 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15216 	tp->link_config.autoneg = AUTONEG_ENABLE;
15217 	tp->link_config.active_speed = SPEED_UNKNOWN;
15218 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15219 
15220 	tp->old_link = -1;
15221 }
15222 
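/* Identify the PHY.  The ID registers are only read when ASF/APE
 * firmware is not using the PHY; otherwise we fall back to the
 * EEPROM value or the subsystem ID table.  Also sets up EEE
 * capabilities and the initial link configuration.
 */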
15223 static int tg3_phy_probe(struct tg3 *tp)
15224 {
15225 	u32 hw_phy_id_1, hw_phy_id_2;
15226 	u32 hw_phy_id, hw_phy_id_masked;
15227 	int err;
15228 
15229 	/* flow control autonegotiation is default behavior */
15230 	tg3_flag_set(tp, PAUSE_AUTONEG);
15231 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15232 
15233 	if (tg3_flag(tp, ENABLE_APE)) {
15234 		switch (tp->pci_fn) {
15235 		case 0:
15236 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15237 			break;
15238 		case 1:
15239 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15240 			break;
15241 		case 2:
15242 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15243 			break;
15244 		case 3:
15245 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15246 			break;
15247 		}
15248 	}
15249 
15250 	if (!tg3_flag(tp, ENABLE_ASF) &&
15251 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15252 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15253 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15254 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15255 
15256 	if (tg3_flag(tp, USE_PHYLIB))
15257 		return tg3_phy_init(tp);
15258 
15259 	/* Reading the PHY ID register can conflict with ASF
15260 	 * firmware access to the PHY hardware.
15261 	 */
15262 	err = 0;
15263 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15264 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15265 	} else {
15266 		/* Now read the physical PHY_ID from the chip and verify
15267 		 * that it is sane.  If it doesn't look good, fall back to
15268 		 * the PHY ID found in the eeprom area and, failing that,
15269 		 * to the hard-coded subsystem ID table.
15270 		 */
15271 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15272 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15273 
15274 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15275 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15276 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15277 
15278 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15279 	}
15280 
15281 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15282 		tp->phy_id = hw_phy_id;
15283 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15284 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15285 		else
15286 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15287 	} else {
15288 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15289 			/* Do nothing; the PHY ID was already set up in
15290 			 * tg3_get_eeprom_hw_cfg().
15291 			 */
15292 		} else {
15293 			struct subsys_tbl_ent *p;
15294 
15295 			/* No eeprom signature?  Try the hardcoded
15296 			 * subsys device table.
15297 			 */
15298 			p = tg3_lookup_by_subsys(tp);
15299 			if (p) {
15300 				tp->phy_id = p->phy_id;
15301 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15302 				/* So far we have seen the IDs 0xbc050cd0,
15303 				 * 0xbc050f80 and 0xbc050c30 on devices
15304 				 * connected to a BCM4785, and there are
15305 				 * probably more. For now, just assume that
15306 				 * the phy is supported when it is connected
15307 				 * to an SSB core.
15308 				 */
15309 				return -ENODEV;
15310 			}
15311 
15312 			if (!tp->phy_id ||
15313 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15314 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15315 		}
15316 	}
15317 
15318 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15319 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15320 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15321 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15322 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15323 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15324 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15325 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15326 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15327 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15328 
15329 		tp->eee.supported = SUPPORTED_100baseT_Full |
15330 				    SUPPORTED_1000baseT_Full;
15331 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15332 				     ADVERTISED_1000baseT_Full;
15333 		tp->eee.eee_enabled = 1;
15334 		tp->eee.tx_lpi_enabled = 1;
15335 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15336 	}
15337 
15338 	tg3_phy_init_link_config(tp);
15339 
15340 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15341 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15342 	    !tg3_flag(tp, ENABLE_APE) &&
15343 	    !tg3_flag(tp, ENABLE_ASF)) {
15344 		u32 bmsr, dummy;
15345 
15346 		tg3_readphy(tp, MII_BMSR, &bmsr);
15347 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15348 		    (bmsr & BMSR_LSTATUS))
15349 			goto skip_phy_reset;
15350 
15351 		err = tg3_phy_reset(tp);
15352 		if (err)
15353 			return err;
15354 
15355 		tg3_phy_set_wirespeed(tp);
15356 
15357 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15358 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15359 					    tp->link_config.flowctrl);
15360 
15361 			tg3_writephy(tp, MII_BMCR,
15362 				     BMCR_ANENABLE | BMCR_ANRESTART);
15363 		}
15364 	}
15365 
15366 skip_phy_reset:
15367 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15368 		err = tg3_init_5401phy_dsp(tp);
15369 		if (err)
15370 			return err;
15371 
15372 		err = tg3_init_5401phy_dsp(tp);
15373 	}
15374 
15375 	return err;
15376 }
15377 
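/* Parse the PCI VPD read-only section for a firmware version
 * (Dell vendor-specific keyword) and the board part number,
 * falling back to device-ID based part numbers when no usable
 * VPD is present.
 */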
15378 static void tg3_read_vpd(struct tg3 *tp)
15379 {
15380 	u8 *vpd_data;
15381 	unsigned int block_end, rosize, len;
15382 	u32 vpdlen;
15383 	int j, i = 0;
15384 
15385 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15386 	if (!vpd_data)
15387 		goto out_no_vpd;
15388 
15389 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15390 	if (i < 0)
15391 		goto out_not_found;
15392 
15393 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15394 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15395 	i += PCI_VPD_LRDT_TAG_SIZE;
15396 
15397 	if (block_end > vpdlen)
15398 		goto out_not_found;
15399 
15400 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15401 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15402 	if (j > 0) {
15403 		len = pci_vpd_info_field_size(&vpd_data[j]);
15404 
15405 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15406 		if (j + len > block_end || len != 4 ||
15407 		    memcmp(&vpd_data[j], "1028", 4))
15408 			goto partno;
15409 
15410 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15411 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15412 		if (j < 0)
15413 			goto partno;
15414 
15415 		len = pci_vpd_info_field_size(&vpd_data[j]);
15416 
15417 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15418 		if (j + len > block_end)
15419 			goto partno;
15420 
15421 		if (len >= sizeof(tp->fw_ver))
15422 			len = sizeof(tp->fw_ver) - 1;
15423 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15424 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15425 			 &vpd_data[j]);
15426 	}
15427 
15428 partno:
15429 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15430 				      PCI_VPD_RO_KEYWORD_PARTNO);
15431 	if (i < 0)
15432 		goto out_not_found;
15433 
15434 	len = pci_vpd_info_field_size(&vpd_data[i]);
15435 
15436 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15437 	if (len > TG3_BPN_SIZE ||
15438 	    (len + i) > vpdlen)
15439 		goto out_not_found;
15440 
15441 	memcpy(tp->board_part_number, &vpd_data[i], len);
15442 
15443 out_not_found:
15444 	kfree(vpd_data);
15445 	if (tp->board_part_number[0])
15446 		return;
15447 
15448 out_no_vpd:
15449 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15450 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15451 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15452 			strcpy(tp->board_part_number, "BCM5717");
15453 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15454 			strcpy(tp->board_part_number, "BCM5718");
15455 		else
15456 			goto nomatch;
15457 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15458 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15459 			strcpy(tp->board_part_number, "BCM57780");
15460 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15461 			strcpy(tp->board_part_number, "BCM57760");
15462 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15463 			strcpy(tp->board_part_number, "BCM57790");
15464 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15465 			strcpy(tp->board_part_number, "BCM57788");
15466 		else
15467 			goto nomatch;
15468 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15469 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15470 			strcpy(tp->board_part_number, "BCM57761");
15471 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15472 			strcpy(tp->board_part_number, "BCM57765");
15473 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15474 			strcpy(tp->board_part_number, "BCM57781");
15475 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15476 			strcpy(tp->board_part_number, "BCM57785");
15477 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15478 			strcpy(tp->board_part_number, "BCM57791");
15479 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15480 			strcpy(tp->board_part_number, "BCM57795");
15481 		else
15482 			goto nomatch;
15483 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15484 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15485 			strcpy(tp->board_part_number, "BCM57762");
15486 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15487 			strcpy(tp->board_part_number, "BCM57766");
15488 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15489 			strcpy(tp->board_part_number, "BCM57782");
15490 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15491 			strcpy(tp->board_part_number, "BCM57786");
15492 		else
15493 			goto nomatch;
15494 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15495 		strcpy(tp->board_part_number, "BCM95906");
15496 	} else {
15497 nomatch:
15498 		strcpy(tp->board_part_number, "none");
15499 	}
15500 }
15501 
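/* A valid firmware image at @offset starts with a word whose top
 * bits read 0x0c000000, followed by a zero word.
 */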
15502 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15503 {
15504 	u32 val;
15505 
15506 	if (tg3_nvram_read(tp, offset, &val) ||
15507 	    (val & 0xfc000000) != 0x0c000000 ||
15508 	    tg3_nvram_read(tp, offset + 4, &val) ||
15509 	    val != 0)
15510 		return 0;
15511 
15512 	return 1;
15513 }
15514 
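/* Append the bootcode version to tp->fw_ver, handling both the
 * newer image layout with an embedded version string and the
 * legacy major/minor version word.
 */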
15515 static void tg3_read_bc_ver(struct tg3 *tp)
15516 {
15517 	u32 val, offset, start, ver_offset;
15518 	int i, dst_off;
15519 	bool newver = false;
15520 
15521 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15522 	    tg3_nvram_read(tp, 0x4, &start))
15523 		return;
15524 
15525 	offset = tg3_nvram_logical_addr(tp, offset);
15526 
15527 	if (tg3_nvram_read(tp, offset, &val))
15528 		return;
15529 
15530 	if ((val & 0xfc000000) == 0x0c000000) {
15531 		if (tg3_nvram_read(tp, offset + 4, &val))
15532 			return;
15533 
15534 		if (val == 0)
15535 			newver = true;
15536 	}
15537 
15538 	dst_off = strlen(tp->fw_ver);
15539 
15540 	if (newver) {
15541 		if (TG3_VER_SIZE - dst_off < 16 ||
15542 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15543 			return;
15544 
15545 		offset = offset + ver_offset - start;
15546 		for (i = 0; i < 16; i += 4) {
15547 			__be32 v;
15548 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15549 				return;
15550 
15551 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15552 		}
15553 	} else {
15554 		u32 major, minor;
15555 
15556 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15557 			return;
15558 
15559 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15560 			TG3_NVM_BCVER_MAJSFT;
15561 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15562 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15563 			 "v%d.%02d", major, minor);
15564 	}
15565 }
15566 
15567 static void tg3_read_hwsb_ver(struct tg3 *tp)
15568 {
15569 	u32 val, major, minor;
15570 
15571 	/* Use native endian representation */
15572 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15573 		return;
15574 
15575 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15576 		TG3_NVM_HWSB_CFG1_MAJSFT;
15577 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15578 		TG3_NVM_HWSB_CFG1_MINSFT;
15579 
15580 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15581 }
15582 
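/* Decode the selfboot format version from the revision-specific
 * header offset and append it to tp->fw_ver.
 */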
15583 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15584 {
15585 	u32 offset, major, minor, build;
15586 
15587 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15588 
15589 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15590 		return;
15591 
15592 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15593 	case TG3_EEPROM_SB_REVISION_0:
15594 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15595 		break;
15596 	case TG3_EEPROM_SB_REVISION_2:
15597 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15598 		break;
15599 	case TG3_EEPROM_SB_REVISION_3:
15600 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15601 		break;
15602 	case TG3_EEPROM_SB_REVISION_4:
15603 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15604 		break;
15605 	case TG3_EEPROM_SB_REVISION_5:
15606 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15607 		break;
15608 	case TG3_EEPROM_SB_REVISION_6:
15609 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15610 		break;
15611 	default:
15612 		return;
15613 	}
15614 
15615 	if (tg3_nvram_read(tp, offset, &val))
15616 		return;
15617 
15618 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15619 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15620 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15621 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15622 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15623 
15624 	if (minor > 99 || build > 26)
15625 		return;
15626 
15627 	offset = strlen(tp->fw_ver);
15628 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15629 		 " v%d.%02d", major, minor);
15630 
15631 	if (build > 0) {
15632 		offset = strlen(tp->fw_ver);
15633 		if (offset < TG3_VER_SIZE - 1)
15634 			tp->fw_ver[offset] = 'a' + build - 1;
15635 	}
15636 }
15637 
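/* Find the ASF initialization image in the NVRAM directory and,
 * if it is valid, append its version string to tp->fw_ver.
 */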
15638 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15639 {
15640 	u32 val, offset, start;
15641 	int i, vlen;
15642 
15643 	for (offset = TG3_NVM_DIR_START;
15644 	     offset < TG3_NVM_DIR_END;
15645 	     offset += TG3_NVM_DIRENT_SIZE) {
15646 		if (tg3_nvram_read(tp, offset, &val))
15647 			return;
15648 
15649 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15650 			break;
15651 	}
15652 
15653 	if (offset == TG3_NVM_DIR_END)
15654 		return;
15655 
15656 	if (!tg3_flag(tp, 5705_PLUS))
15657 		start = 0x08000000;
15658 	else if (tg3_nvram_read(tp, offset - 4, &start))
15659 		return;
15660 
15661 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15662 	    !tg3_fw_img_is_valid(tp, offset) ||
15663 	    tg3_nvram_read(tp, offset + 8, &val))
15664 		return;
15665 
15666 	offset += val - start;
15667 
15668 	vlen = strlen(tp->fw_ver);
15669 
15670 	tp->fw_ver[vlen++] = ',';
15671 	tp->fw_ver[vlen++] = ' ';
15672 
15673 	for (i = 0; i < 4; i++) {
15674 		__be32 v;
15675 		if (tg3_nvram_read_be32(tp, offset, &v))
15676 			return;
15677 
15678 		offset += sizeof(v);
15679 
15680 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15681 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15682 			break;
15683 		}
15684 
15685 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15686 		vlen += sizeof(v);
15687 	}
15688 }
15689 
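/* Detect NCSI firmware by checking the APE segment signature,
 * firmware-ready status and the NCSI feature bit.
 */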
15690 static void tg3_probe_ncsi(struct tg3 *tp)
15691 {
15692 	u32 apedata;
15693 
15694 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15695 	if (apedata != APE_SEG_SIG_MAGIC)
15696 		return;
15697 
15698 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15699 	if (!(apedata & APE_FW_STATUS_READY))
15700 		return;
15701 
15702 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15703 		tg3_flag_set(tp, APE_HAS_NCSI);
15704 }
15705 
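/* Append the APE management firmware version to tp->fw_ver,
 * labelled NCSI, SMASH or DASH according to the firmware type.
 */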
15706 static void tg3_read_dash_ver(struct tg3 *tp)
15707 {
15708 	int vlen;
15709 	u32 apedata;
15710 	char *fwtype;
15711 
15712 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15713 
15714 	if (tg3_flag(tp, APE_HAS_NCSI))
15715 		fwtype = "NCSI";
15716 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15717 		fwtype = "SMASH";
15718 	else
15719 		fwtype = "DASH";
15720 
15721 	vlen = strlen(tp->fw_ver);
15722 
15723 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15724 		 fwtype,
15725 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15726 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15727 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15728 		 (apedata & APE_FW_VERSION_BLDMSK));
15729 }
15730 
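/* On the 5762, recover a version byte from the OTP region (when
 * the magic signature validates) and append it to tp->fw_ver.
 */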
15731 static void tg3_read_otp_ver(struct tg3 *tp)
15732 {
15733 	u32 val, val2;
15734 
15735 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15736 		return;
15737 
15738 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15739 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15740 	    TG3_OTP_MAGIC0_VALID(val)) {
15741 		u64 val64 = (u64) val << 32 | val2;
15742 		u32 ver = 0;
15743 		int i, vlen;
15744 
15745 		for (i = 0; i < 7; i++) {
15746 			if ((val64 & 0xff) == 0)
15747 				break;
15748 			ver = val64 & 0xff;
15749 			val64 >>= 8;
15750 		}
15751 		vlen = strlen(tp->fw_ver);
15752 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15753 	}
15754 }
15755 
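/* Build the complete firmware version string: the bootcode,
 * selfboot or hardware-selfboot version from NVRAM, plus any
 * ASF/management firmware version.
 */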
15756 static void tg3_read_fw_ver(struct tg3 *tp)
15757 {
15758 	u32 val;
15759 	bool vpd_vers = false;
15760 
15761 	if (tp->fw_ver[0] != 0)
15762 		vpd_vers = true;
15763 
15764 	if (tg3_flag(tp, NO_NVRAM)) {
15765 		strcat(tp->fw_ver, "sb");
15766 		tg3_read_otp_ver(tp);
15767 		return;
15768 	}
15769 
15770 	if (tg3_nvram_read(tp, 0, &val))
15771 		return;
15772 
15773 	if (val == TG3_EEPROM_MAGIC)
15774 		tg3_read_bc_ver(tp);
15775 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15776 		tg3_read_sb_ver(tp, val);
15777 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15778 		tg3_read_hwsb_ver(tp);
15779 
15780 	if (tg3_flag(tp, ENABLE_ASF)) {
15781 		if (tg3_flag(tp, ENABLE_APE)) {
15782 			tg3_probe_ncsi(tp);
15783 			if (!vpd_vers)
15784 				tg3_read_dash_ver(tp);
15785 		} else if (!vpd_vers) {
15786 			tg3_read_mgmtfw_ver(tp);
15787 		}
15788 	}
15789 
15790 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15791 }
15792 
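/* RX return ring size depends on large production ring support
 * and jumbo frame capability.
 */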
15793 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15794 {
15795 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15796 		return TG3_RX_RET_MAX_SIZE_5717;
15797 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15798 		return TG3_RX_RET_MAX_SIZE_5700;
15799 	else
15800 		return TG3_RX_RET_MAX_SIZE_5705;
15801 }
15802 
15803 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15804 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15805 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15806 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15807 	{ },
15808 };
15809 
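/* Find the other function of a dual-port device in the same PCI
 * slot.  For a 5704 configured in single-port mode there is no
 * peer, so tp->pdev itself is returned.
 */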
15810 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15811 {
15812 	struct pci_dev *peer;
15813 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15814 
15815 	for (func = 0; func < 8; func++) {
15816 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15817 		if (peer && peer != tp->pdev)
15818 			break;
15819 		pci_dev_put(peer);
15820 	}
15821 	/* 5704 can be configured in single-port mode; set peer to
15822 	 * tp->pdev in that case.
15823 	 */
15824 	if (!peer) {
15825 		peer = tp->pdev;
15826 		return peer;
15827 	}
15828 
15829 	/*
15830 	 * We don't need to keep the refcount elevated; there's no way
15831 	 * to remove one half of this device without removing the other.
15832 	 */
15833 	pci_dev_put(peer);
15834 
15835 	return peer;
15836 }
15837 
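/* Determine the chip revision ID, using the alternate product ID
 * config registers on newer devices, and derive the 57xx family
 * flags from it.
 */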
15838 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15839 {
15840 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15841 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15842 		u32 reg;
15843 
15844 		/* All devices that use the alternate
15845 		 * ASIC REV location have a CPMU.
15846 		 */
15847 		tg3_flag_set(tp, CPMU_PRESENT);
15848 
15849 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15850 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15851 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15852 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15853 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15854 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15855 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15856 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15857 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15858 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15859 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15860 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15861 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15862 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15863 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15864 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15865 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15866 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15867 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15868 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15869 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15870 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15871 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15872 		else
15873 			reg = TG3PCI_PRODID_ASICREV;
15874 
15875 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15876 	}
15877 
15878 	/* Wrong chip ID in 5752 A0. This code can be removed later
15879 	 * as A0 is not in production.
15880 	 */
15881 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15882 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15883 
15884 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15885 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15886 
15887 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15888 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15889 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15890 		tg3_flag_set(tp, 5717_PLUS);
15891 
15892 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15893 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15894 		tg3_flag_set(tp, 57765_CLASS);
15895 
15896 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15897 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15898 		tg3_flag_set(tp, 57765_PLUS);
15899 
15900 	/* Intentionally exclude ASIC_REV_5906 */
15901 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15902 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15903 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15904 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15905 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15906 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15907 	    tg3_flag(tp, 57765_PLUS))
15908 		tg3_flag_set(tp, 5755_PLUS);
15909 
15910 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15911 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15912 		tg3_flag_set(tp, 5780_CLASS);
15913 
15914 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15915 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15916 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15917 	    tg3_flag(tp, 5755_PLUS) ||
15918 	    tg3_flag(tp, 5780_CLASS))
15919 		tg3_flag_set(tp, 5750_PLUS);
15920 
15921 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15922 	    tg3_flag(tp, 5750_PLUS))
15923 		tg3_flag_set(tp, 5705_PLUS);
15924 }
15925 
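/* Report whether this board is limited to 10/100 operation, based
 * on the board ID straps, the FET PHY flag and the PCI table's
 * driver_data flags.
 */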
15926 static bool tg3_10_100_only_device(struct tg3 *tp,
15927 				   const struct pci_device_id *ent)
15928 {
15929 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15930 
15931 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15932 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15933 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15934 		return true;
15935 
15936 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15937 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15938 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15939 				return true;
15940 		} else {
15941 			return true;
15942 		}
15943 	}
15944 
15945 	return false;
15946 }
15947 
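/* One-time, probe-path discovery of chip capabilities, bus mode
 * and required workarounds.  Must run before most other register
 * accesses are attempted.
 */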
15948 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15949 {
15950 	u32 misc_ctrl_reg;
15951 	u32 pci_state_reg, grc_misc_cfg;
15952 	u32 val;
15953 	u16 pci_cmd;
15954 	int err;
15955 
15956 	/* Force memory write invalidate off.  If we leave it on,
15957 	 * then on 5700_BX chips we have to enable a workaround.
15958 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15959 	 * to match the cacheline size.  The Broadcom driver has this
15960 	 * workaround but turns MWI off all the time and so never uses
15961 	 * it.  This seems to suggest that the workaround is insufficient.
15962 	 */
15963 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15964 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15965 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15966 
15967 	/* Important! -- Make sure register accesses are byteswapped
15968 	 * correctly.  Also, for those chips that require it, make
15969 	 * sure that indirect register accesses are enabled before
15970 	 * the first operation.
15971 	 */
15972 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15973 			      &misc_ctrl_reg);
15974 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15975 			       MISC_HOST_CTRL_CHIPREV);
15976 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15977 			       tp->misc_host_ctrl);
15978 
15979 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15980 
15981 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15982 	 * we need to disable memory and use config. cycles
15983 	 * only to access all registers. The 5702/03 chips
15984 	 * can mistakenly decode the special cycles from the
15985 	 * ICH chipsets as memory write cycles, causing corruption
15986 	 * of register and memory space. Only certain ICH bridges
15987 	 * will drive special cycles with non-zero data during the
15988 	 * address phase which can fall within the 5703's address
15989 	 * range. This is not an ICH bug as the PCI spec allows
15990 	 * non-zero address during special cycles. However, only
15991 	 * these ICH bridges are known to drive non-zero addresses
15992 	 * during special cycles.
15993 	 *
15994 	 * Since special cycles do not cross PCI bridges, we only
15995 	 * enable this workaround if the 5703 is on the secondary
15996 	 * bus of these ICH bridges.
15997 	 */
15998 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15999 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16000 		static struct tg3_dev_id {
16001 			u32	vendor;
16002 			u32	device;
16003 			u32	rev;
16004 		} ich_chipsets[] = {
16005 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16006 			  PCI_ANY_ID },
16007 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16008 			  PCI_ANY_ID },
16009 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16010 			  0xa },
16011 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16012 			  PCI_ANY_ID },
16013 			{ },
16014 		};
16015 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16016 		struct pci_dev *bridge = NULL;
16017 
16018 		while (pci_id->vendor != 0) {
16019 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16020 						bridge);
16021 			if (!bridge) {
16022 				pci_id++;
16023 				continue;
16024 			}
16025 			if (pci_id->rev != PCI_ANY_ID) {
16026 				if (bridge->revision > pci_id->rev)
16027 					continue;
16028 			}
16029 			if (bridge->subordinate &&
16030 			    (bridge->subordinate->number ==
16031 			     tp->pdev->bus->number)) {
16032 				tg3_flag_set(tp, ICH_WORKAROUND);
16033 				pci_dev_put(bridge);
16034 				break;
16035 			}
16036 		}
16037 	}
16038 
16039 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16040 		static struct tg3_dev_id {
16041 			u32	vendor;
16042 			u32	device;
16043 		} bridge_chipsets[] = {
16044 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16045 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16046 			{ },
16047 		};
16048 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16049 		struct pci_dev *bridge = NULL;
16050 
16051 		while (pci_id->vendor != 0) {
16052 			bridge = pci_get_device(pci_id->vendor,
16053 						pci_id->device,
16054 						bridge);
16055 			if (!bridge) {
16056 				pci_id++;
16057 				continue;
16058 			}
16059 			if (bridge->subordinate &&
16060 			    (bridge->subordinate->number <=
16061 			     tp->pdev->bus->number) &&
16062 			    (bridge->subordinate->busn_res.end >=
16063 			     tp->pdev->bus->number)) {
16064 				tg3_flag_set(tp, 5701_DMA_BUG);
16065 				pci_dev_put(bridge);
16066 				break;
16067 			}
16068 		}
16069 	}
16070 
16071 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16072 	 * DMA addresses > 40-bit. This bridge may have additional
16073 	 * 57xx devices behind it in some 4-port NIC designs, for example.
16074 	 * Any tg3 device found behind the bridge will also need the 40-bit
16075 	 * DMA workaround.
16076 	 */
16077 	if (tg3_flag(tp, 5780_CLASS)) {
16078 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16079 		tp->msi_cap = tp->pdev->msi_cap;
16080 	} else {
16081 		struct pci_dev *bridge = NULL;
16082 
16083 		do {
16084 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16085 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16086 						bridge);
16087 			if (bridge && bridge->subordinate &&
16088 			    (bridge->subordinate->number <=
16089 			     tp->pdev->bus->number) &&
16090 			    (bridge->subordinate->busn_res.end >=
16091 			     tp->pdev->bus->number)) {
16092 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16093 				pci_dev_put(bridge);
16094 				break;
16095 			}
16096 		} while (bridge);
16097 	}
16098 
16099 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16100 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16101 		tp->pdev_peer = tg3_find_peer(tp);
16102 
16103 	/* Determine TSO capabilities */
16104 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16105 		; /* Do nothing. HW bug. */
16106 	else if (tg3_flag(tp, 57765_PLUS))
16107 		tg3_flag_set(tp, HW_TSO_3);
16108 	else if (tg3_flag(tp, 5755_PLUS) ||
16109 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16110 		tg3_flag_set(tp, HW_TSO_2);
16111 	else if (tg3_flag(tp, 5750_PLUS)) {
16112 		tg3_flag_set(tp, HW_TSO_1);
16113 		tg3_flag_set(tp, TSO_BUG);
16114 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16115 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16116 			tg3_flag_clear(tp, TSO_BUG);
16117 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16118 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16119 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16120 		tg3_flag_set(tp, FW_TSO);
16121 		tg3_flag_set(tp, TSO_BUG);
16122 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16123 			tp->fw_needed = FIRMWARE_TG3TSO5;
16124 		else
16125 			tp->fw_needed = FIRMWARE_TG3TSO;
16126 	}
16127 
16128 	/* Selectively allow TSO based on operating conditions */
16129 	if (tg3_flag(tp, HW_TSO_1) ||
16130 	    tg3_flag(tp, HW_TSO_2) ||
16131 	    tg3_flag(tp, HW_TSO_3) ||
16132 	    tg3_flag(tp, FW_TSO)) {
16133 		/* For firmware TSO, assume ASF is disabled.
16134 		 * We'll disable TSO later if we discover ASF
16135 		 * is enabled in tg3_get_eeprom_hw_cfg().
16136 		 */
16137 		tg3_flag_set(tp, TSO_CAPABLE);
16138 	} else {
16139 		tg3_flag_clear(tp, TSO_CAPABLE);
16140 		tg3_flag_clear(tp, TSO_BUG);
16141 		tp->fw_needed = NULL;
16142 	}
16143 
16144 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16145 		tp->fw_needed = FIRMWARE_TG3;
16146 
16147 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16148 		tp->fw_needed = FIRMWARE_TG357766;
16149 
16150 	tp->irq_max = 1;
16151 
16152 	if (tg3_flag(tp, 5750_PLUS)) {
16153 		tg3_flag_set(tp, SUPPORT_MSI);
16154 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16155 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16156 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16157 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16158 		     tp->pdev_peer == tp->pdev))
16159 			tg3_flag_clear(tp, SUPPORT_MSI);
16160 
16161 		if (tg3_flag(tp, 5755_PLUS) ||
16162 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16163 			tg3_flag_set(tp, 1SHOT_MSI);
16164 		}
16165 
16166 		if (tg3_flag(tp, 57765_PLUS)) {
16167 			tg3_flag_set(tp, SUPPORT_MSIX);
16168 			tp->irq_max = TG3_IRQ_MAX_VECS;
16169 		}
16170 	}
16171 
16172 	tp->txq_max = 1;
16173 	tp->rxq_max = 1;
16174 	if (tp->irq_max > 1) {
16175 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16176 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16177 
16178 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16179 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16180 			tp->txq_max = tp->irq_max - 1;
16181 	}
16182 
16183 	if (tg3_flag(tp, 5755_PLUS) ||
16184 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16185 		tg3_flag_set(tp, SHORT_DMA_BUG);
16186 
16187 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16188 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16189 
16190 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16191 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16192 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16193 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16194 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16195 
16196 	if (tg3_flag(tp, 57765_PLUS) &&
16197 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16198 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16199 
16200 	if (!tg3_flag(tp, 5705_PLUS) ||
16201 	    tg3_flag(tp, 5780_CLASS) ||
16202 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16203 		tg3_flag_set(tp, JUMBO_CAPABLE);
16204 
16205 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16206 			      &pci_state_reg);
16207 
16208 	if (pci_is_pcie(tp->pdev)) {
16209 		u16 lnkctl;
16210 
16211 		tg3_flag_set(tp, PCI_EXPRESS);
16212 
16213 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16214 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16215 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16216 				tg3_flag_clear(tp, HW_TSO_2);
16217 				tg3_flag_clear(tp, TSO_CAPABLE);
16218 			}
16219 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16220 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16221 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16222 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16223 				tg3_flag_set(tp, CLKREQ_BUG);
16224 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16225 			tg3_flag_set(tp, L1PLLPD_EN);
16226 		}
16227 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16228 		/* BCM5785 devices are effectively PCIe devices, and should
16229 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16230 		 * section.
16231 		 */
16232 		tg3_flag_set(tp, PCI_EXPRESS);
16233 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16234 		   tg3_flag(tp, 5780_CLASS)) {
16235 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16236 		if (!tp->pcix_cap) {
16237 			dev_err(&tp->pdev->dev,
16238 				"Cannot find PCI-X capability, aborting\n");
16239 			return -EIO;
16240 		}
16241 
16242 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16243 			tg3_flag_set(tp, PCIX_MODE);
16244 	}
16245 
16246 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16247 	 * reordering of mailbox register writes by the host
16248 	 * controller can cause major trouble.  We read back from
16249 	 * every mailbox register write to force the writes to be
16250 	 * posted to the chip in order.
16251 	 */
16252 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16253 	    !tg3_flag(tp, PCI_EXPRESS))
16254 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16255 
16256 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16257 			     &tp->pci_cacheline_sz);
16258 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16259 			     &tp->pci_lat_timer);
16260 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16261 	    tp->pci_lat_timer < 64) {
16262 		tp->pci_lat_timer = 64;
16263 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16264 				      tp->pci_lat_timer);
16265 	}
16266 
16267 	/* Important! -- It is critical that the PCI-X hw workaround
16268 	 * situation is decided before the first MMIO register access.
16269 	 */
16270 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16271 		/* 5700 BX chips need to have their TX producer index
16272 		 * mailboxes written twice to workaround a bug.
16273 		 * mailboxes written twice to work around a bug.
16274 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16275 
16276 		/* If we are in PCI-X mode, enable register write workaround.
16277 		 *
16278 		 * The workaround is to use indirect register accesses
16279 		 * for all chip writes not to mailbox registers.
16280 		 */
16281 		if (tg3_flag(tp, PCIX_MODE)) {
16282 			u32 pm_reg;
16283 
16284 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16285 
16286 			/* The chip can have its power management PCI config
16287 			 * space registers clobbered due to this bug.
16288 			 * So explicitly force the chip into D0 here.
16289 			 */
16290 			pci_read_config_dword(tp->pdev,
16291 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16292 					      &pm_reg);
16293 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16294 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16295 			pci_write_config_dword(tp->pdev,
16296 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16297 					       pm_reg);
16298 
16299 			/* Also, force SERR#/PERR# in PCI command. */
16300 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16301 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16302 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16303 		}
16304 	}
16305 
16306 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16307 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16308 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16309 		tg3_flag_set(tp, PCI_32BIT);
16310 
16311 	/* Chip-specific fixup from Broadcom driver */
16312 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16313 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16314 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16315 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16316 	}
16317 
16318 	/* Default fast path register access methods */
16319 	tp->read32 = tg3_read32;
16320 	tp->write32 = tg3_write32;
16321 	tp->read32_mbox = tg3_read32;
16322 	tp->write32_mbox = tg3_write32;
16323 	tp->write32_tx_mbox = tg3_write32;
16324 	tp->write32_rx_mbox = tg3_write32;
16325 
16326 	/* Various workaround register access methods */
16327 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16328 		tp->write32 = tg3_write_indirect_reg32;
16329 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16330 		 (tg3_flag(tp, PCI_EXPRESS) &&
16331 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16332 		/*
16333 		 * Back-to-back register writes can cause problems on these
16334 		 * chips; the workaround is to read back all reg writes
16335 		 * except those to mailbox regs.
16336 		 *
16337 		 * See tg3_write_indirect_reg32().
16338 		 */
16339 		tp->write32 = tg3_write_flush_reg32;
16340 	}
16341 
16342 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16343 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16344 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16345 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16346 	}
16347 
16348 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16349 		tp->read32 = tg3_read_indirect_reg32;
16350 		tp->write32 = tg3_write_indirect_reg32;
16351 		tp->read32_mbox = tg3_read_indirect_mbox;
16352 		tp->write32_mbox = tg3_write_indirect_mbox;
16353 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16354 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16355 
16356 		iounmap(tp->regs);
16357 		tp->regs = NULL;
16358 
16359 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16360 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16361 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16362 	}
16363 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16364 		tp->read32_mbox = tg3_read32_mbox_5906;
16365 		tp->write32_mbox = tg3_write32_mbox_5906;
16366 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16367 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16368 	}
16369 
16370 	if (tp->write32 == tg3_write_indirect_reg32 ||
16371 	    (tg3_flag(tp, PCIX_MODE) &&
16372 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16373 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16374 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16375 
16376 	/* The memory arbiter has to be enabled in order for SRAM accesses
16377 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16378 	 * sure it is enabled, but other entities such as system netboot
16379 	 * code might disable it.
16380 	 */
16381 	val = tr32(MEMARB_MODE);
16382 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16383 
16384 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16385 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16386 	    tg3_flag(tp, 5780_CLASS)) {
16387 		if (tg3_flag(tp, PCIX_MODE)) {
16388 			pci_read_config_dword(tp->pdev,
16389 					      tp->pcix_cap + PCI_X_STATUS,
16390 					      &val);
16391 			tp->pci_fn = val & 0x7;
16392 		}
16393 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16394 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16395 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16396 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16397 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16398 			val = tr32(TG3_CPMU_STATUS);
16399 
16400 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16401 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16402 		else
16403 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16404 				     TG3_CPMU_STATUS_FSHFT_5719;
16405 	}
16406 
16407 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16408 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16409 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16410 	}
16411 
16412 	/* Get eeprom hw config before calling tg3_set_power_state().
16413 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16414 	 * determined before calling tg3_set_power_state() so that
16415 	 * we know whether or not to switch out of Vaux power.
16416 	 * When the flag is set, it means that GPIO1 is used for eeprom
16417 	 * write protect and also implies that it is a LOM where GPIOs
16418 	 * are not used to switch power.
16419 	 */
16420 	tg3_get_eeprom_hw_cfg(tp);
16421 
16422 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16423 		tg3_flag_clear(tp, TSO_CAPABLE);
16424 		tg3_flag_clear(tp, TSO_BUG);
16425 		tp->fw_needed = NULL;
16426 	}
16427 
16428 	if (tg3_flag(tp, ENABLE_APE)) {
16429 		/* Allow reads and writes to the
16430 		 * APE register and memory space.
16431 		 */
16432 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16433 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16434 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16435 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16436 				       pci_state_reg);
16437 
16438 		tg3_ape_lock_init(tp);
16439 	}
16440 
16441 	/* Set up tp->grc_local_ctrl before calling
16442 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16443 	 * will bring 5700's external PHY out of reset.
16444 	 * It is also used as eeprom write protect on LOMs.
16445 	 */
16446 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16447 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16448 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16449 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16450 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16451 	/* Unused GPIO3 must be driven as output on 5752 because there
16452 	 * are no pull-up resistors on unused GPIO pins.
16453 	 */
16454 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16455 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16456 
16457 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16458 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16459 	    tg3_flag(tp, 57765_CLASS))
16460 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16461 
16462 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16463 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16464 		/* Turn off the debug UART. */
16465 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16466 		if (tg3_flag(tp, IS_NIC))
16467 			/* Keep VMain power. */
16468 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16469 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16470 	}
16471 
16472 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16473 		tp->grc_local_ctrl |=
16474 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16475 
16476 	/* Switch out of Vaux if it is a NIC */
16477 	tg3_pwrsrc_switch_to_vmain(tp);
16478 
16479 	/* Derive initial jumbo mode from MTU assigned in
16480 	 * ether_setup() via the alloc_etherdev() call
16481 	 */
16482 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16483 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16484 
16485 	/* Determine WakeOnLan speed to use. */
16486 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16487 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16488 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16489 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16490 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16491 	} else {
16492 		tg3_flag_set(tp, WOL_SPEED_100MB);
16493 	}
16494 
16495 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16496 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16497 
16498 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16499 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16500 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16501 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16502 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16503 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16504 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16505 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16506 
16507 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16508 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16509 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16510 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16511 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16512 
16513 	if (tg3_flag(tp, 5705_PLUS) &&
16514 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16515 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16516 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16517 	    !tg3_flag(tp, 57765_PLUS)) {
16518 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16519 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16520 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16521 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16522 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16523 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16524 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16525 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16526 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16527 		} else
16528 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16529 	}
16530 
16531 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16532 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16533 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16534 		if (tp->phy_otp == 0)
16535 			tp->phy_otp = TG3_OTP_DEFAULT;
16536 	}
16537 
16538 	if (tg3_flag(tp, CPMU_PRESENT))
16539 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16540 	else
16541 		tp->mi_mode = MAC_MI_MODE_BASE;
16542 
16543 	tp->coalesce_mode = 0;
16544 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16545 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16546 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16547 
16548 	/* Set these bits to enable statistics workaround. */
16549 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16550 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16551 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16552 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16553 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16554 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16555 	}
16556 
16557 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16558 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16559 		tg3_flag_set(tp, USE_PHYLIB);
16560 
16561 	err = tg3_mdio_init(tp);
16562 	if (err)
16563 		return err;
16564 
16565 	/* Initialize data/descriptor byte/word swapping. */
16566 	val = tr32(GRC_MODE);
16567 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16568 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16569 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16570 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16571 			GRC_MODE_B2HRX_ENABLE |
16572 			GRC_MODE_HTX2B_ENABLE |
16573 			GRC_MODE_HOST_STACKUP);
16574 	else
16575 		val &= GRC_MODE_HOST_STACKUP;
16576 
16577 	tw32(GRC_MODE, val | tp->grc_mode);
16578 
16579 	tg3_switch_clocks(tp);
16580 
16581 	/* Clear this out for sanity. */
16582 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16583 
16584 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16585 			      &pci_state_reg);
16586 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16587 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16588 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16589 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16590 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16591 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16592 			void __iomem *sram_base;
16593 
16594 			/* Write some dummy words into the SRAM status block
16595 			 * area and see if they read back correctly.  If the
16596 			 * readback is bad, force-enable the PCIX workaround.
16597 			 */
16598 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16599 
16600 			writel(0x00000000, sram_base);
16601 			writel(0x00000000, sram_base + 4);
16602 			writel(0xffffffff, sram_base + 4);
16603 			if (readl(sram_base) != 0x00000000)
16604 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16605 		}
16606 	}
16607 
16608 	udelay(50);
16609 	tg3_nvram_init(tp);
16610 
16611 	/* If the device has an NVRAM, no need to load patch firmware */
16612 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16613 	    !tg3_flag(tp, NO_NVRAM))
16614 		tp->fw_needed = NULL;
16615 
16616 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16617 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16618 
16619 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16620 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16621 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16622 		tg3_flag_set(tp, IS_5788);
16623 
16624 	if (!tg3_flag(tp, IS_5788) &&
16625 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16626 		tg3_flag_set(tp, TAGGED_STATUS);
16627 	if (tg3_flag(tp, TAGGED_STATUS)) {
16628 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16629 				      HOSTCC_MODE_CLRTICK_TXBD);
16630 
16631 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16632 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16633 				       tp->misc_host_ctrl);
16634 	}
16635 
16636 	/* Preserve the APE MAC_MODE bits */
16637 	if (tg3_flag(tp, ENABLE_APE))
16638 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16639 	else
16640 		tp->mac_mode = 0;
16641 
16642 	if (tg3_10_100_only_device(tp, ent))
16643 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16644 
16645 	err = tg3_phy_probe(tp);
16646 	if (err) {
16647 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16648 		/* ... but do not return immediately ... */
16649 		tg3_mdio_fini(tp);
16650 	}
16651 
16652 	tg3_read_vpd(tp);
16653 	tg3_read_fw_ver(tp);
16654 
16655 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16656 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16657 	} else {
16658 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16659 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16660 		else
16661 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16662 	}
16663 
16664 	/* 5700 {AX,BX} chips have a broken status block link
16665 	 * change bit implementation, so we must use the
16666 	 * status register in those cases.
16667 	 */
16668 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16669 		tg3_flag_set(tp, USE_LINKCHG_REG);
16670 	else
16671 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16672 
16673 	/* The led_ctrl is set during tg3_phy_probe; here we might
16674 	 * have to force the link status polling mechanism based
16675 	 * upon subsystem IDs.
16676 	 */
16677 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16678 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16679 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16680 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16681 		tg3_flag_set(tp, USE_LINKCHG_REG);
16682 	}
16683 
16684 	/* For all SERDES we poll the MAC status register. */
16685 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16686 		tg3_flag_set(tp, POLL_SERDES);
16687 	else
16688 		tg3_flag_clear(tp, POLL_SERDES);
16689 
16690 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16691 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
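	/* NET_IP_ALIGN normally offsets rx buffers by two bytes so the
	 * IP header lands on a 4-byte boundary.  The 5701 in PCI-X mode
	 * cannot DMA to buffers offset this way, so drop the offset
	 * and, on architectures without efficient unaligned access,
	 * raise the copy threshold so every packet is copied (and
	 * thereby realigned) on receive.
	 */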
16692 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16693 	    tg3_flag(tp, PCIX_MODE)) {
16694 		tp->rx_offset = NET_SKB_PAD;
16695 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16696 		tp->rx_copy_thresh = ~(u16)0;
16697 #endif
16698 	}
16699 
16700 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16701 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16702 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16703 
16704 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16705 
16706 	/* Increment the rx prod index on the rx std ring by at most
16707 	 * 8 for these chips to work around hw errata.
16708 	 */
16709 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16710 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16711 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16712 		tp->rx_std_max_post = 8;
16713 
16714 	if (tg3_flag(tp, ASPM_WORKAROUND))
16715 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16716 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16717 
16718 	return err;
16719 }
16720 
16721 #ifdef CONFIG_SPARC
16722 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16723 {
16724 	struct net_device *dev = tp->dev;
16725 	struct pci_dev *pdev = tp->pdev;
16726 	struct device_node *dp = pci_device_to_OF_node(pdev);
16727 	const unsigned char *addr;
16728 	int len;
16729 
16730 	addr = of_get_property(dp, "local-mac-address", &len);
16731 	if (addr && len == ETH_ALEN) {
16732 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16733 		return 0;
16734 	}
16735 	return -ENODEV;
16736 }
16737 
16738 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16739 {
16740 	struct net_device *dev = tp->dev;
16741 
16742 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16743 	return 0;
16744 }
16745 #endif
16746 
16747 static int tg3_get_device_address(struct tg3 *tp)
16748 {
16749 	struct net_device *dev = tp->dev;
16750 	u32 hi, lo, mac_offset;
16751 	int addr_ok = 0;
16752 	int err;
16753 
16754 #ifdef CONFIG_SPARC
16755 	if (!tg3_get_macaddr_sparc(tp))
16756 		return 0;
16757 #endif
16758 
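	/* The address is discovered by trying, in order: the SSB host
	 * (for SSB GigE cores), the bootcode mailbox in NIC SRAM, the
	 * NVRAM at a function-specific offset, and finally whatever is
	 * live in the MAC_ADDR_0_{HIGH,LOW} registers.
	 */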
16759 	if (tg3_flag(tp, IS_SSB_CORE)) {
16760 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16761 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16762 			return 0;
16763 	}
16764 
16765 	mac_offset = 0x7c;
16766 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16767 	    tg3_flag(tp, 5780_CLASS)) {
16768 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16769 			mac_offset = 0xcc;
16770 		if (tg3_nvram_lock(tp))
16771 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16772 		else
16773 			tg3_nvram_unlock(tp);
16774 	} else if (tg3_flag(tp, 5717_PLUS)) {
16775 		if (tp->pci_fn & 1)
16776 			mac_offset = 0xcc;
16777 		if (tp->pci_fn > 1)
16778 			mac_offset += 0x18c;
16779 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16780 		mac_offset = 0x10;
16781 
16782 	/* First try to get it from MAC address mailbox. */
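	/* The bootcode stamps the high mailbox word with the ASCII
	 * signature "HK" (0x48 0x4b) above the first two address
	 * octets; the low word carries the remaining four, e.g.:
	 *
	 *	hi = 0x484b0010		(sig, addr[0..1] = 00:10)
	 *	lo = 0x18deadbe		(addr[2..5] = 18:de:ad:be)
	 */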
16783 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16784 	if ((hi >> 16) == 0x484b) {
16785 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16786 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16787 
16788 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16789 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16790 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16791 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16792 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16793 
16794 		/* Some old bootcode may report a 0 MAC address in SRAM */
16795 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16796 	}
16797 	if (!addr_ok) {
16798 		/* Next, try NVRAM. */
16799 		if (!tg3_flag(tp, NO_NVRAM) &&
16800 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16801 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16802 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16803 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16804 		}
16805 		/* Finally just fetch it out of the MAC control regs. */
16806 		else {
16807 			hi = tr32(MAC_ADDR_0_HIGH);
16808 			lo = tr32(MAC_ADDR_0_LOW);
16809 
16810 			dev->dev_addr[5] = lo & 0xff;
16811 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16812 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16813 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16814 			dev->dev_addr[1] = hi & 0xff;
16815 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16816 		}
16817 	}
16818 
16819 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16820 #ifdef CONFIG_SPARC
16821 		if (!tg3_get_default_macaddr_sparc(tp))
16822 			return 0;
16823 #endif
16824 		return -EINVAL;
16825 	}
16826 	return 0;
16827 }
16828 
16829 #define BOUNDARY_SINGLE_CACHELINE	1
16830 #define BOUNDARY_MULTI_CACHELINE	2
16831 
16832 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16833 {
16834 	int cacheline_size;
16835 	u8 byte;
16836 	int goal;
16837 
16838 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16839 	if (byte == 0)
16840 		cacheline_size = 1024;
16841 	else
16842 		cacheline_size = (int) byte * 4;
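	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so e.g. a
	 * raw value of 0x10 means a 64-byte cache line.  Zero means
	 * the register was never programmed; assume the worst-case
	 * 1024-byte boundary below in that case.
	 */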
16843 
16844 	/* On 5703 and later non-PCI-Express chips, the boundary
16845 	 * bits have no effect.
16846 	 */
16847 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16848 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16849 	    !tg3_flag(tp, PCI_EXPRESS))
16850 		goto out;
16851 
16852 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16853 	goal = BOUNDARY_MULTI_CACHELINE;
16854 #else
16855 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16856 	goal = BOUNDARY_SINGLE_CACHELINE;
16857 #else
16858 	goal = 0;
16859 #endif
16860 #endif
16861 
16862 	if (tg3_flag(tp, 57765_PLUS)) {
16863 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16864 		goto out;
16865 	}
16866 
16867 	if (!goal)
16868 		goto out;
16869 
16870 	/* PCI controllers on most RISC systems tend to disconnect
16871 	 * when a device tries to burst across a cache-line boundary.
16872 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16873 	 *
16874 	 * Unfortunately, for PCI-E there are only limited
16875 	 * write-side controls for this, and thus for reads
16876 	 * we will still get the disconnects.  We'll also waste
16877 	 * these PCI cycles for both read and write for chips
16878 	 * other than 5700 and 5701, which do not implement the
16879 	 * boundary bits.
16880 	 */
16881 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16882 		switch (cacheline_size) {
16883 		case 16:
16884 		case 32:
16885 		case 64:
16886 		case 128:
16887 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16888 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16889 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16890 			} else {
16891 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16892 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16893 			}
16894 			break;
16895 
16896 		case 256:
16897 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16898 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16899 			break;
16900 
16901 		default:
16902 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16903 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16904 			break;
16905 		}
16906 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16907 		switch (cacheline_size) {
16908 		case 16:
16909 		case 32:
16910 		case 64:
16911 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16912 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16913 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16914 				break;
16915 			}
16916 			/* fallthrough */
16917 		case 128:
16918 		default:
16919 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16920 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16921 			break;
16922 		}
16923 	} else {
16924 		switch (cacheline_size) {
16925 		case 16:
16926 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16927 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16928 					DMA_RWCTRL_WRITE_BNDRY_16);
16929 				break;
16930 			}
16931 			/* fallthrough */
16932 		case 32:
16933 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16934 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16935 					DMA_RWCTRL_WRITE_BNDRY_32);
16936 				break;
16937 			}
16938 			/* fallthrough */
16939 		case 64:
16940 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16941 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16942 					DMA_RWCTRL_WRITE_BNDRY_64);
16943 				break;
16944 			}
16945 			/* fallthrough */
16946 		case 128:
16947 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16948 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16949 					DMA_RWCTRL_WRITE_BNDRY_128);
16950 				break;
16951 			}
16952 			/* fallthrough */
16953 		case 256:
16954 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16955 				DMA_RWCTRL_WRITE_BNDRY_256);
16956 			break;
16957 		case 512:
16958 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16959 				DMA_RWCTRL_WRITE_BNDRY_512);
16960 			break;
16961 		case 1024:
16962 		default:
16963 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16964 				DMA_RWCTRL_WRITE_BNDRY_1024);
16965 			break;
16966 		}
16967 	}
16968 
16969 out:
16970 	return val;
16971 }
16972 
16973 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16974 			   int size, bool to_device)
16975 {
16976 	struct tg3_internal_buffer_desc test_desc;
16977 	u32 sram_dma_descs;
16978 	int i, ret;
16979 
16980 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16981 
16982 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16983 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16984 	tw32(RDMAC_STATUS, 0);
16985 	tw32(WDMAC_STATUS, 0);
16986 
16987 	tw32(BUFMGR_MODE, 0);
16988 	tw32(FTQ_RESET, 0);
16989 
16990 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16991 	test_desc.addr_lo = buf_dma & 0xffffffff;
16992 	test_desc.nic_mbuf = 0x00002100;
16993 	test_desc.len = size;
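	/* The descriptor is staged here in host memory and then copied
	 * into NIC SRAM a word at a time through the config-space
	 * memory window (the loop below); the FTQ write that follows
	 * hands its SRAM address to the chosen DMA engine.
	 */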
16994 
16995 	/*
16996 	 * HP ZX1 systems were seeing test failures for 5701 cards running
16997 	 * at 33MHz the *second* time the tg3 driver was loaded after an
16998 	 * initial scan.
16999 	 *
17000 	 * Broadcom tells me:
17001 	 *   ...the DMA engine is connected to the GRC block and a DMA
17002 	 *   reset may affect the GRC block in some unpredictable way...
17003 	 *   The behavior of resets to individual blocks has not been tested.
17004 	 *
17005 	 * Broadcom noted the GRC reset will also reset all sub-components.
17006 	 */
17007 	if (to_device) {
17008 		test_desc.cqid_sqid = (13 << 8) | 2;
17009 
17010 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17011 		udelay(40);
17012 	} else {
17013 		test_desc.cqid_sqid = (16 << 8) | 7;
17014 
17015 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17016 		udelay(40);
17017 	}
17018 	test_desc.flags = 0x00000005;
17019 
17020 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17021 		u32 val;
17022 
17023 		val = *(((u32 *)&test_desc) + i);
17024 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17025 				       sram_dma_descs + (i * sizeof(u32)));
17026 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17027 	}
17028 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17029 
17030 	if (to_device)
17031 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17032 	else
17033 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17034 
17035 	ret = -ENODEV;
17036 	for (i = 0; i < 40; i++) {
17037 		u32 val;
17038 
17039 		if (to_device)
17040 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17041 		else
17042 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17043 		if ((val & 0xffff) == sram_dma_descs) {
17044 			ret = 0;
17045 			break;
17046 		}
17047 
17048 		udelay(100);
17049 	}
17050 
17051 	return ret;
17052 }
17053 
17054 #define TEST_BUFFER_SIZE	0x2000
17055 
17056 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
17057 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17058 	{ },
17059 };
17060 
17061 static int tg3_test_dma(struct tg3 *tp)
17062 {
17063 	dma_addr_t buf_dma;
17064 	u32 *buf, saved_dma_rwctrl;
17065 	int ret = 0;
17066 
17067 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17068 				 &buf_dma, GFP_KERNEL);
17069 	if (!buf) {
17070 		ret = -ENOMEM;
17071 		goto out_nofree;
17072 	}
17073 
17074 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17075 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
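	/* These look like the PCI bus command encodings the DMA engine
	 * should emit (0x7 = Memory Write, 0x6 = Memory Read), which
	 * matches the field names, though the exact semantics are
	 * chip-defined.
	 */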
17076 
17077 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17078 
17079 	if (tg3_flag(tp, 57765_PLUS))
17080 		goto out;
17081 
17082 	if (tg3_flag(tp, PCI_EXPRESS)) {
17083 		/* DMA read watermark not used on PCIE */
17084 		tp->dma_rwctrl |= 0x00180000;
17085 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17086 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17087 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17088 			tp->dma_rwctrl |= 0x003f0000;
17089 		else
17090 			tp->dma_rwctrl |= 0x003f000f;
17091 	} else {
17092 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17093 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17094 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17095 			u32 read_water = 0x7;
17096 
17097 			/* If the 5704 is behind the EPB bridge, we can
17098 			 * do the less restrictive ONE_DMA workaround for
17099 			 * better performance.
17100 			 */
17101 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17102 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17103 				tp->dma_rwctrl |= 0x8000;
17104 			else if (ccval == 0x6 || ccval == 0x7)
17105 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17106 
17107 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17108 				read_water = 4;
17109 			/* Set bit 23 to enable PCIX hw bug fix */
17110 			tp->dma_rwctrl |=
17111 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17112 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17113 				(1 << 23);
17114 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17115 			/* 5780 always in PCIX mode */
17116 			tp->dma_rwctrl |= 0x00144000;
17117 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17118 			/* 5714 always in PCIX mode */
17119 			tp->dma_rwctrl |= 0x00148000;
17120 		} else {
17121 			tp->dma_rwctrl |= 0x001b000f;
17122 		}
17123 	}
17124 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17125 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17126 
17127 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17128 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17129 		tp->dma_rwctrl &= 0xfffffff0;
17130 
17131 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17132 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17133 		/* Remove this if it causes problems for some boards. */
17134 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17135 
17136 		/* On 5700/5701 chips, we need to set this bit.
17137 		 * Otherwise the chip will issue cacheline transactions
17138 		 * to streamable DMA memory without all of the byte
17139 		 * enables turned on.  This is an error on several
17140 		 * RISC PCI controllers, in particular sparc64.
17141 		 *
17142 		 * On 5703/5704 chips, this bit has been reassigned
17143 		 * a different meaning.  In particular, it is used
17144 		 * on those chips to enable a PCI-X workaround.
17145 		 */
17146 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17147 	}
17148 
17149 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17150 
17151 
17152 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17153 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17154 		goto out;
17155 
17156 	/* It is best to perform the DMA test with the maximum write burst
17157 	 * size to expose the 5700/5701 write DMA bug.
17158 	 */
17159 	saved_dma_rwctrl = tp->dma_rwctrl;
17160 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17161 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17162 
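	/* The loop below fills the buffer with a counting pattern,
	 * DMAs it to the chip and back, and verifies the result.  On
	 * corruption it retries once with the write boundary clamped
	 * to 16 bytes before giving up.
	 */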
17163 	while (1) {
17164 		u32 *p = buf, i;
17165 
17166 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17167 			p[i] = i;
17168 
17169 		/* Send the buffer to the chip. */
17170 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17171 		if (ret) {
17172 			dev_err(&tp->pdev->dev,
17173 				"%s: Buffer write failed. err = %d\n",
17174 				__func__, ret);
17175 			break;
17176 		}
17177 
17178 		/* Now read it back. */
17179 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17180 		if (ret) {
17181 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17182 				"err = %d\n", __func__, ret);
17183 			break;
17184 		}
17185 
17186 		/* Verify it. */
17187 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17188 			if (p[i] == i)
17189 				continue;
17190 
17191 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17192 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17193 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17194 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17195 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17196 				break;
17197 			} else {
17198 				dev_err(&tp->pdev->dev,
17199 					"%s: Buffer corrupted on read back! "
17200 					"(%d != %d)\n", __func__, p[i], i);
17201 				ret = -ENODEV;
17202 				goto out;
17203 			}
17204 		}
17205 
17206 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17207 			/* Success. */
17208 			ret = 0;
17209 			break;
17210 		}
17211 	}
17212 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17213 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17214 		/* DMA test passed without adjusting the DMA boundary;
17215 		 * now look for chipsets that are known to expose the
17216 		 * DMA bug without failing the test.
17217 		 */
17218 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17219 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17220 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17221 		} else {
17222 			/* Safe to use the calculated DMA boundary. */
17223 			tp->dma_rwctrl = saved_dma_rwctrl;
17224 		}
17225 
17226 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17227 	}
17228 
17229 out:
17230 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17231 out_nofree:
17232 	return ret;
17233 }
17234 
17235 static void tg3_init_bufmgr_config(struct tg3 *tp)
17236 {
17237 	if (tg3_flag(tp, 57765_PLUS)) {
17238 		tp->bufmgr_config.mbuf_read_dma_low_water =
17239 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17240 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17241 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17242 		tp->bufmgr_config.mbuf_high_water =
17243 			DEFAULT_MB_HIGH_WATER_57765;
17244 
17245 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17246 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17247 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17248 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17249 		tp->bufmgr_config.mbuf_high_water_jumbo =
17250 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17251 	} else if (tg3_flag(tp, 5705_PLUS)) {
17252 		tp->bufmgr_config.mbuf_read_dma_low_water =
17253 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17254 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17255 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17256 		tp->bufmgr_config.mbuf_high_water =
17257 			DEFAULT_MB_HIGH_WATER_5705;
17258 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17259 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17260 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17261 			tp->bufmgr_config.mbuf_high_water =
17262 				DEFAULT_MB_HIGH_WATER_5906;
17263 		}
17264 
17265 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17266 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17267 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17268 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17269 		tp->bufmgr_config.mbuf_high_water_jumbo =
17270 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17271 	} else {
17272 		tp->bufmgr_config.mbuf_read_dma_low_water =
17273 			DEFAULT_MB_RDMA_LOW_WATER;
17274 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17275 			DEFAULT_MB_MACRX_LOW_WATER;
17276 		tp->bufmgr_config.mbuf_high_water =
17277 			DEFAULT_MB_HIGH_WATER;
17278 
17279 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17280 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17281 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17282 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17283 		tp->bufmgr_config.mbuf_high_water_jumbo =
17284 			DEFAULT_MB_HIGH_WATER_JUMBO;
17285 	}
17286 
17287 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17288 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17289 }
17290 
17291 static char *tg3_phy_string(struct tg3 *tp)
17292 {
17293 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17294 	case TG3_PHY_ID_BCM5400:	return "5400";
17295 	case TG3_PHY_ID_BCM5401:	return "5401";
17296 	case TG3_PHY_ID_BCM5411:	return "5411";
17297 	case TG3_PHY_ID_BCM5701:	return "5701";
17298 	case TG3_PHY_ID_BCM5703:	return "5703";
17299 	case TG3_PHY_ID_BCM5704:	return "5704";
17300 	case TG3_PHY_ID_BCM5705:	return "5705";
17301 	case TG3_PHY_ID_BCM5750:	return "5750";
17302 	case TG3_PHY_ID_BCM5752:	return "5752";
17303 	case TG3_PHY_ID_BCM5714:	return "5714";
17304 	case TG3_PHY_ID_BCM5780:	return "5780";
17305 	case TG3_PHY_ID_BCM5755:	return "5755";
17306 	case TG3_PHY_ID_BCM5787:	return "5787";
17307 	case TG3_PHY_ID_BCM5784:	return "5784";
17308 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17309 	case TG3_PHY_ID_BCM5906:	return "5906";
17310 	case TG3_PHY_ID_BCM5761:	return "5761";
17311 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17312 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17313 	case TG3_PHY_ID_BCM57765:	return "57765";
17314 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17315 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17316 	case TG3_PHY_ID_BCM5762:	return "5762C";
17317 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17318 	case 0:			return "serdes";
17319 	default:		return "unknown";
17320 	}
17321 }
17322 
17323 static char *tg3_bus_string(struct tg3 *tp, char *str)
17324 {
17325 	if (tg3_flag(tp, PCI_EXPRESS)) {
17326 		strcpy(str, "PCI Express");
17327 		return str;
17328 	} else if (tg3_flag(tp, PCIX_MODE)) {
17329 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17330 
17331 		strcpy(str, "PCIX:");
17332 
17333 		if ((clock_ctrl == 7) ||
17334 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17335 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17336 			strcat(str, "133MHz");
17337 		else if (clock_ctrl == 0)
17338 			strcat(str, "33MHz");
17339 		else if (clock_ctrl == 2)
17340 			strcat(str, "50MHz");
17341 		else if (clock_ctrl == 4)
17342 			strcat(str, "66MHz");
17343 		else if (clock_ctrl == 6)
17344 			strcat(str, "100MHz");
17345 	} else {
17346 		strcpy(str, "PCI:");
17347 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17348 			strcat(str, "66MHz");
17349 		else
17350 			strcat(str, "33MHz");
17351 	}
17352 	if (tg3_flag(tp, PCI_32BIT))
17353 		strcat(str, ":32-bit");
17354 	else
17355 		strcat(str, ":64-bit");
17356 	return str;
17357 }
17358 
17359 static void tg3_init_coal(struct tg3 *tp)
17360 {
17361 	struct ethtool_coalesce *ec = &tp->coal;
17362 
17363 	memset(ec, 0, sizeof(*ec));
17364 	ec->cmd = ETHTOOL_GCOALESCE;
17365 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17366 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17367 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17368 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17369 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17370 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17371 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17372 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17373 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17374 
17375 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17376 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17377 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17378 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17379 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17380 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17381 	}
17382 
17383 	if (tg3_flag(tp, 5705_PLUS)) {
17384 		ec->rx_coalesce_usecs_irq = 0;
17385 		ec->tx_coalesce_usecs_irq = 0;
17386 		ec->stats_block_coalesce_usecs = 0;
17387 	}
17388 }
17389 
17390 static int tg3_init_one(struct pci_dev *pdev,
17391 				  const struct pci_device_id *ent)
17392 {
17393 	struct net_device *dev;
17394 	struct tg3 *tp;
17395 	int i, err;
17396 	u32 sndmbx, rcvmbx, intmbx;
17397 	char str[40];
17398 	u64 dma_mask, persist_dma_mask;
17399 	netdev_features_t features = 0;
17400 
17401 	printk_once(KERN_INFO "%s\n", version);
17402 
17403 	err = pci_enable_device(pdev);
17404 	if (err) {
17405 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17406 		return err;
17407 	}
17408 
17409 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17410 	if (err) {
17411 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17412 		goto err_out_disable_pdev;
17413 	}
17414 
17415 	pci_set_master(pdev);
17416 
17417 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17418 	if (!dev) {
17419 		err = -ENOMEM;
17420 		goto err_out_free_res;
17421 	}
17422 
17423 	SET_NETDEV_DEV(dev, &pdev->dev);
17424 
17425 	tp = netdev_priv(dev);
17426 	tp->pdev = pdev;
17427 	tp->dev = dev;
17428 	tp->rx_mode = TG3_DEF_RX_MODE;
17429 	tp->tx_mode = TG3_DEF_TX_MODE;
17430 	tp->irq_sync = 1;
17431 
17432 	if (tg3_debug > 0)
17433 		tp->msg_enable = tg3_debug;
17434 	else
17435 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17436 
17437 	if (pdev_is_ssb_gige_core(pdev)) {
17438 		tg3_flag_set(tp, IS_SSB_CORE);
17439 		if (ssb_gige_must_flush_posted_writes(pdev))
17440 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17441 		if (ssb_gige_one_dma_at_once(pdev))
17442 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17443 		if (ssb_gige_have_roboswitch(pdev)) {
17444 			tg3_flag_set(tp, USE_PHYLIB);
17445 			tg3_flag_set(tp, ROBOSWITCH);
17446 		}
17447 		if (ssb_gige_is_rgmii(pdev))
17448 			tg3_flag_set(tp, RGMII_MODE);
17449 	}
17450 
17451 	/* The word/byte swap controls here control register access byte
17452 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17453 	 * setting below.
17454 	 */
17455 	tp->misc_host_ctrl =
17456 		MISC_HOST_CTRL_MASK_PCI_INT |
17457 		MISC_HOST_CTRL_WORD_SWAP |
17458 		MISC_HOST_CTRL_INDIR_ACCESS |
17459 		MISC_HOST_CTRL_PCISTATE_RW;
17460 
17461 	/* The NONFRM (non-frame) byte/word swap controls take effect
17462 	 * on descriptor entries, i.e. anything which isn't packet data.
17463 	 *
17464 	 * The StrongARM chips on the board (one for tx, one for rx)
17465 	 * are running in big-endian mode.
17466 	 */
17467 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17468 			GRC_MODE_WSWAP_NONFRM_DATA);
17469 #ifdef __BIG_ENDIAN
17470 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17471 #endif
17472 	spin_lock_init(&tp->lock);
17473 	spin_lock_init(&tp->indirect_lock);
17474 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17475 
17476 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17477 	if (!tp->regs) {
17478 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17479 		err = -ENOMEM;
17480 		goto err_out_free_dev;
17481 	}
17482 
17483 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17484 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17485 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17486 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17487 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17488 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17489 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17490 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17491 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17492 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17493 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17494 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17495 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17496 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17497 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17498 		tg3_flag_set(tp, ENABLE_APE);
17499 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17500 		if (!tp->aperegs) {
17501 			dev_err(&pdev->dev,
17502 				"Cannot map APE registers, aborting\n");
17503 			err = -ENOMEM;
17504 			goto err_out_iounmap;
17505 		}
17506 	}
17507 
17508 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17509 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17510 
17511 	dev->ethtool_ops = &tg3_ethtool_ops;
17512 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17513 	dev->netdev_ops = &tg3_netdev_ops;
17514 	dev->irq = pdev->irq;
17515 
17516 	err = tg3_get_invariants(tp, ent);
17517 	if (err) {
17518 		dev_err(&pdev->dev,
17519 			"Problem fetching invariants of chip, aborting\n");
17520 		goto err_out_apeunmap;
17521 	}
17522 
17523 	/* The EPB bridge inside the 5714, 5715, and 5780, and any
17524 	 * device behind the EPB, cannot support DMA addresses > 40 bits.
17525 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17526 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17527 	 * do DMA address check in tg3_start_xmit().
17528 	 */
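	/* persist_dma_mask bounds the coherent (descriptor and status
	 * block) allocations, while dma_mask bounds streaming skb
	 * mappings; with CONFIG_HIGHMEM the streaming mask stays 64-bit
	 * and out-of-range buffers are caught and worked around in
	 * tg3_start_xmit() as noted above.
	 */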
17529 	if (tg3_flag(tp, IS_5788))
17530 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17531 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17532 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17533 #ifdef CONFIG_HIGHMEM
17534 		dma_mask = DMA_BIT_MASK(64);
17535 #endif
17536 	} else
17537 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17538 
17539 	/* Configure DMA attributes. */
17540 	if (dma_mask > DMA_BIT_MASK(32)) {
17541 		err = pci_set_dma_mask(pdev, dma_mask);
17542 		if (!err) {
17543 			features |= NETIF_F_HIGHDMA;
17544 			err = pci_set_consistent_dma_mask(pdev,
17545 							  persist_dma_mask);
17546 			if (err < 0) {
17547 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17548 					"DMA for consistent allocations\n");
17549 				goto err_out_apeunmap;
17550 			}
17551 		}
17552 	}
17553 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17554 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17555 		if (err) {
17556 			dev_err(&pdev->dev,
17557 				"No usable DMA configuration, aborting\n");
17558 			goto err_out_apeunmap;
17559 		}
17560 	}
17561 
17562 	tg3_init_bufmgr_config(tp);
17563 
17564 	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17565 
17566 	/* 5700 B0 chips do not support checksumming correctly due
17567 	 * to hardware bugs.
17568 	 */
17569 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17570 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17571 
17572 		if (tg3_flag(tp, 5755_PLUS))
17573 			features |= NETIF_F_IPV6_CSUM;
17574 	}
17575 
17576 	/* TSO is on by default on chips that support hardware TSO.
17577 	 * Firmware TSO on older chips gives lower performance, so it
17578 	 * is off by default, but can be enabled using ethtool.
17579 	 */
17580 	if ((tg3_flag(tp, HW_TSO_1) ||
17581 	     tg3_flag(tp, HW_TSO_2) ||
17582 	     tg3_flag(tp, HW_TSO_3)) &&
17583 	    (features & NETIF_F_IP_CSUM))
17584 		features |= NETIF_F_TSO;
17585 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17586 		if (features & NETIF_F_IPV6_CSUM)
17587 			features |= NETIF_F_TSO6;
17588 		if (tg3_flag(tp, HW_TSO_3) ||
17589 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17590 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17591 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17592 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17593 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17594 			features |= NETIF_F_TSO_ECN;
17595 	}
17596 
17597 	dev->features |= features;
17598 	dev->vlan_features |= features;
17599 
17600 	/*
17601 	 * Add loopback capability only for a subset of devices that support
17602 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17603 	 * loopback for the remaining devices.
17604 	 */
17605 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17606 	    !tg3_flag(tp, CPMU_PRESENT))
17607 		/* Add the loopback capability */
17608 		features |= NETIF_F_LOOPBACK;
17609 
17610 	dev->hw_features |= features;
17611 
17612 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17613 	    !tg3_flag(tp, TSO_CAPABLE) &&
17614 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17615 		tg3_flag_set(tp, MAX_RXPEND_64);
17616 		tp->rx_pending = 63;
17617 	}
17618 
17619 	err = tg3_get_device_address(tp);
17620 	if (err) {
17621 		dev_err(&pdev->dev,
17622 			"Could not obtain valid ethernet address, aborting\n");
17623 		goto err_out_apeunmap;
17624 	}
17625 
17626 	/*
17627 	 * Reset the chip in case the UNDI or EFI driver did not shut
17628 	 * it down.  The DMA self test will enable WDMAC and we'll see
17629 	 * (spurious) pending DMA on the PCI bus at that point.
17630 	 */
17631 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17632 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17633 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17634 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17635 	}
17636 
17637 	err = tg3_test_dma(tp);
17638 	if (err) {
17639 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17640 		goto err_out_apeunmap;
17641 	}
17642 
17643 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17644 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17645 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17646 	for (i = 0; i < tp->irq_max; i++) {
17647 		struct tg3_napi *tnapi = &tp->napi[i];
17648 
17649 		tnapi->tp = tp;
17650 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17651 
17652 		tnapi->int_mbox = intmbx;
17653 		if (i <= 4)
17654 			intmbx += 0x8;
17655 		else
17656 			intmbx += 0x4;
17657 
17658 		tnapi->consmbox = rcvmbx;
17659 		tnapi->prodmbox = sndmbx;
17660 
17661 		if (i)
17662 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17663 		else
17664 			tnapi->coal_now = HOSTCC_MODE_NOW;
17665 
17666 		if (!tg3_flag(tp, SUPPORT_MSIX))
17667 			break;
17668 
17669 		/*
17670 		 * If we support MSIX, we'll be using RSS.  If we're using
17671 		 * RSS, the first vector only handles link interrupts and the
17672 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17673 		 * mailbox values for the next iteration.  The values we setup
17674 		 * mailbox values for the next iteration.  The values we set up
17675 		 */
17676 		if (!i)
17677 			continue;
17678 
17679 		rcvmbx += 0x8;
17680 
17681 		if (sndmbx & 0x4)
17682 			sndmbx -= 0x4;
17683 		else
17684 			sndmbx += 0xc;
17685 	}
17686 
17687 	tg3_init_coal(tp);
17688 
17689 	pci_set_drvdata(pdev, dev);
17690 
17691 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17692 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17693 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17694 		tg3_flag_set(tp, PTP_CAPABLE);
17695 
17696 	tg3_timer_init(tp);
17697 
17698 	tg3_carrier_off(tp);
17699 
17700 	err = register_netdev(dev);
17701 	if (err) {
17702 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17703 		goto err_out_apeunmap;
17704 	}
17705 
17706 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17707 		    tp->board_part_number,
17708 		    tg3_chip_rev_id(tp),
17709 		    tg3_bus_string(tp, str),
17710 		    dev->dev_addr);
17711 
17712 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17713 		struct phy_device *phydev;
17714 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17715 		netdev_info(dev,
17716 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17717 			    phydev->drv->name, dev_name(&phydev->dev));
17718 	} else {
17719 		char *ethtype;
17720 
17721 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17722 			ethtype = "10/100Base-TX";
17723 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17724 			ethtype = "1000Base-SX";
17725 		else
17726 			ethtype = "10/100/1000Base-T";
17727 
17728 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17729 			    "(WireSpeed[%d], EEE[%d])\n",
17730 			    tg3_phy_string(tp), ethtype,
17731 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17732 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17733 	}
17734 
17735 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17736 		    (dev->features & NETIF_F_RXCSUM) != 0,
17737 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17738 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17739 		    tg3_flag(tp, ENABLE_ASF) != 0,
17740 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17741 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17742 		    tp->dma_rwctrl,
17743 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17744 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17745 
17746 	pci_save_state(pdev);
17747 
17748 	return 0;
17749 
17750 err_out_apeunmap:
17751 	if (tp->aperegs) {
17752 		iounmap(tp->aperegs);
17753 		tp->aperegs = NULL;
17754 	}
17755 
17756 err_out_iounmap:
17757 	if (tp->regs) {
17758 		iounmap(tp->regs);
17759 		tp->regs = NULL;
17760 	}
17761 
17762 err_out_free_dev:
17763 	free_netdev(dev);
17764 
17765 err_out_free_res:
17766 	pci_release_regions(pdev);
17767 
17768 err_out_disable_pdev:
17769 	if (pci_is_enabled(pdev))
17770 		pci_disable_device(pdev);
17771 	return err;
17772 }
17773 
17774 static void tg3_remove_one(struct pci_dev *pdev)
17775 {
17776 	struct net_device *dev = pci_get_drvdata(pdev);
17777 
17778 	if (dev) {
17779 		struct tg3 *tp = netdev_priv(dev);
17780 
17781 		release_firmware(tp->fw);
17782 
17783 		tg3_reset_task_cancel(tp);
17784 
17785 		if (tg3_flag(tp, USE_PHYLIB)) {
17786 			tg3_phy_fini(tp);
17787 			tg3_mdio_fini(tp);
17788 		}
17789 
17790 		unregister_netdev(dev);
17791 		if (tp->aperegs) {
17792 			iounmap(tp->aperegs);
17793 			tp->aperegs = NULL;
17794 		}
17795 		if (tp->regs) {
17796 			iounmap(tp->regs);
17797 			tp->regs = NULL;
17798 		}
17799 		free_netdev(dev);
17800 		pci_release_regions(pdev);
17801 		pci_disable_device(pdev);
17802 	}
17803 }
17804 
17805 #ifdef CONFIG_PM_SLEEP
17806 static int tg3_suspend(struct device *device)
17807 {
17808 	struct pci_dev *pdev = to_pci_dev(device);
17809 	struct net_device *dev = pci_get_drvdata(pdev);
17810 	struct tg3 *tp = netdev_priv(dev);
17811 	int err = 0;
17812 
17813 	rtnl_lock();
17814 
17815 	if (!netif_running(dev))
17816 		goto unlock;
17817 
17818 	tg3_reset_task_cancel(tp);
17819 	tg3_phy_stop(tp);
17820 	tg3_netif_stop(tp);
17821 
17822 	tg3_timer_stop(tp);
17823 
17824 	tg3_full_lock(tp, 1);
17825 	tg3_disable_ints(tp);
17826 	tg3_full_unlock(tp);
17827 
17828 	netif_device_detach(dev);
17829 
17830 	tg3_full_lock(tp, 0);
17831 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17832 	tg3_flag_clear(tp, INIT_COMPLETE);
17833 	tg3_full_unlock(tp);
17834 
17835 	err = tg3_power_down_prepare(tp);
17836 	if (err) {
17837 		int err2;
17838 
17839 		tg3_full_lock(tp, 0);
17840 
17841 		tg3_flag_set(tp, INIT_COMPLETE);
17842 		err2 = tg3_restart_hw(tp, true);
17843 		if (err2)
17844 			goto out;
17845 
17846 		tg3_timer_start(tp);
17847 
17848 		netif_device_attach(dev);
17849 		tg3_netif_start(tp);
17850 
17851 out:
17852 		tg3_full_unlock(tp);
17853 
17854 		if (!err2)
17855 			tg3_phy_start(tp);
17856 	}
17857 
17858 unlock:
17859 	rtnl_unlock();
17860 	return err;
17861 }
17862 
17863 static int tg3_resume(struct device *device)
17864 {
17865 	struct pci_dev *pdev = to_pci_dev(device);
17866 	struct net_device *dev = pci_get_drvdata(pdev);
17867 	struct tg3 *tp = netdev_priv(dev);
17868 	int err = 0;
17869 
17870 	rtnl_lock();
17871 
17872 	if (!netif_running(dev))
17873 		goto unlock;
17874 
17875 	netif_device_attach(dev);
17876 
17877 	tg3_full_lock(tp, 0);
17878 
17879 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17880 
17881 	tg3_flag_set(tp, INIT_COMPLETE);
17882 	err = tg3_restart_hw(tp,
17883 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17884 	if (err)
17885 		goto out;
17886 
17887 	tg3_timer_start(tp);
17888 
17889 	tg3_netif_start(tp);
17890 
17891 out:
17892 	tg3_full_unlock(tp);
17893 
17894 	if (!err)
17895 		tg3_phy_start(tp);
17896 
17897 unlock:
17898 	rtnl_unlock();
17899 	return err;
17900 }
17901 #endif /* CONFIG_PM_SLEEP */
17902 
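/* SIMPLE_DEV_PM_OPS fills in a dev_pm_ops whose system sleep hooks
 * (suspend/resume and, under CONFIG_PM_SLEEP, freeze/thaw and
 * poweroff/restore) all map onto the two callbacks above.
 */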
17903 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17904 
17905 static void tg3_shutdown(struct pci_dev *pdev)
17906 {
17907 	struct net_device *dev = pci_get_drvdata(pdev);
17908 	struct tg3 *tp = netdev_priv(dev);
17909 
17910 	rtnl_lock();
17911 	netif_device_detach(dev);
17912 
17913 	if (netif_running(dev))
17914 		dev_close(dev);
17915 
17916 	if (system_state == SYSTEM_POWER_OFF)
17917 		tg3_power_down(tp);
17918 
17919 	rtnl_unlock();
17920 }
17921 
17922 /**
17923  * tg3_io_error_detected - called when PCI error is detected
17924  * @pdev: Pointer to PCI device
17925  * @state: The current pci connection state
17926  *
17927  * This function is called after a PCI bus error affecting
17928  * this device has been detected.
17929  */
17930 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17931 					      pci_channel_state_t state)
17932 {
17933 	struct net_device *netdev = pci_get_drvdata(pdev);
17934 	struct tg3 *tp = netdev_priv(netdev);
17935 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17936 
17937 	netdev_info(netdev, "PCI I/O error detected\n");
17938 
17939 	rtnl_lock();
17940 
17941 	/* We may not have a netdev yet if this fires during probe */
17942 	if (!netdev || !netif_running(netdev))
17943 		goto done;
17944 
17945 	tg3_phy_stop(tp);
17946 
17947 	tg3_netif_stop(tp);
17948 
17949 	tg3_timer_stop(tp);
17950 
17951 	/* Want to make sure that the reset task doesn't run */
17952 	tg3_reset_task_cancel(tp);
17953 
17954 	netif_device_detach(netdev);
17955 
17956 	/* Clean up software state, even if MMIO is blocked */
17957 	tg3_full_lock(tp, 0);
17958 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17959 	tg3_full_unlock(tp);
17960 
17961 done:
17962 	if (state == pci_channel_io_perm_failure) {
17963 		if (netdev) {
17964 			tg3_napi_enable(tp);
17965 			dev_close(netdev);
17966 		}
17967 		err = PCI_ERS_RESULT_DISCONNECT;
17968 	} else {
17969 		pci_disable_device(pdev);
17970 	}
17971 
17972 	rtnl_unlock();
17973 
17974 	return err;
17975 }
17976 
17977 /**
17978  * tg3_io_slot_reset - called after the pci bus has been reset.
17979  * @pdev: Pointer to PCI device
17980  *
17981  * Restart the card from scratch, as if from a cold-boot.
17982  * At this point, the card has exprienced a hard reset,
17983  * At this point, the card has experienced a hard reset,
17984  * set up identically to what it was at cold boot.
17985  */
17986 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17987 {
17988 	struct net_device *netdev = pci_get_drvdata(pdev);
17989 	struct tg3 *tp = netdev_priv(netdev);
17990 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17991 	int err;
17992 
17993 	rtnl_lock();
17994 
17995 	if (pci_enable_device(pdev)) {
17996 		dev_err(&pdev->dev,
17997 			"Cannot re-enable PCI device after reset.\n");
17998 		goto done;
17999 	}
18000 
18001 	pci_set_master(pdev);
18002 	pci_restore_state(pdev);
18003 	pci_save_state(pdev);
18004 
18005 	if (!netdev || !netif_running(netdev)) {
18006 		rc = PCI_ERS_RESULT_RECOVERED;
18007 		goto done;
18008 	}
18009 
18010 	err = tg3_power_up(tp);
18011 	if (err)
18012 		goto done;
18013 
18014 	rc = PCI_ERS_RESULT_RECOVERED;
18015 
18016 done:
18017 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18018 		tg3_napi_enable(tp);
18019 		dev_close(netdev);
18020 	}
18021 	rtnl_unlock();
18022 
18023 	return rc;
18024 }
18025 
18026 /**
18027  * tg3_io_resume - called when traffic can start flowing again.
18028  * @pdev: Pointer to PCI device
18029  *
18030  * This callback is called when the error recovery driver tells
18031  * us that it's OK to resume normal operation.
18032  */
18033 static void tg3_io_resume(struct pci_dev *pdev)
18034 {
18035 	struct net_device *netdev = pci_get_drvdata(pdev);
18036 	struct tg3 *tp = netdev_priv(netdev);
18037 	int err;
18038 
18039 	rtnl_lock();
18040 
18041 	if (!netif_running(netdev))
18042 		goto done;
18043 
18044 	tg3_full_lock(tp, 0);
18045 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18046 	tg3_flag_set(tp, INIT_COMPLETE);
18047 	err = tg3_restart_hw(tp, true);
18048 	if (err) {
18049 		tg3_full_unlock(tp);
18050 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18051 		goto done;
18052 	}
18053 
18054 	netif_device_attach(netdev);
18055 
18056 	tg3_timer_start(tp);
18057 
18058 	tg3_netif_start(tp);
18059 
18060 	tg3_full_unlock(tp);
18061 
18062 	tg3_phy_start(tp);
18063 
18064 done:
18065 	rtnl_unlock();
18066 }
18067 
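/* AER recovery walks these callbacks in order: error_detected()
 * quiesces the driver, slot_reset() re-enables the device after the
 * bus/link reset, and resume() restarts traffic.  See
 * Documentation/PCI/pci-error-recovery.txt.
 */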
18068 static const struct pci_error_handlers tg3_err_handler = {
18069 	.error_detected	= tg3_io_error_detected,
18070 	.slot_reset	= tg3_io_slot_reset,
18071 	.resume		= tg3_io_resume
18072 };
18073 
18074 static struct pci_driver tg3_driver = {
18075 	.name		= DRV_MODULE_NAME,
18076 	.id_table	= tg3_pci_tbl,
18077 	.probe		= tg3_init_one,
18078 	.remove		= tg3_remove_one,
18079 	.err_handler	= &tg3_err_handler,
18080 	.driver.pm	= &tg3_pm_ops,
18081 	.shutdown	= tg3_shutdown,
18082 };
18083 
18084 module_pci_driver(tg3_driver);
18085