/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
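
/* Illustration (added comment, not in the original source): token pasting
 * in the wrappers above means that, for example,
 *
 *	tg3_flag(tp, TAGGED_STATUS)
 *
 * expands to _tg3_flag(TG3_FLAG_TAGGED_STATUS, (tp)->tg3_flags), so every
 * flag name is checked against enum TG3_FLAGS at compile time.
 */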

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			134
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Sep 16, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
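
/* Worked example (added comment): because TG3_TX_RING_SIZE is a power of
 * two, the AND above behaves like a modulo: NEXT_TX(510) == 511 and
 * NEXT_TX(511) == (512 & 511) == 0, wrapping back to the start of the
 * ring without a divide instruction.
 */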

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
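
/* Note (added comment): the division above is integer division, so
 * TG3_FW_UPDATE_FREQ_SEC evaluates to 5 / 2 == 2; the periodic firmware
 * update event is thus issued every 2 seconds, half the 5 second
 * firmware timeout.
 */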

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, e.g. when the GPIOs are toggled to switch
 * power.  TG3PCI_CLOCK_CTRL is another, e.g. when the clock frequencies are
 * changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND)) {
		/* Non-posted methods */
		tp->write32(tp, off, val);
	} else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
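
/* Usage sketch (added comment, mirroring calls made elsewhere in this
 * file; illustrative only):
 *
 *	tw32(HOSTCC_MODE, tp->coal_now);		    posted write
 *	tw32_f(MAC_MI_COM, frame_val);			    write + read-back flush
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);	    flush + 40 usec settle
 *
 * The _f and _wait_f forms funnel through _tw32_flush() above, so the
 * read-back and optional delay rules live in one place.
 */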

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
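
/* Illustrative pairing (added comment): callers bracket accesses to
 * APE-shared resources with these two helpers, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */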

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}
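
/* Note (added comment): on success tg3_ape_event_lock() returns 0 with
 * TG3_APE_LOCK_MEM still held -- the loop only drops the lock while the
 * previous event is pending.  Callers such as tg3_ape_send_event() below
 * are responsible for the matching tg3_ape_unlock().
 */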

/* Poll until the APE clears APE_EVENT_STATUS_EVENT_PENDING.  Returns
 * zero once the event has been serviced, nonzero on timeout.
 */
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
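
/* Flow summary (added comment, derived from the code above): each chunk
 * is fetched by (1) taking the MEM lock via tg3_ape_event_lock(),
 * (2) posting a SCRTCHPD_READ driver event with the offset and length in
 * the shared message buffer, (3) ringing the doorbell through
 * TG3_APE_EVENT, and (4) copying the reply out of the message area once
 * EVENT_PENDING clears.
 */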

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else {
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
		}

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000
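
/* Sizing note (added comment): each poll iteration below waits 10 usec,
 * so PHY_BUSY_LOOPS bounds a single MDIO transaction to roughly
 * 5000 * 10 usec = 50 ms before -EBUSY is returned.
 */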

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
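
/* Background (added comment; register names are tg3's aliases): the two
 * helpers above implement the standard Clause 22 indirect access to
 * Clause 45 MMD registers -- write the devad to the MMD control
 * register, write the target register address, switch the control
 * register to no-post-increment data mode, then read or write the data
 * register.
 */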

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else {
		tp->phy_addr = TG3_PHY_MII_ADDR;
	}

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
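
/* Worked example (added comment): with TG3_FW_EVENT_TIMEOUT_USEC at
 * 2500, the clamp above caps delay_cnt at (2500 >> 3) + 1 == 313
 * iterations of 8 usec each, so the driver busy-waits at most ~2.5 ms
 * for the firmware to ACK the previous event.
 */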

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1764 
1765 /* tp->lock is held. */
1766 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1767 {
1768 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1769 		switch (kind) {
1770 		case RESET_KIND_INIT:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_START_DONE);
1773 			break;
1774 
1775 		case RESET_KIND_SHUTDOWN:
1776 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 				      DRV_STATE_UNLOAD_DONE);
1778 			break;
1779 
1780 		default:
1781 			break;
1782 		}
1783 	}
1784 }
1785 
1786 /* tp->lock is held. */
1787 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1788 {
1789 	if (tg3_flag(tp, ENABLE_ASF)) {
1790 		switch (kind) {
1791 		case RESET_KIND_INIT:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_START);
1794 			break;
1795 
1796 		case RESET_KIND_SHUTDOWN:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_UNLOAD);
1799 			break;
1800 
1801 		case RESET_KIND_SUSPEND:
1802 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1803 				      DRV_STATE_SUSPEND);
1804 			break;
1805 
1806 		default:
1807 			break;
1808 		}
1809 	}
1810 }
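
/* tg3_write_sig_pre_reset() and tg3_write_sig_legacy() share the same
 * RESET_KIND_* to DRV_STATE_* mapping; only the guarding flag differs
 * (ASF_NEW_HANDSHAKE vs. ENABLE_ASF).  A common helper could look like
 * this (sketch only, not a helper the driver actually defines):
 */
static void tg3_write_sig_sketch(struct tg3 *tp, int kind)
{
	u32 state;

	switch (kind) {
	case RESET_KIND_INIT:
		state = DRV_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		state = DRV_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		state = DRV_STATE_SUSPEND;
		break;
	default:
		return;		/* unknown kinds write nothing, as above */
	}

	tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX, state);
}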
1811 
1812 static int tg3_poll_fw(struct tg3 *tp)
1813 {
1814 	int i;
1815 	u32 val;
1816 
1817 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1818 		return 0;
1819 
1820 	if (tg3_flag(tp, IS_SSB_CORE)) {
1821 		/* We don't use firmware. */
1822 		return 0;
1823 	}
1824 
1825 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1826 		/* Wait up to 20ms for init done. */
1827 		for (i = 0; i < 200; i++) {
1828 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1829 				return 0;
1830 			if (pci_channel_offline(tp->pdev))
1831 				return -ENODEV;
1832 
1833 			udelay(100);
1834 		}
1835 		return -ENODEV;
1836 	}
1837 
1838 	/* Wait for firmware initialization to complete. */
1839 	for (i = 0; i < 100000; i++) {
1840 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1841 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1842 			break;
1843 		if (pci_channel_offline(tp->pdev)) {
1844 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1845 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1846 				netdev_info(tp->dev, "No firmware running\n");
1847 			}
1848 
1849 			break;
1850 		}
1851 
1852 		udelay(10);
1853 	}
1854 
1855 	/* Chip might not be fitted with firmware.  Some Sun onboard
1856 	 * parts are configured like that.  So don't signal the timeout
1857 	 * of the above loop as an error, but do report the lack of
1858 	 * running firmware once.
1859 	 */
1860 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1861 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1862 
1863 		netdev_info(tp->dev, "No firmware running\n");
1864 	}
1865 
1866 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1867 		/* The 57765 A0 needs a little more
1868 		 * time to do some important work.
1869 		 */
1870 		mdelay(10);
1871 	}
1872 
1873 	return 0;
1874 }
1875 
1876 static void tg3_link_report(struct tg3 *tp)
1877 {
1878 	if (!netif_carrier_ok(tp->dev)) {
1879 		netif_info(tp, link, tp->dev, "Link is down\n");
1880 		tg3_ump_link_report(tp);
1881 	} else if (netif_msg_link(tp)) {
1882 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1883 			    (tp->link_config.active_speed == SPEED_1000 ?
1884 			     1000 :
1885 			     (tp->link_config.active_speed == SPEED_100 ?
1886 			      100 : 10)),
1887 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1888 			     "full" : "half"));
1889 
1890 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1891 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1892 			    "on" : "off",
1893 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1894 			    "on" : "off");
1895 
1896 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1897 			netdev_info(tp->dev, "EEE is %s\n",
1898 				    tp->setlpicnt ? "enabled" : "disabled");
1899 
1900 		tg3_ump_link_report(tp);
1901 	}
1902 
1903 	tp->link_up = netif_carrier_ok(tp->dev);
1904 }
1905 
1906 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1907 {
1908 	u32 flowctrl = 0;
1909 
1910 	if (adv & ADVERTISE_PAUSE_CAP) {
1911 		flowctrl |= FLOW_CTRL_RX;
1912 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1913 			flowctrl |= FLOW_CTRL_TX;
1914 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1915 		flowctrl |= FLOW_CTRL_TX;
1916 
1917 	return flowctrl;
1918 }
1919 
1920 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1921 {
1922 	u16 miireg;
1923 
1924 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1925 		miireg = ADVERTISE_1000XPAUSE;
1926 	else if (flow_ctrl & FLOW_CTRL_TX)
1927 		miireg = ADVERTISE_1000XPSE_ASYM;
1928 	else if (flow_ctrl & FLOW_CTRL_RX)
1929 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1930 	else
1931 		miireg = 0;
1932 
1933 	return miireg;
1934 }
1935 
1936 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1937 {
1938 	u32 flowctrl = 0;
1939 
1940 	if (adv & ADVERTISE_1000XPAUSE) {
1941 		flowctrl |= FLOW_CTRL_RX;
1942 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1943 			flowctrl |= FLOW_CTRL_TX;
1944 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1945 		flowctrl |= FLOW_CTRL_TX;
1946 
1947 	return flowctrl;
1948 }
1949 
1950 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1951 {
1952 	u8 cap = 0;
1953 
1954 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1955 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1956 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1957 		if (lcladv & ADVERTISE_1000XPAUSE)
1958 			cap = FLOW_CTRL_RX;
1959 		if (rmtadv & ADVERTISE_1000XPAUSE)
1960 			cap = FLOW_CTRL_TX;
1961 	}
1962 
1963 	return cap;
1964 }
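
/* Worked example of the 1000BASE-X pause resolution above (IEEE 802.3
 * Annex 28B): with the local end advertising PAUSE | ASYM and the link
 * partner advertising ASYM only, both sides agree on ASYM and the local
 * side also advertises PAUSE, so the result is Rx-only flow control.
 * (Sketch for illustration; not a function the driver defines.)
 */
static inline u8 tg3_flowctrl_1000X_example(void)
{
	u16 lcladv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	u16 rmtadv = ADVERTISE_1000XPSE_ASYM;

	return tg3_resolve_flowctrl_1000X(lcladv, rmtadv); /* FLOW_CTRL_RX */
}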
1965 
1966 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1967 {
1968 	u8 autoneg;
1969 	u8 flowctrl = 0;
1970 	u32 old_rx_mode = tp->rx_mode;
1971 	u32 old_tx_mode = tp->tx_mode;
1972 
1973 	if (tg3_flag(tp, USE_PHYLIB))
1974 		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1975 	else
1976 		autoneg = tp->link_config.autoneg;
1977 
1978 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1979 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1980 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1981 		else
1982 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1983 	} else
1984 		flowctrl = tp->link_config.flowctrl;
1985 
1986 	tp->link_config.active_flowctrl = flowctrl;
1987 
1988 	if (flowctrl & FLOW_CTRL_RX)
1989 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1990 	else
1991 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1992 
1993 	if (old_rx_mode != tp->rx_mode)
1994 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1995 
1996 	if (flowctrl & FLOW_CTRL_TX)
1997 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1998 	else
1999 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
2000 
2001 	if (old_tx_mode != tp->tx_mode)
2002 		tw32_f(MAC_TX_MODE, tp->tx_mode);
2003 }
2004 
2005 static void tg3_adjust_link(struct net_device *dev)
2006 {
2007 	u8 oldflowctrl, linkmesg = 0;
2008 	u32 mac_mode, lcl_adv, rmt_adv;
2009 	struct tg3 *tp = netdev_priv(dev);
2010 	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2011 
2012 	spin_lock_bh(&tp->lock);
2013 
2014 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2015 				    MAC_MODE_HALF_DUPLEX);
2016 
2017 	oldflowctrl = tp->link_config.active_flowctrl;
2018 
2019 	if (phydev->link) {
2020 		lcl_adv = 0;
2021 		rmt_adv = 0;
2022 
2023 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 		else if (phydev->speed == SPEED_1000 ||
2026 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2027 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2028 		else
2029 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2030 
2031 		if (phydev->duplex == DUPLEX_HALF)
2032 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2033 		else {
2034 			lcl_adv = mii_advertise_flowctrl(
2035 				  tp->link_config.flowctrl);
2036 
2037 			if (phydev->pause)
2038 				rmt_adv = LPA_PAUSE_CAP;
2039 			if (phydev->asym_pause)
2040 				rmt_adv |= LPA_PAUSE_ASYM;
2041 		}
2042 
2043 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2044 	} else
2045 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2046 
2047 	if (mac_mode != tp->mac_mode) {
2048 		tp->mac_mode = mac_mode;
2049 		tw32_f(MAC_MODE, tp->mac_mode);
2050 		udelay(40);
2051 	}
2052 
2053 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2054 		if (phydev->speed == SPEED_10)
2055 			tw32(MAC_MI_STAT,
2056 			     MAC_MI_STAT_10MBPS_MODE |
2057 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2058 		else
2059 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2060 	}
2061 
2062 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 	else
2068 		tw32(MAC_TX_LENGTHS,
2069 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2070 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2071 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2072 
2073 	if (phydev->link != tp->old_link ||
2074 	    phydev->speed != tp->link_config.active_speed ||
2075 	    phydev->duplex != tp->link_config.active_duplex ||
2076 	    oldflowctrl != tp->link_config.active_flowctrl)
2077 		linkmesg = 1;
2078 
2079 	tp->old_link = phydev->link;
2080 	tp->link_config.active_speed = phydev->speed;
2081 	tp->link_config.active_duplex = phydev->duplex;
2082 
2083 	spin_unlock_bh(&tp->lock);
2084 
2085 	if (linkmesg)
2086 		tg3_link_report(tp);
2087 }
2088 
2089 static int tg3_phy_init(struct tg3 *tp)
2090 {
2091 	struct phy_device *phydev;
2092 
2093 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2094 		return 0;
2095 
2096 	/* Bring the PHY back to a known state. */
2097 	tg3_bmcr_reset(tp);
2098 
2099 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2100 
2101 	/* Attach the MAC to the PHY. */
2102 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2103 			     tg3_adjust_link, phydev->interface);
2104 	if (IS_ERR(phydev)) {
2105 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2106 		return PTR_ERR(phydev);
2107 	}
2108 
2109 	/* Mask with MAC supported features. */
2110 	switch (phydev->interface) {
2111 	case PHY_INTERFACE_MODE_GMII:
2112 	case PHY_INTERFACE_MODE_RGMII:
2113 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2114 			phydev->supported &= (PHY_GBIT_FEATURES |
2115 					      SUPPORTED_Pause |
2116 					      SUPPORTED_Asym_Pause);
2117 			break;
2118 		}
2119 		/* fallthru */
2120 	case PHY_INTERFACE_MODE_MII:
2121 		phydev->supported &= (PHY_BASIC_FEATURES |
2122 				      SUPPORTED_Pause |
2123 				      SUPPORTED_Asym_Pause);
2124 		break;
2125 	default:
2126 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2127 		return -EINVAL;
2128 	}
2129 
2130 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2131 
2132 	phydev->advertising = phydev->supported;
2133 
2134 	return 0;
2135 }
2136 
2137 static void tg3_phy_start(struct tg3 *tp)
2138 {
2139 	struct phy_device *phydev;
2140 
2141 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2142 		return;
2143 
2144 	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2145 
2146 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2147 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2148 		phydev->speed = tp->link_config.speed;
2149 		phydev->duplex = tp->link_config.duplex;
2150 		phydev->autoneg = tp->link_config.autoneg;
2151 		phydev->advertising = tp->link_config.advertising;
2152 	}
2153 
2154 	phy_start(phydev);
2155 
2156 	phy_start_aneg(phydev);
2157 }
2158 
2159 static void tg3_phy_stop(struct tg3 *tp)
2160 {
2161 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2162 		return;
2163 
2164 	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2165 }
2166 
2167 static void tg3_phy_fini(struct tg3 *tp)
2168 {
2169 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2170 		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2171 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2172 	}
2173 }
2174 
2175 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2176 {
2177 	int err;
2178 	u32 val;
2179 
2180 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2181 		return 0;
2182 
2183 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2184 		/* Cannot do read-modify-write on 5401 */
2185 		err = tg3_phy_auxctl_write(tp,
2186 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2187 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2188 					   0x4c20);
2189 		goto done;
2190 	}
2191 
2192 	err = tg3_phy_auxctl_read(tp,
2193 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2194 	if (err)
2195 		return err;
2196 
2197 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2198 	err = tg3_phy_auxctl_write(tp,
2199 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2200 
2201 done:
2202 	return err;
2203 }
2204 
2205 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2206 {
2207 	u32 phytest;
2208 
2209 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2210 		u32 phy;
2211 
2212 		tg3_writephy(tp, MII_TG3_FET_TEST,
2213 			     phytest | MII_TG3_FET_SHADOW_EN);
2214 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2215 			if (enable)
2216 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217 			else
2218 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2219 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2220 		}
2221 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2222 	}
2223 }
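
/* FET-class PHYs expose extra registers behind a shadow window: set
 * MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST, access the shadow register,
 * then restore MII_TG3_FET_TEST.  The same open-coded pattern recurs in
 * tg3_phy_toggle_automdix() and tg3_power_down_phy() below.  A generic
 * read-modify-write sketch (illustrative only, not defined by the driver):
 */
static void tg3_fet_shdw_rmw_sketch(struct tg3 *tp, u32 reg,
				    u32 clear, u32 set)
{
	u32 test, val;

	if (tg3_readphy(tp, MII_TG3_FET_TEST, &test))
		return;

	tg3_writephy(tp, MII_TG3_FET_TEST, test | MII_TG3_FET_SHADOW_EN);
	if (!tg3_readphy(tp, reg, &val))
		tg3_writephy(tp, reg, (val & ~clear) | set);
	tg3_writephy(tp, MII_TG3_FET_TEST, test);	/* close the window */
}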
2224 
2225 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2226 {
2227 	u32 reg;
2228 
2229 	if (!tg3_flag(tp, 5705_PLUS) ||
2230 	    (tg3_flag(tp, 5717_PLUS) &&
2231 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2232 		return;
2233 
2234 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235 		tg3_phy_fet_toggle_apd(tp, enable);
2236 		return;
2237 	}
2238 
2239 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2240 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2241 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2242 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2243 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2244 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2245 
2246 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2247 
2249 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2250 	if (enable)
2251 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2252 
2253 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2254 }
2255 
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2257 {
2258 	u32 phy;
2259 
2260 	if (!tg3_flag(tp, 5705_PLUS) ||
2261 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2262 		return;
2263 
2264 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2265 		u32 ephy;
2266 
2267 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2269 
2270 			tg3_writephy(tp, MII_TG3_FET_TEST,
2271 				     ephy | MII_TG3_FET_SHADOW_EN);
2272 			if (!tg3_readphy(tp, reg, &phy)) {
2273 				if (enable)
2274 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 				else
2276 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277 				tg3_writephy(tp, reg, phy);
2278 			}
2279 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2280 		}
2281 	} else {
2282 		int ret;
2283 
2284 		ret = tg3_phy_auxctl_read(tp,
2285 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2286 		if (!ret) {
2287 			if (enable)
2288 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 			else
2290 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291 			tg3_phy_auxctl_write(tp,
2292 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2293 		}
2294 	}
2295 }
2296 
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2298 {
2299 	int ret;
2300 	u32 val;
2301 
2302 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303 		return;
2304 
2305 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306 	if (!ret)
2307 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2309 }
2310 
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2312 {
2313 	u32 otp, phy;
2314 
2315 	if (!tp->phy_otp)
2316 		return;
2317 
2318 	otp = tp->phy_otp;
2319 
2320 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2321 		return;
2322 
2323 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2326 
2327 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2330 
2331 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2334 
2335 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2337 
2338 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2340 
2341 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2344 
2345 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2346 }
2347 
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2349 {
2350 	u32 val;
2351 	struct ethtool_eee *dest = &tp->eee;
2352 
2353 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354 		return;
2355 
2356 	if (eee)
2357 		dest = eee;
2358 
2359 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360 		return;
2361 
2362 	/* Pull eee_active */
2363 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365 		dest->eee_active = 1;
2366 	} else
2367 		dest->eee_active = 0;
2368 
2369 	/* Pull lp advertised settings */
2370 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371 		return;
2372 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 
2374 	/* Pull advertised and eee_enabled settings */
2375 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376 		return;
2377 	dest->eee_enabled = !!val;
2378 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2379 
2380 	/* Pull tx_lpi_enabled */
2381 	val = tr32(TG3_CPMU_EEE_MODE);
2382 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2383 
2384 	/* Pull lpi timer value */
2385 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2386 }
2387 
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2389 {
2390 	u32 val;
2391 
2392 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393 		return;
2394 
2395 	tp->setlpicnt = 0;
2396 
2397 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398 	    current_link_up &&
2399 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2400 	    (tp->link_config.active_speed == SPEED_100 ||
2401 	     tp->link_config.active_speed == SPEED_1000)) {
2402 		u32 eeectl;
2403 
2404 		if (tp->link_config.active_speed == SPEED_1000)
2405 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406 		else
2407 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2408 
2409 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2410 
2411 		tg3_eee_pull_config(tp, NULL);
2412 		if (tp->eee.eee_active)
2413 			tp->setlpicnt = 2;
2414 	}
2415 
2416 	if (!tp->setlpicnt) {
2417 		if (current_link_up &&
2418 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2421 		}
2422 
2423 		val = tr32(TG3_CPMU_EEE_MODE);
2424 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2425 	}
2426 }
2427 
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2429 {
2430 	u32 val;
2431 
2432 	if (tp->link_config.active_speed == SPEED_1000 &&
2433 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435 	     tg3_flag(tp, 57765_CLASS)) &&
2436 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437 		val = MII_TG3_DSP_TAP26_ALNOKO |
2438 		      MII_TG3_DSP_TAP26_RMRXSTO;
2439 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2441 	}
2442 
2443 	val = tr32(TG3_CPMU_EEE_MODE);
2444 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2445 }
2446 
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2448 {
2449 	int limit = 100;
2450 
2451 	while (limit--) {
2452 		u32 tmp32;
2453 
2454 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455 			if ((tmp32 & 0x1000) == 0)
2456 				break;
2457 		}
2458 	}
2459 	if (limit < 0)
2460 		return -EBUSY;
2461 
2462 	return 0;
2463 }
2464 
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2466 {
2467 	static const u32 test_pat[4][6] = {
2468 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2472 	};
2473 	int chan;
2474 
2475 	for (chan = 0; chan < 4; chan++) {
2476 		int i;
2477 
2478 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 			     (chan * 0x2000) | 0x0200);
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2481 
2482 		for (i = 0; i < 6; i++)
2483 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2484 				     test_pat[chan][i]);
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487 		if (tg3_wait_macro_done(tp)) {
2488 			*resetp = 1;
2489 			return -EBUSY;
2490 		}
2491 
2492 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493 			     (chan * 0x2000) | 0x0200);
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501 		if (tg3_wait_macro_done(tp)) {
2502 			*resetp = 1;
2503 			return -EBUSY;
2504 		}
2505 
2506 		for (i = 0; i < 6; i += 2) {
2507 			u32 low, high;
2508 
2509 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511 			    tg3_wait_macro_done(tp)) {
2512 				*resetp = 1;
2513 				return -EBUSY;
2514 			}
2515 			low &= 0x7fff;
2516 			high &= 0x000f;
2517 			if (low != test_pat[chan][i] ||
2518 			    high != test_pat[chan][i+1]) {
2519 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2522 
2523 				return -EBUSY;
2524 			}
2525 		}
2526 	}
2527 
2528 	return 0;
2529 }
2530 
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2532 {
2533 	int chan;
2534 
2535 	for (chan = 0; chan < 4; chan++) {
2536 		int i;
2537 
2538 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539 			     (chan * 0x2000) | 0x0200);
2540 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541 		for (i = 0; i < 6; i++)
2542 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544 		if (tg3_wait_macro_done(tp))
2545 			return -EBUSY;
2546 	}
2547 
2548 	return 0;
2549 }
2550 
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2552 {
2553 	u32 reg32, phy9_orig;
2554 	int retries, do_phy_reset, err;
2555 
2556 	retries = 10;
2557 	do_phy_reset = 1;
2558 	do {
2559 		if (do_phy_reset) {
2560 			err = tg3_bmcr_reset(tp);
2561 			if (err)
2562 				return err;
2563 			do_phy_reset = 0;
2564 		}
2565 
2566 		/* Disable transmitter and interrupt.  */
2567 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568 			continue;
2569 
2570 		reg32 |= 0x3000;
2571 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2572 
2573 		/* Set full-duplex, 1000 mbps.  */
2574 		tg3_writephy(tp, MII_BMCR,
2575 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2576 
2577 		/* Set to master mode.  */
2578 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2579 			continue;
2580 
2581 		tg3_writephy(tp, MII_CTRL1000,
2582 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2583 
2584 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2585 		if (err)
2586 			return err;
2587 
2588 		/* Block the PHY control access.  */
2589 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2590 
2591 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2592 		if (!err)
2593 			break;
2594 	} while (--retries);
2595 
2596 	err = tg3_phy_reset_chanpat(tp);
2597 	if (err)
2598 		return err;
2599 
2600 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2601 
2602 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2604 
2605 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2606 
2607 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2608 
2609 	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2610 		reg32 &= ~0x3000;
2611 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612 	} else if (!err)
2613 		err = -EBUSY;
2614 
2615 	return err;
2616 }
2617 
2618 static void tg3_carrier_off(struct tg3 *tp)
2619 {
2620 	netif_carrier_off(tp->dev);
2621 	tp->link_up = false;
2622 }
2623 
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2625 {
2626 	if (tg3_flag(tp, ENABLE_ASF))
2627 		netdev_warn(tp->dev,
2628 			    "Management side-band traffic will be interrupted during phy settings change\n");
2629 }
2630 
2631 /* This will reset the tigon3 PHY and apply any chip-specific
2632  * workarounds needed to bring the PHY back to a usable state.
2633  */
2634 static int tg3_phy_reset(struct tg3 *tp)
2635 {
2636 	u32 val, cpmuctrl;
2637 	int err;
2638 
2639 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640 		val = tr32(GRC_MISC_CFG);
2641 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2642 		udelay(40);
2643 	}
2644 	err  = tg3_readphy(tp, MII_BMSR, &val);
2645 	err |= tg3_readphy(tp, MII_BMSR, &val);
2646 	if (err != 0)
2647 		return -EBUSY;
2648 
2649 	if (netif_running(tp->dev) && tp->link_up) {
2650 		netif_carrier_off(tp->dev);
2651 		tg3_link_report(tp);
2652 	}
2653 
2654 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2657 		err = tg3_phy_reset_5703_4_5(tp);
2658 		if (err)
2659 			return err;
2660 		goto out;
2661 	}
2662 
2663 	cpmuctrl = 0;
2664 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2667 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2668 			tw32(TG3_CPMU_CTRL,
2669 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2670 	}
2671 
2672 	err = tg3_bmcr_reset(tp);
2673 	if (err)
2674 		return err;
2675 
2676 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2679 
2680 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2681 	}
2682 
2683 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2688 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2689 			udelay(40);
2690 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2691 		}
2692 	}
2693 
2694 	if (tg3_flag(tp, 5717_PLUS) &&
2695 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2696 		return 0;
2697 
2698 	tg3_phy_apply_otp(tp);
2699 
2700 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701 		tg3_phy_toggle_apd(tp, true);
2702 	else
2703 		tg3_phy_toggle_apd(tp, false);
2704 
2705 out:
2706 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2710 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2716 	}
2717 
2718 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2721 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2722 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2724 		}
2725 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730 				tg3_writephy(tp, MII_TG3_TEST1,
2731 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2732 			} else
2733 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2734 
2735 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2736 		}
2737 	}
2738 
2739 	/* Set the Extended packet length bit (bit 14) on all chips
2740 	 * that support jumbo frames. */
2741 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742 		/* Cannot do read-modify-write on 5401 */
2743 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745 		/* Set bit 14 with read-modify-write to preserve other bits */
2746 		err = tg3_phy_auxctl_read(tp,
2747 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2748 		if (!err)
2749 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2751 	}
2752 
2753 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754 	 * jumbo frames transmission.
2755 	 */
2756 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2760 	}
2761 
2762 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763 		/* adjust output voltage */
2764 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2765 	}
2766 
2767 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2769 
2770 	tg3_phy_toggle_automdix(tp, true);
2771 	tg3_phy_set_wirespeed(tp);
2772 	return 0;
2773 }
2774 
2775 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2777 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2778 					  TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2784 
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2790 
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2792 {
2793 	u32 status, shift;
2794 
2795 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2797 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2798 	else
2799 		status = tr32(TG3_CPMU_DRV_STATUS);
2800 
2801 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2803 	status |= (newstat << shift);
2804 
2805 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2807 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2808 	else
2809 		tw32(TG3_CPMU_DRV_STATUS, status);
2810 
2811 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2812 }
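
/* Each PCI function owns a 4-bit nibble of the GPIO message word starting
 * at TG3_APE_GPIO_MSG_SHIFT: tg3_set_function_status() clears the nibble
 * for tp->pci_fn, ORs in the new TG3_GPIO_MSG_* bits, and returns the
 * whole per-function bitmap so callers can test it against the
 * TG3_GPIO_MSG_ALL_* masks (see tg3_frob_aux_power_5717() below).
 */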
2813 
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2815 {
2816 	if (!tg3_flag(tp, IS_NIC))
2817 		return 0;
2818 
2819 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2822 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2823 			return -EIO;
2824 
2825 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2826 
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 
2830 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2831 	} else {
2832 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2834 	}
2835 
2836 	return 0;
2837 }
2838 
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2840 {
2841 	u32 grc_local_ctrl;
2842 
2843 	if (!tg3_flag(tp, IS_NIC) ||
2844 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2846 		return;
2847 
2848 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2849 
2850 	tw32_wait_f(GRC_LOCAL_CTRL,
2851 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2853 
2854 	tw32_wait_f(GRC_LOCAL_CTRL,
2855 		    grc_local_ctrl,
2856 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 
2858 	tw32_wait_f(GRC_LOCAL_CTRL,
2859 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 }
2862 
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2864 {
2865 	if (!tg3_flag(tp, IS_NIC))
2866 		return;
2867 
2868 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2870 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871 			    (GRC_LCLCTRL_GPIO_OE0 |
2872 			     GRC_LCLCTRL_GPIO_OE1 |
2873 			     GRC_LCLCTRL_GPIO_OE2 |
2874 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2875 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2876 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2877 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881 				     GRC_LCLCTRL_GPIO_OE1 |
2882 				     GRC_LCLCTRL_GPIO_OE2 |
2883 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2885 				     tp->grc_local_ctrl;
2886 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2888 
2889 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2892 
2893 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 	} else {
2897 		u32 no_gpio2;
2898 		u32 grc_local_ctrl = 0;
2899 
2900 		/* Workaround to prevent overdrawing Amps. */
2901 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2904 				    grc_local_ctrl,
2905 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2906 		}
2907 
2908 		/* On 5753 and variants, GPIO2 cannot be used. */
2909 		no_gpio2 = tp->nic_sram_data_cfg &
2910 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2911 
2912 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913 				  GRC_LCLCTRL_GPIO_OE1 |
2914 				  GRC_LCLCTRL_GPIO_OE2 |
2915 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2916 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2917 		if (no_gpio2) {
2918 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2920 		}
2921 		tw32_wait_f(GRC_LOCAL_CTRL,
2922 			    tp->grc_local_ctrl | grc_local_ctrl,
2923 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2924 
2925 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2926 
2927 		tw32_wait_f(GRC_LOCAL_CTRL,
2928 			    tp->grc_local_ctrl | grc_local_ctrl,
2929 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 
2931 		if (!no_gpio2) {
2932 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933 			tw32_wait_f(GRC_LOCAL_CTRL,
2934 				    tp->grc_local_ctrl | grc_local_ctrl,
2935 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2936 		}
2937 	}
2938 }
2939 
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2941 {
2942 	u32 msg = 0;
2943 
2944 	/* Serialize power state transitions */
2945 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2946 		return;
2947 
2948 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949 		msg = TG3_GPIO_MSG_NEED_VAUX;
2950 
2951 	msg = tg3_set_function_status(tp, msg);
2952 
2953 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954 		goto done;
2955 
2956 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957 		tg3_pwrsrc_switch_to_vaux(tp);
2958 	else
2959 		tg3_pwrsrc_die_with_vmain(tp);
2960 
2961 done:
2962 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2963 }
2964 
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2966 {
2967 	bool need_vaux = false;
2968 
2969 	/* The GPIOs do something completely different on 57765. */
2970 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2971 		return;
2972 
2973 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2976 		tg3_frob_aux_power_5717(tp, include_wol ?
2977 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2978 		return;
2979 	}
2980 
2981 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982 		struct net_device *dev_peer;
2983 
2984 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2985 
2986 		/* remove_one() may have been run on the peer. */
2987 		if (dev_peer) {
2988 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2989 
2990 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2991 				return;
2992 
2993 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994 			    tg3_flag(tp_peer, ENABLE_ASF))
2995 				need_vaux = true;
2996 		}
2997 	}
2998 
2999 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000 	    tg3_flag(tp, ENABLE_ASF))
3001 		need_vaux = true;
3002 
3003 	if (need_vaux)
3004 		tg3_pwrsrc_switch_to_vaux(tp);
3005 	else
3006 		tg3_pwrsrc_die_with_vmain(tp);
3007 }
3008 
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3010 {
3011 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012 		return 1;
3013 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014 		if (speed != SPEED_10)
3015 			return 1;
3016 	} else if (speed == SPEED_10)
3017 		return 1;
3018 
3019 	return 0;
3020 }
3021 
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3023 {
3024 	switch (tg3_asic_rev(tp)) {
3025 	case ASIC_REV_5700:
3026 	case ASIC_REV_5704:
3027 		return true;
3028 	case ASIC_REV_5780:
3029 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3030 			return true;
3031 		return false;
3032 	case ASIC_REV_5717:
3033 		if (!tp->pci_fn)
3034 			return true;
3035 		return false;
3036 	case ASIC_REV_5719:
3037 	case ASIC_REV_5720:
3038 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3039 		    !tp->pci_fn)
3040 			return true;
3041 		return false;
3042 	}
3043 
3044 	return false;
3045 }
3046 
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3048 {
3049 	switch (tg3_asic_rev(tp)) {
3050 	case ASIC_REV_5719:
3051 	case ASIC_REV_5720:
3052 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3053 		    !tp->pci_fn)
3054 			return true;
3055 		return false;
3056 	}
3057 
3058 	return false;
3059 }
3060 
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3062 {
3063 	u32 val;
3064 
3065 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3066 		return;
3067 
3068 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3072 
3073 			sg_dig_ctrl |=
3074 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3077 		}
3078 		return;
3079 	}
3080 
3081 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3082 		tg3_bmcr_reset(tp);
3083 		val = tr32(GRC_MISC_CFG);
3084 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3085 		udelay(40);
3086 		return;
3087 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3088 		u32 phytest;
3089 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3090 			u32 phy;
3091 
3092 			tg3_writephy(tp, MII_ADVERTISE, 0);
3093 			tg3_writephy(tp, MII_BMCR,
3094 				     BMCR_ANENABLE | BMCR_ANRESTART);
3095 
3096 			tg3_writephy(tp, MII_TG3_FET_TEST,
3097 				     phytest | MII_TG3_FET_SHADOW_EN);
3098 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3100 				tg3_writephy(tp,
3101 					     MII_TG3_FET_SHDW_AUXMODE4,
3102 					     phy);
3103 			}
3104 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3105 		}
3106 		return;
3107 	} else if (do_low_power) {
3108 		if (!tg3_phy_led_bug(tp))
3109 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3111 
3112 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3115 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3116 	}
3117 
3118 	/* The PHY should not be powered down on some chips because
3119 	 * of bugs.
3120 	 */
3121 	if (tg3_phy_power_bug(tp))
3122 		return;
3123 
3124 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3130 	}
3131 
3132 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3133 }
3134 
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3137 {
3138 	if (tg3_flag(tp, NVRAM)) {
3139 		int i;
3140 
3141 		if (tp->nvram_lock_cnt == 0) {
3142 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143 			for (i = 0; i < 8000; i++) {
3144 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 					break;
3146 				udelay(20);
3147 			}
3148 			if (i == 8000) {
3149 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 				return -ENODEV;
3151 			}
3152 		}
3153 		tp->nvram_lock_cnt++;
3154 	}
3155 	return 0;
3156 }
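
/* The arbitration poll above allows 8000 * 20 us, i.e. roughly 160 ms,
 * for the hardware to grant SWARB_GNT1.  nvram_lock_cnt makes the lock
 * recursive, so nested callers already under tp->lock (e.g. the firmware
 * load path) can take it again safely.
 */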
3157 
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3160 {
3161 	if (tg3_flag(tp, NVRAM)) {
3162 		if (tp->nvram_lock_cnt > 0)
3163 			tp->nvram_lock_cnt--;
3164 		if (tp->nvram_lock_cnt == 0)
3165 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3166 	}
3167 }
3168 
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3171 {
3172 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 		u32 nvaccess = tr32(NVRAM_ACCESS);
3174 
3175 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3176 	}
3177 }
3178 
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3181 {
3182 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 		u32 nvaccess = tr32(NVRAM_ACCESS);
3184 
3185 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3186 	}
3187 }
3188 
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190 					u32 offset, u32 *val)
3191 {
3192 	u32 tmp;
3193 	int i;
3194 
3195 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3196 		return -EINVAL;
3197 
3198 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199 					EEPROM_ADDR_DEVID_MASK |
3200 					EEPROM_ADDR_READ);
3201 	tw32(GRC_EEPROM_ADDR,
3202 	     tmp |
3203 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205 	      EEPROM_ADDR_ADDR_MASK) |
3206 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3207 
3208 	for (i = 0; i < 1000; i++) {
3209 		tmp = tr32(GRC_EEPROM_ADDR);
3210 
3211 		if (tmp & EEPROM_ADDR_COMPLETE)
3212 			break;
3213 		msleep(1);
3214 	}
3215 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3216 		return -EBUSY;
3217 
3218 	tmp = tr32(GRC_EEPROM_DATA);
3219 
3220 	/*
3221 	 * The data will always be opposite the native endian
3222 	 * format.  Perform a blind byteswap to compensate.
3223 	 */
3224 	*val = swab32(tmp);
3225 
3226 	return 0;
3227 }
3228 
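/* Each poll in tg3_nvram_exec_cmd() below waits 10 us, so this bounds a
 * single NVRAM command at roughly 100 ms.
 */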
3229 #define NVRAM_CMD_TIMEOUT 10000
3230 
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 {
3233 	int i;
3234 
3235 	tw32(NVRAM_CMD, nvram_cmd);
3236 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3237 		udelay(10);
3238 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3239 			udelay(10);
3240 			break;
3241 		}
3242 	}
3243 
3244 	if (i == NVRAM_CMD_TIMEOUT)
3245 		return -EBUSY;
3246 
3247 	return 0;
3248 }
3249 
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3251 {
3252 	if (tg3_flag(tp, NVRAM) &&
3253 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3254 	    tg3_flag(tp, FLASH) &&
3255 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3257 
3258 		addr = ((addr / tp->nvram_pagesize) <<
3259 			ATMEL_AT45DB0X1B_PAGE_POS) +
3260 		       (addr % tp->nvram_pagesize);
3261 
3262 	return addr;
3263 }
3264 
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3266 {
3267 	if (tg3_flag(tp, NVRAM) &&
3268 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3269 	    tg3_flag(tp, FLASH) &&
3270 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3272 
3273 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274 			tp->nvram_pagesize) +
3275 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3276 
3277 	return addr;
3278 }
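
/* Worked example of the Atmel AT45DB0X1B translation above (assuming the
 * usual geometry of 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 * linear offset 1000 falls in page 1000 / 264 = 3 at byte 1000 % 264 = 208,
 * so tg3_nvram_phys_addr() yields (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() inverts this: (1744 >> 9) * 264 +
 * (1744 & 511) = 3 * 264 + 208 = 1000.
 */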
3279 
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281  * the byteswapping settings for all other register accesses.
3282  * tg3 devices are BE devices, so on a BE machine, the data
3283  * returned will be exactly as it is seen in NVRAM.  On a LE
3284  * machine, the 32-bit value will be byteswapped.
3285  */
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3287 {
3288 	int ret;
3289 
3290 	if (!tg3_flag(tp, NVRAM))
3291 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3292 
3293 	offset = tg3_nvram_phys_addr(tp, offset);
3294 
3295 	if (offset > NVRAM_ADDR_MSK)
3296 		return -EINVAL;
3297 
3298 	ret = tg3_nvram_lock(tp);
3299 	if (ret)
3300 		return ret;
3301 
3302 	tg3_enable_nvram_access(tp);
3303 
3304 	tw32(NVRAM_ADDR, offset);
3305 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3307 
3308 	if (ret == 0)
3309 		*val = tr32(NVRAM_RDDATA);
3310 
3311 	tg3_disable_nvram_access(tp);
3312 
3313 	tg3_nvram_unlock(tp);
3314 
3315 	return ret;
3316 }
3317 
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3320 {
3321 	u32 v;
3322 	int res = tg3_nvram_read(tp, offset, &v);
3323 	if (!res)
3324 		*val = cpu_to_be32(v);
3325 	return res;
3326 }
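
/* Typical use of tg3_nvram_read_be32(): filling a byte buffer in
 * bytestream order, dword by dword.  A minimal sketch (this helper is
 * hypothetical; compare the page pre-read loop in
 * tg3_nvram_write_block_unbuffered() below):
 */
static int tg3_nvram_read_buf_sketch(struct tg3 *tp, u32 offset,
				     u8 *buf, u32 len)
{
	u32 i;

	for (i = 0; i < len; i += 4) {
		int err = tg3_nvram_read_be32(tp, offset + i,
					      (__be32 *)(buf + i));
		if (err)
			return err;
	}

	return 0;
}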
3327 
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329 				    u32 offset, u32 len, u8 *buf)
3330 {
3331 	int i, j, rc = 0;
3332 	u32 val;
3333 
3334 	for (i = 0; i < len; i += 4) {
3335 		u32 addr;
3336 		__be32 data;
3337 
3338 		addr = offset + i;
3339 
3340 		memcpy(&data, buf + i, 4);
3341 
3342 		/*
3343 		 * The SEEPROM interface expects the data to always be opposite
3344 		 * the native endian format.  We accomplish this by reversing
3345 		 * all the operations that would have been performed on the
3346 		 * data from a call to tg3_nvram_read_be32().
3347 		 */
3348 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3349 
3350 		val = tr32(GRC_EEPROM_ADDR);
3351 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3352 
3353 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3354 			EEPROM_ADDR_READ);
3355 		tw32(GRC_EEPROM_ADDR, val |
3356 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3357 			(addr & EEPROM_ADDR_ADDR_MASK) |
3358 			EEPROM_ADDR_START |
3359 			EEPROM_ADDR_WRITE);
3360 
3361 		for (j = 0; j < 1000; j++) {
3362 			val = tr32(GRC_EEPROM_ADDR);
3363 
3364 			if (val & EEPROM_ADDR_COMPLETE)
3365 				break;
3366 			msleep(1);
3367 		}
3368 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3369 			rc = -EBUSY;
3370 			break;
3371 		}
3372 	}
3373 
3374 	return rc;
3375 }
3376 
3377 /* offset and length are dword aligned */
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3379 		u8 *buf)
3380 {
3381 	int ret = 0;
3382 	u32 pagesize = tp->nvram_pagesize;
3383 	u32 pagemask = pagesize - 1;
3384 	u32 nvram_cmd;
3385 	u8 *tmp;
3386 
3387 	tmp = kmalloc(pagesize, GFP_KERNEL);
3388 	if (tmp == NULL)
3389 		return -ENOMEM;
3390 
3391 	while (len) {
3392 		int j;
3393 		u32 phy_addr, page_off, size;
3394 
3395 		phy_addr = offset & ~pagemask;
3396 
3397 		for (j = 0; j < pagesize; j += 4) {
3398 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399 						  (__be32 *) (tmp + j));
3400 			if (ret)
3401 				break;
3402 		}
3403 		if (ret)
3404 			break;
3405 
3406 		page_off = offset & pagemask;
3407 		size = pagesize;
3408 		if (len < size)
3409 			size = len;
3410 
3411 		len -= size;
3412 
3413 		memcpy(tmp + page_off, buf, size);
3414 
3415 		offset = offset + (pagesize - page_off);
3416 
3417 		tg3_enable_nvram_access(tp);
3418 
3419 		/*
3420 		 * Before we can erase the flash page, we need
3421 		 * to issue a special "write enable" command.
3422 		 */
3423 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3424 
3425 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3426 			break;
3427 
3428 		/* Erase the target page */
3429 		tw32(NVRAM_ADDR, phy_addr);
3430 
3431 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3433 
3434 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 			break;
3436 
3437 		/* Issue another write enable to start the write. */
3438 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3439 
3440 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3441 			break;
3442 
3443 		for (j = 0; j < pagesize; j += 4) {
3444 			__be32 data;
3445 
3446 			data = *((__be32 *) (tmp + j));
3447 
3448 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3449 
3450 			tw32(NVRAM_ADDR, phy_addr + j);
3451 
3452 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3453 				NVRAM_CMD_WR;
3454 
3455 			if (j == 0)
3456 				nvram_cmd |= NVRAM_CMD_FIRST;
3457 			else if (j == (pagesize - 4))
3458 				nvram_cmd |= NVRAM_CMD_LAST;
3459 
3460 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3461 			if (ret)
3462 				break;
3463 		}
3464 		if (ret)
3465 			break;
3466 	}
3467 
3468 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3470 
3471 	kfree(tmp);
3472 
3473 	return ret;
3474 }
3475 
3476 /* offset and length are dword aligned */
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3478 		u8 *buf)
3479 {
3480 	int i, ret = 0;
3481 
3482 	for (i = 0; i < len; i += 4, offset += 4) {
3483 		u32 page_off, phy_addr, nvram_cmd;
3484 		__be32 data;
3485 
3486 		memcpy(&data, buf + i, 4);
3487 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3488 
3489 		page_off = offset % tp->nvram_pagesize;
3490 
3491 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3492 
3493 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3494 
3495 		if (page_off == 0 || i == 0)
3496 			nvram_cmd |= NVRAM_CMD_FIRST;
3497 		if (page_off == (tp->nvram_pagesize - 4))
3498 			nvram_cmd |= NVRAM_CMD_LAST;
3499 
3500 		if (i == (len - 4))
3501 			nvram_cmd |= NVRAM_CMD_LAST;
3502 
3503 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504 		    !tg3_flag(tp, FLASH) ||
3505 		    !tg3_flag(tp, 57765_PLUS))
3506 			tw32(NVRAM_ADDR, phy_addr);
3507 
3508 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509 		    !tg3_flag(tp, 5755_PLUS) &&
3510 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3511 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3512 			u32 cmd;
3513 
3514 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515 			ret = tg3_nvram_exec_cmd(tp, cmd);
3516 			if (ret)
3517 				break;
3518 		}
3519 		if (!tg3_flag(tp, FLASH)) {
3520 			/* We always do complete word writes to eeprom. */
3521 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3522 		}
3523 
3524 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3525 		if (ret)
3526 			break;
3527 	}
3528 	return ret;
3529 }
3530 
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3533 {
3534 	int ret;
3535 
3536 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3537 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3539 		udelay(40);
3540 	}
3541 
3542 	if (!tg3_flag(tp, NVRAM)) {
3543 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3544 	} else {
3545 		u32 grc_mode;
3546 
3547 		ret = tg3_nvram_lock(tp);
3548 		if (ret)
3549 			return ret;
3550 
3551 		tg3_enable_nvram_access(tp);
3552 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553 			tw32(NVRAM_WRITE1, 0x406);
3554 
3555 		grc_mode = tr32(GRC_MODE);
3556 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3557 
3558 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3560 				buf);
3561 		} else {
3562 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3563 				buf);
3564 		}
3565 
3566 		grc_mode = tr32(GRC_MODE);
3567 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3568 
3569 		tg3_disable_nvram_access(tp);
3570 		tg3_nvram_unlock(tp);
3571 	}
3572 
3573 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3575 		udelay(40);
3576 	}
3577 
3578 	return ret;
3579 }
3580 
3581 #define RX_CPU_SCRATCH_BASE	0x30000
3582 #define RX_CPU_SCRATCH_SIZE	0x04000
3583 #define TX_CPU_SCRATCH_BASE	0x34000
3584 #define TX_CPU_SCRATCH_SIZE	0x04000
3585 
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3588 {
3589 	int i;
3590 	const int iters = 10000;
3591 
3592 	for (i = 0; i < iters; i++) {
3593 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3595 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3596 			break;
3597 		if (pci_channel_offline(tp->pdev))
3598 			return -EBUSY;
3599 	}
3600 
3601 	return (i == iters) ? -EBUSY : 0;
3602 }
3603 
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3606 {
3607 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3608 
3609 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3611 	udelay(10);
3612 
3613 	return rc;
3614 }
3615 
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3618 {
3619 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3620 }
3621 
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3624 {
3625 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3627 }
3628 
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3631 {
3632 	tg3_resume_cpu(tp, RX_CPU_BASE);
3633 }
3634 
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3637 {
3638 	int rc;
3639 
3640 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3641 
3642 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3644 
3645 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3646 		return 0;
3647 	}
3648 	if (cpu_base == RX_CPU_BASE) {
3649 		rc = tg3_rxcpu_pause(tp);
3650 	} else {
3651 		/*
3652 		 * There is only an Rx CPU for the 5750 derivative in the
3653 		 * BCM4785.
3654 		 */
3655 		if (tg3_flag(tp, IS_SSB_CORE))
3656 			return 0;
3657 
3658 		rc = tg3_txcpu_pause(tp);
3659 	}
3660 
3661 	if (rc) {
3662 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3664 		return -ENODEV;
3665 	}
3666 
3667 	/* Clear firmware's nvram arbitration. */
3668 	if (tg3_flag(tp, NVRAM))
3669 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3670 	return 0;
3671 }
3672 
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674 			   const struct tg3_firmware_hdr *fw_hdr)
3675 {
3676 	int fw_len;
3677 
3678 	/* Non-fragmented firmware has one firmware header followed by a
3679 	 * contiguous chunk of data to be written. The length field in that
3680 	 * header is not the length of data to be written but the complete
3681 	 * length of the bss. The data length is determined based on
3682 	 * tp->fw->size minus headers.
3683 	 *
3684 	 * Fragmented firmware has a main header followed by multiple
3685 	 * fragments. Each fragment is identical to non-fragmented firmware,
3686 	 * with a firmware header followed by a contiguous chunk of data. In
3687 	 * the main header, the length field is unused and set to 0xffffffff.
3688 	 * In each fragment header, the length is the entire size of that
3689 	 * fragment, i.e. fragment data + header length. The data length is
3690 	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3691 	 */
3692 	if (tp->fw_len == 0xffffffff)
3693 		fw_len = be32_to_cpu(fw_hdr->len);
3694 	else
3695 		fw_len = tp->fw->size;
3696 
3697 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3698 }
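
/* Layout sketch of the two firmware shapes handled above (fields per
 * struct tg3_firmware_hdr; widths not to scale):
 *
 *   non-fragmented: [ hdr | data ................................. ]
 *                     hdr->len = bss length; data length comes from
 *                     tp->fw->size minus the header.
 *
 *   fragmented:     [ main hdr (len = 0xffffffff) |
 *                     frag hdr | frag data | frag hdr | frag data ... ]
 *                     each frag hdr->len covers that fragment's header
 *                     plus data.
 */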
3699 
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702 				 u32 cpu_scratch_base, int cpu_scratch_size,
3703 				 const struct tg3_firmware_hdr *fw_hdr)
3704 {
3705 	int err, i;
3706 	void (*write_op)(struct tg3 *, u32, u32);
3707 	int total_len = tp->fw->size;
3708 
3709 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3710 		netdev_err(tp->dev,
3711 			   "%s: attempting to load TX cpu firmware on a 5705-class chip\n",
3712 			   __func__);
3713 		return -EINVAL;
3714 	}
3715 
3716 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717 		write_op = tg3_write_mem;
3718 	else
3719 		write_op = tg3_write_indirect_reg32;
3720 
3721 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722 		/* It is possible that bootcode is still loading at this point.
3723 		 * Get the nvram lock first before halting the cpu.
3724 		 */
3725 		int lock_err = tg3_nvram_lock(tp);
3726 		err = tg3_halt_cpu(tp, cpu_base);
3727 		if (!lock_err)
3728 			tg3_nvram_unlock(tp);
3729 		if (err)
3730 			goto out;
3731 
3732 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733 			write_op(tp, cpu_scratch_base + i, 0);
3734 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3735 		tw32(cpu_base + CPU_MODE,
3736 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3737 	} else {
3738 		/* Subtract the additional main header for fragmented firmware
3739 		 * and advance to the first fragment.
3740 		 */
3741 		total_len -= TG3_FW_HDR_LEN;
3742 		fw_hdr++;
3743 	}
3744 
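	/* Each pass writes one contiguous chunk: the single data block for
	 * non-fragmented firmware, or one fragment per iteration until the
	 * whole image has been consumed.
	 */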
3745 	do {
3746 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3747 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748 			write_op(tp, cpu_scratch_base +
3749 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3750 				     (i * sizeof(u32)),
3751 				 be32_to_cpu(fw_data[i]));
3752 
3753 		total_len -= be32_to_cpu(fw_hdr->len);
3754 
3755 		/* Advance to next fragment */
3756 		fw_hdr = (struct tg3_firmware_hdr *)
3757 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758 	} while (total_len > 0);
3759 
3760 	err = 0;
3761 
3762 out:
3763 	return err;
3764 }
3765 
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3768 {
3769 	int i;
3770 	const int iters = 5;
3771 
3772 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 	tw32_f(cpu_base + CPU_PC, pc);
3774 
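	/* If the PC readback does not stick, re-halt the CPU and rewrite
	 * the PC, retrying a handful of times with a short delay between
	 * attempts.
	 */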
3775 	for (i = 0; i < iters; i++) {
3776 		if (tr32(cpu_base + CPU_PC) == pc)
3777 			break;
3778 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3780 		tw32_f(cpu_base + CPU_PC, pc);
3781 		udelay(1000);
3782 	}
3783 
3784 	return (i == iters) ? -EBUSY : 0;
3785 }
3786 
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3789 {
3790 	const struct tg3_firmware_hdr *fw_hdr;
3791 	int err;
3792 
3793 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3794 
3795 	/* The firmware blob starts with version numbers, followed by the
3796 	 * start address and length. We are setting the complete length;
3797 	 * length = end_address_of_bss - start_address_of_text. The
3798 	 * remainder is the blob to be loaded contiguously from the start
3799 	 * address. */
3800 
3801 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3803 				    fw_hdr);
3804 	if (err)
3805 		return err;
3806 
3807 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3809 				    fw_hdr);
3810 	if (err)
3811 		return err;
3812 
3813 	/* Now start up only the RX cpu. */
3814 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815 				       be32_to_cpu(fw_hdr->base_addr));
3816 	if (err) {
3817 		netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3818 			   "should be %08x\n", __func__,
3819 			   tr32(RX_CPU_BASE + CPU_PC),
3820 			   be32_to_cpu(fw_hdr->base_addr));
3821 		return -ENODEV;
3822 	}
3823 
3824 	tg3_rxcpu_resume(tp);
3825 
3826 	return 0;
3827 }
3828 
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3830 {
3831 	const int iters = 1000;
3832 	int i;
3833 	u32 val;
3834 
3835 	/* Wait for the boot code to complete initialization and enter the
3836 	 * service loop. It is then safe to download service patches.
3837 	 */
3838 	for (i = 0; i < iters; i++) {
3839 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3840 			break;
3841 
3842 		udelay(10);
3843 	}
3844 
3845 	if (i == iters) {
3846 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3847 		return -EBUSY;
3848 	}
3849 
3850 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3851 	if (val & 0xff) {
3852 		netdev_warn(tp->dev,
3853 			    "Other patches exist. Not downloading EEE patch\n");
3854 		return -EEXIST;
3855 	}
3856 
3857 	return 0;
3858 }
3859 
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3862 {
3863 	struct tg3_firmware_hdr *fw_hdr;
3864 
3865 	if (!tg3_flag(tp, NO_NVRAM))
3866 		return;
3867 
3868 	if (tg3_validate_rxcpu_state(tp))
3869 		return;
3870 
3871 	if (!tp->fw)
3872 		return;
3873 
3874 	/* This firmware blob has a different format from older firmware
3875 	 * releases, as described below. The main difference is that the data
3876 	 * is fragmented, to be written to non-contiguous locations.
3877 	 *
3878 	 * The blob begins with a firmware header identical to other firmware,
3879 	 * consisting of version, base address and length. The length here is
3880 	 * unused and set to 0xffffffff.
3881 	 *
3882 	 * This is followed by a series of firmware fragments, each of which
3883 	 * is individually identical to previous firmware, i.e. a firmware
3884 	 * header followed by the data for that fragment. The version field
3885 	 * of each fragment header is unused.
3886 	 */
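	/* In outline, the rest of this function sanity-checks the base
	 * address, pauses the RX CPU, writes the fragments, and resumes
	 * the RX CPU.
	 */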
3887 
3888 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3890 		return;
3891 
3892 	if (tg3_rxcpu_pause(tp))
3893 		return;
3894 
3895 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3897 
3898 	tg3_rxcpu_resume(tp);
3899 }
3900 
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3903 {
3904 	const struct tg3_firmware_hdr *fw_hdr;
3905 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3906 	int err;
3907 
3908 	if (!tg3_flag(tp, FW_TSO))
3909 		return 0;
3910 
3911 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3912 
3913 	/* The firmware blob starts with version numbers, followed by the
3914 	 * start address and length. We are setting the complete length;
3915 	 * length = end_address_of_bss - start_address_of_text. The
3916 	 * remainder is the blob to be loaded contiguously from the start
3917 	 * address. */
3918 
3919 	cpu_scratch_size = tp->fw_len;
3920 
3921 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922 		cpu_base = RX_CPU_BASE;
3923 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3924 	} else {
3925 		cpu_base = TX_CPU_BASE;
3926 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3928 	}
3929 
3930 	err = tg3_load_firmware_cpu(tp, cpu_base,
3931 				    cpu_scratch_base, cpu_scratch_size,
3932 				    fw_hdr);
3933 	if (err)
3934 		return err;
3935 
3936 	/* Now start up the cpu. */
3937 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938 				       be32_to_cpu(fw_hdr->base_addr));
3939 	if (err) {
3940 		netdev_err(tp->dev,
3941 			   "%s failed to set CPU PC: is %08x, should be %08x\n",
3942 			   __func__, tr32(cpu_base + CPU_PC),
3943 			   be32_to_cpu(fw_hdr->base_addr));
3944 		return -ENODEV;
3945 	}
3946 
3947 	tg3_resume_cpu(tp, cpu_base);
3948 	return 0;
3949 }
3950 
3951 
3952 /* tp->lock is held. */
3953 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3954 {
3955 	u32 addr_high, addr_low;
3956 	int i;
3957 
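	/* The hardware splits the MAC address across two registers: the
	 * high word carries bytes 0-1, the low word bytes 2-5, most
	 * significant byte first.
	 */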
3958 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3959 		     tp->dev->dev_addr[1]);
3960 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3961 		    (tp->dev->dev_addr[3] << 16) |
3962 		    (tp->dev->dev_addr[4] <<  8) |
3963 		    (tp->dev->dev_addr[5] <<  0));
3964 	for (i = 0; i < 4; i++) {
3965 		if (i == 1 && skip_mac_1)
3966 			continue;
3967 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3968 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3969 	}
3970 
3971 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3972 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3973 		for (i = 0; i < 12; i++) {
3974 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3975 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3976 		}
3977 	}
3978 
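	/* Seed the transmit backoff generator from the byte sum of the MAC
	 * address (masked to TX_BACKOFF_SEED_MASK), which helps ensure that
	 * different NICs pick different backoff slots.
	 */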
3979 	addr_high = (tp->dev->dev_addr[0] +
3980 		     tp->dev->dev_addr[1] +
3981 		     tp->dev->dev_addr[2] +
3982 		     tp->dev->dev_addr[3] +
3983 		     tp->dev->dev_addr[4] +
3984 		     tp->dev->dev_addr[5]) &
3985 		TX_BACKOFF_SEED_MASK;
3986 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3987 }
3988 
3989 static void tg3_enable_register_access(struct tg3 *tp)
3990 {
3991 	/*
3992 	 * Make sure register accesses (indirect or otherwise) will function
3993 	 * correctly.
3994 	 */
3995 	pci_write_config_dword(tp->pdev,
3996 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3997 }
3998 
3999 static int tg3_power_up(struct tg3 *tp)
4000 {
4001 	int err;
4002 
4003 	tg3_enable_register_access(tp);
4004 
4005 	err = pci_set_power_state(tp->pdev, PCI_D0);
4006 	if (!err) {
4007 		/* Switch out of Vaux if it is a NIC */
4008 		tg3_pwrsrc_switch_to_vmain(tp);
4009 	} else {
4010 		netdev_err(tp->dev, "Transition to D0 failed\n");
4011 	}
4012 
4013 	return err;
4014 }
4015 
4016 static int tg3_setup_phy(struct tg3 *, bool);
4017 
4018 static int tg3_power_down_prepare(struct tg3 *tp)
4019 {
4020 	u32 misc_host_ctrl;
4021 	bool device_should_wake, do_low_power;
4022 
4023 	tg3_enable_register_access(tp);
4024 
4025 	/* Restore the CLKREQ setting. */
4026 	if (tg3_flag(tp, CLKREQ_BUG))
4027 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4028 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4029 
4030 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4031 	tw32(TG3PCI_MISC_HOST_CTRL,
4032 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4033 
4034 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4035 			     tg3_flag(tp, WOL_ENABLE);
4036 
4037 	if (tg3_flag(tp, USE_PHYLIB)) {
4038 		do_low_power = false;
4039 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4040 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4041 			struct phy_device *phydev;
4042 			u32 phyid, advertising;
4043 
4044 			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4045 
4046 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4047 
4048 			tp->link_config.speed = phydev->speed;
4049 			tp->link_config.duplex = phydev->duplex;
4050 			tp->link_config.autoneg = phydev->autoneg;
4051 			tp->link_config.advertising = phydev->advertising;
4052 
4053 			advertising = ADVERTISED_TP |
4054 				      ADVERTISED_Pause |
4055 				      ADVERTISED_Autoneg |
4056 				      ADVERTISED_10baseT_Half;
4057 
4058 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4059 				if (tg3_flag(tp, WOL_SPEED_100MB))
4060 					advertising |=
4061 						ADVERTISED_100baseT_Half |
4062 						ADVERTISED_100baseT_Full |
4063 						ADVERTISED_10baseT_Full;
4064 				else
4065 					advertising |= ADVERTISED_10baseT_Full;
4066 			}
4067 
4068 			phydev->advertising = advertising;
4069 
4070 			phy_start_aneg(phydev);
4071 
4072 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4073 			if (phyid != PHY_ID_BCMAC131) {
4074 				phyid &= PHY_BCM_OUI_MASK;
4075 				if (phyid == PHY_BCM_OUI_1 ||
4076 				    phyid == PHY_BCM_OUI_2 ||
4077 				    phyid == PHY_BCM_OUI_3)
4078 					do_low_power = true;
4079 			}
4080 		}
4081 	} else {
4082 		do_low_power = true;
4083 
4084 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4085 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4086 
4087 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4088 			tg3_setup_phy(tp, false);
4089 	}
4090 
4091 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4092 		u32 val;
4093 
4094 		val = tr32(GRC_VCPU_EXT_CTRL);
4095 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4096 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4097 		int i;
4098 		u32 val;
4099 
4100 		for (i = 0; i < 200; i++) {
4101 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4102 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4103 				break;
4104 			msleep(1);
4105 		}
4106 	}
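	/* Tell the firmware, via the WOL mailbox, that the driver is
	 * shutting down with wake-on-LAN (magic packet) armed.
	 */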
4107 	if (tg3_flag(tp, WOL_CAP))
4108 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4109 						     WOL_DRV_STATE_SHUTDOWN |
4110 						     WOL_DRV_WOL |
4111 						     WOL_SET_MAGIC_PKT);
4112 
4113 	if (device_should_wake) {
4114 		u32 mac_mode;
4115 
4116 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4117 			if (do_low_power &&
4118 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4119 				tg3_phy_auxctl_write(tp,
4120 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4121 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4122 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4123 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4124 				udelay(40);
4125 			}
4126 
4127 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4128 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4129 			else if (tp->phy_flags &
4130 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4131 				if (tp->link_config.active_speed == SPEED_1000)
4132 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4133 				else
4134 					mac_mode = MAC_MODE_PORT_MODE_MII;
4135 			} else
4136 				mac_mode = MAC_MODE_PORT_MODE_MII;
4137 
4138 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4139 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4140 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4141 					     SPEED_100 : SPEED_10;
4142 				if (tg3_5700_link_polarity(tp, speed))
4143 					mac_mode |= MAC_MODE_LINK_POLARITY;
4144 				else
4145 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4146 			}
4147 		} else {
4148 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4149 		}
4150 
4151 		if (!tg3_flag(tp, 5750_PLUS))
4152 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4153 
4154 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4155 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4156 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4157 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4158 
4159 		if (tg3_flag(tp, ENABLE_APE))
4160 			mac_mode |= MAC_MODE_APE_TX_EN |
4161 				    MAC_MODE_APE_RX_EN |
4162 				    MAC_MODE_TDE_ENABLE;
4163 
4164 		tw32_f(MAC_MODE, mac_mode);
4165 		udelay(100);
4166 
4167 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4168 		udelay(10);
4169 	}
4170 
4171 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4172 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4173 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4174 		u32 base_val;
4175 
4176 		base_val = tp->pci_clock_ctrl;
4177 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4178 			     CLOCK_CTRL_TXCLK_DISABLE);
4179 
4180 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4181 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4182 	} else if (tg3_flag(tp, 5780_CLASS) ||
4183 		   tg3_flag(tp, CPMU_PRESENT) ||
4184 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4185 		/* do nothing */
4186 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4187 		u32 newbits1, newbits2;
4188 
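		/* Park the core clocks for power saving, applying the bit
		 * combinations appropriate to each ASIC generation in two
		 * steps (and a third step for pre-5705 parts).
		 */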
4189 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4191 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4192 				    CLOCK_CTRL_TXCLK_DISABLE |
4193 				    CLOCK_CTRL_ALTCLK);
4194 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4195 		} else if (tg3_flag(tp, 5705_PLUS)) {
4196 			newbits1 = CLOCK_CTRL_625_CORE;
4197 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4198 		} else {
4199 			newbits1 = CLOCK_CTRL_ALTCLK;
4200 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4201 		}
4202 
4203 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4204 			    40);
4205 
4206 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4207 			    40);
4208 
4209 		if (!tg3_flag(tp, 5705_PLUS)) {
4210 			u32 newbits3;
4211 
4212 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4213 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4214 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4215 					    CLOCK_CTRL_TXCLK_DISABLE |
4216 					    CLOCK_CTRL_44MHZ_CORE);
4217 			} else {
4218 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4219 			}
4220 
4221 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4222 				    tp->pci_clock_ctrl | newbits3, 40);
4223 		}
4224 	}
4225 
4226 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4227 		tg3_power_down_phy(tp, do_low_power);
4228 
4229 	tg3_frob_aux_power(tp, true);
4230 
4231 	/* Workaround for unstable PLL clock */
4232 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4233 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4234 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4235 		u32 val = tr32(0x7d00);
4236 
4237 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4238 		tw32(0x7d00, val);
4239 		if (!tg3_flag(tp, ENABLE_ASF)) {
4240 			int err;
4241 
4242 			err = tg3_nvram_lock(tp);
4243 			tg3_halt_cpu(tp, RX_CPU_BASE);
4244 			if (!err)
4245 				tg3_nvram_unlock(tp);
4246 		}
4247 	}
4248 
4249 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4250 
4251 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4252 
4253 	return 0;
4254 }
4255 
4256 static void tg3_power_down(struct tg3 *tp)
4257 {
4258 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4259 	pci_set_power_state(tp->pdev, PCI_D3hot);
4260 }
4261 
4262 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4263 {
4264 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4265 	case MII_TG3_AUX_STAT_10HALF:
4266 		*speed = SPEED_10;
4267 		*duplex = DUPLEX_HALF;
4268 		break;
4269 
4270 	case MII_TG3_AUX_STAT_10FULL:
4271 		*speed = SPEED_10;
4272 		*duplex = DUPLEX_FULL;
4273 		break;
4274 
4275 	case MII_TG3_AUX_STAT_100HALF:
4276 		*speed = SPEED_100;
4277 		*duplex = DUPLEX_HALF;
4278 		break;
4279 
4280 	case MII_TG3_AUX_STAT_100FULL:
4281 		*speed = SPEED_100;
4282 		*duplex = DUPLEX_FULL;
4283 		break;
4284 
4285 	case MII_TG3_AUX_STAT_1000HALF:
4286 		*speed = SPEED_1000;
4287 		*duplex = DUPLEX_HALF;
4288 		break;
4289 
4290 	case MII_TG3_AUX_STAT_1000FULL:
4291 		*speed = SPEED_1000;
4292 		*duplex = DUPLEX_FULL;
4293 		break;
4294 
4295 	default:
4296 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4297 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4298 				 SPEED_10;
4299 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4300 				  DUPLEX_HALF;
4301 			break;
4302 		}
4303 		*speed = SPEED_UNKNOWN;
4304 		*duplex = DUPLEX_UNKNOWN;
4305 		break;
4306 	}
4307 }
4308 
4309 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4310 {
4311 	int err = 0;
4312 	u32 val, new_adv;
4313 
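	/* Build the MII advertisement word: CSMA, the requested 10/100
	 * modes, and the flow control bits.
	 */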
4314 	new_adv = ADVERTISE_CSMA;
4315 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4316 	new_adv |= mii_advertise_flowctrl(flowctrl);
4317 
4318 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4319 	if (err)
4320 		goto done;
4321 
4322 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4323 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4324 
4325 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4326 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4327 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4328 
4329 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4330 		if (err)
4331 			goto done;
4332 	}
4333 
4334 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4335 		goto done;
4336 
4337 	tw32(TG3_CPMU_EEE_MODE,
4338 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4339 
4340 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4341 	if (!err) {
4342 		u32 err2;
4343 
4344 		val = 0;
4345 		/* Advertise 100-BaseTX EEE ability */
4346 		if (advertise & ADVERTISED_100baseT_Full)
4347 			val |= MDIO_AN_EEE_ADV_100TX;
4348 		/* Advertise 1000-BaseT EEE ability */
4349 		if (advertise & ADVERTISED_1000baseT_Full)
4350 			val |= MDIO_AN_EEE_ADV_1000T;
4351 
4352 		if (!tp->eee.eee_enabled) {
4353 			val = 0;
4354 			tp->eee.advertised = 0;
4355 		} else {
4356 			tp->eee.advertised = advertise &
4357 					     (ADVERTISED_100baseT_Full |
4358 					      ADVERTISED_1000baseT_Full);
4359 		}
4360 
4361 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4362 		if (err)
4363 			val = 0;
4364 
4365 		switch (tg3_asic_rev(tp)) {
4366 		case ASIC_REV_5717:
4367 		case ASIC_REV_57765:
4368 		case ASIC_REV_57766:
4369 		case ASIC_REV_5719:
4370 			/* If we advertised any EEE ability above... */
4371 			if (val)
4372 				val = MII_TG3_DSP_TAP26_ALNOKO |
4373 				      MII_TG3_DSP_TAP26_RMRXSTO |
4374 				      MII_TG3_DSP_TAP26_OPCSINPT;
4375 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4376 			/* Fall through */
4377 		case ASIC_REV_5720:
4378 		case ASIC_REV_5762:
4379 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4380 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4381 						 MII_TG3_DSP_CH34TP2_HIBW01);
4382 		}
4383 
4384 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4385 		if (!err)
4386 			err = err2;
4387 	}
4388 
4389 done:
4390 	return err;
4391 }
4392 
4393 static void tg3_phy_copper_begin(struct tg3 *tp)
4394 {
4395 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4396 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4397 		u32 adv, fc;
4398 
4399 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4400 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4401 			adv = ADVERTISED_10baseT_Half |
4402 			      ADVERTISED_10baseT_Full;
4403 			if (tg3_flag(tp, WOL_SPEED_100MB))
4404 				adv |= ADVERTISED_100baseT_Half |
4405 				       ADVERTISED_100baseT_Full;
4406 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4407 				adv |= ADVERTISED_1000baseT_Half |
4408 				       ADVERTISED_1000baseT_Full;
4409 
4410 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4411 		} else {
4412 			adv = tp->link_config.advertising;
4413 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4414 				adv &= ~(ADVERTISED_1000baseT_Half |
4415 					 ADVERTISED_1000baseT_Full);
4416 
4417 			fc = tp->link_config.flowctrl;
4418 		}
4419 
4420 		tg3_phy_autoneg_cfg(tp, adv, fc);
4421 
4422 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4423 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4424 			/* Normally during power down we want to autonegotiate
4425 			 * the lowest possible speed for WOL. However, to avoid
4426 			 * link flap, we leave it untouched.
4427 			 */
4428 			return;
4429 		}
4430 
4431 		tg3_writephy(tp, MII_BMCR,
4432 			     BMCR_ANENABLE | BMCR_ANRESTART);
4433 	} else {
4434 		int i;
4435 		u32 bmcr, orig_bmcr;
4436 
4437 		tp->link_config.active_speed = tp->link_config.speed;
4438 		tp->link_config.active_duplex = tp->link_config.duplex;
4439 
4440 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4441 			/* With autoneg disabled, the 5715 only links up when
4442 			 * the advertisement register has the configured speed
4443 			 * enabled.
4444 			 */
4445 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4446 		}
4447 
4448 		bmcr = 0;
4449 		switch (tp->link_config.speed) {
4450 		default:
4451 		case SPEED_10:
4452 			break;
4453 
4454 		case SPEED_100:
4455 			bmcr |= BMCR_SPEED100;
4456 			break;
4457 
4458 		case SPEED_1000:
4459 			bmcr |= BMCR_SPEED1000;
4460 			break;
4461 		}
4462 
4463 		if (tp->link_config.duplex == DUPLEX_FULL)
4464 			bmcr |= BMCR_FULLDPLX;
4465 
4466 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4467 		    (bmcr != orig_bmcr)) {
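			/* Drop the link by putting the PHY in loopback and
			 * wait for BMSR to report link down before forcing
			 * the new speed/duplex settings.
			 */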
4468 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4469 			for (i = 0; i < 1500; i++) {
4470 				u32 tmp;
4471 
4472 				udelay(10);
4473 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4474 				    tg3_readphy(tp, MII_BMSR, &tmp))
4475 					continue;
4476 				if (!(tmp & BMSR_LSTATUS)) {
4477 					udelay(40);
4478 					break;
4479 				}
4480 			}
4481 			tg3_writephy(tp, MII_BMCR, bmcr);
4482 			udelay(40);
4483 		}
4484 	}
4485 }
4486 
4487 static int tg3_phy_pull_config(struct tg3 *tp)
4488 {
4489 	int err;
4490 	u32 val;
4491 
4492 	err = tg3_readphy(tp, MII_BMCR, &val);
4493 	if (err)
4494 		goto done;
4495 
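	/* Autoneg is disabled: recover the forced speed and duplex from
	 * the BMCR speed and duplex bits.
	 */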
4496 	if (!(val & BMCR_ANENABLE)) {
4497 		tp->link_config.autoneg = AUTONEG_DISABLE;
4498 		tp->link_config.advertising = 0;
4499 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4500 
4501 		err = -EIO;
4502 
4503 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4504 		case 0:
4505 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4506 				goto done;
4507 
4508 			tp->link_config.speed = SPEED_10;
4509 			break;
4510 		case BMCR_SPEED100:
4511 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 				goto done;
4513 
4514 			tp->link_config.speed = SPEED_100;
4515 			break;
4516 		case BMCR_SPEED1000:
4517 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4518 				tp->link_config.speed = SPEED_1000;
4519 				break;
4520 			}
4521 			/* Fall through */
4522 		default:
4523 			goto done;
4524 		}
4525 
4526 		if (val & BMCR_FULLDPLX)
4527 			tp->link_config.duplex = DUPLEX_FULL;
4528 		else
4529 			tp->link_config.duplex = DUPLEX_HALF;
4530 
4531 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4532 
4533 		err = 0;
4534 		goto done;
4535 	}
4536 
4537 	tp->link_config.autoneg = AUTONEG_ENABLE;
4538 	tp->link_config.advertising = ADVERTISED_Autoneg;
4539 	tg3_flag_set(tp, PAUSE_AUTONEG);
4540 
4541 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4542 		u32 adv;
4543 
4544 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4545 		if (err)
4546 			goto done;
4547 
4548 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4549 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4550 
4551 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4552 	} else {
4553 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4554 	}
4555 
4556 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4557 		u32 adv;
4558 
4559 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4560 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4561 			if (err)
4562 				goto done;
4563 
4564 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4565 		} else {
4566 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4567 			if (err)
4568 				goto done;
4569 
4570 			adv = tg3_decode_flowctrl_1000X(val);
4571 			tp->link_config.flowctrl = adv;
4572 
4573 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4574 			adv = mii_adv_to_ethtool_adv_x(val);
4575 		}
4576 
4577 		tp->link_config.advertising |= adv;
4578 	}
4579 
4580 done:
4581 	return err;
4582 }
4583 
4584 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4585 {
4586 	int err;
4587 
4588 	/* Turn off tap power management. */
4589 	/* Set Extended packet length bit */
4590 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4591 
4592 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4593 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4594 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4595 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4596 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4597 
4598 	udelay(40);
4599 
4600 	return err;
4601 }
4602 
4603 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4604 {
4605 	struct ethtool_eee eee;
4606 
4607 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4608 		return true;
4609 
4610 	tg3_eee_pull_config(tp, &eee);
4611 
4612 	if (tp->eee.eee_enabled) {
4613 		if (tp->eee.advertised != eee.advertised ||
4614 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4615 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4616 			return false;
4617 	} else {
4618 		/* EEE is disabled but we're advertising */
4619 		if (eee.advertised)
4620 			return false;
4621 	}
4622 
4623 	return true;
4624 }
4625 
4626 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4627 {
4628 	u32 advmsk, tgtadv, advertising;
4629 
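	/* Check that what the PHY is advertising matches what link_config
	 * requests; any mismatch means autonegotiation must be redone.
	 */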
4630 	advertising = tp->link_config.advertising;
4631 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4632 
4633 	advmsk = ADVERTISE_ALL;
4634 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4635 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4636 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4637 	}
4638 
4639 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4640 		return false;
4641 
4642 	if ((*lcladv & advmsk) != tgtadv)
4643 		return false;
4644 
4645 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4646 		u32 tg3_ctrl;
4647 
4648 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4649 
4650 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4651 			return false;
4652 
4653 		if (tgtadv &&
4654 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4655 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4656 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4657 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4658 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4659 		} else {
4660 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4661 		}
4662 
4663 		if (tg3_ctrl != tgtadv)
4664 			return false;
4665 	}
4666 
4667 	return true;
4668 }
4669 
4670 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4671 {
4672 	u32 lpeth = 0;
4673 
4674 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4675 		u32 val;
4676 
4677 		if (tg3_readphy(tp, MII_STAT1000, &val))
4678 			return false;
4679 
4680 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4681 	}
4682 
4683 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4684 		return false;
4685 
4686 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4687 	tp->link_config.rmt_adv = lpeth;
4688 
4689 	return true;
4690 }
4691 
4692 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4693 {
4694 	if (curr_link_up != tp->link_up) {
4695 		if (curr_link_up) {
4696 			netif_carrier_on(tp->dev);
4697 		} else {
4698 			netif_carrier_off(tp->dev);
4699 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4700 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4701 		}
4702 
4703 		tg3_link_report(tp);
4704 		return true;
4705 	}
4706 
4707 	return false;
4708 }
4709 
4710 static void tg3_clear_mac_status(struct tg3 *tp)
4711 {
4712 	tw32(MAC_EVENT, 0);
4713 
4714 	tw32_f(MAC_STATUS,
4715 	       MAC_STATUS_SYNC_CHANGED |
4716 	       MAC_STATUS_CFG_CHANGED |
4717 	       MAC_STATUS_MI_COMPLETION |
4718 	       MAC_STATUS_LNKSTATE_CHANGED);
4719 	udelay(40);
4720 }
4721 
4722 static void tg3_setup_eee(struct tg3 *tp)
4723 {
4724 	u32 val;
4725 
4726 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4727 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4728 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4729 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4730 
4731 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4732 
4733 	tw32_f(TG3_CPMU_EEE_CTRL,
4734 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4735 
4736 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4737 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4738 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4739 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4740 
4741 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4742 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4743 
4744 	if (tg3_flag(tp, ENABLE_APE))
4745 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4746 
4747 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4748 
4749 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4750 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4751 	       (tp->eee.tx_lpi_timer & 0xffff));
4752 
4753 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4754 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4755 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4756 }
4757 
4758 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4759 {
4760 	bool current_link_up;
4761 	u32 bmsr, val;
4762 	u32 lcl_adv, rmt_adv;
4763 	u16 current_speed;
4764 	u8 current_duplex;
4765 	int i, err;
4766 
4767 	tg3_clear_mac_status(tp);
4768 
4769 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4770 		tw32_f(MAC_MI_MODE,
4771 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4772 		udelay(80);
4773 	}
4774 
4775 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4776 
4777 	/* Some third-party PHYs need to be reset on link going
4778 	 * down.
4779 	 */
4780 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4781 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4782 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4783 	    tp->link_up) {
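		/* BMSR latches link-down events, so read it twice; the
		 * second read reflects the current link state.
		 */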
4784 		tg3_readphy(tp, MII_BMSR, &bmsr);
4785 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4786 		    !(bmsr & BMSR_LSTATUS))
4787 			force_reset = true;
4788 	}
4789 	if (force_reset)
4790 		tg3_phy_reset(tp);
4791 
4792 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4793 		tg3_readphy(tp, MII_BMSR, &bmsr);
4794 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4795 		    !tg3_flag(tp, INIT_COMPLETE))
4796 			bmsr = 0;
4797 
4798 		if (!(bmsr & BMSR_LSTATUS)) {
4799 			err = tg3_init_5401phy_dsp(tp);
4800 			if (err)
4801 				return err;
4802 
4803 			tg3_readphy(tp, MII_BMSR, &bmsr);
4804 			for (i = 0; i < 1000; i++) {
4805 				udelay(10);
4806 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4807 				    (bmsr & BMSR_LSTATUS)) {
4808 					udelay(40);
4809 					break;
4810 				}
4811 			}
4812 
4813 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4814 			    TG3_PHY_REV_BCM5401_B0 &&
4815 			    !(bmsr & BMSR_LSTATUS) &&
4816 			    tp->link_config.active_speed == SPEED_1000) {
4817 				err = tg3_phy_reset(tp);
4818 				if (!err)
4819 					err = tg3_init_5401phy_dsp(tp);
4820 				if (err)
4821 					return err;
4822 			}
4823 		}
4824 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4825 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4826 		/* 5701 {A0,B0} CRC bug workaround */
4827 		tg3_writephy(tp, 0x15, 0x0a75);
4828 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4829 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4830 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4831 	}
4832 
4833 	/* Clear pending interrupts... */
4834 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4835 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4836 
4837 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4838 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4839 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4840 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4841 
4842 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4843 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4844 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4845 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4846 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4847 		else
4848 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4849 	}
4850 
4851 	current_link_up = false;
4852 	current_speed = SPEED_UNKNOWN;
4853 	current_duplex = DUPLEX_UNKNOWN;
4854 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4855 	tp->link_config.rmt_adv = 0;
4856 
4857 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4858 		err = tg3_phy_auxctl_read(tp,
4859 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4860 					  &val);
4861 		if (!err && !(val & (1 << 10))) {
4862 			tg3_phy_auxctl_write(tp,
4863 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4864 					     val | (1 << 10));
4865 			goto relink;
4866 		}
4867 	}
4868 
4869 	bmsr = 0;
4870 	for (i = 0; i < 100; i++) {
4871 		tg3_readphy(tp, MII_BMSR, &bmsr);
4872 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4873 		    (bmsr & BMSR_LSTATUS))
4874 			break;
4875 		udelay(40);
4876 	}
4877 
4878 	if (bmsr & BMSR_LSTATUS) {
4879 		u32 aux_stat, bmcr;
4880 
4881 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4882 		for (i = 0; i < 2000; i++) {
4883 			udelay(10);
4884 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4885 			    aux_stat)
4886 				break;
4887 		}
4888 
4889 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4890 					     &current_speed,
4891 					     &current_duplex);
4892 
4893 		bmcr = 0;
4894 		for (i = 0; i < 200; i++) {
4895 			tg3_readphy(tp, MII_BMCR, &bmcr);
4896 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4897 				continue;
4898 			if (bmcr && bmcr != 0x7fff)
4899 				break;
4900 			udelay(10);
4901 		}
4902 
4903 		lcl_adv = 0;
4904 		rmt_adv = 0;
4905 
4906 		tp->link_config.active_speed = current_speed;
4907 		tp->link_config.active_duplex = current_duplex;
4908 
4909 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4911 
4912 			if ((bmcr & BMCR_ANENABLE) &&
4913 			    eee_config_ok &&
4914 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4915 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4916 				current_link_up = true;
4917 
4918 			/* Changes to the EEE settings take effect only after
4919 			 * a phy reset.  If we have skipped a reset due to
4920 			 * Link Flap Avoidance being enabled, do it now.
4921 			 */
4922 			if (!eee_config_ok &&
4923 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4924 			    !force_reset) {
4925 				tg3_setup_eee(tp);
4926 				tg3_phy_reset(tp);
4927 			}
4928 		} else {
4929 			if (!(bmcr & BMCR_ANENABLE) &&
4930 			    tp->link_config.speed == current_speed &&
4931 			    tp->link_config.duplex == current_duplex) {
4932 				current_link_up = true;
4933 			}
4934 		}
4935 
4936 		if (current_link_up &&
4937 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4938 			u32 reg, bit;
4939 
4940 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4941 				reg = MII_TG3_FET_GEN_STAT;
4942 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4943 			} else {
4944 				reg = MII_TG3_EXT_STAT;
4945 				bit = MII_TG3_EXT_STAT_MDIX;
4946 			}
4947 
4948 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4949 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4950 
4951 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4952 		}
4953 	}
4954 
4955 relink:
4956 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4957 		tg3_phy_copper_begin(tp);
4958 
4959 		if (tg3_flag(tp, ROBOSWITCH)) {
4960 			current_link_up = true;
4961 			/* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4962 			current_speed = SPEED_1000;
4963 			current_duplex = DUPLEX_FULL;
4964 			tp->link_config.active_speed = current_speed;
4965 			tp->link_config.active_duplex = current_duplex;
4966 		}
4967 
4968 		tg3_readphy(tp, MII_BMSR, &bmsr);
4969 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4970 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4971 			current_link_up = true;
4972 	}
4973 
4974 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4975 	if (current_link_up) {
4976 		if (tp->link_config.active_speed == SPEED_100 ||
4977 		    tp->link_config.active_speed == SPEED_10)
4978 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4979 		else
4980 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4981 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4982 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4983 	else
4984 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4985 
4986 	/* In order for the 5750 core in the BCM4785 chip to work properly
4987 	 * in RGMII mode, the LED Control Register must be set up.
4988 	 */
4989 	if (tg3_flag(tp, RGMII_MODE)) {
4990 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4991 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4992 
4993 		if (tp->link_config.active_speed == SPEED_10)
4994 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4995 		else if (tp->link_config.active_speed == SPEED_100)
4996 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4997 				     LED_CTRL_100MBPS_ON);
4998 		else if (tp->link_config.active_speed == SPEED_1000)
4999 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5000 				     LED_CTRL_1000MBPS_ON);
5001 
5002 		tw32(MAC_LED_CTRL, led_ctrl);
5003 		udelay(40);
5004 	}
5005 
5006 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5007 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5008 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5009 
5010 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5011 		if (current_link_up &&
5012 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5013 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5014 		else
5015 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5016 	}
5017 
5018 	/* ??? Without this setting the Netgear GA302T PHY does not
5019 	 * ??? send/receive packets...
5020 	 */
5021 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5022 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5023 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5024 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5025 		udelay(80);
5026 	}
5027 
5028 	tw32_f(MAC_MODE, tp->mac_mode);
5029 	udelay(40);
5030 
5031 	tg3_phy_eee_adjust(tp, current_link_up);
5032 
5033 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5034 		/* Polled via timer. */
5035 		tw32_f(MAC_EVENT, 0);
5036 	} else {
5037 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5038 	}
5039 	udelay(40);
5040 
5041 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5042 	    current_link_up &&
5043 	    tp->link_config.active_speed == SPEED_1000 &&
5044 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5045 		udelay(120);
5046 		tw32_f(MAC_STATUS,
5047 		     (MAC_STATUS_SYNC_CHANGED |
5048 		      MAC_STATUS_CFG_CHANGED));
5049 		udelay(40);
5050 		tg3_write_mem(tp,
5051 			      NIC_SRAM_FIRMWARE_MBOX,
5052 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5053 	}
5054 
5055 	/* Prevent send BD corruption. */
5056 	if (tg3_flag(tp, CLKREQ_BUG)) {
5057 		if (tp->link_config.active_speed == SPEED_100 ||
5058 		    tp->link_config.active_speed == SPEED_10)
5059 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5060 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5061 		else
5062 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5063 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5064 	}
5065 
5066 	tg3_test_and_report_link_chg(tp, current_link_up);
5067 
5068 	return 0;
5069 }
5070 
5071 struct tg3_fiber_aneginfo {
5072 	int state;
5073 #define ANEG_STATE_UNKNOWN		0
5074 #define ANEG_STATE_AN_ENABLE		1
5075 #define ANEG_STATE_RESTART_INIT		2
5076 #define ANEG_STATE_RESTART		3
5077 #define ANEG_STATE_DISABLE_LINK_OK	4
5078 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5079 #define ANEG_STATE_ABILITY_DETECT	6
5080 #define ANEG_STATE_ACK_DETECT_INIT	7
5081 #define ANEG_STATE_ACK_DETECT		8
5082 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5083 #define ANEG_STATE_COMPLETE_ACK		10
5084 #define ANEG_STATE_IDLE_DETECT_INIT	11
5085 #define ANEG_STATE_IDLE_DETECT		12
5086 #define ANEG_STATE_LINK_OK		13
5087 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5088 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5089 
5090 	u32 flags;
5091 #define MR_AN_ENABLE		0x00000001
5092 #define MR_RESTART_AN		0x00000002
5093 #define MR_AN_COMPLETE		0x00000004
5094 #define MR_PAGE_RX		0x00000008
5095 #define MR_NP_LOADED		0x00000010
5096 #define MR_TOGGLE_TX		0x00000020
5097 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5098 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5099 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5100 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5101 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5102 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5103 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5104 #define MR_TOGGLE_RX		0x00002000
5105 #define MR_NP_RX		0x00004000
5106 
5107 #define MR_LINK_OK		0x80000000
5108 
5109 	unsigned long link_time, cur_time;
5110 
5111 	u32 ability_match_cfg;
5112 	int ability_match_count;
5113 
5114 	char ability_match, idle_match, ack_match;
5115 
5116 	u32 txconfig, rxconfig;
5117 #define ANEG_CFG_NP		0x00000080
5118 #define ANEG_CFG_ACK		0x00000040
5119 #define ANEG_CFG_RF2		0x00000020
5120 #define ANEG_CFG_RF1		0x00000010
5121 #define ANEG_CFG_PS2		0x00000001
5122 #define ANEG_CFG_PS1		0x00008000
5123 #define ANEG_CFG_HD		0x00004000
5124 #define ANEG_CFG_FD		0x00002000
5125 #define ANEG_CFG_INVAL		0x00001f06
5126 
5127 };
5128 #define ANEG_OK		0
5129 #define ANEG_DONE	1
5130 #define ANEG_TIMER_ENAB	2
5131 #define ANEG_FAILED	-1
5132 
5133 #define ANEG_STATE_SETTLE_TIME	10000
5134 
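/* Software autonegotiation state machine for 1000BASE-X fiber links,
 * apparently patterned on the IEEE 802.3 clause 37 arbitration state
 * diagram: config words are exchanged via MAC_TX_AUTO_NEG and
 * MAC_RX_AUTO_NEG, and each call advances the protocol by one tick.
 */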
5135 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5136 				   struct tg3_fiber_aneginfo *ap)
5137 {
5138 	u16 flowctrl;
5139 	unsigned long delta;
5140 	u32 rx_cfg_reg;
5141 	int ret;
5142 
5143 	if (ap->state == ANEG_STATE_UNKNOWN) {
5144 		ap->rxconfig = 0;
5145 		ap->link_time = 0;
5146 		ap->cur_time = 0;
5147 		ap->ability_match_cfg = 0;
5148 		ap->ability_match_count = 0;
5149 		ap->ability_match = 0;
5150 		ap->idle_match = 0;
5151 		ap->ack_match = 0;
5152 	}
5153 	ap->cur_time++;
5154 
5155 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5156 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5157 
5158 		if (rx_cfg_reg != ap->ability_match_cfg) {
5159 			ap->ability_match_cfg = rx_cfg_reg;
5160 			ap->ability_match = 0;
5161 			ap->ability_match_count = 0;
5162 		} else {
5163 			if (++ap->ability_match_count > 1) {
5164 				ap->ability_match = 1;
5165 				ap->ability_match_cfg = rx_cfg_reg;
5166 			}
5167 		}
5168 		if (rx_cfg_reg & ANEG_CFG_ACK)
5169 			ap->ack_match = 1;
5170 		else
5171 			ap->ack_match = 0;
5172 
5173 		ap->idle_match = 0;
5174 	} else {
5175 		ap->idle_match = 1;
5176 		ap->ability_match_cfg = 0;
5177 		ap->ability_match_count = 0;
5178 		ap->ability_match = 0;
5179 		ap->ack_match = 0;
5180 
5181 		rx_cfg_reg = 0;
5182 	}
5183 
5184 	ap->rxconfig = rx_cfg_reg;
5185 	ret = ANEG_OK;
5186 
5187 	switch (ap->state) {
5188 	case ANEG_STATE_UNKNOWN:
5189 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5190 			ap->state = ANEG_STATE_AN_ENABLE;
5191 
5192 		/* fallthru */
5193 	case ANEG_STATE_AN_ENABLE:
5194 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5195 		if (ap->flags & MR_AN_ENABLE) {
5196 			ap->link_time = 0;
5197 			ap->cur_time = 0;
5198 			ap->ability_match_cfg = 0;
5199 			ap->ability_match_count = 0;
5200 			ap->ability_match = 0;
5201 			ap->idle_match = 0;
5202 			ap->ack_match = 0;
5203 
5204 			ap->state = ANEG_STATE_RESTART_INIT;
5205 		} else {
5206 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5207 		}
5208 		break;
5209 
5210 	case ANEG_STATE_RESTART_INIT:
5211 		ap->link_time = ap->cur_time;
5212 		ap->flags &= ~(MR_NP_LOADED);
5213 		ap->txconfig = 0;
5214 		tw32(MAC_TX_AUTO_NEG, 0);
5215 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5216 		tw32_f(MAC_MODE, tp->mac_mode);
5217 		udelay(40);
5218 
5219 		ret = ANEG_TIMER_ENAB;
5220 		ap->state = ANEG_STATE_RESTART;
5221 
5222 		/* fallthru */
5223 	case ANEG_STATE_RESTART:
5224 		delta = ap->cur_time - ap->link_time;
5225 		if (delta > ANEG_STATE_SETTLE_TIME)
5226 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5227 		else
5228 			ret = ANEG_TIMER_ENAB;
5229 		break;
5230 
5231 	case ANEG_STATE_DISABLE_LINK_OK:
5232 		ret = ANEG_DONE;
5233 		break;
5234 
5235 	case ANEG_STATE_ABILITY_DETECT_INIT:
5236 		ap->flags &= ~(MR_TOGGLE_TX);
5237 		ap->txconfig = ANEG_CFG_FD;
5238 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5239 		if (flowctrl & ADVERTISE_1000XPAUSE)
5240 			ap->txconfig |= ANEG_CFG_PS1;
5241 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5242 			ap->txconfig |= ANEG_CFG_PS2;
5243 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5244 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5245 		tw32_f(MAC_MODE, tp->mac_mode);
5246 		udelay(40);
5247 
5248 		ap->state = ANEG_STATE_ABILITY_DETECT;
5249 		break;
5250 
5251 	case ANEG_STATE_ABILITY_DETECT:
5252 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5253 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5254 		break;
5255 
5256 	case ANEG_STATE_ACK_DETECT_INIT:
5257 		ap->txconfig |= ANEG_CFG_ACK;
5258 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5259 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5260 		tw32_f(MAC_MODE, tp->mac_mode);
5261 		udelay(40);
5262 
5263 		ap->state = ANEG_STATE_ACK_DETECT;
5264 
5265 		/* fallthru */
5266 	case ANEG_STATE_ACK_DETECT:
5267 		if (ap->ack_match != 0) {
5268 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5269 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5270 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5271 			} else {
5272 				ap->state = ANEG_STATE_AN_ENABLE;
5273 			}
5274 		} else if (ap->ability_match != 0 &&
5275 			   ap->rxconfig == 0) {
5276 			ap->state = ANEG_STATE_AN_ENABLE;
5277 		}
5278 		break;
5279 
5280 	case ANEG_STATE_COMPLETE_ACK_INIT:
5281 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5282 			ret = ANEG_FAILED;
5283 			break;
5284 		}
5285 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5286 			       MR_LP_ADV_HALF_DUPLEX |
5287 			       MR_LP_ADV_SYM_PAUSE |
5288 			       MR_LP_ADV_ASYM_PAUSE |
5289 			       MR_LP_ADV_REMOTE_FAULT1 |
5290 			       MR_LP_ADV_REMOTE_FAULT2 |
5291 			       MR_LP_ADV_NEXT_PAGE |
5292 			       MR_TOGGLE_RX |
5293 			       MR_NP_RX);
5294 		if (ap->rxconfig & ANEG_CFG_FD)
5295 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5296 		if (ap->rxconfig & ANEG_CFG_HD)
5297 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5298 		if (ap->rxconfig & ANEG_CFG_PS1)
5299 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5300 		if (ap->rxconfig & ANEG_CFG_PS2)
5301 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5302 		if (ap->rxconfig & ANEG_CFG_RF1)
5303 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5304 		if (ap->rxconfig & ANEG_CFG_RF2)
5305 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5306 		if (ap->rxconfig & ANEG_CFG_NP)
5307 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5308 
5309 		ap->link_time = ap->cur_time;
5310 
5311 		ap->flags ^= (MR_TOGGLE_TX);
5312 		if (ap->rxconfig & 0x0008)
5313 			ap->flags |= MR_TOGGLE_RX;
5314 		if (ap->rxconfig & ANEG_CFG_NP)
5315 			ap->flags |= MR_NP_RX;
5316 		ap->flags |= MR_PAGE_RX;
5317 
5318 		ap->state = ANEG_STATE_COMPLETE_ACK;
5319 		ret = ANEG_TIMER_ENAB;
5320 		break;
5321 
5322 	case ANEG_STATE_COMPLETE_ACK:
5323 		if (ap->ability_match != 0 &&
5324 		    ap->rxconfig == 0) {
5325 			ap->state = ANEG_STATE_AN_ENABLE;
5326 			break;
5327 		}
5328 		delta = ap->cur_time - ap->link_time;
5329 		if (delta > ANEG_STATE_SETTLE_TIME) {
5330 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5331 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5332 			} else {
5333 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5334 				    !(ap->flags & MR_NP_RX)) {
5335 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5336 				} else {
5337 					ret = ANEG_FAILED;
5338 				}
5339 			}
5340 		}
5341 		break;
5342 
5343 	case ANEG_STATE_IDLE_DETECT_INIT:
5344 		ap->link_time = ap->cur_time;
5345 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5346 		tw32_f(MAC_MODE, tp->mac_mode);
5347 		udelay(40);
5348 
5349 		ap->state = ANEG_STATE_IDLE_DETECT;
5350 		ret = ANEG_TIMER_ENAB;
5351 		break;
5352 
5353 	case ANEG_STATE_IDLE_DETECT:
5354 		if (ap->ability_match != 0 &&
5355 		    ap->rxconfig == 0) {
5356 			ap->state = ANEG_STATE_AN_ENABLE;
5357 			break;
5358 		}
5359 		delta = ap->cur_time - ap->link_time;
5360 		if (delta > ANEG_STATE_SETTLE_TIME) {
5361 			/* XXX another gem from the Broadcom driver :( */
5362 			ap->state = ANEG_STATE_LINK_OK;
5363 		}
5364 		break;
5365 
5366 	case ANEG_STATE_LINK_OK:
5367 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5368 		ret = ANEG_DONE;
5369 		break;
5370 
5371 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5372 		/* ??? unimplemented */
5373 		break;
5374 
5375 	case ANEG_STATE_NEXT_PAGE_WAIT:
5376 		/* ??? unimplemented */
5377 		break;
5378 
5379 	default:
5380 		ret = ANEG_FAILED;
5381 		break;
5382 	}
5383 
5384 	return ret;
5385 }
5386 
5387 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5388 {
5389 	int res = 0;
5390 	struct tg3_fiber_aneginfo aninfo;
5391 	int status = ANEG_FAILED;
5392 	unsigned int tick;
5393 	u32 tmp;
5394 
5395 	tw32_f(MAC_TX_AUTO_NEG, 0);
5396 
5397 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5398 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5399 	udelay(40);
5400 
5401 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5402 	udelay(40);
5403 
5404 	memset(&aninfo, 0, sizeof(aninfo));
5405 	aninfo.flags |= MR_AN_ENABLE;
5406 	aninfo.state = ANEG_STATE_UNKNOWN;
5407 	aninfo.cur_time = 0;
5408 	tick = 0;
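	/* Step the state machine roughly once per microsecond; 195000
	 * iterations bounds the whole exchange at about 195 ms.
	 */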
5409 	while (++tick < 195000) {
5410 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5411 		if (status == ANEG_DONE || status == ANEG_FAILED)
5412 			break;
5413 
5414 		udelay(1);
5415 	}
5416 
5417 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5418 	tw32_f(MAC_MODE, tp->mac_mode);
5419 	udelay(40);
5420 
5421 	*txflags = aninfo.txconfig;
5422 	*rxflags = aninfo.flags;
5423 
5424 	if (status == ANEG_DONE &&
5425 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5426 			     MR_LP_ADV_FULL_DUPLEX)))
5427 		res = 1;
5428 
5429 	return res;
5430 }
5431 
5432 static void tg3_init_bcm8002(struct tg3 *tp)
5433 {
5434 	u32 mac_status = tr32(MAC_STATUS);
5435 	int i;
5436 
5437 	/* Reset when initializing the first time or when we have a link. */
5438 	if (tg3_flag(tp, INIT_COMPLETE) &&
5439 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5440 		return;
5441 
5442 	/* Set PLL lock range. */
5443 	tg3_writephy(tp, 0x16, 0x8007);
5444 
5445 	/* SW reset */
5446 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5447 
5448 	/* Wait for reset to complete. */
5449 	/* XXX schedule_timeout() ... */
5450 	for (i = 0; i < 500; i++)
5451 		udelay(10);
5452 
5453 	/* Config mode; select PMA/Ch 1 regs. */
5454 	tg3_writephy(tp, 0x10, 0x8411);
5455 
5456 	/* Enable auto-lock and comdet, select txclk for tx. */
5457 	tg3_writephy(tp, 0x11, 0x0a10);
5458 
5459 	tg3_writephy(tp, 0x18, 0x00a0);
5460 	tg3_writephy(tp, 0x16, 0x41ff);
5461 
5462 	/* Assert and deassert POR. */
5463 	tg3_writephy(tp, 0x13, 0x0400);
5464 	udelay(40);
5465 	tg3_writephy(tp, 0x13, 0x0000);
5466 
5467 	tg3_writephy(tp, 0x11, 0x0a50);
5468 	udelay(40);
5469 	tg3_writephy(tp, 0x11, 0x0a10);
5470 
5471 	/* Wait for signal to stabilize */
5472 	/* XXX schedule_timeout() ... */
5473 	for (i = 0; i < 15000; i++)
5474 		udelay(10);
5475 
5476 	/* Deselect the channel register so we can read the PHYID
5477 	 * later.
5478 	 */
5479 	tg3_writephy(tp, 0x10, 0x8011);
5480 }
5481 
5482 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5483 {
5484 	u16 flowctrl;
5485 	bool current_link_up;
5486 	u32 sg_dig_ctrl, sg_dig_status;
5487 	u32 serdes_cfg, expected_sg_dig_ctrl;
5488 	int workaround, port_a;
5489 
5490 	serdes_cfg = 0;
5491 	expected_sg_dig_ctrl = 0;
5492 	workaround = 0;
5493 	port_a = 1;
5494 	current_link_up = false;
5495 
5496 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5497 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5498 		workaround = 1;
5499 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5500 			port_a = 0;
5501 
5502 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5503 		/* preserve bits 20-23 for voltage regulator */
5504 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5505 	}
5506 
5507 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5508 
5509 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5510 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5511 			if (workaround) {
5512 				u32 val = serdes_cfg;
5513 
5514 				if (port_a)
5515 					val |= 0xc010000;
5516 				else
5517 					val |= 0x4010000;
5518 				tw32_f(MAC_SERDES_CFG, val);
5519 			}
5520 
5521 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5522 		}
5523 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5524 			tg3_setup_flow_control(tp, 0, 0);
5525 			current_link_up = true;
5526 		}
5527 		goto out;
5528 	}
5529 
5530 	/* Want auto-negotiation.  */
5531 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5532 
5533 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5534 	if (flowctrl & ADVERTISE_1000XPAUSE)
5535 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5536 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5537 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5538 
5539 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5540 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5541 		    tp->serdes_counter &&
5542 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5543 				    MAC_STATUS_RCVD_CFG)) ==
5544 		     MAC_STATUS_PCS_SYNCED)) {
5545 			tp->serdes_counter--;
5546 			current_link_up = true;
5547 			goto out;
5548 		}
5549 restart_autoneg:
5550 		if (workaround)
5551 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5552 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5553 		udelay(5);
5554 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5555 
5556 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5557 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5558 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5559 				 MAC_STATUS_SIGNAL_DET)) {
5560 		sg_dig_status = tr32(SG_DIG_STATUS);
5561 		mac_status = tr32(MAC_STATUS);
5562 
5563 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5564 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5565 			u32 local_adv = 0, remote_adv = 0;
5566 
5567 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5568 				local_adv |= ADVERTISE_1000XPAUSE;
5569 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5570 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5571 
5572 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5573 				remote_adv |= LPA_1000XPAUSE;
5574 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5575 				remote_adv |= LPA_1000XPAUSE_ASYM;
5576 
5577 			tp->link_config.rmt_adv =
5578 					   mii_adv_to_ethtool_adv_x(remote_adv);
5579 
5580 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5581 			current_link_up = true;
5582 			tp->serdes_counter = 0;
5583 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5584 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5585 			if (tp->serdes_counter)
5586 				tp->serdes_counter--;
5587 			else {
5588 				if (workaround) {
5589 					u32 val = serdes_cfg;
5590 
5591 					if (port_a)
5592 						val |= 0xc010000;
5593 					else
5594 						val |= 0x4010000;
5595 
5596 					tw32_f(MAC_SERDES_CFG, val);
5597 				}
5598 
5599 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5600 				udelay(40);
5601 
5602 				/* Link parallel detection - link is up only
5603 				 * if we have PCS_SYNC and are not receiving
5604 				 * config code words. */
5605 				mac_status = tr32(MAC_STATUS);
5606 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5607 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5608 					tg3_setup_flow_control(tp, 0, 0);
5609 					current_link_up = true;
5610 					tp->phy_flags |=
5611 						TG3_PHYFLG_PARALLEL_DETECT;
5612 					tp->serdes_counter =
5613 						SERDES_PARALLEL_DET_TIMEOUT;
5614 				} else
5615 					goto restart_autoneg;
5616 			}
5617 		}
5618 	} else {
5619 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5620 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5621 	}
5622 
5623 out:
5624 	return current_link_up;
5625 }
5626 
5627 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5628 {
5629 	bool current_link_up = false;
5630 
5631 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5632 		goto out;
5633 
5634 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5635 		u32 txflags, rxflags;
5636 		int i;
5637 
5638 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5639 			u32 local_adv = 0, remote_adv = 0;
5640 
5641 			if (txflags & ANEG_CFG_PS1)
5642 				local_adv |= ADVERTISE_1000XPAUSE;
5643 			if (txflags & ANEG_CFG_PS2)
5644 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5645 
5646 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5647 				remote_adv |= LPA_1000XPAUSE;
5648 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5649 				remote_adv |= LPA_1000XPAUSE_ASYM;
5650 
5651 			tp->link_config.rmt_adv =
5652 					   mii_adv_to_ethtool_adv_x(remote_adv);
5653 
5654 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5655 
5656 			current_link_up = true;
5657 		}
5658 		for (i = 0; i < 30; i++) {
5659 			udelay(20);
5660 			tw32_f(MAC_STATUS,
5661 			       (MAC_STATUS_SYNC_CHANGED |
5662 				MAC_STATUS_CFG_CHANGED));
5663 			udelay(40);
5664 			if ((tr32(MAC_STATUS) &
5665 			     (MAC_STATUS_SYNC_CHANGED |
5666 			      MAC_STATUS_CFG_CHANGED)) == 0)
5667 				break;
5668 		}
5669 
5670 		mac_status = tr32(MAC_STATUS);
5671 		if (!current_link_up &&
5672 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5673 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5674 			current_link_up = true;
5675 	} else {
5676 		tg3_setup_flow_control(tp, 0, 0);
5677 
5678 		/* Forcing 1000FD link up. */
5679 		current_link_up = true;
5680 
5681 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5682 		udelay(40);
5683 
5684 		tw32_f(MAC_MODE, tp->mac_mode);
5685 		udelay(40);
5686 	}
5687 
5688 out:
5689 	return current_link_up;
5690 }
5691 
5692 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5693 {
5694 	u32 orig_pause_cfg;
5695 	u16 orig_active_speed;
5696 	u8 orig_active_duplex;
5697 	u32 mac_status;
5698 	bool current_link_up;
5699 	int i;
5700 
5701 	orig_pause_cfg = tp->link_config.active_flowctrl;
5702 	orig_active_speed = tp->link_config.active_speed;
5703 	orig_active_duplex = tp->link_config.active_duplex;
5704 
5705 	if (!tg3_flag(tp, HW_AUTONEG) &&
5706 	    tp->link_up &&
5707 	    tg3_flag(tp, INIT_COMPLETE)) {
5708 		mac_status = tr32(MAC_STATUS);
5709 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5710 			       MAC_STATUS_SIGNAL_DET |
5711 			       MAC_STATUS_CFG_CHANGED |
5712 			       MAC_STATUS_RCVD_CFG);
5713 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5714 				   MAC_STATUS_SIGNAL_DET)) {
5715 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5716 					    MAC_STATUS_CFG_CHANGED));
5717 			return 0;
5718 		}
5719 	}
5720 
5721 	tw32_f(MAC_TX_AUTO_NEG, 0);
5722 
5723 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5724 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5725 	tw32_f(MAC_MODE, tp->mac_mode);
5726 	udelay(40);
5727 
5728 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5729 		tg3_init_bcm8002(tp);
5730 
5731 	/* Enable link change event even when polling the serdes.  */
5732 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5733 	udelay(40);
5734 
5735 	current_link_up = false;
5736 	tp->link_config.rmt_adv = 0;
5737 	mac_status = tr32(MAC_STATUS);
5738 
5739 	if (tg3_flag(tp, HW_AUTONEG))
5740 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5741 	else
5742 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5743 
5744 	tp->napi[0].hw_status->status =
5745 		(SD_STATUS_UPDATED |
5746 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5747 
5748 	for (i = 0; i < 100; i++) {
5749 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5750 				    MAC_STATUS_CFG_CHANGED));
5751 		udelay(5);
5752 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5753 					 MAC_STATUS_CFG_CHANGED |
5754 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5755 			break;
5756 	}
5757 
5758 	mac_status = tr32(MAC_STATUS);
5759 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5760 		current_link_up = false;
5761 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5762 		    tp->serdes_counter == 0) {
5763 			tw32_f(MAC_MODE, (tp->mac_mode |
5764 					  MAC_MODE_SEND_CONFIGS));
5765 			udelay(1);
5766 			tw32_f(MAC_MODE, tp->mac_mode);
5767 		}
5768 	}
5769 
5770 	if (current_link_up) {
5771 		tp->link_config.active_speed = SPEED_1000;
5772 		tp->link_config.active_duplex = DUPLEX_FULL;
5773 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5774 				    LED_CTRL_LNKLED_OVERRIDE |
5775 				    LED_CTRL_1000MBPS_ON));
5776 	} else {
5777 		tp->link_config.active_speed = SPEED_UNKNOWN;
5778 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5779 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5780 				    LED_CTRL_LNKLED_OVERRIDE |
5781 				    LED_CTRL_TRAFFIC_OVERRIDE));
5782 	}
5783 
5784 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5785 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5786 		if (orig_pause_cfg != now_pause_cfg ||
5787 		    orig_active_speed != tp->link_config.active_speed ||
5788 		    orig_active_duplex != tp->link_config.active_duplex)
5789 			tg3_link_report(tp);
5790 	}
5791 
5792 	return 0;
5793 }
5794 
5795 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5796 {
5797 	int err = 0;
5798 	u32 bmsr, bmcr;
5799 	u16 current_speed = SPEED_UNKNOWN;
5800 	u8 current_duplex = DUPLEX_UNKNOWN;
5801 	bool current_link_up = false;
5802 	u32 local_adv, remote_adv, sgsr;
5803 
5804 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5805 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5806 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5807 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5808 
5809 		if (force_reset)
5810 			tg3_phy_reset(tp);
5811 
5812 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5813 
5814 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5815 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5816 		} else {
5817 			current_link_up = true;
5818 			if (sgsr & SERDES_TG3_SPEED_1000) {
5819 				current_speed = SPEED_1000;
5820 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5821 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5822 				current_speed = SPEED_100;
5823 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5824 			} else {
5825 				current_speed = SPEED_10;
5826 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 			}
5828 
5829 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5830 				current_duplex = DUPLEX_FULL;
5831 			else
5832 				current_duplex = DUPLEX_HALF;
5833 		}
5834 
5835 		tw32_f(MAC_MODE, tp->mac_mode);
5836 		udelay(40);
5837 
5838 		tg3_clear_mac_status(tp);
5839 
5840 		goto fiber_setup_done;
5841 	}
5842 
5843 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5844 	tw32_f(MAC_MODE, tp->mac_mode);
5845 	udelay(40);
5846 
5847 	tg3_clear_mac_status(tp);
5848 
5849 	if (force_reset)
5850 		tg3_phy_reset(tp);
5851 
5852 	tp->link_config.rmt_adv = 0;
5853 
5854 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5855 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5856 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5857 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5858 			bmsr |= BMSR_LSTATUS;
5859 		else
5860 			bmsr &= ~BMSR_LSTATUS;
5861 	}
5862 
5863 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5864 
5865 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5866 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5867 		/* do nothing, just check for link up at the end */
5868 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5869 		u32 adv, newadv;
5870 
5871 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5872 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5873 				 ADVERTISE_1000XPAUSE |
5874 				 ADVERTISE_1000XPSE_ASYM |
5875 				 ADVERTISE_SLCT);
5876 
5877 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5878 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5879 
5880 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5881 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5882 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5883 			tg3_writephy(tp, MII_BMCR, bmcr);
5884 
5885 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5886 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5887 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5888 
5889 			return err;
5890 		}
5891 	} else {
5892 		u32 new_bmcr;
5893 
5894 		bmcr &= ~BMCR_SPEED1000;
5895 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5896 
5897 		if (tp->link_config.duplex == DUPLEX_FULL)
5898 			new_bmcr |= BMCR_FULLDPLX;
5899 
5900 		if (new_bmcr != bmcr) {
5901 			/* BMCR_SPEED1000 is a reserved bit that needs
5902 			 * to be set on write.
5903 			 */
5904 			new_bmcr |= BMCR_SPEED1000;
5905 
5906 			/* Force a linkdown */
5907 			if (tp->link_up) {
5908 				u32 adv;
5909 
5910 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5911 				adv &= ~(ADVERTISE_1000XFULL |
5912 					 ADVERTISE_1000XHALF |
5913 					 ADVERTISE_SLCT);
5914 				tg3_writephy(tp, MII_ADVERTISE, adv);
5915 				tg3_writephy(tp, MII_BMCR, bmcr |
5916 							   BMCR_ANRESTART |
5917 							   BMCR_ANENABLE);
5918 				udelay(10);
5919 				tg3_carrier_off(tp);
5920 			}
5921 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5922 			bmcr = new_bmcr;
5923 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5924 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5925 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5926 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5927 					bmsr |= BMSR_LSTATUS;
5928 				else
5929 					bmsr &= ~BMSR_LSTATUS;
5930 			}
5931 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5932 		}
5933 	}
5934 
5935 	if (bmsr & BMSR_LSTATUS) {
5936 		current_speed = SPEED_1000;
5937 		current_link_up = true;
5938 		if (bmcr & BMCR_FULLDPLX)
5939 			current_duplex = DUPLEX_FULL;
5940 		else
5941 			current_duplex = DUPLEX_HALF;
5942 
5943 		local_adv = 0;
5944 		remote_adv = 0;
5945 
5946 		if (bmcr & BMCR_ANENABLE) {
5947 			u32 common;
5948 
5949 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5950 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5951 			common = local_adv & remote_adv;
5952 			if (common & (ADVERTISE_1000XHALF |
5953 				      ADVERTISE_1000XFULL)) {
5954 				if (common & ADVERTISE_1000XFULL)
5955 					current_duplex = DUPLEX_FULL;
5956 				else
5957 					current_duplex = DUPLEX_HALF;
5958 
5959 				tp->link_config.rmt_adv =
5960 					   mii_adv_to_ethtool_adv_x(remote_adv);
5961 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5962 				/* Link is up via parallel detect */
5963 			} else {
5964 				current_link_up = false;
5965 			}
5966 		}
5967 	}
5968 
5969 fiber_setup_done:
5970 	if (current_link_up && current_duplex == DUPLEX_FULL)
5971 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5972 
5973 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5974 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5975 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5976 
5977 	tw32_f(MAC_MODE, tp->mac_mode);
5978 	udelay(40);
5979 
5980 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5981 
5982 	tp->link_config.active_speed = current_speed;
5983 	tp->link_config.active_duplex = current_duplex;
5984 
5985 	tg3_test_and_report_link_chg(tp, current_link_up);
5986 	return err;
5987 }
5988 
5989 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5990 {
5991 	if (tp->serdes_counter) {
5992 		/* Give autoneg time to complete. */
5993 		tp->serdes_counter--;
5994 		return;
5995 	}
5996 
5997 	if (!tp->link_up &&
5998 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5999 		u32 bmcr;
6000 
6001 		tg3_readphy(tp, MII_BMCR, &bmcr);
6002 		if (bmcr & BMCR_ANENABLE) {
6003 			u32 phy1, phy2;
6004 
6005 			/* Select shadow register 0x1f */
6006 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6007 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6008 
6009 			/* Select expansion interrupt status register */
6010 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6011 					 MII_TG3_DSP_EXP1_INT_STAT);
6012 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6013 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014 
6015 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6016 				/* We have signal detect and not receiving
6017 				/* We have signal detect and are not receiving
6018 				 * config code words, so the link is up by
6019 				 * parallel detection.
6020 
6021 				bmcr &= ~BMCR_ANENABLE;
6022 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6023 				tg3_writephy(tp, MII_BMCR, bmcr);
6024 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6025 			}
6026 		}
6027 	} else if (tp->link_up &&
6028 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6029 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6030 		u32 phy2;
6031 
6032 		/* Select expansion interrupt status register */
6033 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6034 				 MII_TG3_DSP_EXP1_INT_STAT);
6035 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6036 		if (phy2 & 0x20) {
6037 			u32 bmcr;
6038 
6039 			/* Config code words received, turn on autoneg. */
6040 			tg3_readphy(tp, MII_BMCR, &bmcr);
6041 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6042 
6043 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6044 
6045 		}
6046 	}
6047 }
6048 
6049 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6050 {
6051 	u32 val;
6052 	int err;
6053 
6054 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6055 		err = tg3_setup_fiber_phy(tp, force_reset);
6056 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6057 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6058 	else
6059 		err = tg3_setup_copper_phy(tp, force_reset);
6060 
6061 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6062 		u32 scale;
6063 
6064 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6065 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6066 			scale = 65;
6067 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6068 			scale = 6;
6069 		else
6070 			scale = 12;
6071 
6072 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6073 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6074 		tw32(GRC_MISC_CFG, val);
6075 	}
6076 
6077 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6078 	      (6 << TX_LENGTHS_IPG_SHIFT);
6079 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6080 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6081 		val |= tr32(MAC_TX_LENGTHS) &
6082 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6083 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6084 
6085 	if (tp->link_config.active_speed == SPEED_1000 &&
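	/* Half-duplex gigabit uses a much larger slot-time value (0xff
	 * vs. the standard 32), consistent with 802.3 carrier
	 * extension at gigabit speeds.
	 */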
6086 	    tp->link_config.active_duplex == DUPLEX_HALF)
6087 		tw32(MAC_TX_LENGTHS, val |
6088 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6089 	else
6090 		tw32(MAC_TX_LENGTHS, val |
6091 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 
6093 	if (!tg3_flag(tp, 5705_PLUS)) {
6094 		if (tp->link_up) {
6095 			tw32(HOSTCC_STAT_COAL_TICKS,
6096 			     tp->coal.stats_block_coalesce_usecs);
6097 		} else {
6098 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6099 		}
6100 	}
6101 
6102 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6103 		val = tr32(PCIE_PWR_MGMT_THRESH);
6104 		if (!tp->link_up)
6105 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6106 			      tp->pwrmgmt_thresh;
6107 		else
6108 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6109 		tw32(PCIE_PWR_MGMT_THRESH, val);
6110 	}
6111 
6112 	return err;
6113 }
6114 
6115 /* tp->lock must be held */
6116 static u64 tg3_refclk_read(struct tg3 *tp)
6117 {
6118 	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6119 	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6120 }
6121 
6122 /* tp->lock must be held */
6123 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6124 {
6125 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6126 
6127 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
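	/* Stop the free-running reference clock, load the new 64-bit
	 * count, then resume counting from that value.
	 */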
6128 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6129 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6130 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6131 }
6132 
6133 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6134 static inline void tg3_full_unlock(struct tg3 *tp);
6135 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6136 {
6137 	struct tg3 *tp = netdev_priv(dev);
6138 
6139 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6140 				SOF_TIMESTAMPING_RX_SOFTWARE |
6141 				SOF_TIMESTAMPING_SOFTWARE;
6142 
6143 	if (tg3_flag(tp, PTP_CAPABLE)) {
6144 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6145 					SOF_TIMESTAMPING_RX_HARDWARE |
6146 					SOF_TIMESTAMPING_RAW_HARDWARE;
6147 	}
6148 
6149 	if (tp->ptp_clock)
6150 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6151 	else
6152 		info->phc_index = -1;
6153 
6154 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6155 
6156 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6157 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6158 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6159 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6160 	return 0;
6161 }
6162 
6163 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6164 {
6165 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6166 	bool neg_adj = false;
6167 	u32 correction = 0;
6168 
6169 	if (ppb < 0) {
6170 		neg_adj = true;
6171 		ppb = -ppb;
6172 	}
6173 
6174 	/* Frequency adjustment is performed using hardware with a 24 bit
6175 	 * accumulator and a programmable correction value. On each clk, the
6176 	 * correction value gets added to the accumulator and when it
6177 	 * overflows, the time counter is incremented/decremented.
6178 	 *
6179 	 * So conversion from ppb to correction value is
6180 	 *		ppb * (1 << 24) / 1000000000
6181 	 */
6182 	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
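	/* For example, a 1 ppm (1000 ppb) adjustment works out to
	 * 1000 * 16777216 / 1000000000 = 16 (truncated) as the
	 * correction value.
	 */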
6183 		     TG3_EAV_REF_CLK_CORRECT_MASK;
6184 
6185 	tg3_full_lock(tp, 0);
6186 
6187 	if (correction)
6188 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6189 		     TG3_EAV_REF_CLK_CORRECT_EN |
6190 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6191 	else
6192 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6193 
6194 	tg3_full_unlock(tp);
6195 
6196 	return 0;
6197 }
6198 
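/* Offset adjustments are done in software: the requested delta
 * accumulates in tp->ptp_adjust and is folded into the raw hardware
 * count whenever the clock is read.
 */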
6199 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6200 {
6201 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6202 
6203 	tg3_full_lock(tp, 0);
6204 	tp->ptp_adjust += delta;
6205 	tg3_full_unlock(tp);
6206 
6207 	return 0;
6208 }
6209 
6210 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6211 {
6212 	u64 ns;
6213 	u32 remainder;
6214 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 
6216 	tg3_full_lock(tp, 0);
6217 	ns = tg3_refclk_read(tp);
6218 	ns += tp->ptp_adjust;
6219 	tg3_full_unlock(tp);
6220 
6221 	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6222 	ts->tv_nsec = remainder;
6223 
6224 	return 0;
6225 }
6226 
6227 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6228 			   const struct timespec *ts)
6229 {
6230 	u64 ns;
6231 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6232 
6233 	ns = timespec_to_ns(ts);
6234 
6235 	tg3_full_lock(tp, 0);
6236 	tg3_refclk_write(tp, ns);
6237 	tp->ptp_adjust = 0;
6238 	tg3_full_unlock(tp);
6239 
6240 	return 0;
6241 }
6242 
6243 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6244 			  struct ptp_clock_request *rq, int on)
6245 {
6246 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6247 	u32 clock_ctl;
6248 	int rval = 0;
6249 
6250 	switch (rq->type) {
6251 	case PTP_CLK_REQ_PEROUT:
6252 		if (rq->perout.index != 0)
6253 			return -EINVAL;
6254 
6255 		tg3_full_lock(tp, 0);
6256 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6258 
6259 		if (on) {
6260 			u64 nsec;
6261 
6262 			nsec = rq->perout.start.sec * 1000000000ULL +
6263 			       rq->perout.start.nsec;
6264 
6265 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 				netdev_warn(tp->dev,
6267 					    "Device supports only a one-shot timesync output; period must be 0\n");
6268 				rval = -EINVAL;
6269 				goto err_out;
6270 			}
6271 
6272 			if (nsec & (1ULL << 63)) {
6273 				netdev_warn(tp->dev,
6274 					    "Start value (nsec) is over the limit; start must fit in 63 bits\n");
6275 				rval = -EINVAL;
6276 				goto err_out;
6277 			}
6278 
6279 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 			tw32(TG3_EAV_WATCHDOG0_MSB,
6281 			     TG3_EAV_WATCHDOG0_EN |
6282 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6283 
6284 			tw32(TG3_EAV_REF_CLCK_CTL,
6285 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6286 		} else {
6287 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6289 		}
6290 
6291 err_out:
6292 		tg3_full_unlock(tp);
6293 		return rval;
6294 
6295 	default:
6296 		break;
6297 	}
6298 
6299 	return -EOPNOTSUPP;
6300 }
6301 
6302 static const struct ptp_clock_info tg3_ptp_caps = {
6303 	.owner		= THIS_MODULE,
6304 	.name		= "tg3 clock",
6305 	.max_adj	= 250000000,
6306 	.n_alarm	= 0,
6307 	.n_ext_ts	= 0,
6308 	.n_per_out	= 1,
6309 	.pps		= 0,
6310 	.adjfreq	= tg3_ptp_adjfreq,
6311 	.adjtime	= tg3_ptp_adjtime,
6312 	.gettime	= tg3_ptp_gettime,
6313 	.settime	= tg3_ptp_settime,
6314 	.enable		= tg3_ptp_enable,
6315 };
6316 
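/* Convert a raw hardware clock sample into a socket timestamp,
 * applying the software offset maintained by the PTP callbacks.
 */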
6317 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6318 				     struct skb_shared_hwtstamps *timestamp)
6319 {
6320 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6321 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6322 					   tp->ptp_adjust);
6323 }
6324 
6325 /* tp->lock must be held */
6326 static void tg3_ptp_init(struct tg3 *tp)
6327 {
6328 	if (!tg3_flag(tp, PTP_CAPABLE))
6329 		return;
6330 
6331 	/* Initialize the hardware clock to the system time. */
6332 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6333 	tp->ptp_adjust = 0;
6334 	tp->ptp_info = tg3_ptp_caps;
6335 }
6336 
6337 /* tp->lock must be held */
6338 static void tg3_ptp_resume(struct tg3 *tp)
6339 {
6340 	if (!tg3_flag(tp, PTP_CAPABLE))
6341 		return;
6342 
6343 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6344 	tp->ptp_adjust = 0;
6345 }
6346 
6347 static void tg3_ptp_fini(struct tg3 *tp)
6348 {
6349 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6350 		return;
6351 
6352 	ptp_clock_unregister(tp->ptp_clock);
6353 	tp->ptp_clock = NULL;
6354 	tp->ptp_adjust = 0;
6355 }
6356 
6357 static inline int tg3_irq_sync(struct tg3 *tp)
6358 {
6359 	return tp->irq_sync;
6360 }
6361 
6362 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6363 {
6364 	int i;
6365 
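	/* Bias the destination pointer by the register offset so the
	 * dump buffer mirrors the device's register layout.
	 */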
6366 	dst = (u32 *)((u8 *)dst + off);
6367 	for (i = 0; i < len; i += sizeof(u32))
6368 		*dst++ = tr32(off + i);
6369 }
6370 
6371 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6372 {
6373 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6374 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6375 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6376 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6377 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6378 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6379 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6380 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6381 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6382 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6383 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6384 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6385 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6386 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6387 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6388 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6389 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6390 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6391 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6392 
6393 	if (tg3_flag(tp, SUPPORT_MSIX))
6394 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6395 
6396 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6397 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6398 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6399 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6400 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6401 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6402 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6403 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6404 
6405 	if (!tg3_flag(tp, 5705_PLUS)) {
6406 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6407 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6408 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6409 	}
6410 
6411 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6412 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6413 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6414 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6415 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6416 
6417 	if (tg3_flag(tp, NVRAM))
6418 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6419 }
6420 
6421 static void tg3_dump_state(struct tg3 *tp)
6422 {
6423 	int i;
6424 	u32 *regs;
6425 
6426 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6427 	if (!regs)
6428 		return;
6429 
6430 	if (tg3_flag(tp, PCI_EXPRESS)) {
6431 		/* Read up to but not including private PCI registers */
6432 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6433 			regs[i / sizeof(u32)] = tr32(i);
6434 	} else
6435 		tg3_dump_legacy_regs(tp, regs);
6436 
6437 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6438 		if (!regs[i + 0] && !regs[i + 1] &&
6439 		    !regs[i + 2] && !regs[i + 3])
6440 			continue;
6441 
6442 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6443 			   i * 4,
6444 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6445 	}
6446 
6447 	kfree(regs);
6448 
6449 	for (i = 0; i < tp->irq_cnt; i++) {
6450 		struct tg3_napi *tnapi = &tp->napi[i];
6451 
6452 		/* SW status block */
6453 		netdev_err(tp->dev,
6454 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6455 			   i,
6456 			   tnapi->hw_status->status,
6457 			   tnapi->hw_status->status_tag,
6458 			   tnapi->hw_status->rx_jumbo_consumer,
6459 			   tnapi->hw_status->rx_consumer,
6460 			   tnapi->hw_status->rx_mini_consumer,
6461 			   tnapi->hw_status->idx[0].rx_producer,
6462 			   tnapi->hw_status->idx[0].tx_consumer);
6463 
6464 		netdev_err(tp->dev,
6465 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6466 			   i,
6467 			   tnapi->last_tag, tnapi->last_irq_tag,
6468 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6469 			   tnapi->rx_rcb_ptr,
6470 			   tnapi->prodring.rx_std_prod_idx,
6471 			   tnapi->prodring.rx_std_cons_idx,
6472 			   tnapi->prodring.rx_jmb_prod_idx,
6473 			   tnapi->prodring.rx_jmb_cons_idx);
6474 	}
6475 }
6476 
6477 /* This is called whenever we suspect that the system chipset is re-
6478  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6479  * is bogus tx completions. We try to recover by setting the
6480  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6481  * in the workqueue.
6482  */
6483 static void tg3_tx_recover(struct tg3 *tp)
6484 {
6485 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6486 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6487 
6488 	netdev_warn(tp->dev,
6489 		    "The system may be re-ordering memory-mapped I/O "
6490 		    "cycles to the network device, attempting to recover. "
6491 		    "Please report the problem to the driver maintainer "
6492 		    "and include system chipset information.\n");
6493 
6494 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6495 }
6496 
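/* Number of free tx descriptors.  The (tx_prod - tx_cons) difference
 * wraps correctly under the mask because TG3_TX_RING_SIZE is a power
 * of two, so the in-flight count stays right across index wrap-around.
 */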
6497 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6498 {
6499 	/* Tell compiler to fetch tx indices from memory. */
6500 	barrier();
6501 	return tnapi->tx_pending -
6502 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6503 }
6504 
6505 /* Tigon3 never reports partial packet sends.  So we do not
6506  * need special logic to handle SKBs that have not had all
6507  * of their frags sent yet, like SunGEM does.
6508  */
6509 static void tg3_tx(struct tg3_napi *tnapi)
6510 {
6511 	struct tg3 *tp = tnapi->tp;
6512 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6513 	u32 sw_idx = tnapi->tx_cons;
6514 	struct netdev_queue *txq;
6515 	int index = tnapi - tp->napi;
6516 	unsigned int pkts_compl = 0, bytes_compl = 0;
6517 
6518 	if (tg3_flag(tp, ENABLE_TSS))
6519 		index--;
6520 
6521 	txq = netdev_get_tx_queue(tp->dev, index);
6522 
6523 	while (sw_idx != hw_idx) {
6524 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6525 		struct sk_buff *skb = ri->skb;
6526 		int i, tx_bug = 0;
6527 
6528 		if (unlikely(skb == NULL)) {
6529 			tg3_tx_recover(tp);
6530 			return;
6531 		}
6532 
6533 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6534 			struct skb_shared_hwtstamps timestamp;
6535 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6536 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6537 
6538 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6539 
6540 			skb_tstamp_tx(skb, &timestamp);
6541 		}
6542 
6543 		pci_unmap_single(tp->pdev,
6544 				 dma_unmap_addr(ri, mapping),
6545 				 skb_headlen(skb),
6546 				 PCI_DMA_TODEVICE);
6547 
6548 		ri->skb = NULL;
6549 
6550 		while (ri->fragmented) {
6551 			ri->fragmented = false;
6552 			sw_idx = NEXT_TX(sw_idx);
6553 			ri = &tnapi->tx_buffers[sw_idx];
6554 		}
6555 
6556 		sw_idx = NEXT_TX(sw_idx);
6557 
6558 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6559 			ri = &tnapi->tx_buffers[sw_idx];
6560 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6561 				tx_bug = 1;
6562 
6563 			pci_unmap_page(tp->pdev,
6564 				       dma_unmap_addr(ri, mapping),
6565 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6566 				       PCI_DMA_TODEVICE);
6567 
6568 			while (ri->fragmented) {
6569 				ri->fragmented = false;
6570 				sw_idx = NEXT_TX(sw_idx);
6571 				ri = &tnapi->tx_buffers[sw_idx];
6572 			}
6573 
6574 			sw_idx = NEXT_TX(sw_idx);
6575 		}
6576 
6577 		pkts_compl++;
6578 		bytes_compl += skb->len;
6579 
6580 		dev_kfree_skb(skb);
6581 
6582 		if (unlikely(tx_bug)) {
6583 			tg3_tx_recover(tp);
6584 			return;
6585 		}
6586 	}
6587 
6588 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6589 
6590 	tnapi->tx_cons = sw_idx;
6591 
6592 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6593 	 * before checking for netif_queue_stopped().  Without the
6594 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6595 	 * will miss it and cause the queue to be stopped forever.
6596 	 */
6597 	smp_mb();
6598 
6599 	if (unlikely(netif_tx_queue_stopped(txq) &&
6600 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6601 		__netif_tx_lock(txq, smp_processor_id());
6602 		if (netif_tx_queue_stopped(txq) &&
6603 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6604 			netif_tx_wake_queue(txq);
6605 		__netif_tx_unlock(txq);
6606 	}
6607 }
6608 
6609 static void tg3_frag_free(bool is_frag, void *data)
6610 {
6611 	if (is_frag)
6612 		put_page(virt_to_head_page(data));
6613 	else
6614 		kfree(data);
6615 }
6616 
6617 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6618 {
6619 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6620 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6621 
6622 	if (!ri->data)
6623 		return;
6624 
6625 	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6626 			 map_sz, PCI_DMA_FROMDEVICE);
6627 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6628 	ri->data = NULL;
6629 }
6630 
6631 
6632 /* Returns size of skb allocated or < 0 on error.
6633  *
6634  * We only need to fill in the address because the other members
6635  * of the RX descriptor are invariant, see tg3_init_rings.
6636  *
6637  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6638  * posting buffers we only dirty the first cache line of the RX
6639  * descriptor (containing the address).  Whereas for the RX status
6640  * buffers the cpu only reads the last cacheline of the RX descriptor
6641  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6642  */
6643 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6644 			     u32 opaque_key, u32 dest_idx_unmasked,
6645 			     unsigned int *frag_size)
6646 {
6647 	struct tg3_rx_buffer_desc *desc;
6648 	struct ring_info *map;
6649 	u8 *data;
6650 	dma_addr_t mapping;
6651 	int skb_size, data_size, dest_idx;
6652 
6653 	switch (opaque_key) {
6654 	case RXD_OPAQUE_RING_STD:
6655 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6656 		desc = &tpr->rx_std[dest_idx];
6657 		map = &tpr->rx_std_buffers[dest_idx];
6658 		data_size = tp->rx_pkt_map_sz;
6659 		break;
6660 
6661 	case RXD_OPAQUE_RING_JUMBO:
6662 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6663 		desc = &tpr->rx_jmb[dest_idx].std;
6664 		map = &tpr->rx_jmb_buffers[dest_idx];
6665 		data_size = TG3_RX_JMB_MAP_SZ;
6666 		break;
6667 
6668 	default:
6669 		return -EINVAL;
6670 	}
6671 
6672 	/* Do not overwrite any of the map or rp information
6673 	 * until we are sure we can commit to a new buffer.
6674 	 *
6675 	 * Callers depend upon this behavior and assume that
6676 	 * we leave everything unchanged if we fail.
6677 	 */
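	/* Use a page-fragment allocation when the buffer plus
	 * skb_shared_info fits in one page; otherwise (e.g. jumbo
	 * buffers) fall back to kmalloc().
	 */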
6678 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6679 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6680 	if (skb_size <= PAGE_SIZE) {
6681 		data = netdev_alloc_frag(skb_size);
6682 		*frag_size = skb_size;
6683 	} else {
6684 		data = kmalloc(skb_size, GFP_ATOMIC);
6685 		*frag_size = 0;
6686 	}
6687 	if (!data)
6688 		return -ENOMEM;
6689 
6690 	mapping = pci_map_single(tp->pdev,
6691 				 data + TG3_RX_OFFSET(tp),
6692 				 data_size,
6693 				 PCI_DMA_FROMDEVICE);
6694 	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6695 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6696 		return -EIO;
6697 	}
6698 
6699 	map->data = data;
6700 	dma_unmap_addr_set(map, mapping, mapping);
6701 
6702 	desc->addr_hi = ((u64)mapping >> 32);
6703 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6704 
6705 	return data_size;
6706 }
6707 
6708 /* We only need to move over in the address because the other
6709  * members of the RX descriptor are invariant.  See notes above
6710  * tg3_alloc_rx_data for full details.
6711  */
6712 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6713 			   struct tg3_rx_prodring_set *dpr,
6714 			   u32 opaque_key, int src_idx,
6715 			   u32 dest_idx_unmasked)
6716 {
6717 	struct tg3 *tp = tnapi->tp;
6718 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6719 	struct ring_info *src_map, *dest_map;
6720 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6721 	int dest_idx;
6722 
6723 	switch (opaque_key) {
6724 	case RXD_OPAQUE_RING_STD:
6725 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6726 		dest_desc = &dpr->rx_std[dest_idx];
6727 		dest_map = &dpr->rx_std_buffers[dest_idx];
6728 		src_desc = &spr->rx_std[src_idx];
6729 		src_map = &spr->rx_std_buffers[src_idx];
6730 		break;
6731 
6732 	case RXD_OPAQUE_RING_JUMBO:
6733 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6734 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6735 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6736 		src_desc = &spr->rx_jmb[src_idx].std;
6737 		src_map = &spr->rx_jmb_buffers[src_idx];
6738 		break;
6739 
6740 	default:
6741 		return;
6742 	}
6743 
6744 	dest_map->data = src_map->data;
6745 	dma_unmap_addr_set(dest_map, mapping,
6746 			   dma_unmap_addr(src_map, mapping));
6747 	dest_desc->addr_hi = src_desc->addr_hi;
6748 	dest_desc->addr_lo = src_desc->addr_lo;
6749 
6750 	/* Ensure that the update to the skb happens after the physical
6751 	 * addresses have been transferred to the new BD location.
6752 	 */
6753 	smp_wmb();
6754 
6755 	src_map->data = NULL;
6756 }
6757 
6758 /* The RX ring scheme is composed of multiple rings which post fresh
6759  * buffers to the chip, and one special ring the chip uses to report
6760  * status back to the host.
6761  *
6762  * The special ring reports the status of received packets to the
6763  * host.  The chip does not write into the original descriptor the
6764  * RX buffer was obtained from.  The chip simply takes the original
6765  * descriptor as provided by the host, updates the status and length
6766  * field, then writes this into the next status ring entry.
6767  *
6768  * Each ring the host uses to post buffers to the chip is described
6769  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6770  * it is first placed into the on-chip ram.  When the packet's length
6771  * is known, it walks down the TG3_BDINFO entries to select the ring.
6772  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6773  * which is within the range of the new packet's length is chosen.
6774  *
6775  * The "separate ring for rx status" scheme may sound queer, but it makes
6776  * sense from a cache coherency perspective.  If only the host writes
6777  * to the buffer post rings, and only the chip writes to the rx status
6778  * rings, then cache lines never move beyond shared-modified state.
6779  * If both the host and chip were to write into the same ring, cache line
6780  * eviction could occur since both entities want it in an exclusive state.
6781  */
6782 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6783 {
6784 	struct tg3 *tp = tnapi->tp;
6785 	u32 work_mask, rx_std_posted = 0;
6786 	u32 std_prod_idx, jmb_prod_idx;
6787 	u32 sw_idx = tnapi->rx_rcb_ptr;
6788 	u16 hw_idx;
6789 	int received;
6790 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6791 
6792 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6793 	/*
6794 	 * We need to order the read of hw_idx and the read of
6795 	 * the opaque cookie.
6796 	 */
6797 	rmb();
6798 	work_mask = 0;
6799 	received = 0;
6800 	std_prod_idx = tpr->rx_std_prod_idx;
6801 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6802 	while (sw_idx != hw_idx && budget > 0) {
6803 		struct ring_info *ri;
6804 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6805 		unsigned int len;
6806 		struct sk_buff *skb;
6807 		dma_addr_t dma_addr;
6808 		u32 opaque_key, desc_idx, *post_ptr;
6809 		u8 *data;
6810 		u64 tstamp = 0;
6811 
6812 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6813 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6814 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6815 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6816 			dma_addr = dma_unmap_addr(ri, mapping);
6817 			data = ri->data;
6818 			post_ptr = &std_prod_idx;
6819 			rx_std_posted++;
6820 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6821 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6822 			dma_addr = dma_unmap_addr(ri, mapping);
6823 			data = ri->data;
6824 			post_ptr = &jmb_prod_idx;
6825 		} else
6826 			goto next_pkt_nopost;
6827 
6828 		work_mask |= opaque_key;
6829 
6830 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6831 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6832 		drop_it:
6833 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6834 				       desc_idx, *post_ptr);
6835 		drop_it_no_recycle:
6836 			/* Other statistics are kept by the card. */
6837 			tp->rx_dropped++;
6838 			goto next_pkt;
6839 		}
6840 
6841 		prefetch(data + TG3_RX_OFFSET(tp));
6842 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6843 		      ETH_FCS_LEN;
6844 
6845 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6846 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6847 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6848 		     RXD_FLAG_PTPSTAT_PTPV2) {
6849 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6850 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6851 		}
6852 
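		/* Large packets take the zero-copy path: hand the DMA
		 * buffer to the stack and post a fresh one.  Small
		 * packets are copied into a new skb so the original
		 * buffer can be recycled in place.
		 */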
6853 		if (len > TG3_RX_COPY_THRESH(tp)) {
6854 			int skb_size;
6855 			unsigned int frag_size;
6856 
6857 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6858 						    *post_ptr, &frag_size);
6859 			if (skb_size < 0)
6860 				goto drop_it;
6861 
6862 			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6863 					 PCI_DMA_FROMDEVICE);
6864 
6865 			/* Ensure that the update to the data happens
6866 			 * after the usage of the old DMA mapping.
6867 			 */
6868 			smp_wmb();
6869 
6870 			ri->data = NULL;
6871 
6872 			skb = build_skb(data, frag_size);
6873 			if (!skb) {
6874 				tg3_frag_free(frag_size != 0, data);
6875 				goto drop_it_no_recycle;
6876 			}
6877 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6878 		} else {
6879 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6880 				       desc_idx, *post_ptr);
6881 
6882 			skb = netdev_alloc_skb(tp->dev,
6883 					       len + TG3_RAW_IP_ALIGN);
6884 			if (skb == NULL)
6885 				goto drop_it_no_recycle;
6886 
6887 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6888 			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6889 			memcpy(skb->data,
6890 			       data + TG3_RX_OFFSET(tp),
6891 			       len);
6892 			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6893 		}
6894 
6895 		skb_put(skb, len);
6896 		if (tstamp)
6897 			tg3_hwclock_to_timestamp(tp, tstamp,
6898 						 skb_hwtstamps(skb));
6899 
6900 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6901 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6902 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6903 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6904 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6905 		else
6906 			skb_checksum_none_assert(skb);
6907 
6908 		skb->protocol = eth_type_trans(skb, tp->dev);
6909 
6910 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6911 		    skb->protocol != htons(ETH_P_8021Q)) {
6912 			dev_kfree_skb(skb);
6913 			goto drop_it_no_recycle;
6914 		}
6915 
6916 		if (desc->type_flags & RXD_FLAG_VLAN &&
6917 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6918 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6919 					       desc->err_vlan & RXD_VLAN_MASK);
6920 
6921 		napi_gro_receive(&tnapi->napi, skb);
6922 
6923 		received++;
6924 		budget--;
6925 
6926 next_pkt:
6927 		(*post_ptr)++;
6928 
6929 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6930 			tpr->rx_std_prod_idx = std_prod_idx &
6931 					       tp->rx_std_ring_mask;
6932 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6933 				     tpr->rx_std_prod_idx);
6934 			work_mask &= ~RXD_OPAQUE_RING_STD;
6935 			rx_std_posted = 0;
6936 		}
6937 next_pkt_nopost:
6938 		sw_idx++;
6939 		sw_idx &= tp->rx_ret_ring_mask;
6940 
6941 		/* Refresh hw_idx to see if there is new work */
6942 		if (sw_idx == hw_idx) {
6943 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6944 			rmb();
6945 		}
6946 	}
6947 
6948 	/* ACK the status ring. */
6949 	tnapi->rx_rcb_ptr = sw_idx;
6950 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6951 
6952 	/* Refill RX ring(s). */
6953 	if (!tg3_flag(tp, ENABLE_RSS)) {
6954 		/* Sync BD data before updating mailbox */
6955 		wmb();
6956 
6957 		if (work_mask & RXD_OPAQUE_RING_STD) {
6958 			tpr->rx_std_prod_idx = std_prod_idx &
6959 					       tp->rx_std_ring_mask;
6960 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6961 				     tpr->rx_std_prod_idx);
6962 		}
6963 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6964 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6965 					       tp->rx_jmb_ring_mask;
6966 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6967 				     tpr->rx_jmb_prod_idx);
6968 		}
6969 		mmiowb();
6970 	} else if (work_mask) {
6971 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6972 		 * updated before the producer indices can be updated.
6973 		 */
6974 		smp_wmb();
6975 
6976 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6977 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6978 
6979 		if (tnapi != &tp->napi[1]) {
6980 			tp->rx_refill = true;
6981 			napi_schedule(&tp->napi[1].napi);
6982 		}
6983 	}
6984 
6985 	return received;
6986 }
6987 
6988 static void tg3_poll_link(struct tg3 *tp)
6989 {
6990 	/* handle link change and other phy events */
6991 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6992 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6993 
6994 		if (sblk->status & SD_STATUS_LINK_CHG) {
6995 			sblk->status = SD_STATUS_UPDATED |
6996 				       (sblk->status & ~SD_STATUS_LINK_CHG);
6997 			spin_lock(&tp->lock);
6998 			if (tg3_flag(tp, USE_PHYLIB)) {
6999 				tw32_f(MAC_STATUS,
7000 				     (MAC_STATUS_SYNC_CHANGED |
7001 				      MAC_STATUS_CFG_CHANGED |
7002 				      MAC_STATUS_MI_COMPLETION |
7003 				      MAC_STATUS_LNKSTATE_CHANGED));
7004 				udelay(40);
7005 			} else
7006 				tg3_setup_phy(tp, false);
7007 			spin_unlock(&tp->lock);
7008 		}
7009 	}
7010 }
7011 
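/* Used in RSS mode: copy refilled rx buffers from a per-vector
 * producer ring (spr) over to the destination ring (dpr) that
 * actually feeds the hardware, for both the standard and jumbo rings.
 */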
7012 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7013 				struct tg3_rx_prodring_set *dpr,
7014 				struct tg3_rx_prodring_set *spr)
7015 {
7016 	u32 si, di, cpycnt, src_prod_idx;
7017 	int i, err = 0;
7018 
7019 	while (1) {
7020 		src_prod_idx = spr->rx_std_prod_idx;
7021 
7022 		/* Make sure updates to the rx_std_buffers[] entries and the
7023 		 * standard producer index are seen in the correct order.
7024 		 */
7025 		smp_rmb();
7026 
7027 		if (spr->rx_std_cons_idx == src_prod_idx)
7028 			break;
7029 
7030 		if (spr->rx_std_cons_idx < src_prod_idx)
7031 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7032 		else
7033 			cpycnt = tp->rx_std_ring_mask + 1 -
7034 				 spr->rx_std_cons_idx;
7035 
7036 		cpycnt = min(cpycnt,
7037 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7038 
7039 		si = spr->rx_std_cons_idx;
7040 		di = dpr->rx_std_prod_idx;
7041 
7042 		for (i = di; i < di + cpycnt; i++) {
7043 			if (dpr->rx_std_buffers[i].data) {
7044 				cpycnt = i - di;
7045 				err = -ENOSPC;
7046 				break;
7047 			}
7048 		}
7049 
7050 		if (!cpycnt)
7051 			break;
7052 
7053 		/* Ensure that updates to the rx_std_buffers ring and the
7054 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7055 		 * ordered correctly WRT the skb check above.
7056 		 */
7057 		smp_rmb();
7058 
7059 		memcpy(&dpr->rx_std_buffers[di],
7060 		       &spr->rx_std_buffers[si],
7061 		       cpycnt * sizeof(struct ring_info));
7062 
7063 		for (i = 0; i < cpycnt; i++, di++, si++) {
7064 			struct tg3_rx_buffer_desc *sbd, *dbd;
7065 			sbd = &spr->rx_std[si];
7066 			dbd = &dpr->rx_std[di];
7067 			dbd->addr_hi = sbd->addr_hi;
7068 			dbd->addr_lo = sbd->addr_lo;
7069 		}
7070 
7071 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7072 				       tp->rx_std_ring_mask;
7073 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7074 				       tp->rx_std_ring_mask;
7075 	}
7076 
7077 	while (1) {
7078 		src_prod_idx = spr->rx_jmb_prod_idx;
7079 
7080 		/* Make sure updates to the rx_jmb_buffers[] entries and
7081 		 * the jumbo producer index are seen in the correct order.
7082 		 */
7083 		smp_rmb();
7084 
7085 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7086 			break;
7087 
7088 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7089 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7090 		else
7091 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7092 				 spr->rx_jmb_cons_idx;
7093 
7094 		cpycnt = min(cpycnt,
7095 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7096 
7097 		si = spr->rx_jmb_cons_idx;
7098 		di = dpr->rx_jmb_prod_idx;
7099 
7100 		for (i = di; i < di + cpycnt; i++) {
7101 			if (dpr->rx_jmb_buffers[i].data) {
7102 				cpycnt = i - di;
7103 				err = -ENOSPC;
7104 				break;
7105 			}
7106 		}
7107 
7108 		if (!cpycnt)
7109 			break;
7110 
7111 		/* Ensure that updates to the rx_jmb_buffers ring and the
7112 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7113 		 * ordered correctly WRT the skb check above.
7114 		 */
7115 		smp_rmb();
7116 
7117 		memcpy(&dpr->rx_jmb_buffers[di],
7118 		       &spr->rx_jmb_buffers[si],
7119 		       cpycnt * sizeof(struct ring_info));
7120 
7121 		for (i = 0; i < cpycnt; i++, di++, si++) {
7122 			struct tg3_rx_buffer_desc *sbd, *dbd;
7123 			sbd = &spr->rx_jmb[si].std;
7124 			dbd = &dpr->rx_jmb[di].std;
7125 			dbd->addr_hi = sbd->addr_hi;
7126 			dbd->addr_lo = sbd->addr_lo;
7127 		}
7128 
7129 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7130 				       tp->rx_jmb_ring_mask;
7131 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7132 				       tp->rx_jmb_ring_mask;
7133 	}
7134 
7135 	return err;
7136 }
7137 
7138 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7139 {
7140 	struct tg3 *tp = tnapi->tp;
7141 
7142 	/* run TX completion thread */
7143 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7144 		tg3_tx(tnapi);
7145 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7146 			return work_done;
7147 	}
7148 
7149 	if (!tnapi->rx_rcb_prod_idx)
7150 		return work_done;
7151 
7152 	/* run RX thread, within the bounds set by NAPI.
7153 	 * All RX "locking" is done by ensuring outside
7154 	 * code synchronizes with tg3->napi.poll()
7155 	 */
7156 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7157 		work_done += tg3_rx(tnapi, budget - work_done);
7158 
7159 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7160 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7161 		int i, err = 0;
7162 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7163 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7164 
7165 		tp->rx_refill = false;
7166 		for (i = 1; i <= tp->rxq_cnt; i++)
7167 			err |= tg3_rx_prodring_xfer(tp, dpr,
7168 						    &tp->napi[i].prodring);
7169 
7170 		wmb();
7171 
7172 		if (std_prod_idx != dpr->rx_std_prod_idx)
7173 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7174 				     dpr->rx_std_prod_idx);
7175 
7176 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7177 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7178 				     dpr->rx_jmb_prod_idx);
7179 
7180 		mmiowb();
7181 
7182 		if (err)
7183 			tw32_f(HOSTCC_MODE, tp->coal_now);
7184 	}
7185 
7186 	return work_done;
7187 }
7188 
7189 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7190 {
7191 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7192 		schedule_work(&tp->reset_task);
7193 }
7194 
7195 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7196 {
7197 	cancel_work_sync(&tp->reset_task);
7198 	tg3_flag_clear(tp, RESET_TASK_PENDING);
7199 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7200 }
7201 
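/* NAPI poll handler for the extra MSI-X vectors (tp->napi[1..]).
 * These always use tagged status blocks, so the untagged status
 * handling in tg3_poll() is not needed here.
 */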
7202 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7203 {
7204 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7205 	struct tg3 *tp = tnapi->tp;
7206 	int work_done = 0;
7207 	struct tg3_hw_status *sblk = tnapi->hw_status;
7208 
7209 	while (1) {
7210 		work_done = tg3_poll_work(tnapi, work_done, budget);
7211 
7212 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7213 			goto tx_recovery;
7214 
7215 		if (unlikely(work_done >= budget))
7216 			break;
7217 
7218 		/* tp->last_tag is used in tg3_int_reenable() below
7219 		 * to tell the hw how much work has been processed,
7220 		 * so we must read it before checking for more work.
7221 		 */
7222 		tnapi->last_tag = sblk->status_tag;
7223 		tnapi->last_irq_tag = tnapi->last_tag;
7224 		rmb();
7225 
7226 		/* check for RX/TX work to do */
7227 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7228 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7229 
7230 			/* This test is not race-free, but will reduce
7231 			 * the number of interrupts by looping again.
7232 			 */
7233 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7234 				continue;
7235 
7236 			napi_complete(napi);
7237 			/* Reenable interrupts. */
7238 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7239 
7240 			/* This test is synchronized by napi_schedule()
7241 			 * and napi_complete() to close the race condition.
7242 			 */
7243 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7244 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7245 						  HOSTCC_MODE_ENABLE |
7246 						  tnapi->coal_now);
7247 			}
7248 			mmiowb();
7249 			break;
7250 		}
7251 	}
7252 
7253 	return work_done;
7254 
7255 tx_recovery:
7256 	/* work_done is guaranteed to be less than budget. */
7257 	napi_complete(napi);
7258 	tg3_reset_task_schedule(tp);
7259 	return work_done;
7260 }
7261 
7262 static void tg3_process_error(struct tg3 *tp)
7263 {
7264 	u32 val;
7265 	bool real_error = false;
7266 
7267 	if (tg3_flag(tp, ERROR_PROCESSED))
7268 		return;
7269 
7270 	/* Check Flow Attention register */
7271 	val = tr32(HOSTCC_FLOW_ATTN);
7272 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7273 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7274 		real_error = true;
7275 	}
7276 
7277 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7278 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7279 		real_error = true;
7280 	}
7281 
7282 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7283 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7284 		real_error = true;
7285 	}
7286 
7287 	if (!real_error)
7288 		return;
7289 
7290 	tg3_dump_state(tp);
7291 
7292 	tg3_flag_set(tp, ERROR_PROCESSED);
7293 	tg3_reset_task_schedule(tp);
7294 }
7295 
7296 static int tg3_poll(struct napi_struct *napi, int budget)
7297 {
7298 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7299 	struct tg3 *tp = tnapi->tp;
7300 	int work_done = 0;
7301 	struct tg3_hw_status *sblk = tnapi->hw_status;
7302 
7303 	while (1) {
7304 		if (sblk->status & SD_STATUS_ERROR)
7305 			tg3_process_error(tp);
7306 
7307 		tg3_poll_link(tp);
7308 
7309 		work_done = tg3_poll_work(tnapi, work_done, budget);
7310 
7311 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7312 			goto tx_recovery;
7313 
7314 		if (unlikely(work_done >= budget))
7315 			break;
7316 
7317 		if (tg3_flag(tp, TAGGED_STATUS)) {
7318 			/* tp->last_tag is used in tg3_int_reenable() below
7319 			 * to tell the hw how much work has been processed,
7320 			 * so we must read it before checking for more work.
7321 			 */
7322 			tnapi->last_tag = sblk->status_tag;
7323 			tnapi->last_irq_tag = tnapi->last_tag;
7324 			rmb();
7325 		} else
7326 			sblk->status &= ~SD_STATUS_UPDATED;
7327 
7328 		if (likely(!tg3_has_work(tnapi))) {
7329 			napi_complete(napi);
7330 			tg3_int_reenable(tnapi);
7331 			break;
7332 		}
7333 	}
7334 
7335 	return work_done;
7336 
7337 tx_recovery:
7338 	/* work_done is guaranteed to be less than budget. */
7339 	napi_complete(napi);
7340 	tg3_reset_task_schedule(tp);
7341 	return work_done;
7342 }
7343 
7344 static void tg3_napi_disable(struct tg3 *tp)
7345 {
7346 	int i;
7347 
7348 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7349 		napi_disable(&tp->napi[i].napi);
7350 }
7351 
7352 static void tg3_napi_enable(struct tg3 *tp)
7353 {
7354 	int i;
7355 
7356 	for (i = 0; i < tp->irq_cnt; i++)
7357 		napi_enable(&tp->napi[i].napi);
7358 }
7359 
7360 static void tg3_napi_init(struct tg3 *tp)
7361 {
7362 	int i;
7363 
7364 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7365 	for (i = 1; i < tp->irq_cnt; i++)
7366 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7367 }
7368 
7369 static void tg3_napi_fini(struct tg3 *tp)
7370 {
7371 	int i;
7372 
7373 	for (i = 0; i < tp->irq_cnt; i++)
7374 		netif_napi_del(&tp->napi[i].napi);
7375 }
7376 
7377 static inline void tg3_netif_stop(struct tg3 *tp)
7378 {
7379 	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7380 	tg3_napi_disable(tp);
7381 	netif_carrier_off(tp->dev);
7382 	netif_tx_disable(tp->dev);
7383 }
7384 
7385 /* tp->lock must be held */
7386 static inline void tg3_netif_start(struct tg3 *tp)
7387 {
7388 	tg3_ptp_resume(tp);
7389 
7390 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7391 	 * appropriate so long as all callers are assured to
7392 	 * have free tx slots (such as after tg3_init_hw)
7393 	 */
7394 	netif_tx_wake_all_queues(tp->dev);
7395 
7396 	if (tp->link_up)
7397 		netif_carrier_on(tp->dev);
7398 
7399 	tg3_napi_enable(tp);
7400 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7401 	tg3_enable_ints(tp);
7402 }
7403 
7404 static void tg3_irq_quiesce(struct tg3 *tp)
7405 {
7406 	int i;
7407 
7408 	BUG_ON(tp->irq_sync);
7409 
7410 	tp->irq_sync = 1;
7411 	smp_mb();
7412 
7413 	for (i = 0; i < tp->irq_cnt; i++)
7414 		synchronize_irq(tp->napi[i].irq_vec);
7415 }
7416 
7417 /* Fully shut down all tg3 driver activity elsewhere in the system.
7418  * If irq_sync is non-zero, the IRQ handler must be synchronized
7419  * with as well.  Most of the time this is only necessary when
7420  * shutting down the device.
7421  */
7422 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7423 {
7424 	spin_lock_bh(&tp->lock);
7425 	if (irq_sync)
7426 		tg3_irq_quiesce(tp);
7427 }
7428 
7429 static inline void tg3_full_unlock(struct tg3 *tp)
7430 {
7431 	spin_unlock_bh(&tp->lock);
7432 }
7433 
7434 /* One-shot MSI handler - Chip automatically disables interrupt
7435  * after sending MSI so driver doesn't have to do it.
7436  */
7437 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7438 {
7439 	struct tg3_napi *tnapi = dev_id;
7440 	struct tg3 *tp = tnapi->tp;
7441 
7442 	prefetch(tnapi->hw_status);
7443 	if (tnapi->rx_rcb)
7444 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7445 
7446 	if (likely(!tg3_irq_sync(tp)))
7447 		napi_schedule(&tnapi->napi);
7448 
7449 	return IRQ_HANDLED;
7450 }
7451 
7452 /* MSI ISR - No need to check for interrupt sharing and no need to
7453  * flush status block and interrupt mailbox. PCI ordering rules
7454  * guarantee that MSI will arrive after the status block.
7455  */
7456 static irqreturn_t tg3_msi(int irq, void *dev_id)
7457 {
7458 	struct tg3_napi *tnapi = dev_id;
7459 	struct tg3 *tp = tnapi->tp;
7460 
7461 	prefetch(tnapi->hw_status);
7462 	if (tnapi->rx_rcb)
7463 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7464 	/*
7465 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7466 	 * chip-internal interrupt pending events.
7467 	 * Writing non-zero to intr-mbox-0 additionally tells the
7468 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7469 	 * event coalescing.
7470 	 */
7471 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7472 	if (likely(!tg3_irq_sync(tp)))
7473 		napi_schedule(&tnapi->napi);
7474 
7475 	return IRQ_RETVAL(1);
7476 }
7477 
7478 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7479 {
7480 	struct tg3_napi *tnapi = dev_id;
7481 	struct tg3 *tp = tnapi->tp;
7482 	struct tg3_hw_status *sblk = tnapi->hw_status;
7483 	unsigned int handled = 1;
7484 
7485 	/* In INTx mode, it is possible for the interrupt to arrive at
7486 	 * the CPU before the status block write posted prior to the interrupt.
7487 	 * Reading the PCI State register will confirm whether the
7488 	 * interrupt is ours and will flush the status block.
7489 	 */
7490 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7491 		if (tg3_flag(tp, CHIP_RESETTING) ||
7492 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7493 			handled = 0;
7494 			goto out;
7495 		}
7496 	}
7497 
7498 	/*
7499 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7500 	 * chip-internal interrupt pending events.
7501 	 * Writing non-zero to intr-mbox-0 additionally tells the
7502 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7503 	 * event coalescing.
7504 	 *
7505 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7506 	 * spurious interrupts.  The flush impacts performance but
7507 	 * excessive spurious interrupts can be worse in some cases.
7508 	 */
7509 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7510 	if (tg3_irq_sync(tp))
7511 		goto out;
7512 	sblk->status &= ~SD_STATUS_UPDATED;
7513 	if (likely(tg3_has_work(tnapi))) {
7514 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7515 		napi_schedule(&tnapi->napi);
7516 	} else {
7517 		/* No work, shared interrupt perhaps?  re-enable
7518 		 * interrupts, and flush that PCI write
7519 		 */
7520 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7521 			       0x00000000);
7522 	}
7523 out:
7524 	return IRQ_RETVAL(handled);
7525 }
7526 
7527 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7528 {
7529 	struct tg3_napi *tnapi = dev_id;
7530 	struct tg3 *tp = tnapi->tp;
7531 	struct tg3_hw_status *sblk = tnapi->hw_status;
7532 	unsigned int handled = 1;
7533 
7534 	/* In INTx mode, it is possible for the interrupt to arrive at
7535 	 * the CPU before the status block write posted prior to the interrupt.
7536 	 * Reading the PCI State register will confirm whether the
7537 	 * interrupt is ours and will flush the status block.
7538 	 */
7539 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7540 		if (tg3_flag(tp, CHIP_RESETTING) ||
7541 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7542 			handled = 0;
7543 			goto out;
7544 		}
7545 	}
7546 
7547 	/*
7548 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7549 	 * chip-internal interrupt pending events.
7550 	 * Writing non-zero to intr-mbox-0 additionally tells the
7551 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7552 	 * event coalescing.
7553 	 *
7554 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7555 	 * spurious interrupts.  The flush impacts performance but
7556 	 * excessive spurious interrupts can be worse in some cases.
7557 	 */
7558 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7559 
7560 	/*
7561 	 * In a shared interrupt configuration, sometimes other devices'
7562 	 * interrupts will scream.  We record the current status tag here
7563 	 * so that the above check can report that the screaming interrupts
7564 	 * are unhandled.  Eventually they will be silenced.
7565 	 */
7566 	tnapi->last_irq_tag = sblk->status_tag;
7567 
7568 	if (tg3_irq_sync(tp))
7569 		goto out;
7570 
7571 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7572 
7573 	napi_schedule(&tnapi->napi);
7574 
7575 out:
7576 	return IRQ_RETVAL(handled);
7577 }
7578 
7579 /* ISR for interrupt test */
7580 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7581 {
7582 	struct tg3_napi *tnapi = dev_id;
7583 	struct tg3 *tp = tnapi->tp;
7584 	struct tg3_hw_status *sblk = tnapi->hw_status;
7585 
7586 	if ((sblk->status & SD_STATUS_UPDATED) ||
7587 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7588 		tg3_disable_ints(tp);
7589 		return IRQ_RETVAL(1);
7590 	}
7591 	return IRQ_RETVAL(0);
7592 }
7593 
7594 #ifdef CONFIG_NET_POLL_CONTROLLER
7595 static void tg3_poll_controller(struct net_device *dev)
7596 {
7597 	int i;
7598 	struct tg3 *tp = netdev_priv(dev);
7599 
7600 	if (tg3_irq_sync(tp))
7601 		return;
7602 
7603 	for (i = 0; i < tp->irq_cnt; i++)
7604 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7605 }
7606 #endif
7607 
7608 static void tg3_tx_timeout(struct net_device *dev)
7609 {
7610 	struct tg3 *tp = netdev_priv(dev);
7611 
7612 	if (netif_msg_tx_err(tp)) {
7613 		netdev_err(dev, "transmit timed out, resetting\n");
7614 		tg3_dump_state(tp);
7615 	}
7616 
7617 	tg3_reset_task_schedule(tp);
7618 }
7619 
7620 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7621 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7622 {
7623 	u32 base = (u32) mapping & 0xffffffff;
7624 
7625 	return (base > 0xffffdcc0) && (base + len + 8 < base);
7626 }
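
/* A standalone sketch (not part of the driver) of the wrap-around check
 * above: a buffer crosses a 4GB boundary exactly when adding its length,
 * plus the 8 bytes of slop the hardware may touch past the end, overflows
 * the low 32 bits of the address.  Userspace demo; values are arbitrary.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static int crosses_4g(uint64_t mapping, uint32_t len)
{
	uint32_t base = (uint32_t)mapping;

	return (base > 0xffffdcc0u) && (base + len + 8 < base);
}

int main(void)
{
	printf("%d\n", crosses_4g(0xffffff00ull, 0x200));	/* 1: wraps */
	printf("%d\n", crosses_4g(0x00001000ull, 0x200));	/* 0: safe  */
	return 0;
}
#endif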
7627 
7628 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7629  * of any 4GB boundaries: 4G, 8G, etc.
7630  */
7631 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7632 					   u32 len, u32 mss)
7633 {
7634 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7635 		u32 base = (u32) mapping & 0xffffffff;
7636 
7637 		return ((base + len + (mss & 0x3fff)) < base);
7638 	}
7639 	return 0;
7640 }
7641 
7642 /* Test for DMA addresses > 40-bit */
7643 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7644 					  int len)
7645 {
7646 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7647 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7648 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7649 	return 0;
7650 #else
7651 	return 0;
7652 #endif
7653 }
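
/* A standalone sketch (not part of the driver) of the 40-bit check above:
 * DMA_BIT_MASK(40) is the largest address expressible in 40 bits, so
 * mapping + len exceeding it means the buffer reaches beyond what the
 * affected chips can address.  Userspace demo; values are arbitrary.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MASK40	((1ull << 40) - 1)	/* equivalent of DMA_BIT_MASK(40) */

int main(void)
{
	uint64_t mapping = MASK40 - 0x100;	/* near the 40-bit limit */

	printf("%d\n", (mapping + 0x200) > MASK40);	/* 1: overflows */
	return 0;
}
#endif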
7654 
7655 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7656 				 dma_addr_t mapping, u32 len, u32 flags,
7657 				 u32 mss, u32 vlan)
7658 {
7659 	txbd->addr_hi = ((u64) mapping >> 32);
7660 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7661 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7662 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7663 }
7664 
7665 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7666 			    dma_addr_t map, u32 len, u32 flags,
7667 			    u32 mss, u32 vlan)
7668 {
7669 	struct tg3 *tp = tnapi->tp;
7670 	bool hwbug = false;
7671 
7672 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7673 		hwbug = true;
7674 
7675 	if (tg3_4g_overflow_test(map, len))
7676 		hwbug = true;
7677 
7678 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7679 		hwbug = true;
7680 
7681 	if (tg3_40bit_overflow_test(tp, map, len))
7682 		hwbug = true;
7683 
7684 	if (tp->dma_limit) {
7685 		u32 prvidx = *entry;
7686 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7687 		while (len > tp->dma_limit && *budget) {
7688 			u32 frag_len = tp->dma_limit;
7689 			len -= tp->dma_limit;
7690 
7691 			/* Avoid the 8-byte DMA problem */
7692 			if (len <= 8) {
7693 				len += tp->dma_limit / 2;
7694 				frag_len = tp->dma_limit / 2;
7695 			}
7696 
7697 			tnapi->tx_buffers[*entry].fragmented = true;
7698 
7699 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7700 				      frag_len, tmp_flag, mss, vlan);
7701 			*budget -= 1;
7702 			prvidx = *entry;
7703 			*entry = NEXT_TX(*entry);
7704 
7705 			map += frag_len;
7706 		}
7707 
7708 		if (len) {
7709 			if (*budget) {
7710 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7711 					      len, flags, mss, vlan);
7712 				*budget -= 1;
7713 				*entry = NEXT_TX(*entry);
7714 			} else {
7715 				hwbug = true;
7716 				tnapi->tx_buffers[prvidx].fragmented = false;
7717 			}
7718 		}
7719 	} else {
7720 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7721 			      len, flags, mss, vlan);
7722 		*entry = NEXT_TX(*entry);
7723 	}
7724 
7725 	return hwbug;
7726 }
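
/* A standalone sketch (not part of the driver) of the splitting policy in
 * tg3_tx_frag_set() above: a buffer longer than dma_limit is emitted in
 * dma_limit-sized chunks, and a chunk is halved whenever the split would
 * leave a tail of 8 bytes or less (the SHORT_DMA_BUG case).  Userspace
 * demo with hypothetical sizes.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static void split_for_dma(uint32_t len, uint32_t dma_limit)
{
	while (len > dma_limit) {
		uint32_t frag_len = dma_limit;

		len -= dma_limit;
		if (len <= 8) {			/* avoid a tiny tail */
			len += dma_limit / 2;
			frag_len = dma_limit / 2;
		}
		printf("frag %u\n", frag_len);
	}
	if (len)
		printf("tail %u\n", len);
}

int main(void)
{
	split_for_dma(4100, 4096);	/* prints "frag 2048", "tail 2052" */
	return 0;
}
#endif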
7727 
7728 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7729 {
7730 	int i;
7731 	struct sk_buff *skb;
7732 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7733 
7734 	skb = txb->skb;
7735 	txb->skb = NULL;
7736 
7737 	pci_unmap_single(tnapi->tp->pdev,
7738 			 dma_unmap_addr(txb, mapping),
7739 			 skb_headlen(skb),
7740 			 PCI_DMA_TODEVICE);
7741 
7742 	while (txb->fragmented) {
7743 		txb->fragmented = false;
7744 		entry = NEXT_TX(entry);
7745 		txb = &tnapi->tx_buffers[entry];
7746 	}
7747 
7748 	for (i = 0; i <= last; i++) {
7749 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7750 
7751 		entry = NEXT_TX(entry);
7752 		txb = &tnapi->tx_buffers[entry];
7753 
7754 		pci_unmap_page(tnapi->tp->pdev,
7755 			       dma_unmap_addr(txb, mapping),
7756 			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7757 
7758 		while (txb->fragmented) {
7759 			txb->fragmented = false;
7760 			entry = NEXT_TX(entry);
7761 			txb = &tnapi->tx_buffers[entry];
7762 		}
7763 	}
7764 }
7765 
7766 /* Work around 4GB and 40-bit hardware DMA bugs. */
7767 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7768 				       struct sk_buff **pskb,
7769 				       u32 *entry, u32 *budget,
7770 				       u32 base_flags, u32 mss, u32 vlan)
7771 {
7772 	struct tg3 *tp = tnapi->tp;
7773 	struct sk_buff *new_skb, *skb = *pskb;
7774 	dma_addr_t new_addr = 0;
7775 	int ret = 0;
7776 
7777 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7778 		new_skb = skb_copy(skb, GFP_ATOMIC);
7779 	else {
7780 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7781 
7782 		new_skb = skb_copy_expand(skb,
7783 					  skb_headroom(skb) + more_headroom,
7784 					  skb_tailroom(skb), GFP_ATOMIC);
7785 	}
7786 
7787 	if (!new_skb) {
7788 		ret = -1;
7789 	} else {
7790 		/* New SKB is guaranteed to be linear. */
7791 		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7792 					  PCI_DMA_TODEVICE);
7793 		/* Make sure the mapping succeeded */
7794 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7795 			dev_kfree_skb(new_skb);
7796 			ret = -1;
7797 		} else {
7798 			u32 save_entry = *entry;
7799 
7800 			base_flags |= TXD_FLAG_END;
7801 
7802 			tnapi->tx_buffers[*entry].skb = new_skb;
7803 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7804 					   mapping, new_addr);
7805 
7806 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7807 					    new_skb->len, base_flags,
7808 					    mss, vlan)) {
7809 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7810 				dev_kfree_skb(new_skb);
7811 				ret = -1;
7812 			}
7813 		}
7814 	}
7815 
7816 	dev_kfree_skb(skb);
7817 	*pskb = new_skb;
7818 	return ret;
7819 }
7820 
7821 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7822 
7823 /* Use GSO to work around a rare TSO bug that may be triggered when the
7824  * TSO header is greater than 80 bytes.
7825  */
7826 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7827 {
7828 	struct sk_buff *segs, *nskb;
7829 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7830 
7831 	/* Estimate the number of fragments in the worst case */
7832 	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7833 		netif_stop_queue(tp->dev);
7834 
7835 		/* netif_tx_stop_queue() must be done before checking
7836 		 * tx index in tg3_tx_avail() below, because in
7837 		 * tg3_tx(), we update tx index before checking for
7838 		 * netif_tx_queue_stopped().
7839 		 */
7840 		smp_mb();
7841 		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7842 			return NETDEV_TX_BUSY;
7843 
7844 		netif_wake_queue(tp->dev);
7845 	}
7846 
7847 	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7848 	if (IS_ERR(segs))
7849 		goto tg3_tso_bug_end;
7850 
7851 	do {
7852 		nskb = segs;
7853 		segs = segs->next;
7854 		nskb->next = NULL;
7855 		tg3_start_xmit(nskb, tp->dev);
7856 	} while (segs);
7857 
7858 tg3_tso_bug_end:
7859 	dev_kfree_skb(skb);
7860 
7861 	return NETDEV_TX_OK;
7862 }
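
/* A userspace model (not part of the driver) of the stop/recheck idiom
 * used above and again in tg3_start_xmit(): the producer stops the queue,
 * issues a full barrier, then re-reads the free count; the consumer frees
 * slots, issues a barrier, then checks the stopped flag.  With both sides
 * fenced, one of them is guaranteed to observe the other's update, so the
 * queue cannot stall with room available.  All names are hypothetical.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int free_slots;		/* updated by the reclaim path */
static atomic_bool queue_stopped;	/* updated by the transmit path */

static bool tx_maybe_stop(int needed)	/* transmit side */
{
	if (atomic_load(&free_slots) > needed)
		return false;
	atomic_store(&queue_stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	if (atomic_load(&free_slots) > needed) {
		atomic_store(&queue_stopped, false);	/* raced; wake */
		return false;
	}
	return true;					/* NETDEV_TX_BUSY */
}

static void tx_reclaim(int n)		/* reclaim (tg3_tx()) side */
{
	atomic_fetch_add(&free_slots, n);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&queue_stopped))
		atomic_store(&queue_stopped, false);	/* wake the queue */
}
#endif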
7863 
7864 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7865  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7866  */
7867 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7868 {
7869 	struct tg3 *tp = netdev_priv(dev);
7870 	u32 len, entry, base_flags, mss, vlan = 0;
7871 	u32 budget;
7872 	int i = -1, would_hit_hwbug;
7873 	dma_addr_t mapping;
7874 	struct tg3_napi *tnapi;
7875 	struct netdev_queue *txq;
7876 	unsigned int last;
7877 
7878 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7879 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7880 	if (tg3_flag(tp, ENABLE_TSS))
7881 		tnapi++;
7882 
7883 	budget = tg3_tx_avail(tnapi);
7884 
7885 	/* We are running in BH disabled context with netif_tx_lock
7886 	 * and TX reclaim runs via tp->napi.poll inside of a software
7887 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7888 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7889 	 */
7890 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7891 		if (!netif_tx_queue_stopped(txq)) {
7892 			netif_tx_stop_queue(txq);
7893 
7894 			/* This is a hard error, log it. */
7895 			netdev_err(dev,
7896 				   "BUG! Tx Ring full when queue awake!\n");
7897 		}
7898 		return NETDEV_TX_BUSY;
7899 	}
7900 
7901 	entry = tnapi->tx_prod;
7902 	base_flags = 0;
7903 	if (skb->ip_summed == CHECKSUM_PARTIAL)
7904 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
7905 
7906 	mss = skb_shinfo(skb)->gso_size;
7907 	if (mss) {
7908 		struct iphdr *iph;
7909 		u32 tcp_opt_len, hdr_len;
7910 
7911 		if (skb_header_cloned(skb) &&
7912 		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7913 			goto drop;
7914 
7915 		iph = ip_hdr(skb);
7916 		tcp_opt_len = tcp_optlen(skb);
7917 
7918 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7919 
7920 		if (!skb_is_gso_v6(skb)) {
7921 			iph->check = 0;
7922 			iph->tot_len = htons(mss + hdr_len);
7923 		}
7924 
7925 		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7926 		    tg3_flag(tp, TSO_BUG))
7927 			return tg3_tso_bug(tp, skb);
7928 
7929 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7930 			       TXD_FLAG_CPU_POST_DMA);
7931 
7932 		if (tg3_flag(tp, HW_TSO_1) ||
7933 		    tg3_flag(tp, HW_TSO_2) ||
7934 		    tg3_flag(tp, HW_TSO_3)) {
7935 			tcp_hdr(skb)->check = 0;
7936 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7937 		} else
7938 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7939 								 iph->daddr, 0,
7940 								 IPPROTO_TCP,
7941 								 0);
7942 
7943 		if (tg3_flag(tp, HW_TSO_3)) {
7944 			mss |= (hdr_len & 0xc) << 12;
7945 			if (hdr_len & 0x10)
7946 				base_flags |= 0x00000010;
7947 			base_flags |= (hdr_len & 0x3e0) << 5;
7948 		} else if (tg3_flag(tp, HW_TSO_2))
7949 			mss |= hdr_len << 9;
7950 		else if (tg3_flag(tp, HW_TSO_1) ||
7951 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7952 			if (tcp_opt_len || iph->ihl > 5) {
7953 				int tsflags;
7954 
7955 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7956 				mss |= (tsflags << 11);
7957 			}
7958 		} else {
7959 			if (tcp_opt_len || iph->ihl > 5) {
7960 				int tsflags;
7961 
7962 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7963 				base_flags |= tsflags << 12;
7964 			}
7965 		}
7966 	}
7967 
7968 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7969 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
7970 		base_flags |= TXD_FLAG_JMB_PKT;
7971 
7972 	if (vlan_tx_tag_present(skb)) {
7973 		base_flags |= TXD_FLAG_VLAN;
7974 		vlan = vlan_tx_tag_get(skb);
7975 	}
7976 
7977 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7978 	    tg3_flag(tp, TX_TSTAMP_EN)) {
7979 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7980 		base_flags |= TXD_FLAG_HWTSTAMP;
7981 	}
7982 
7983 	len = skb_headlen(skb);
7984 
7985 	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7986 	if (pci_dma_mapping_error(tp->pdev, mapping))
7987 		goto drop;
7988 
7989 
7990 	tnapi->tx_buffers[entry].skb = skb;
7991 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7992 
7993 	would_hit_hwbug = 0;
7994 
7995 	if (tg3_flag(tp, 5701_DMA_BUG))
7996 		would_hit_hwbug = 1;
7997 
7998 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7999 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8000 			    mss, vlan)) {
8001 		would_hit_hwbug = 1;
8002 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8003 		u32 tmp_mss = mss;
8004 
8005 		if (!tg3_flag(tp, HW_TSO_1) &&
8006 		    !tg3_flag(tp, HW_TSO_2) &&
8007 		    !tg3_flag(tp, HW_TSO_3))
8008 			tmp_mss = 0;
8009 
8010 		/* Now loop through additional data
8011 		 * fragments, and queue them.
8012 		 */
8013 		last = skb_shinfo(skb)->nr_frags - 1;
8014 		for (i = 0; i <= last; i++) {
8015 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8016 
8017 			len = skb_frag_size(frag);
8018 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8019 						   len, DMA_TO_DEVICE);
8020 
8021 			tnapi->tx_buffers[entry].skb = NULL;
8022 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8023 					   mapping);
8024 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8025 				goto dma_error;
8026 
8027 			if (!budget ||
8028 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8029 					    len, base_flags |
8030 					    ((i == last) ? TXD_FLAG_END : 0),
8031 					    tmp_mss, vlan)) {
8032 				would_hit_hwbug = 1;
8033 				break;
8034 			}
8035 		}
8036 	}
8037 
8038 	if (would_hit_hwbug) {
8039 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8040 
8041 		/* If the workaround fails due to memory/mapping
8042 		 * failure, silently drop this packet.
8043 		 */
8044 		entry = tnapi->tx_prod;
8045 		budget = tg3_tx_avail(tnapi);
8046 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8047 						base_flags, mss, vlan))
8048 			goto drop_nofree;
8049 	}
8050 
8051 	skb_tx_timestamp(skb);
8052 	netdev_tx_sent_queue(txq, skb->len);
8053 
8054 	/* Sync BD data before updating mailbox */
8055 	wmb();
8056 
8057 	/* Packets are ready, update Tx producer idx local and on card. */
8058 	tw32_tx_mbox(tnapi->prodmbox, entry);
8059 
8060 	tnapi->tx_prod = entry;
8061 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8062 		netif_tx_stop_queue(txq);
8063 
8064 		/* netif_tx_stop_queue() must be done before checking
8065 		 * tx index in tg3_tx_avail() below, because in
8066 		 * tg3_tx(), we update tx index before checking for
8067 		 * netif_tx_queue_stopped().
8068 		 */
8069 		smp_mb();
8070 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8071 			netif_tx_wake_queue(txq);
8072 	}
8073 
8074 	mmiowb();
8075 	return NETDEV_TX_OK;
8076 
8077 dma_error:
8078 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8079 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8080 drop:
8081 	dev_kfree_skb(skb);
8082 drop_nofree:
8083 	tp->tx_dropped++;
8084 	return NETDEV_TX_OK;
8085 }
8086 
8087 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8088 {
8089 	if (enable) {
8090 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8091 				  MAC_MODE_PORT_MODE_MASK);
8092 
8093 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8094 
8095 		if (!tg3_flag(tp, 5705_PLUS))
8096 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8097 
8098 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8099 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8100 		else
8101 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8102 	} else {
8103 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8104 
8105 		if (tg3_flag(tp, 5705_PLUS) ||
8106 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8107 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8108 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8109 	}
8110 
8111 	tw32(MAC_MODE, tp->mac_mode);
8112 	udelay(40);
8113 }
8114 
8115 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8116 {
8117 	u32 val, bmcr, mac_mode, ptest = 0;
8118 
8119 	tg3_phy_toggle_apd(tp, false);
8120 	tg3_phy_toggle_automdix(tp, false);
8121 
8122 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8123 		return -EIO;
8124 
8125 	bmcr = BMCR_FULLDPLX;
8126 	switch (speed) {
8127 	case SPEED_10:
8128 		break;
8129 	case SPEED_100:
8130 		bmcr |= BMCR_SPEED100;
8131 		break;
8132 	case SPEED_1000:
8133 	default:
8134 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8135 			speed = SPEED_100;
8136 			bmcr |= BMCR_SPEED100;
8137 		} else {
8138 			speed = SPEED_1000;
8139 			bmcr |= BMCR_SPEED1000;
8140 		}
8141 	}
8142 
8143 	if (extlpbk) {
8144 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8145 			tg3_readphy(tp, MII_CTRL1000, &val);
8146 			val |= CTL1000_AS_MASTER |
8147 			       CTL1000_ENABLE_MASTER;
8148 			tg3_writephy(tp, MII_CTRL1000, val);
8149 		} else {
8150 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8151 				MII_TG3_FET_PTEST_TRIM_2;
8152 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8153 		}
8154 	} else
8155 		bmcr |= BMCR_LOOPBACK;
8156 
8157 	tg3_writephy(tp, MII_BMCR, bmcr);
8158 
8159 	/* The write needs to be flushed for the FETs */
8160 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8161 		tg3_readphy(tp, MII_BMCR, &bmcr);
8162 
8163 	udelay(40);
8164 
8165 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8166 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8167 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8168 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8169 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8170 
8171 		/* The write needs to be flushed for the AC131 */
8172 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8173 	}
8174 
8175 	/* Reset to prevent losing 1st rx packet intermittently */
8176 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8177 	    tg3_flag(tp, 5780_CLASS)) {
8178 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8179 		udelay(10);
8180 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8181 	}
8182 
8183 	mac_mode = tp->mac_mode &
8184 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8185 	if (speed == SPEED_1000)
8186 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8187 	else
8188 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8189 
8190 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8191 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8192 
8193 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8194 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8195 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8196 			mac_mode |= MAC_MODE_LINK_POLARITY;
8197 
8198 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8199 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8200 	}
8201 
8202 	tw32(MAC_MODE, mac_mode);
8203 	udelay(40);
8204 
8205 	return 0;
8206 }
8207 
8208 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8209 {
8210 	struct tg3 *tp = netdev_priv(dev);
8211 
8212 	if (features & NETIF_F_LOOPBACK) {
8213 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8214 			return;
8215 
8216 		spin_lock_bh(&tp->lock);
8217 		tg3_mac_loopback(tp, true);
8218 		netif_carrier_on(tp->dev);
8219 		spin_unlock_bh(&tp->lock);
8220 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8221 	} else {
8222 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8223 			return;
8224 
8225 		spin_lock_bh(&tp->lock);
8226 		tg3_mac_loopback(tp, false);
8227 		/* Force link status check */
8228 		tg3_setup_phy(tp, true);
8229 		spin_unlock_bh(&tp->lock);
8230 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8231 	}
8232 }
8233 
8234 static netdev_features_t tg3_fix_features(struct net_device *dev,
8235 	netdev_features_t features)
8236 {
8237 	struct tg3 *tp = netdev_priv(dev);
8238 
8239 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8240 		features &= ~NETIF_F_ALL_TSO;
8241 
8242 	return features;
8243 }
8244 
8245 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8246 {
8247 	netdev_features_t changed = dev->features ^ features;
8248 
8249 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8250 		tg3_set_loopback(dev, features);
8251 
8252 	return 0;
8253 }
8254 
8255 static void tg3_rx_prodring_free(struct tg3 *tp,
8256 				 struct tg3_rx_prodring_set *tpr)
8257 {
8258 	int i;
8259 
8260 	if (tpr != &tp->napi[0].prodring) {
8261 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8262 		     i = (i + 1) & tp->rx_std_ring_mask)
8263 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8264 					tp->rx_pkt_map_sz);
8265 
8266 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8267 			for (i = tpr->rx_jmb_cons_idx;
8268 			     i != tpr->rx_jmb_prod_idx;
8269 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8270 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8271 						TG3_RX_JMB_MAP_SZ);
8272 			}
8273 		}
8274 
8275 		return;
8276 	}
8277 
8278 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8279 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8280 				tp->rx_pkt_map_sz);
8281 
8282 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8283 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8284 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8285 					TG3_RX_JMB_MAP_SZ);
8286 	}
8287 }
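
/* A standalone sketch (not part of the driver) of the index arithmetic
 * above: the rings are power-of-two sized, so "mask = size - 1" lets the
 * walk from consumer to producer wrap with a single AND instead of a
 * modulo.  Userspace demo; the ring size is hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t mask = 512 - 1;	/* ring size must be a power of two */
	uint32_t cons = 510, prod = 3;	/* producer has wrapped around */

	for (uint32_t i = cons; i != prod; i = (i + 1) & mask)
		printf("slot %u\n", i);	/* 510, 511, 0, 1, 2 */
	return 0;
}
#endif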
8288 
8289 /* Initialize rx rings for packet processing.
8290  *
8291  * The chip has been shut down and the driver detached from
8292  * the networking stack, so no interrupts or new tx packets will
8293  * end up in the driver.  tp->{tx,}lock are held and thus
8294  * we may not sleep.
8295  */
8296 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8297 				 struct tg3_rx_prodring_set *tpr)
8298 {
8299 	u32 i, rx_pkt_dma_sz;
8300 
8301 	tpr->rx_std_cons_idx = 0;
8302 	tpr->rx_std_prod_idx = 0;
8303 	tpr->rx_jmb_cons_idx = 0;
8304 	tpr->rx_jmb_prod_idx = 0;
8305 
8306 	if (tpr != &tp->napi[0].prodring) {
8307 		memset(&tpr->rx_std_buffers[0], 0,
8308 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8309 		if (tpr->rx_jmb_buffers)
8310 			memset(&tpr->rx_jmb_buffers[0], 0,
8311 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8312 		goto done;
8313 	}
8314 
8315 	/* Zero out all descriptors. */
8316 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8317 
8318 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8319 	if (tg3_flag(tp, 5780_CLASS) &&
8320 	    tp->dev->mtu > ETH_DATA_LEN)
8321 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8322 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8323 
8324 	/* Initialize invariants of the rings; we only set this
8325 	 * stuff once.  This works because the card does not
8326 	 * write into the rx buffer posting rings.
8327 	 */
8328 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8329 		struct tg3_rx_buffer_desc *rxd;
8330 
8331 		rxd = &tpr->rx_std[i];
8332 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8333 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8334 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8335 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8336 	}
8337 
8338 	/* Now allocate fresh SKBs for each rx ring. */
8339 	for (i = 0; i < tp->rx_pending; i++) {
8340 		unsigned int frag_size;
8341 
8342 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8343 				      &frag_size) < 0) {
8344 			netdev_warn(tp->dev,
8345 				    "Using a smaller RX standard ring. Only "
8346 				    "%d out of %d buffers were allocated "
8347 				    "successfully\n", i, tp->rx_pending);
8348 			if (i == 0)
8349 				goto initfail;
8350 			tp->rx_pending = i;
8351 			break;
8352 		}
8353 	}
8354 
8355 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8356 		goto done;
8357 
8358 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8359 
8360 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8361 		goto done;
8362 
8363 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8364 		struct tg3_rx_buffer_desc *rxd;
8365 
8366 		rxd = &tpr->rx_jmb[i].std;
8367 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8368 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8369 				  RXD_FLAG_JUMBO;
8370 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8371 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8372 	}
8373 
8374 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8375 		unsigned int frag_size;
8376 
8377 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8378 				      &frag_size) < 0) {
8379 			netdev_warn(tp->dev,
8380 				    "Using a smaller RX jumbo ring. Only %d "
8381 				    "out of %d buffers were allocated "
8382 				    "successfully\n", i, tp->rx_jumbo_pending);
8383 			if (i == 0)
8384 				goto initfail;
8385 			tp->rx_jumbo_pending = i;
8386 			break;
8387 		}
8388 	}
8389 
8390 done:
8391 	return 0;
8392 
8393 initfail:
8394 	tg3_rx_prodring_free(tp, tpr);
8395 	return -ENOMEM;
8396 }
8397 
8398 static void tg3_rx_prodring_fini(struct tg3 *tp,
8399 				 struct tg3_rx_prodring_set *tpr)
8400 {
8401 	kfree(tpr->rx_std_buffers);
8402 	tpr->rx_std_buffers = NULL;
8403 	kfree(tpr->rx_jmb_buffers);
8404 	tpr->rx_jmb_buffers = NULL;
8405 	if (tpr->rx_std) {
8406 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8407 				  tpr->rx_std, tpr->rx_std_mapping);
8408 		tpr->rx_std = NULL;
8409 	}
8410 	if (tpr->rx_jmb) {
8411 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8412 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8413 		tpr->rx_jmb = NULL;
8414 	}
8415 }
8416 
8417 static int tg3_rx_prodring_init(struct tg3 *tp,
8418 				struct tg3_rx_prodring_set *tpr)
8419 {
8420 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8421 				      GFP_KERNEL);
8422 	if (!tpr->rx_std_buffers)
8423 		return -ENOMEM;
8424 
8425 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8426 					 TG3_RX_STD_RING_BYTES(tp),
8427 					 &tpr->rx_std_mapping,
8428 					 GFP_KERNEL);
8429 	if (!tpr->rx_std)
8430 		goto err_out;
8431 
8432 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8433 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8434 					      GFP_KERNEL);
8435 		if (!tpr->rx_jmb_buffers)
8436 			goto err_out;
8437 
8438 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8439 						 TG3_RX_JMB_RING_BYTES(tp),
8440 						 &tpr->rx_jmb_mapping,
8441 						 GFP_KERNEL);
8442 		if (!tpr->rx_jmb)
8443 			goto err_out;
8444 	}
8445 
8446 	return 0;
8447 
8448 err_out:
8449 	tg3_rx_prodring_fini(tp, tpr);
8450 	return -ENOMEM;
8451 }
8452 
8453 /* Free up pending packets in all rx/tx rings.
8454  *
8455  * The chip has been shut down and the driver detached from
8456  * the networking stack, so no interrupts or new tx packets will
8457  * end up in the driver.  tp->{tx,}lock is not held and we are not
8458  * in an interrupt context and thus may sleep.
8459  */
8460 static void tg3_free_rings(struct tg3 *tp)
8461 {
8462 	int i, j;
8463 
8464 	for (j = 0; j < tp->irq_cnt; j++) {
8465 		struct tg3_napi *tnapi = &tp->napi[j];
8466 
8467 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8468 
8469 		if (!tnapi->tx_buffers)
8470 			continue;
8471 
8472 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8473 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8474 
8475 			if (!skb)
8476 				continue;
8477 
8478 			tg3_tx_skb_unmap(tnapi, i,
8479 					 skb_shinfo(skb)->nr_frags - 1);
8480 
8481 			dev_kfree_skb_any(skb);
8482 		}
8483 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8484 	}
8485 }
8486 
8487 /* Initialize tx/rx rings for packet processing.
8488  *
8489  * The chip has been shut down and the driver detached from
8490  * the networking stack, so no interrupts or new tx packets will
8491  * end up in the driver.  tp->{tx,}lock are held and thus
8492  * we may not sleep.
8493  */
8494 static int tg3_init_rings(struct tg3 *tp)
8495 {
8496 	int i;
8497 
8498 	/* Free up all the SKBs. */
8499 	tg3_free_rings(tp);
8500 
8501 	for (i = 0; i < tp->irq_cnt; i++) {
8502 		struct tg3_napi *tnapi = &tp->napi[i];
8503 
8504 		tnapi->last_tag = 0;
8505 		tnapi->last_irq_tag = 0;
8506 		tnapi->hw_status->status = 0;
8507 		tnapi->hw_status->status_tag = 0;
8508 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8509 
8510 		tnapi->tx_prod = 0;
8511 		tnapi->tx_cons = 0;
8512 		if (tnapi->tx_ring)
8513 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8514 
8515 		tnapi->rx_rcb_ptr = 0;
8516 		if (tnapi->rx_rcb)
8517 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8518 
8519 		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8520 			tg3_free_rings(tp);
8521 			return -ENOMEM;
8522 		}
8523 	}
8524 
8525 	return 0;
8526 }
8527 
8528 static void tg3_mem_tx_release(struct tg3 *tp)
8529 {
8530 	int i;
8531 
8532 	for (i = 0; i < tp->irq_max; i++) {
8533 		struct tg3_napi *tnapi = &tp->napi[i];
8534 
8535 		if (tnapi->tx_ring) {
8536 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8537 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8538 			tnapi->tx_ring = NULL;
8539 		}
8540 
8541 		kfree(tnapi->tx_buffers);
8542 		tnapi->tx_buffers = NULL;
8543 	}
8544 }
8545 
8546 static int tg3_mem_tx_acquire(struct tg3 *tp)
8547 {
8548 	int i;
8549 	struct tg3_napi *tnapi = &tp->napi[0];
8550 
8551 	/* If multivector TSS is enabled, vector 0 does not handle
8552 	 * tx interrupts.  Don't allocate any resources for it.
8553 	 */
8554 	if (tg3_flag(tp, ENABLE_TSS))
8555 		tnapi++;
8556 
8557 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8558 		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8559 					    TG3_TX_RING_SIZE, GFP_KERNEL);
8560 		if (!tnapi->tx_buffers)
8561 			goto err_out;
8562 
8563 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8564 						    TG3_TX_RING_BYTES,
8565 						    &tnapi->tx_desc_mapping,
8566 						    GFP_KERNEL);
8567 		if (!tnapi->tx_ring)
8568 			goto err_out;
8569 	}
8570 
8571 	return 0;
8572 
8573 err_out:
8574 	tg3_mem_tx_release(tp);
8575 	return -ENOMEM;
8576 }
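
/* The tx_buffers allocation in tg3_mem_tx_acquire() above predates the
 * preference for kcalloc(); an equivalent overflow-safe form in current
 * kernel style would be the sketch below (behaviour is identical here,
 * since both factors are small compile-time constants).
 */
#if 0
		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
					    sizeof(struct tg3_tx_ring_info),
					    GFP_KERNEL);
#endif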
8577 
8578 static void tg3_mem_rx_release(struct tg3 *tp)
8579 {
8580 	int i;
8581 
8582 	for (i = 0; i < tp->irq_max; i++) {
8583 		struct tg3_napi *tnapi = &tp->napi[i];
8584 
8585 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8586 
8587 		if (!tnapi->rx_rcb)
8588 			continue;
8589 
8590 		dma_free_coherent(&tp->pdev->dev,
8591 				  TG3_RX_RCB_RING_BYTES(tp),
8592 				  tnapi->rx_rcb,
8593 				  tnapi->rx_rcb_mapping);
8594 		tnapi->rx_rcb = NULL;
8595 	}
8596 }
8597 
8598 static int tg3_mem_rx_acquire(struct tg3 *tp)
8599 {
8600 	unsigned int i, limit;
8601 
8602 	limit = tp->rxq_cnt;
8603 
8604 	/* If RSS is enabled, we need a (dummy) producer ring
8605 	 * set on vector zero.  This is the true hw prodring.
8606 	 */
8607 	if (tg3_flag(tp, ENABLE_RSS))
8608 		limit++;
8609 
8610 	for (i = 0; i < limit; i++) {
8611 		struct tg3_napi *tnapi = &tp->napi[i];
8612 
8613 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8614 			goto err_out;
8615 
8616 		/* If multivector RSS is enabled, vector 0
8617 		 * does not handle rx or tx interrupts.
8618 		 * Don't allocate any resources for it.
8619 		 */
8620 		if (!i && tg3_flag(tp, ENABLE_RSS))
8621 			continue;
8622 
8623 		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8624 						    TG3_RX_RCB_RING_BYTES(tp),
8625 						    &tnapi->rx_rcb_mapping,
8626 						    GFP_KERNEL);
8627 		if (!tnapi->rx_rcb)
8628 			goto err_out;
8629 	}
8630 
8631 	return 0;
8632 
8633 err_out:
8634 	tg3_mem_rx_release(tp);
8635 	return -ENOMEM;
8636 }
8637 
8638 /*
8639  * Must not be invoked with interrupt sources disabled and
8640  * the hardware shut down.
8641  */
8642 static void tg3_free_consistent(struct tg3 *tp)
8643 {
8644 	int i;
8645 
8646 	for (i = 0; i < tp->irq_cnt; i++) {
8647 		struct tg3_napi *tnapi = &tp->napi[i];
8648 
8649 		if (tnapi->hw_status) {
8650 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8651 					  tnapi->hw_status,
8652 					  tnapi->status_mapping);
8653 			tnapi->hw_status = NULL;
8654 		}
8655 	}
8656 
8657 	tg3_mem_rx_release(tp);
8658 	tg3_mem_tx_release(tp);
8659 
8660 	if (tp->hw_stats) {
8661 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8662 				  tp->hw_stats, tp->stats_mapping);
8663 		tp->hw_stats = NULL;
8664 	}
8665 }
8666 
8667 /*
8668  * Must not be invoked with interrupt sources disabled and
8669  * the hardware shut down.  Can sleep.
8670  */
8671 static int tg3_alloc_consistent(struct tg3 *tp)
8672 {
8673 	int i;
8674 
8675 	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8676 					   sizeof(struct tg3_hw_stats),
8677 					   &tp->stats_mapping, GFP_KERNEL);
8678 	if (!tp->hw_stats)
8679 		goto err_out;
8680 
8681 	for (i = 0; i < tp->irq_cnt; i++) {
8682 		struct tg3_napi *tnapi = &tp->napi[i];
8683 		struct tg3_hw_status *sblk;
8684 
8685 		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8686 						       TG3_HW_STATUS_SIZE,
8687 						       &tnapi->status_mapping,
8688 						       GFP_KERNEL);
8689 		if (!tnapi->hw_status)
8690 			goto err_out;
8691 
8692 		sblk = tnapi->hw_status;
8693 
8694 		if (tg3_flag(tp, ENABLE_RSS)) {
8695 			u16 *prodptr = NULL;
8696 
8697 			/*
8698 			 * When RSS is enabled, the status block format changes
8699 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8700 			 * and "rx_mini_consumer" members get mapped to the
8701 			 * other three rx return ring producer indexes.
8702 			 */
8703 			switch (i) {
8704 			case 1:
8705 				prodptr = &sblk->idx[0].rx_producer;
8706 				break;
8707 			case 2:
8708 				prodptr = &sblk->rx_jumbo_consumer;
8709 				break;
8710 			case 3:
8711 				prodptr = &sblk->reserved;
8712 				break;
8713 			case 4:
8714 				prodptr = &sblk->rx_mini_consumer;
8715 				break;
8716 			}
8717 			tnapi->rx_rcb_prod_idx = prodptr;
8718 		} else {
8719 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8720 		}
8721 	}
8722 
8723 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8724 		goto err_out;
8725 
8726 	return 0;
8727 
8728 err_out:
8729 	tg3_free_consistent(tp);
8730 	return -ENOMEM;
8731 }
8732 
8733 #define MAX_WAIT_CNT 1000
8734 
8735 /* To stop a block, clear the enable bit and poll till it
8736  * clears.  tp->lock is held.
8737  */
8738 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8739 {
8740 	unsigned int i;
8741 	u32 val;
8742 
8743 	if (tg3_flag(tp, 5705_PLUS)) {
8744 		switch (ofs) {
8745 		case RCVLSC_MODE:
8746 		case DMAC_MODE:
8747 		case MBFREE_MODE:
8748 		case BUFMGR_MODE:
8749 		case MEMARB_MODE:
8750 			/* We can't enable/disable these bits of the
8751 			 * 5705/5750, so just say success.
8752 			 */
8753 			return 0;
8754 
8755 		default:
8756 			break;
8757 		}
8758 	}
8759 
8760 	val = tr32(ofs);
8761 	val &= ~enable_bit;
8762 	tw32_f(ofs, val);
8763 
8764 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8765 		if (pci_channel_offline(tp->pdev)) {
8766 			dev_err(&tp->pdev->dev,
8767 				"tg3_stop_block device offline, "
8768 				"ofs=%lx enable_bit=%x\n",
8769 				ofs, enable_bit);
8770 			return -ENODEV;
8771 		}
8772 
8773 		udelay(100);
8774 		val = tr32(ofs);
8775 		if ((val & enable_bit) == 0)
8776 			break;
8777 	}
8778 
8779 	if (i == MAX_WAIT_CNT && !silent) {
8780 		dev_err(&tp->pdev->dev,
8781 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8782 			ofs, enable_bit);
8783 		return -ENODEV;
8784 	}
8785 
8786 	return 0;
8787 }
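
/* A standalone sketch (not part of the driver) of the stop-and-poll idiom
 * in tg3_stop_block() above: clear the enable bit, then read the register
 * back until the hardware reports the block idle or the wait budget is
 * exhausted.  reg_read()/reg_write()/delay_us() are hypothetical stand-ins
 * for tr32()/tw32_f()/udelay().
 */
#if 0
#include <stdint.h>

extern uint32_t reg_read(unsigned long ofs);		/* hypothetical */
extern void reg_write(unsigned long ofs, uint32_t v);	/* hypothetical */
extern void delay_us(unsigned long usecs);		/* hypothetical */

static int stop_block(unsigned long ofs, uint32_t enable_bit)
{
	int i;

	reg_write(ofs, reg_read(ofs) & ~enable_bit);

	for (i = 0; i < 1000; i++) {	/* i.e. MAX_WAIT_CNT */
		delay_us(100);
		if (!(reg_read(ofs) & enable_bit))
			return 0;	/* block has stopped */
	}
	return -1;			/* timed out */
}
#endif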
8788 
8789 /* tp->lock is held. */
8790 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8791 {
8792 	int i, err;
8793 
8794 	tg3_disable_ints(tp);
8795 
8796 	if (pci_channel_offline(tp->pdev)) {
8797 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8798 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8799 		err = -ENODEV;
8800 		goto err_no_dev;
8801 	}
8802 
8803 	tp->rx_mode &= ~RX_MODE_ENABLE;
8804 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8805 	udelay(10);
8806 
8807 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8808 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8809 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8810 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8811 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8812 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8813 
8814 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8815 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8816 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8817 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8818 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8819 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8820 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8821 
8822 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8823 	tw32_f(MAC_MODE, tp->mac_mode);
8824 	udelay(40);
8825 
8826 	tp->tx_mode &= ~TX_MODE_ENABLE;
8827 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8828 
8829 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8830 		udelay(100);
8831 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8832 			break;
8833 	}
8834 	if (i >= MAX_WAIT_CNT) {
8835 		dev_err(&tp->pdev->dev,
8836 			"%s timed out, TX_MODE_ENABLE will not clear "
8837 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8838 		err |= -ENODEV;
8839 	}
8840 
8841 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8842 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8843 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8844 
8845 	tw32(FTQ_RESET, 0xffffffff);
8846 	tw32(FTQ_RESET, 0x00000000);
8847 
8848 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8849 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8850 
8851 err_no_dev:
8852 	for (i = 0; i < tp->irq_cnt; i++) {
8853 		struct tg3_napi *tnapi = &tp->napi[i];
8854 		if (tnapi->hw_status)
8855 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8856 	}
8857 
8858 	return err;
8859 }
8860 
8861 /* Save PCI command register before chip reset */
8862 static void tg3_save_pci_state(struct tg3 *tp)
8863 {
8864 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8865 }
8866 
8867 /* Restore PCI state after chip reset */
8868 static void tg3_restore_pci_state(struct tg3 *tp)
8869 {
8870 	u32 val;
8871 
8872 	/* Re-enable indirect register accesses. */
8873 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8874 			       tp->misc_host_ctrl);
8875 
8876 	/* Set MAX PCI retry to zero. */
8877 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8878 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8879 	    tg3_flag(tp, PCIX_MODE))
8880 		val |= PCISTATE_RETRY_SAME_DMA;
8881 	/* Allow reads and writes to the APE register and memory space. */
8882 	if (tg3_flag(tp, ENABLE_APE))
8883 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8884 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8885 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8886 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8887 
8888 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8889 
8890 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8891 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8892 				      tp->pci_cacheline_sz);
8893 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8894 				      tp->pci_lat_timer);
8895 	}
8896 
8897 	/* Make sure PCI-X relaxed ordering bit is clear. */
8898 	if (tg3_flag(tp, PCIX_MODE)) {
8899 		u16 pcix_cmd;
8900 
8901 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8902 				     &pcix_cmd);
8903 		pcix_cmd &= ~PCI_X_CMD_ERO;
8904 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8905 				      pcix_cmd);
8906 	}
8907 
8908 	if (tg3_flag(tp, 5780_CLASS)) {
8909 
8910 		/* Chip reset on 5780 will reset MSI enable bit,
8911 		 * so we need to restore it.
8912 		 */
8913 		if (tg3_flag(tp, USING_MSI)) {
8914 			u16 ctrl;
8915 
8916 			pci_read_config_word(tp->pdev,
8917 					     tp->msi_cap + PCI_MSI_FLAGS,
8918 					     &ctrl);
8919 			pci_write_config_word(tp->pdev,
8920 					      tp->msi_cap + PCI_MSI_FLAGS,
8921 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8922 			val = tr32(MSGINT_MODE);
8923 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8924 		}
8925 	}
8926 }
8927 
8928 /* tp->lock is held. */
8929 static int tg3_chip_reset(struct tg3 *tp)
8930 {
8931 	u32 val;
8932 	void (*write_op)(struct tg3 *, u32, u32);
8933 	int i, err;
8934 
8935 	tg3_nvram_lock(tp);
8936 
8937 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8938 
8939 	/* No matching tg3_nvram_unlock() after this because
8940 	 * chip reset below will undo the nvram lock.
8941 	 */
8942 	tp->nvram_lock_cnt = 0;
8943 
8944 	/* GRC_MISC_CFG core clock reset will clear the memory
8945 	 * enable bit in PCI register 4 and the MSI enable bit
8946 	 * on some chips, so we save relevant registers here.
8947 	 */
8948 	tg3_save_pci_state(tp);
8949 
8950 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8951 	    tg3_flag(tp, 5755_PLUS))
8952 		tw32(GRC_FASTBOOT_PC, 0);
8953 
8954 	/*
8955 	 * We must avoid the readl() that normally takes place.
8956 	 * It locks machines, causes machine checks, and other
8957 	 * fun things.  So, temporarily disable the 5701
8958 	 * hardware workaround, while we do the reset.
8959 	 */
8960 	write_op = tp->write32;
8961 	if (write_op == tg3_write_flush_reg32)
8962 		tp->write32 = tg3_write32;
8963 
8964 	/* Prevent the irq handler from reading or writing PCI registers
8965 	 * during chip reset when the memory enable bit in the PCI command
8966 	 * register may be cleared.  The chip does not generate interrupt
8967 	 * register may be cleared.  The chip does not generate interrupts
8968 	 * sharing or irqpoll.
8969 	 */
8970 	tg3_flag_set(tp, CHIP_RESETTING);
8971 	for (i = 0; i < tp->irq_cnt; i++) {
8972 		struct tg3_napi *tnapi = &tp->napi[i];
8973 		if (tnapi->hw_status) {
8974 			tnapi->hw_status->status = 0;
8975 			tnapi->hw_status->status_tag = 0;
8976 		}
8977 		tnapi->last_tag = 0;
8978 		tnapi->last_irq_tag = 0;
8979 	}
8980 	smp_mb();
8981 
8982 	for (i = 0; i < tp->irq_cnt; i++)
8983 		synchronize_irq(tp->napi[i].irq_vec);
8984 
8985 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8986 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8987 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8988 	}
8989 
8990 	/* do the reset */
8991 	val = GRC_MISC_CFG_CORECLK_RESET;
8992 
8993 	if (tg3_flag(tp, PCI_EXPRESS)) {
8994 		/* Force PCIe 1.0a mode */
8995 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8996 		    !tg3_flag(tp, 57765_PLUS) &&
8997 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
8998 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8999 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9000 
9001 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9002 			tw32(GRC_MISC_CFG, (1 << 29));
9003 			val |= (1 << 29);
9004 		}
9005 	}
9006 
9007 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9008 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9009 		tw32(GRC_VCPU_EXT_CTRL,
9010 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9011 	}
9012 
9013 	/* Manage gphy power for all CPMU-absent PCIe devices. */
9014 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9015 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9016 
9017 	tw32(GRC_MISC_CFG, val);
9018 
9019 	/* restore 5701 hardware bug workaround write method */
9020 	tp->write32 = write_op;
9021 
9022 	/* Unfortunately, we have to delay before the PCI read back.
9023 	 * Some 575X chips will not even respond to a PCI cfg access
9024 	 * when the reset command is given to the chip.
9025 	 *
9026 	 * How do these hardware designers expect things to work
9027 	 * properly if the PCI write is posted for a long period
9028 	 * of time?  It is always necessary to have some method by
9029 	 * which a register read back can occur to push the write
9030 	 * which a register read back can occur to push out the
9031 	 * write that performs the reset.
9032 	 * For most tg3 variants the trick below was working.
9033 	 * Ho hum...
9034 	 */
9035 	udelay(120);
9036 
9037 	/* Flush PCI posted writes.  The normal MMIO registers
9038 	 * are inaccessible at this time so this is the only
9039 	 * way to do this reliably (actually, this is no longer
9040 	 * the case, see above).  I tried to use indirect
9041 	 * register read/write but this upset some 5701 variants.
9042 	 */
9043 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9044 
9045 	udelay(120);
9046 
9047 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9048 		u16 val16;
9049 
9050 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9051 			int j;
9052 			u32 cfg_val;
9053 
9054 			/* Wait for link training to complete.  */
9055 			for (j = 0; j < 5000; j++)
9056 				udelay(100);
9057 
9058 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9059 			pci_write_config_dword(tp->pdev, 0xc4,
9060 					       cfg_val | (1 << 15));
9061 		}
9062 
9063 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9064 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9065 		/*
9066 		 * Older PCIe devices only support the 128 byte
9067 		 * Older PCIe devices only support the 128-byte
9068 		 */
9069 		if (!tg3_flag(tp, CPMU_PRESENT))
9070 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9071 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9072 
9073 		/* Clear error status */
9074 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9075 				      PCI_EXP_DEVSTA_CED |
9076 				      PCI_EXP_DEVSTA_NFED |
9077 				      PCI_EXP_DEVSTA_FED |
9078 				      PCI_EXP_DEVSTA_URD);
9079 	}
9080 
9081 	tg3_restore_pci_state(tp);
9082 
9083 	tg3_flag_clear(tp, CHIP_RESETTING);
9084 	tg3_flag_clear(tp, ERROR_PROCESSED);
9085 
9086 	val = 0;
9087 	if (tg3_flag(tp, 5780_CLASS))
9088 		val = tr32(MEMARB_MODE);
9089 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9090 
9091 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9092 		tg3_stop_fw(tp);
9093 		tw32(0x5000, 0x400);
9094 	}
9095 
9096 	if (tg3_flag(tp, IS_SSB_CORE)) {
9097 		/*
9098 		 * BCM4785: In order to avoid repercussions from using
9099 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9100 		 * which is not required for normal operation.
9101 		 */
9102 		tg3_stop_fw(tp);
9103 		tg3_halt_cpu(tp, RX_CPU_BASE);
9104 	}
9105 
9106 	err = tg3_poll_fw(tp);
9107 	if (err)
9108 		return err;
9109 
9110 	tw32(GRC_MODE, tp->grc_mode);
9111 
9112 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9113 		val = tr32(0xc4);
9114 
9115 		tw32(0xc4, val | (1 << 15));
9116 	}
9117 
9118 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9119 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9120 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9121 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9122 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9123 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9124 	}
9125 
9126 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9127 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9128 		val = tp->mac_mode;
9129 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9130 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9131 		val = tp->mac_mode;
9132 	} else
9133 		val = 0;
9134 
9135 	tw32_f(MAC_MODE, val);
9136 	udelay(40);
9137 
9138 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9139 
9140 	tg3_mdio_start(tp);
9141 
9142 	if (tg3_flag(tp, PCI_EXPRESS) &&
9143 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9144 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9145 	    !tg3_flag(tp, 57765_PLUS)) {
9146 		val = tr32(0x7c00);
9147 
9148 		tw32(0x7c00, val | (1 << 25));
9149 	}
9150 
9151 	if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9152 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9153 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9154 	}
9155 
9156 	/* Reprobe ASF enable state.  */
9157 	tg3_flag_clear(tp, ENABLE_ASF);
9158 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9159 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9160 
9161 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9162 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9163 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9164 		u32 nic_cfg;
9165 
9166 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9167 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9168 			tg3_flag_set(tp, ENABLE_ASF);
9169 			tp->last_event_jiffies = jiffies;
9170 			if (tg3_flag(tp, 5750_PLUS))
9171 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9172 
9173 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9174 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9175 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9176 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9177 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9178 		}
9179 	}
9180 
9181 	return 0;
9182 }
9183 
9184 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9185 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9186 
9187 /* tp->lock is held. */
9188 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9189 {
9190 	int err;
9191 
9192 	tg3_stop_fw(tp);
9193 
9194 	tg3_write_sig_pre_reset(tp, kind);
9195 
9196 	tg3_abort_hw(tp, silent);
9197 	err = tg3_chip_reset(tp);
9198 
9199 	__tg3_set_mac_addr(tp, false);
9200 
9201 	tg3_write_sig_legacy(tp, kind);
9202 	tg3_write_sig_post_reset(tp, kind);
9203 
9204 	if (tp->hw_stats) {
9205 		/* Save the stats across chip resets... */
9206 		tg3_get_nstats(tp, &tp->net_stats_prev);
9207 		tg3_get_estats(tp, &tp->estats_prev);
9208 
9209 		/* And make sure the next sample is new data */
9210 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9211 	}
9212 
9213 	return err;
9214 }
9215 
9216 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9217 {
9218 	struct tg3 *tp = netdev_priv(dev);
9219 	struct sockaddr *addr = p;
9220 	int err = 0;
9221 	bool skip_mac_1 = false;
9222 
9223 	if (!is_valid_ether_addr(addr->sa_data))
9224 		return -EADDRNOTAVAIL;
9225 
9226 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9227 
9228 	if (!netif_running(dev))
9229 		return 0;
9230 
9231 	if (tg3_flag(tp, ENABLE_ASF)) {
9232 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9233 
9234 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9235 		addr0_low = tr32(MAC_ADDR_0_LOW);
9236 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9237 		addr1_low = tr32(MAC_ADDR_1_LOW);
9238 
9239 		/* Skip MAC addr 1 if ASF is using it. */
9240 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9241 		    !(addr1_high == 0 && addr1_low == 0))
9242 			skip_mac_1 = true;
9243 	}
9244 	spin_lock_bh(&tp->lock);
9245 	__tg3_set_mac_addr(tp, skip_mac_1);
9246 	spin_unlock_bh(&tp->lock);
9247 
9248 	return err;
9249 }
9250 
9251 /* tp->lock is held. */
9252 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9253 			   dma_addr_t mapping, u32 maxlen_flags,
9254 			   u32 nic_addr)
9255 {
9256 	tg3_write_mem(tp,
9257 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9258 		      ((u64) mapping >> 32));
9259 	tg3_write_mem(tp,
9260 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9261 		      ((u64) mapping & 0xffffffff));
9262 	tg3_write_mem(tp,
9263 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9264 		       maxlen_flags);
9265 
9266 	if (!tg3_flag(tp, 5705_PLUS))
9267 		tg3_write_mem(tp,
9268 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9269 			      nic_addr);
9270 }
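
/* A standalone sketch (not part of the driver) of the address split used
 * above and in tg3_tx_set_bd(): the hardware consumes a 64-bit DMA address
 * as two 32-bit halves written to adjacent locations.  Userspace demo with
 * an arbitrary value.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mapping = 0x0000004512340000ull;	/* hypothetical */
	uint32_t hi = (uint32_t)(mapping >> 32);
	uint32_t lo = (uint32_t)(mapping & 0xffffffff);

	printf("hi=%08x lo=%08x\n", hi, lo);	/* hi=00000045 lo=12340000 */
	return 0;
}
#endif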
9271 
9272 
9273 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9274 {
9275 	int i = 0;
9276 
9277 	if (!tg3_flag(tp, ENABLE_TSS)) {
9278 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9279 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9280 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9281 	} else {
9282 		tw32(HOSTCC_TXCOL_TICKS, 0);
9283 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9284 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9285 
9286 		for (; i < tp->txq_cnt; i++) {
9287 			u32 reg;
9288 
9289 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9290 			tw32(reg, ec->tx_coalesce_usecs);
9291 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9292 			tw32(reg, ec->tx_max_coalesced_frames);
9293 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9294 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9295 		}
9296 	}
9297 
9298 	for (; i < tp->irq_max - 1; i++) {
9299 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9300 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9301 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9302 	}
9303 }
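
/* A standalone sketch (not part of the driver) of the register addressing
 * above: each additional host-coalescing vector owns a bank of registers
 * 0x18 bytes past the previous one, so vector i's register is simply
 * base + i * 0x18.  The base value below is hypothetical.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned long base = 0x3c80;	/* stand-in for a VEC1 offset */
	int i;

	for (i = 0; i < 4; i++)
		printf("vec%d reg at %#lx\n", i + 1, base + i * 0x18);
	return 0;
}
#endif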
9304 
9305 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9306 {
9307 	int i = 0;
9308 	u32 limit = tp->rxq_cnt;
9309 
9310 	if (!tg3_flag(tp, ENABLE_RSS)) {
9311 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9312 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9313 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9314 		limit--;
9315 	} else {
9316 		tw32(HOSTCC_RXCOL_TICKS, 0);
9317 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9318 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9319 	}
9320 
9321 	for (; i < limit; i++) {
9322 		u32 reg;
9323 
9324 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9325 		tw32(reg, ec->rx_coalesce_usecs);
9326 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9327 		tw32(reg, ec->rx_max_coalesced_frames);
9328 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9329 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9330 	}
9331 
9332 	for (; i < tp->irq_max - 1; i++) {
9333 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9334 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9335 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9336 	}
9337 }
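
/* Illustrative note: each extra MSI-X vector's coalescing registers
 * are spaced 0x18 bytes apart, which is the addressing the two loops
 * above rely on.  For a 1-based extra vector index n (sketch):
 *
 *	u32 ticks_reg = HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18;
 */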
9338 
9339 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9340 {
9341 	tg3_coal_tx_init(tp, ec);
9342 	tg3_coal_rx_init(tp, ec);
9343 
9344 	if (!tg3_flag(tp, 5705_PLUS)) {
9345 		u32 val = ec->stats_block_coalesce_usecs;
9346 
9347 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9348 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9349 
9350 		if (!tp->link_up)
9351 			val = 0;
9352 
9353 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9354 	}
9355 }
9356 
9357 /* tp->lock is held. */
9358 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9359 {
9360 	u32 txrcb, limit;
9361 
9362 	/* Disable all transmit rings but the first. */
9363 	if (!tg3_flag(tp, 5705_PLUS))
9364 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9365 	else if (tg3_flag(tp, 5717_PLUS))
9366 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9367 	else if (tg3_flag(tp, 57765_CLASS) ||
9368 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9369 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9370 	else
9371 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9372 
9373 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9374 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9375 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9376 			      BDINFO_FLAGS_DISABLED);
9377 }
9378 
9379 /* tp->lock is held. */
9380 static void tg3_tx_rcbs_init(struct tg3 *tp)
9381 {
9382 	int i = 0;
9383 	u32 txrcb = NIC_SRAM_SEND_RCB;
9384 
9385 	if (tg3_flag(tp, ENABLE_TSS))
9386 		i++;
9387 
9388 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9389 		struct tg3_napi *tnapi = &tp->napi[i];
9390 
9391 		if (!tnapi->tx_ring)
9392 			continue;
9393 
9394 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9395 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9396 			       NIC_SRAM_TX_BUFFER_DESC);
9397 	}
9398 }
9399 
9400 /* tp->lock is held. */
9401 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9402 {
9403 	u32 rxrcb, limit;
9404 
9405 	/* Disable all receive return rings but the first. */
9406 	if (tg3_flag(tp, 5717_PLUS))
9407 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9408 	else if (!tg3_flag(tp, 5705_PLUS))
9409 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9410 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9411 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9412 		 tg3_flag(tp, 57765_CLASS))
9413 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9414 	else
9415 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9416 
9417 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9418 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9419 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9420 			      BDINFO_FLAGS_DISABLED);
9421 }
9422 
9423 /* tp->lock is held. */
9424 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9425 {
9426 	int i = 0;
9427 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9428 
9429 	if (tg3_flag(tp, ENABLE_RSS))
9430 		i++;
9431 
9432 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9433 		struct tg3_napi *tnapi = &tp->napi[i];
9434 
9435 		if (!tnapi->rx_rcb)
9436 			continue;
9437 
9438 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9439 			       (tp->rx_ret_ring_mask + 1) <<
9440 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9441 	}
9442 }
9443 
9444 /* tp->lock is held. */
9445 static void tg3_rings_reset(struct tg3 *tp)
9446 {
9447 	int i;
9448 	u32 stblk;
9449 	struct tg3_napi *tnapi = &tp->napi[0];
9450 
9451 	tg3_tx_rcbs_disable(tp);
9452 
9453 	tg3_rx_ret_rcbs_disable(tp);
9454 
9455 	/* Disable interrupts */
9456 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9457 	tp->napi[0].chk_msi_cnt = 0;
9458 	tp->napi[0].last_rx_cons = 0;
9459 	tp->napi[0].last_tx_cons = 0;
9460 
9461 	/* Zero mailbox registers. */
9462 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9463 		for (i = 1; i < tp->irq_max; i++) {
9464 			tp->napi[i].tx_prod = 0;
9465 			tp->napi[i].tx_cons = 0;
9466 			if (tg3_flag(tp, ENABLE_TSS))
9467 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9468 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9469 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9470 			tp->napi[i].chk_msi_cnt = 0;
9471 			tp->napi[i].last_rx_cons = 0;
9472 			tp->napi[i].last_tx_cons = 0;
9473 		}
9474 		if (!tg3_flag(tp, ENABLE_TSS))
9475 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9476 	} else {
9477 		tp->napi[0].tx_prod = 0;
9478 		tp->napi[0].tx_cons = 0;
9479 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9480 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9481 	}
9482 
9483 	/* Make sure the NIC-based send BD rings are disabled. */
9484 	if (!tg3_flag(tp, 5705_PLUS)) {
9485 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9486 		for (i = 0; i < 16; i++)
9487 			tw32_tx_mbox(mbox + i * 8, 0);
9488 	}
9489 
9490 	/* Clear status block in ram. */
9491 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9492 
9493 	/* Set status block DMA address */
9494 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9495 	     ((u64) tnapi->status_mapping >> 32));
9496 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9497 	     ((u64) tnapi->status_mapping & 0xffffffff));
9498 
9499 	stblk = HOSTCC_STATBLCK_RING1;
9500 
9501 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9502 		u64 mapping = (u64)tnapi->status_mapping;
9503 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9504 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9505 		stblk += 8;
9506 
9507 		/* Clear status block in ram. */
9508 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9509 	}
9510 
9511 	tg3_tx_rcbs_init(tp);
9512 	tg3_rx_ret_rcbs_init(tp);
9513 }
9514 
9515 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9516 {
9517 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9518 
9519 	if (!tg3_flag(tp, 5750_PLUS) ||
9520 	    tg3_flag(tp, 5780_CLASS) ||
9521 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9522 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9523 	    tg3_flag(tp, 57765_PLUS))
9524 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9525 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9526 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9527 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9528 	else
9529 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9530 
9531 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9532 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9533 
9534 	val = min(nic_rep_thresh, host_rep_thresh);
9535 	tw32(RCVBDI_STD_THRESH, val);
9536 
9537 	if (tg3_flag(tp, 57765_PLUS))
9538 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9539 
9540 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9541 		return;
9542 
9543 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9544 
9545 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9546 
9547 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9548 	tw32(RCVBDI_JUMBO_THRESH, val);
9549 
9550 	if (tg3_flag(tp, 57765_PLUS))
9551 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9552 }
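
/* Worked example for the standard ring threshold above, with
 * hypothetical numbers: bdcache_maxcnt = 128, rx_std_max_post = 32,
 * rx_pending = 200:
 *
 *	nic_rep_thresh  = min(128 / 2, 32) = 32
 *	host_rep_thresh = max(200 / 8, 1)  = 25
 *	RCVBDI_STD_THRESH = min(32, 25)    = 25
 *
 * i.e. the chip requests replenishment once the smaller of the
 * NIC-side and host-side budgets worth of buffers has been consumed.
 */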
9553 
9554 static inline u32 calc_crc(unsigned char *buf, int len)
9555 {
9556 	u32 reg;
9557 	u32 tmp;
9558 	int j, k;
9559 
9560 	reg = 0xffffffff;
9561 
9562 	for (j = 0; j < len; j++) {
9563 		reg ^= buf[j];
9564 
9565 		for (k = 0; k < 8; k++) {
9566 			tmp = reg & 0x01;
9567 
9568 			reg >>= 1;
9569 
9570 			if (tmp)
9571 				reg ^= 0xedb88320;
9572 		}
9573 	}
9574 
9575 	return ~reg;
9576 }
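
/* calc_crc() is an open-coded little-endian CRC-32 (polynomial
 * 0xedb88320, initial value ~0, final inversion), the same CRC used
 * for the Ethernet FCS.  It appears to be equivalent to the generic
 * helper (an assumption, shown only for orientation):
 *
 *	crc = crc32_le(~0, buf, len) ^ 0xffffffff;
 */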
9577 
9578 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9579 {
9580 	/* accept or reject all multicast frames */
9581 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9582 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9583 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9584 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9585 }
9586 
9587 static void __tg3_set_rx_mode(struct net_device *dev)
9588 {
9589 	struct tg3 *tp = netdev_priv(dev);
9590 	u32 rx_mode;
9591 
9592 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9593 				  RX_MODE_KEEP_VLAN_TAG);
9594 
9595 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9596 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9597 	 * flag clear.
9598 	 */
9599 	if (!tg3_flag(tp, ENABLE_ASF))
9600 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9601 #endif
9602 
9603 	if (dev->flags & IFF_PROMISC) {
9604 		/* Promiscuous mode. */
9605 		rx_mode |= RX_MODE_PROMISC;
9606 	} else if (dev->flags & IFF_ALLMULTI) {
9607 		/* Accept all multicast. */
9608 		tg3_set_multi(tp, 1);
9609 	} else if (netdev_mc_empty(dev)) {
9610 		/* Reject all multicast. */
9611 		tg3_set_multi(tp, 0);
9612 	} else {
9613 		/* Accept one or more multicast(s). */
9614 		struct netdev_hw_addr *ha;
9615 		u32 mc_filter[4] = { 0, };
9616 		u32 regidx;
9617 		u32 bit;
9618 		u32 crc;
9619 
9620 		netdev_for_each_mc_addr(ha, dev) {
9621 			crc = calc_crc(ha->addr, ETH_ALEN);
9622 			bit = ~crc & 0x7f;
9623 			regidx = (bit & 0x60) >> 5;
9624 			bit &= 0x1f;
9625 			mc_filter[regidx] |= (1 << bit);
9626 		}
9627 
9628 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9629 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9630 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9631 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9632 	}
9633 
9634 	if (rx_mode != tp->rx_mode) {
9635 		tp->rx_mode = rx_mode;
9636 		tw32_f(MAC_RX_MODE, rx_mode);
9637 		udelay(10);
9638 	}
9639 }
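
/* Sketch of how one multicast address lands in the 128-bit hash
 * filter programmed above (the address is an assumed example):
 *
 *	u8 mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u32 crc    = calc_crc(mc, ETH_ALEN);
 *	u32 bit    = ~crc & 0x7f;        hash bit index, 0..127
 *	u32 regidx = (bit & 0x60) >> 5;  selects MAC_HASH_REG_0..3
 *
 * with bit & 0x1f giving the bit position inside that register, so
 * each address sets exactly one of the 128 filter bits.
 */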
9640 
9641 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9642 {
9643 	int i;
9644 
9645 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9646 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9647 }
9648 
9649 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9650 {
9651 	int i;
9652 
9653 	if (!tg3_flag(tp, SUPPORT_MSIX))
9654 		return;
9655 
9656 	if (tp->rxq_cnt == 1) {
9657 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9658 		return;
9659 	}
9660 
9661 	/* Validate table against current IRQ count */
9662 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9663 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9664 			break;
9665 	}
9666 
9667 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9668 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9669 }
9670 
9671 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9672 {
9673 	int i = 0;
9674 	u32 reg = MAC_RSS_INDIR_TBL_0;
9675 
9676 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9677 		u32 val = tp->rss_ind_tbl[i];
9678 		i++;
9679 		for (; i % 8; i++) {
9680 			val <<= 4;
9681 			val |= tp->rss_ind_tbl[i];
9682 		}
9683 		tw32(reg, val);
9684 		reg += 4;
9685 	}
9686 }
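
/* The loop above packs eight 4-bit indirection entries into each
 * 32-bit register, most significant nibble first; schematically:
 *
 *	val = (tbl[i] << 28) | (tbl[i + 1] << 24) | ... | tbl[i + 7];
 *
 * so the TG3_RSS_INDIR_TBL_SIZE entries occupy consecutive registers
 * starting at MAC_RSS_INDIR_TBL_0.
 */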
9687 
9688 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9689 {
9690 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9691 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9692 	else
9693 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9694 }
9695 
9696 /* tp->lock is held. */
9697 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9698 {
9699 	u32 val, rdmac_mode;
9700 	int i, err, limit;
9701 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9702 
9703 	tg3_disable_ints(tp);
9704 
9705 	tg3_stop_fw(tp);
9706 
9707 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9708 
9709 	if (tg3_flag(tp, INIT_COMPLETE))
9710 		tg3_abort_hw(tp, 1);
9711 
9712 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9713 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9714 		tg3_phy_pull_config(tp);
9715 		tg3_eee_pull_config(tp, NULL);
9716 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9717 	}
9718 
9719 	/* Enable MAC control of LPI */
9720 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9721 		tg3_setup_eee(tp);
9722 
9723 	if (reset_phy)
9724 		tg3_phy_reset(tp);
9725 
9726 	err = tg3_chip_reset(tp);
9727 	if (err)
9728 		return err;
9729 
9730 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9731 
9732 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9733 		val = tr32(TG3_CPMU_CTRL);
9734 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9735 		tw32(TG3_CPMU_CTRL, val);
9736 
9737 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9738 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9739 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9740 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9741 
9742 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9743 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9744 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9745 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9746 
9747 		val = tr32(TG3_CPMU_HST_ACC);
9748 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9749 		val |= CPMU_HST_ACC_MACCLK_6_25;
9750 		tw32(TG3_CPMU_HST_ACC, val);
9751 	}
9752 
9753 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9754 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9755 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9756 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9757 		tw32(PCIE_PWR_MGMT_THRESH, val);
9758 
9759 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9760 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9761 
9762 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9763 
9764 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9765 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9766 	}
9767 
9768 	if (tg3_flag(tp, L1PLLPD_EN)) {
9769 		u32 grc_mode = tr32(GRC_MODE);
9770 
9771 		/* Access the lower 1K of PL PCIE block registers. */
9772 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9773 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9774 
9775 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9776 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9777 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9778 
9779 		tw32(GRC_MODE, grc_mode);
9780 	}
9781 
9782 	if (tg3_flag(tp, 57765_CLASS)) {
9783 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9784 			u32 grc_mode = tr32(GRC_MODE);
9785 
9786 			/* Access the lower 1K of PL PCIE block registers. */
9787 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9788 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9789 
9790 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9791 				   TG3_PCIE_PL_LO_PHYCTL5);
9792 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9793 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9794 
9795 			tw32(GRC_MODE, grc_mode);
9796 		}
9797 
9798 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9799 			u32 grc_mode;
9800 
9801 			/* Fix transmit hangs */
9802 			val = tr32(TG3_CPMU_PADRNG_CTL);
9803 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9804 			tw32(TG3_CPMU_PADRNG_CTL, val);
9805 
9806 			grc_mode = tr32(GRC_MODE);
9807 
9808 			/* Access the lower 1K of DL PCIE block registers. */
9809 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9810 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9811 
9812 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9813 				   TG3_PCIE_DL_LO_FTSMAX);
9814 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9815 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9816 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9817 
9818 			tw32(GRC_MODE, grc_mode);
9819 		}
9820 
9821 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9822 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9823 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9824 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9825 	}
9826 
9827 	/* This works around an issue with Athlon chipsets on
9828 	 * B3 tigon3 silicon.  This bit has no effect on any
9829 	 * other revision.  But do not set this on PCI Express
9830 	 * chips and don't even touch the clocks if the CPMU is present.
9831 	 */
9832 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9833 		if (!tg3_flag(tp, PCI_EXPRESS))
9834 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9835 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9836 	}
9837 
9838 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9839 	    tg3_flag(tp, PCIX_MODE)) {
9840 		val = tr32(TG3PCI_PCISTATE);
9841 		val |= PCISTATE_RETRY_SAME_DMA;
9842 		tw32(TG3PCI_PCISTATE, val);
9843 	}
9844 
9845 	if (tg3_flag(tp, ENABLE_APE)) {
9846 		/* Allow reads and writes to the
9847 		 * APE register and memory space.
9848 		 */
9849 		val = tr32(TG3PCI_PCISTATE);
9850 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9851 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9852 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9853 		tw32(TG3PCI_PCISTATE, val);
9854 	}
9855 
9856 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9857 		/* Enable some hw fixes.  */
9858 		val = tr32(TG3PCI_MSI_DATA);
9859 		val |= (1 << 26) | (1 << 28) | (1 << 29);
9860 		tw32(TG3PCI_MSI_DATA, val);
9861 	}
9862 
9863 	/* Descriptor ring init may make accesses to the
9864 	 * NIC SRAM area to setup the TX descriptors, so we
9865 	 * can only do this after the hardware has been
9866 	 * successfully reset.
9867 	 */
9868 	err = tg3_init_rings(tp);
9869 	if (err)
9870 		return err;
9871 
9872 	if (tg3_flag(tp, 57765_PLUS)) {
9873 		val = tr32(TG3PCI_DMA_RW_CTRL) &
9874 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9875 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9876 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9877 		if (!tg3_flag(tp, 57765_CLASS) &&
9878 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
9879 		    tg3_asic_rev(tp) != ASIC_REV_5762)
9880 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
9881 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9882 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9883 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
9884 		/* This value is determined during the probe-time DMA
9885 		 * engine test, tg3_test_dma.
9886 		 */
9887 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9888 	}
9889 
9890 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9891 			  GRC_MODE_4X_NIC_SEND_RINGS |
9892 			  GRC_MODE_NO_TX_PHDR_CSUM |
9893 			  GRC_MODE_NO_RX_PHDR_CSUM);
9894 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9895 
9896 	/* Pseudo-header checksum is done by hardware logic and not
9897 	 * the offload processors, so make the chip do the pseudo-
9898 	 * header checksums on receive.  For transmit it is more
9899 	 * convenient to do the pseudo-header checksum in software
9900 	 * as Linux does that on transmit for us in all cases.
9901 	 */
9902 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9903 
9904 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9905 	if (tp->rxptpctl)
9906 		tw32(TG3_RX_PTP_CTL,
9907 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9908 
9909 	if (tg3_flag(tp, PTP_CAPABLE))
9910 		val |= GRC_MODE_TIME_SYNC_ENABLE;
9911 
9912 	tw32(GRC_MODE, tp->grc_mode | val);
9913 
9914 	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
9915 	val = tr32(GRC_MISC_CFG);
9916 	val &= ~0xff;
9917 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9918 	tw32(GRC_MISC_CFG, val);
9919 
9920 	/* Initialize MBUF/DESC pool. */
9921 	if (tg3_flag(tp, 5750_PLUS)) {
9922 		/* Do nothing.  */
9923 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9924 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9925 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
9926 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9927 		else
9928 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9929 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9930 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9931 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
9932 		int fw_len;
9933 
9934 		fw_len = tp->fw_len;
9935 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9936 		tw32(BUFMGR_MB_POOL_ADDR,
9937 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9938 		tw32(BUFMGR_MB_POOL_SIZE,
9939 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9940 	}
9941 
9942 	if (tp->dev->mtu <= ETH_DATA_LEN) {
9943 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9944 		     tp->bufmgr_config.mbuf_read_dma_low_water);
9945 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9946 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
9947 		tw32(BUFMGR_MB_HIGH_WATER,
9948 		     tp->bufmgr_config.mbuf_high_water);
9949 	} else {
9950 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
9951 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9952 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
9953 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9954 		tw32(BUFMGR_MB_HIGH_WATER,
9955 		     tp->bufmgr_config.mbuf_high_water_jumbo);
9956 	}
9957 	tw32(BUFMGR_DMA_LOW_WATER,
9958 	     tp->bufmgr_config.dma_low_water);
9959 	tw32(BUFMGR_DMA_HIGH_WATER,
9960 	     tp->bufmgr_config.dma_high_water);
9961 
9962 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9963 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9964 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9965 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9966 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9967 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9968 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9969 	tw32(BUFMGR_MODE, val);
9970 	for (i = 0; i < 2000; i++) {
9971 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9972 			break;
9973 		udelay(10);
9974 	}
9975 	if (i >= 2000) {
9976 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9977 		return -ENODEV;
9978 	}
9979 
9980 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9981 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9982 
9983 	tg3_setup_rxbd_thresholds(tp);
9984 
9985 	/* Initialize TG3_BDINFO's at:
9986 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
9987 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
9988 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
9989 	 *
9990 	 * like so:
9991 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
9992 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
9993 	 *                              ring attribute flags
9994 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
9995 	 *
9996 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9997 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9998 	 *
9999 	 * The size of each ring is fixed in the firmware, but the location is
10000 	 * configurable.
10001 	 */
10002 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10003 	     ((u64) tpr->rx_std_mapping >> 32));
10004 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10005 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10006 	if (!tg3_flag(tp, 5717_PLUS))
10007 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10008 		     NIC_SRAM_RX_BUFFER_DESC);
10009 
10010 	/* Disable the mini ring */
10011 	if (!tg3_flag(tp, 5705_PLUS))
10012 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10013 		     BDINFO_FLAGS_DISABLED);
10014 
10015 	/* Program the jumbo buffer descriptor ring control
10016 	 * blocks on those devices that have them.
10017 	 */
10018 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10019 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10020 
10021 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10022 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10023 			     ((u64) tpr->rx_jmb_mapping >> 32));
10024 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10025 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10026 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10027 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10028 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10029 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10030 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10031 			    tg3_flag(tp, 57765_CLASS) ||
10032 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10033 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10034 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10035 		} else {
10036 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10037 			     BDINFO_FLAGS_DISABLED);
10038 		}
10039 
10040 		if (tg3_flag(tp, 57765_PLUS)) {
10041 			val = TG3_RX_STD_RING_SIZE(tp);
10042 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10043 			val |= (TG3_RX_STD_DMA_SZ << 2);
10044 		} else
10045 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10046 	} else
10047 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10048 
10049 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10050 
10051 	tpr->rx_std_prod_idx = tp->rx_pending;
10052 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10053 
10054 	tpr->rx_jmb_prod_idx =
10055 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10056 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10057 
10058 	tg3_rings_reset(tp);
10059 
10060 	/* Initialize MAC address and backoff seed. */
10061 	__tg3_set_mac_addr(tp, false);
10062 
10063 	/* MTU + ethernet header + FCS + optional VLAN tag */
10064 	tw32(MAC_RX_MTU_SIZE,
10065 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10066 
10067 	/* The slot time is changed by tg3_setup_phy if we
10068 	 * run at gigabit with half duplex.
10069 	 */
10070 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10071 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10072 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10073 
10074 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10075 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10076 		val |= tr32(MAC_TX_LENGTHS) &
10077 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10078 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10079 
10080 	tw32(MAC_TX_LENGTHS, val);
10081 
10082 	/* Receive rules. */
10083 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10084 	tw32(RCVLPC_CONFIG, 0x0181);
10085 
10086 	/* Calculate RDMAC_MODE setting early, we need it to determine
10087 	 * the RCVLPC_STATE_ENABLE mask.
10088 	 */
10089 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10090 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10091 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10092 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10093 		      RDMAC_MODE_LNGREAD_ENAB);
10094 
10095 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10096 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10097 
10098 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10099 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10100 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10101 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10102 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10103 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10104 
10105 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10106 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10107 		if (tg3_flag(tp, TSO_CAPABLE) &&
10108 		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10109 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10110 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10111 			   !tg3_flag(tp, IS_5788)) {
10112 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10113 		}
10114 	}
10115 
10116 	if (tg3_flag(tp, PCI_EXPRESS))
10117 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10118 
10119 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10120 		tp->dma_limit = 0;
10121 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10122 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10123 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10124 		}
10125 	}
10126 
10127 	if (tg3_flag(tp, HW_TSO_1) ||
10128 	    tg3_flag(tp, HW_TSO_2) ||
10129 	    tg3_flag(tp, HW_TSO_3))
10130 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10131 
10132 	if (tg3_flag(tp, 57765_PLUS) ||
10133 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10134 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10135 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10136 
10137 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10138 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10139 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10140 
10141 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10142 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10143 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10144 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10145 	    tg3_flag(tp, 57765_PLUS)) {
10146 		u32 tgtreg;
10147 
10148 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10149 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10150 		else
10151 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10152 
10153 		val = tr32(tgtreg);
10154 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10155 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10156 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10157 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10158 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10159 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10160 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10161 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10162 		}
10163 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10164 	}
10165 
10166 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10167 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10168 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10169 		u32 tgtreg;
10170 
10171 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10172 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10173 		else
10174 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10175 
10176 		val = tr32(tgtreg);
10177 		tw32(tgtreg, val |
10178 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10179 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10180 	}
10181 
10182 	/* Receive/send statistics. */
10183 	if (tg3_flag(tp, 5750_PLUS)) {
10184 		val = tr32(RCVLPC_STATS_ENABLE);
10185 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10186 		tw32(RCVLPC_STATS_ENABLE, val);
10187 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10188 		   tg3_flag(tp, TSO_CAPABLE)) {
10189 		val = tr32(RCVLPC_STATS_ENABLE);
10190 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10191 		tw32(RCVLPC_STATS_ENABLE, val);
10192 	} else {
10193 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10194 	}
10195 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10196 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10197 	tw32(SNDDATAI_STATSCTRL,
10198 	     (SNDDATAI_SCTRL_ENABLE |
10199 	      SNDDATAI_SCTRL_FASTUPD));
10200 
10201 	/* Setup host coalescing engine. */
10202 	tw32(HOSTCC_MODE, 0);
10203 	for (i = 0; i < 2000; i++) {
10204 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10205 			break;
10206 		udelay(10);
10207 	}
10208 
10209 	__tg3_set_coalesce(tp, &tp->coal);
10210 
10211 	if (!tg3_flag(tp, 5705_PLUS)) {
10212 		/* Status/statistics block address.  See tg3_timer,
10213 		 * the tg3_periodic_fetch_stats call there, and
10214 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10215 		 */
10216 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10217 		     ((u64) tp->stats_mapping >> 32));
10218 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10219 		     ((u64) tp->stats_mapping & 0xffffffff));
10220 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10221 
10222 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10223 
10224 		/* Clear statistics and status block memory areas */
10225 		for (i = NIC_SRAM_STATS_BLK;
10226 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10227 		     i += sizeof(u32)) {
10228 			tg3_write_mem(tp, i, 0);
10229 			udelay(40);
10230 		}
10231 	}
10232 
10233 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10234 
10235 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10236 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10237 	if (!tg3_flag(tp, 5705_PLUS))
10238 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10239 
10240 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10241 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10242 		/* reset to prevent losing 1st rx packet intermittently */
10243 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10244 		udelay(10);
10245 	}
10246 
10247 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10248 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10249 			MAC_MODE_FHDE_ENABLE;
10250 	if (tg3_flag(tp, ENABLE_APE))
10251 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10252 	if (!tg3_flag(tp, 5705_PLUS) &&
10253 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10254 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10255 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10256 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10257 	udelay(40);
10258 
10259 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10260 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10261 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10262 	 * whether used as inputs or outputs, are set by boot code after
10263 	 * reset.
10264 	 */
10265 	if (!tg3_flag(tp, IS_NIC)) {
10266 		u32 gpio_mask;
10267 
10268 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10269 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10270 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10271 
10272 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10273 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10274 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10275 
10276 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10277 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10278 
10279 		tp->grc_local_ctrl &= ~gpio_mask;
10280 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10281 
10282 		/* GPIO1 must be driven high for eeprom write protect */
10283 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10284 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10285 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10286 	}
10287 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10288 	udelay(100);
10289 
10290 	if (tg3_flag(tp, USING_MSIX)) {
10291 		val = tr32(MSGINT_MODE);
10292 		val |= MSGINT_MODE_ENABLE;
10293 		if (tp->irq_cnt > 1)
10294 			val |= MSGINT_MODE_MULTIVEC_EN;
10295 		if (!tg3_flag(tp, 1SHOT_MSI))
10296 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10297 		tw32(MSGINT_MODE, val);
10298 	}
10299 
10300 	if (!tg3_flag(tp, 5705_PLUS)) {
10301 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10302 		udelay(40);
10303 	}
10304 
10305 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10306 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10307 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10308 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10309 	       WDMAC_MODE_LNGREAD_ENAB);
10310 
10311 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10312 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10313 		if (tg3_flag(tp, TSO_CAPABLE) &&
10314 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10315 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10316 			/* nothing */
10317 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10318 			   !tg3_flag(tp, IS_5788)) {
10319 			val |= WDMAC_MODE_RX_ACCEL;
10320 		}
10321 	}
10322 
10323 	/* Enable host coalescing bug fix */
10324 	if (tg3_flag(tp, 5755_PLUS))
10325 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10326 
10327 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10328 		val |= WDMAC_MODE_BURST_ALL_DATA;
10329 
10330 	tw32_f(WDMAC_MODE, val);
10331 	udelay(40);
10332 
10333 	if (tg3_flag(tp, PCIX_MODE)) {
10334 		u16 pcix_cmd;
10335 
10336 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10337 				     &pcix_cmd);
10338 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10339 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10340 			pcix_cmd |= PCI_X_CMD_READ_2K;
10341 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10342 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10343 			pcix_cmd |= PCI_X_CMD_READ_2K;
10344 		}
10345 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10346 				      pcix_cmd);
10347 	}
10348 
10349 	tw32_f(RDMAC_MODE, rdmac_mode);
10350 	udelay(40);
10351 
10352 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10353 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10354 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10355 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10356 				break;
10357 		}
10358 		if (i < TG3_NUM_RDMA_CHANNELS) {
10359 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10360 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10361 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10362 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10363 		}
10364 	}
10365 
10366 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10367 	if (!tg3_flag(tp, 5705_PLUS))
10368 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10369 
10370 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10371 		tw32(SNDDATAC_MODE,
10372 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10373 	else
10374 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10375 
10376 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10377 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10378 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10379 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10380 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10381 	tw32(RCVDBDI_MODE, val);
10382 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10383 	if (tg3_flag(tp, HW_TSO_1) ||
10384 	    tg3_flag(tp, HW_TSO_2) ||
10385 	    tg3_flag(tp, HW_TSO_3))
10386 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10387 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10388 	if (tg3_flag(tp, ENABLE_TSS))
10389 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10390 	tw32(SNDBDI_MODE, val);
10391 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10392 
10393 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10394 		err = tg3_load_5701_a0_firmware_fix(tp);
10395 		if (err)
10396 			return err;
10397 	}
10398 
10399 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10400 		/* Ignore any errors for the firmware download. If download
10401 		 * fails, the device will operate with EEE disabled
10402 		 */
10403 		tg3_load_57766_firmware(tp);
10404 	}
10405 
10406 	if (tg3_flag(tp, TSO_CAPABLE)) {
10407 		err = tg3_load_tso_firmware(tp);
10408 		if (err)
10409 			return err;
10410 	}
10411 
10412 	tp->tx_mode = TX_MODE_ENABLE;
10413 
10414 	if (tg3_flag(tp, 5755_PLUS) ||
10415 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10416 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10417 
10418 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10419 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10420 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10421 		tp->tx_mode &= ~val;
10422 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10423 	}
10424 
10425 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10426 	udelay(100);
10427 
10428 	if (tg3_flag(tp, ENABLE_RSS)) {
10429 		tg3_rss_write_indir_tbl(tp);
10430 
10431 		/* Setup the "secret" hash key. */
10432 		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10433 		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10434 		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10435 		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10436 		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10437 		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10438 		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10439 		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10440 		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10441 		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10442 	}
10443 
10444 	tp->rx_mode = RX_MODE_ENABLE;
10445 	if (tg3_flag(tp, 5755_PLUS))
10446 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10447 
10448 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10449 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10450 
10451 	if (tg3_flag(tp, ENABLE_RSS))
10452 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10453 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10454 			       RX_MODE_RSS_IPV6_HASH_EN |
10455 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10456 			       RX_MODE_RSS_IPV4_HASH_EN |
10457 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10458 
10459 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10460 	udelay(10);
10461 
10462 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10463 
10464 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10465 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10466 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10467 		udelay(10);
10468 	}
10469 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10470 	udelay(10);
10471 
10472 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10473 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10474 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10475 			/* Set drive transmission level to 1.2V  */
10476 			/* only if the signal pre-emphasis bit is not set  */
10477 			val = tr32(MAC_SERDES_CFG);
10478 			val &= 0xfffff000;
10479 			val |= 0x880;
10480 			tw32(MAC_SERDES_CFG, val);
10481 		}
10482 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10483 			tw32(MAC_SERDES_CFG, 0x616000);
10484 	}
10485 
10486 	/* Prevent chip from dropping frames when flow control
10487 	 * is enabled.
10488 	 */
10489 	if (tg3_flag(tp, 57765_CLASS))
10490 		val = 1;
10491 	else
10492 		val = 2;
10493 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10494 
10495 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10496 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10497 		/* Use hardware link auto-negotiation */
10498 		tg3_flag_set(tp, HW_AUTONEG);
10499 	}
10500 
10501 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10502 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10503 		u32 tmp;
10504 
10505 		tmp = tr32(SERDES_RX_CTRL);
10506 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10507 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10508 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10509 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10510 	}
10511 
10512 	if (!tg3_flag(tp, USE_PHYLIB)) {
10513 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10514 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10515 
10516 		err = tg3_setup_phy(tp, false);
10517 		if (err)
10518 			return err;
10519 
10520 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10521 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10522 			u32 tmp;
10523 
10524 			/* Clear CRC stats. */
10525 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10526 				tg3_writephy(tp, MII_TG3_TEST1,
10527 					     tmp | MII_TG3_TEST1_CRC_EN);
10528 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10529 			}
10530 		}
10531 	}
10532 
10533 	__tg3_set_rx_mode(tp->dev);
10534 
10535 	/* Initialize receive rules. */
10536 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10537 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10538 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10539 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10540 
10541 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10542 		limit = 8;
10543 	else
10544 		limit = 16;
10545 	if (tg3_flag(tp, ENABLE_ASF))
10546 		limit -= 4;
10547 	switch (limit) {
10548 	case 16:
10549 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);	/* fall through */
10550 	case 15:
10551 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);	/* fall through */
10552 	case 14:
10553 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);	/* fall through */
10554 	case 13:
10555 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);	/* fall through */
10556 	case 12:
10557 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);	/* fall through */
10558 	case 11:
10559 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);	/* fall through */
10560 	case 10:
10561 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);	/* fall through */
10562 	case 9:
10563 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);	/* fall through */
10564 	case 8:
10565 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);	/* fall through */
10566 	case 7:
10567 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);	/* fall through */
10568 	case 6:
10569 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);	/* fall through */
10570 	case 5:
10571 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);	/* fall through */
10572 	case 4:
10573 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10574 	case 3:
10575 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10576 	case 2:
10577 	case 1:
10578 
10579 	default:
10580 		break;
10581 	}
10582 
10583 	if (tg3_flag(tp, ENABLE_APE))
10584 		/* Write our heartbeat update interval to APE. */
10585 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10586 				APE_HOST_HEARTBEAT_INT_DISABLE);
10587 
10588 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10589 
10590 	return 0;
10591 }
10592 
10593 /* Called at device open time to get the chip ready for
10594  * packet processing.  Invoked with tp->lock held.
10595  */
10596 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10597 {
10598 	/* Chip may have just been powered on. If so, the boot code may still
10599 	 * be running initialization. Wait for it to finish to avoid races in
10600 	 * accessing the hardware.
10601 	 */
10602 	tg3_enable_register_access(tp);
10603 	tg3_poll_fw(tp);
10604 
10605 	tg3_switch_clocks(tp);
10606 
10607 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10608 
10609 	return tg3_reset_hw(tp, reset_phy);
10610 }
10611 
10612 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10613 {
10614 	int i;
10615 
10616 	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10617 		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10618 
10619 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10620 		off += len;
10621 
10622 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10623 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10624 			memset(ocir, 0, TG3_OCIR_LEN);
10625 	}
10626 }
10627 
10628 /* sysfs attributes for hwmon */
10629 static ssize_t tg3_show_temp(struct device *dev,
10630 			     struct device_attribute *devattr, char *buf)
10631 {
10632 	struct pci_dev *pdev = to_pci_dev(dev);
10633 	struct net_device *netdev = pci_get_drvdata(pdev);
10634 	struct tg3 *tp = netdev_priv(netdev);
10635 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10636 	u32 temperature;
10637 
10638 	spin_lock_bh(&tp->lock);
10639 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10640 				sizeof(temperature));
10641 	spin_unlock_bh(&tp->lock);
10642 	return sprintf(buf, "%u\n", temperature);
10643 }
10644 
10645 
10646 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10647 			  TG3_TEMP_SENSOR_OFFSET);
10648 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10649 			  TG3_TEMP_CAUTION_OFFSET);
10650 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10651 			  TG3_TEMP_MAX_OFFSET);
10652 
10653 static struct attribute *tg3_attributes[] = {
10654 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10655 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10656 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10657 	NULL
10658 };
10659 
10660 static const struct attribute_group tg3_group = {
10661 	.attrs = tg3_attributes,
10662 };
10663 
10664 static void tg3_hwmon_close(struct tg3 *tp)
10665 {
10666 	if (tp->hwmon_dev) {
10667 		hwmon_device_unregister(tp->hwmon_dev);
10668 		tp->hwmon_dev = NULL;
10669 		sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10670 	}
10671 }
10672 
10673 static void tg3_hwmon_open(struct tg3 *tp)
10674 {
10675 	int i, err;
10676 	u32 size = 0;
10677 	struct pci_dev *pdev = tp->pdev;
10678 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10679 
10680 	tg3_sd_scan_scratchpad(tp, ocirs);
10681 
10682 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10683 		if (!ocirs[i].src_data_length)
10684 			continue;
10685 
10686 		size += ocirs[i].src_hdr_length;
10687 		size += ocirs[i].src_data_length;
10688 	}
10689 
10690 	if (!size)
10691 		return;
10692 
10693 	/* Register hwmon sysfs hooks */
10694 	err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10695 	if (err) {
10696 		dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10697 		return;
10698 	}
10699 
10700 	tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10701 	if (IS_ERR(tp->hwmon_dev)) {
10702 		tp->hwmon_dev = NULL;
10703 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10704 		sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10705 	}
10706 }
10707 
10708 
10709 #define TG3_STAT_ADD32(PSTAT, REG) \
10710 do {	u32 __val = tr32(REG); \
10711 	(PSTAT)->low += __val; \
10712 	if ((PSTAT)->low < __val) \
10713 		(PSTAT)->high += 1; \
10714 } while (0)
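
/* TG3_STAT_ADD32() folds a 32-bit hardware counter into a 64-bit
 * software counter kept as a high/low pair; the unsigned comparison
 * detects wraparound of the low word.  Illustrative values:
 *
 *	low = 0xffffff00, __val = 0x200
 *	low += __val  ->  low = 0x100  (0x100 < 0x200, so it wrapped)
 *	high += 1     (carry propagated)
 */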
10715 
10716 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10717 {
10718 	struct tg3_hw_stats *sp = tp->hw_stats;
10719 
10720 	if (!tp->link_up)
10721 		return;
10722 
10723 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10724 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10725 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10726 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10727 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10728 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10729 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10730 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10731 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10732 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10733 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10734 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10735 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10736 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10737 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10738 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10739 		u32 val;
10740 
10741 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10742 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10743 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10744 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10745 	}
10746 
10747 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10748 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10749 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10750 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10751 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10752 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10753 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10754 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10755 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10756 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10757 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10758 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10759 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10760 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10761 
10762 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10763 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10764 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10765 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10766 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10767 	} else {
10768 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10769 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10770 		if (val) {
10771 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10772 			sp->rx_discards.low += val;
10773 			if (sp->rx_discards.low < val)
10774 				sp->rx_discards.high += 1;
10775 		}
10776 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10777 	}
10778 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10779 }
10780 
10781 static void tg3_chk_missed_msi(struct tg3 *tp)
10782 {
10783 	u32 i;
10784 
10785 	for (i = 0; i < tp->irq_cnt; i++) {
10786 		struct tg3_napi *tnapi = &tp->napi[i];
10787 
10788 		if (tg3_has_work(tnapi)) {
10789 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10790 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10791 				if (tnapi->chk_msi_cnt < 1) {
10792 					tnapi->chk_msi_cnt++;
10793 					return;
10794 				}
10795 				tg3_msi(0, tnapi);
10796 			}
10797 		}
10798 		tnapi->chk_msi_cnt = 0;
10799 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10800 		tnapi->last_tx_cons = tnapi->tx_cons;
10801 	}
10802 }
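
/* The function above is a missed-MSI heuristic: if a vector has work
 * pending but neither consumer index has moved since the previous
 * timer tick, one grace tick is allowed (chk_msi_cnt) before tg3_msi()
 * is called by hand to kick the stalled vector.
 */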
10803 
10804 static void tg3_timer(unsigned long __opaque)
10805 {
10806 	struct tg3 *tp = (struct tg3 *) __opaque;
10807 
10808 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10809 		goto restart_timer;
10810 
10811 	spin_lock(&tp->lock);
10812 
10813 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10814 	    tg3_flag(tp, 57765_CLASS))
10815 		tg3_chk_missed_msi(tp);
10816 
10817 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10818 		/* BCM4785: Flush posted writes from GbE to host memory. */
10819 		tr32(HOSTCC_MODE);
10820 	}
10821 
10822 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10823 		/* All of this garbage is needed because, when using non-tagged
10824 		 * IRQ status, the mailbox/status_block protocol the chip
10825 		 * uses with the CPU is race prone.
10826 		 */
10827 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10828 			tw32(GRC_LOCAL_CTRL,
10829 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10830 		} else {
10831 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10832 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10833 		}
10834 
10835 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10836 			spin_unlock(&tp->lock);
10837 			tg3_reset_task_schedule(tp);
10838 			goto restart_timer;
10839 		}
10840 	}
10841 
10842 	/* This part only runs once per second. */
10843 	if (!--tp->timer_counter) {
10844 		if (tg3_flag(tp, 5705_PLUS))
10845 			tg3_periodic_fetch_stats(tp);
10846 
10847 		if (tp->setlpicnt && !--tp->setlpicnt)
10848 			tg3_phy_eee_enable(tp);
10849 
10850 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10851 			u32 mac_stat;
10852 			int phy_event;
10853 
10854 			mac_stat = tr32(MAC_STATUS);
10855 
10856 			phy_event = 0;
10857 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10858 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10859 					phy_event = 1;
10860 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10861 				phy_event = 1;
10862 
10863 			if (phy_event)
10864 				tg3_setup_phy(tp, false);
10865 		} else if (tg3_flag(tp, POLL_SERDES)) {
10866 			u32 mac_stat = tr32(MAC_STATUS);
10867 			int need_setup = 0;
10868 
10869 			if (tp->link_up &&
10870 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10871 				need_setup = 1;
10872 			}
10873 			if (!tp->link_up &&
10874 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10875 					 MAC_STATUS_SIGNAL_DET))) {
10876 				need_setup = 1;
10877 			}
10878 			if (need_setup) {
10879 				if (!tp->serdes_counter) {
10880 					tw32_f(MAC_MODE,
10881 					     (tp->mac_mode &
10882 					      ~MAC_MODE_PORT_MODE_MASK));
10883 					udelay(40);
10884 					tw32_f(MAC_MODE, tp->mac_mode);
10885 					udelay(40);
10886 				}
10887 				tg3_setup_phy(tp, false);
10888 			}
10889 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10890 			   tg3_flag(tp, 5780_CLASS)) {
10891 			tg3_serdes_parallel_detect(tp);
10892 		}
10893 
10894 		tp->timer_counter = tp->timer_multiplier;
10895 	}
10896 
10897 	/* Heartbeat is only sent once every 2 seconds.
10898 	 *
10899 	 * The heartbeat is to tell the ASF firmware that the host
10900 	 * driver is still alive.  In the event that the OS crashes,
10901 	 * ASF needs to reset the hardware to free up the FIFO space
10902 	 * that may be filled with rx packets destined for the host.
10903 	 * If the FIFO is full, ASF will no longer function properly.
10904 	 *
10905 	 * Unintended resets have been reported on real-time kernels
10906 	 * where the timer doesn't run on time.  Netpoll will also have
10907 	 * the same problem.
10908 	 *
10909 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10910 	 * to check the ring condition when the heartbeat is expiring
10911 	 * before doing the reset.  This will prevent most unintended
10912 	 * resets.
10913 	 */
10914 	if (!--tp->asf_counter) {
10915 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10916 			tg3_wait_for_event_ack(tp);
10917 
10918 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10919 				      FWCMD_NICDRV_ALIVE3);
10920 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10921 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10922 				      TG3_FW_UPDATE_TIMEOUT_SEC);
10923 
10924 			tg3_generate_fw_event(tp);
10925 		}
10926 		tp->asf_counter = tp->asf_multiplier;
10927 	}
10928 
10929 	spin_unlock(&tp->lock);
10930 
10931 restart_timer:
10932 	tp->timer.expires = jiffies + tp->timer_offset;
10933 	add_timer(&tp->timer);
10934 }
10935 
10936 static void tg3_timer_init(struct tg3 *tp)
10937 {
10938 	if (tg3_flag(tp, TAGGED_STATUS) &&
10939 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10940 	    !tg3_flag(tp, 57765_CLASS))
10941 		tp->timer_offset = HZ;
10942 	else
10943 		tp->timer_offset = HZ / 10;
10944 
10945 	BUG_ON(tp->timer_offset > HZ);
10946 
10947 	tp->timer_multiplier = (HZ / tp->timer_offset);
10948 	tp->asf_multiplier = (HZ / tp->timer_offset) *
10949 			     TG3_FW_UPDATE_FREQ_SEC;
10950 
10951 	init_timer(&tp->timer);
10952 	tp->timer.data = (unsigned long) tp;
10953 	tp->timer.function = tg3_timer;
10954 }
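
/* Resulting cadence, assuming HZ = 1000 for illustration: without
 * tagged status (or on 5717/57765-class chips) timer_offset = HZ / 10,
 * so tg3_timer() runs every 100 ms and timer_multiplier = 10 makes its
 * "once per second" block execute on every 10th invocation, while
 * asf_multiplier spaces heartbeats TG3_FW_UPDATE_FREQ_SEC seconds
 * apart.
 */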
10955 
10956 static void tg3_timer_start(struct tg3 *tp)
10957 {
10958 	tp->asf_counter   = tp->asf_multiplier;
10959 	tp->timer_counter = tp->timer_multiplier;
10960 
10961 	tp->timer.expires = jiffies + tp->timer_offset;
10962 	add_timer(&tp->timer);
10963 }
10964 
10965 static void tg3_timer_stop(struct tg3 *tp)
10966 {
10967 	del_timer_sync(&tp->timer);
10968 }
10969 
10970 /* Restart hardware after configuration changes, self-test, etc.
10971  * Invoked with tp->lock held.
10972  */
10973 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10974 	__releases(tp->lock)
10975 	__acquires(tp->lock)
10976 {
10977 	int err;
10978 
10979 	err = tg3_init_hw(tp, reset_phy);
10980 	if (err) {
10981 		netdev_err(tp->dev,
10982 			   "Failed to re-initialize device, aborting\n");
10983 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10984 		tg3_full_unlock(tp);
10985 		tg3_timer_stop(tp);
10986 		tp->irq_sync = 0;
10987 		tg3_napi_enable(tp);
10988 		dev_close(tp->dev);
10989 		tg3_full_lock(tp, 0);
10990 	}
10991 	return err;
10992 }
10993 
10994 static void tg3_reset_task(struct work_struct *work)
10995 {
10996 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
10997 	int err;
10998 
10999 	tg3_full_lock(tp, 0);
11000 
11001 	if (!netif_running(tp->dev)) {
11002 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11003 		tg3_full_unlock(tp);
11004 		return;
11005 	}
11006 
11007 	tg3_full_unlock(tp);
11008 
11009 	tg3_phy_stop(tp);
11010 
11011 	tg3_netif_stop(tp);
11012 
11013 	tg3_full_lock(tp, 1);
11014 
11015 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11016 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11017 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11018 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11019 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11020 	}
11021 
11022 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11023 	err = tg3_init_hw(tp, true);
11024 	if (err)
11025 		goto out;
11026 
11027 	tg3_netif_start(tp);
11028 
11029 out:
11030 	tg3_full_unlock(tp);
11031 
11032 	if (!err)
11033 		tg3_phy_start(tp);
11034 
11035 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11036 }
11037 
11038 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11039 {
11040 	irq_handler_t fn;
11041 	unsigned long flags;
11042 	char *name;
11043 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11044 
11045 	if (tp->irq_cnt == 1)
11046 		name = tp->dev->name;
11047 	else {
11048 		name = &tnapi->irq_lbl[0];
11049 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11050 			snprintf(name, IFNAMSIZ,
11051 				 "%s-txrx-%d", tp->dev->name, irq_num);
11052 		else if (tnapi->tx_buffers)
11053 			snprintf(name, IFNAMSIZ,
11054 				 "%s-tx-%d", tp->dev->name, irq_num);
11055 		else if (tnapi->rx_rcb)
11056 			snprintf(name, IFNAMSIZ,
11057 				 "%s-rx-%d", tp->dev->name, irq_num);
11058 		else
11059 			snprintf(name, IFNAMSIZ,
11060 				 "%s-%d", tp->dev->name, irq_num);
11061 		name[IFNAMSIZ-1] = 0;
11062 	}
11063 
11064 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11065 		fn = tg3_msi;
11066 		if (tg3_flag(tp, 1SHOT_MSI))
11067 			fn = tg3_msi_1shot;
11068 		flags = 0;
11069 	} else {
11070 		fn = tg3_interrupt;
11071 		if (tg3_flag(tp, TAGGED_STATUS))
11072 			fn = tg3_interrupt_tagged;
11073 		flags = IRQF_SHARED;
11074 	}
11075 
11076 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11077 }
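
/* With multiple vectors on a device named "eth0", the code above
 * produces per-vector IRQ names such as "eth0-0" for vector 0 (which,
 * in multi-vector mode, services only link and error events),
 * "eth0-txrx-1" for a vector owning both ring types, and "eth0-rx-2"
 * or "eth0-tx-3" for rx-only or tx-only rings; a single-vector device
 * simply uses the netdev name.
 */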
11078 
11079 static int tg3_test_interrupt(struct tg3 *tp)
11080 {
11081 	struct tg3_napi *tnapi = &tp->napi[0];
11082 	struct net_device *dev = tp->dev;
11083 	int err, i, intr_ok = 0;
11084 	u32 val;
11085 
11086 	if (!netif_running(dev))
11087 		return -ENODEV;
11088 
11089 	tg3_disable_ints(tp);
11090 
11091 	free_irq(tnapi->irq_vec, tnapi);
11092 
11093 	/*
11094 	 * Turn off MSI one shot mode.  Otherwise this test has no
11095 	 * observable way to know whether the interrupt was delivered.
11096 	 */
11097 	if (tg3_flag(tp, 57765_PLUS)) {
11098 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11099 		tw32(MSGINT_MODE, val);
11100 	}
11101 
11102 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11103 			  IRQF_SHARED, dev->name, tnapi);
11104 	if (err)
11105 		return err;
11106 
11107 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11108 	tg3_enable_ints(tp);
11109 
11110 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11111 	       tnapi->coal_now);
11112 
11113 	for (i = 0; i < 5; i++) {
11114 		u32 int_mbox, misc_host_ctrl;
11115 
11116 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11117 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11118 
11119 		if ((int_mbox != 0) ||
11120 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11121 			intr_ok = 1;
11122 			break;
11123 		}
11124 
11125 		if (tg3_flag(tp, 57765_PLUS) &&
11126 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11127 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11128 
11129 		msleep(10);
11130 	}
11131 
11132 	tg3_disable_ints(tp);
11133 
11134 	free_irq(tnapi->irq_vec, tnapi);
11135 
11136 	err = tg3_request_irq(tp, 0);
11137 
11138 	if (err)
11139 		return err;
11140 
11141 	if (intr_ok) {
11142 		/* Reenable MSI one shot mode. */
11143 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11144 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11145 			tw32(MSGINT_MODE, val);
11146 		}
11147 		return 0;
11148 	}
11149 
11150 	return -EIO;
11151 }
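
/* The test above works by parking the production handler, installing
 * tg3_test_isr() in its place, and forcing an immediate coalescing
 * interrupt via tnapi->coal_now.  It then polls for roughly 50ms
 * (five 10ms sleeps) for evidence that the interrupt fired: either
 * the interrupt mailbox reading back non-zero, or the test ISR
 * having masked PCI interrupts in MISC_HOST_CTRL.
 */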
11152 
11153 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
11154  * INTx mode is successfully restored.
11155  */
11156 static int tg3_test_msi(struct tg3 *tp)
11157 {
11158 	int err;
11159 	u16 pci_cmd;
11160 
11161 	if (!tg3_flag(tp, USING_MSI))
11162 		return 0;
11163 
11164 	/* Turn off SERR reporting in case MSI terminates with Master
11165 	 * Abort.
11166 	 */
11167 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11168 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11169 			      pci_cmd & ~PCI_COMMAND_SERR);
11170 
11171 	err = tg3_test_interrupt(tp);
11172 
11173 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11174 
11175 	if (!err)
11176 		return 0;
11177 
11178 	/* other failures */
11179 	if (err != -EIO)
11180 		return err;
11181 
11182 	/* MSI test failed, go back to INTx mode */
11183 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11184 		    "to INTx mode. Please report this failure to the PCI "
11185 		    "maintainer and include system chipset information\n");
11186 
11187 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11188 
11189 	pci_disable_msi(tp->pdev);
11190 
11191 	tg3_flag_clear(tp, USING_MSI);
11192 	tp->napi[0].irq_vec = tp->pdev->irq;
11193 
11194 	err = tg3_request_irq(tp, 0);
11195 	if (err)
11196 		return err;
11197 
11198 	/* Need to reset the chip because the MSI cycle may have terminated
11199 	 * with Master Abort.
11200 	 */
11201 	tg3_full_lock(tp, 1);
11202 
11203 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11204 	err = tg3_init_hw(tp, true);
11205 
11206 	tg3_full_unlock(tp);
11207 
11208 	if (err)
11209 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11210 
11211 	return err;
11212 }
11213 
11214 static int tg3_request_firmware(struct tg3 *tp)
11215 {
11216 	const struct tg3_firmware_hdr *fw_hdr;
11217 
11218 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11219 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11220 			   tp->fw_needed);
11221 		return -ENOENT;
11222 	}
11223 
11224 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11225 
11226 	/* Firmware blob starts with version numbers, followed by
11227 	 * start address and _full_ length including BSS sections
11228 	 * (which must be longer than the actual data, of course).
11229 	 */
11230 
11231 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11232 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11233 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11234 			   tp->fw_len, tp->fw_needed);
11235 		release_firmware(tp->fw);
11236 		tp->fw = NULL;
11237 		return -EINVAL;
11238 	}
11239 
11240 	/* We no longer need firmware; we have it. */
11241 	tp->fw_needed = NULL;
11242 	return 0;
11243 }
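
/* A sketch of the header parsed above, as declared in tg3.h -- three
 * big-endian words at the front of the blob, then the payload:
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;		// unused for fragments
 *		__be32 base_addr;	// load address on the NIC
 *		__be32 len;		// full image length, incl. BSS
 *	};
 *
 * TG3_FW_HDR_LEN is the size of this header, which is why fw_len is
 * checked against fw->size minus it.
 */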
11244 
11245 static u32 tg3_irq_count(struct tg3 *tp)
11246 {
11247 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11248 
11249 	if (irq_cnt > 1) {
11250 		/* We want as many rx rings enabled as there are CPUs.
11251 		 * In multiqueue MSI-X mode, the first MSI-X vector
11252 		 * only deals with link interrupts, etc., so we add
11253 		 * one to the number of vectors we are requesting.
11254 		 */
11255 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11256 	}
11257 
11258 	return irq_cnt;
11259 }
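
/* Worked example: on a 4-CPU box with rxq_cnt = 4, txq_cnt = 1 and
 * irq_max = 5, max(4, 1) = 4 is greater than 1, so one extra vector
 * is added for the link-only vector 0 and min(4 + 1, 5) = 5 vectors
 * are requested.  With a single queue this simply returns 1 and no
 * dedicated link vector is reserved.
 */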
11260 
11261 static bool tg3_enable_msix(struct tg3 *tp)
11262 {
11263 	int i, rc;
11264 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11265 
11266 	tp->txq_cnt = tp->txq_req;
11267 	tp->rxq_cnt = tp->rxq_req;
11268 	if (!tp->rxq_cnt)
11269 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11270 	if (tp->rxq_cnt > tp->rxq_max)
11271 		tp->rxq_cnt = tp->rxq_max;
11272 
11273 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11274 	 * scheduling of the TX rings can cause starvation of rings with
11275 	 * small packets when other rings have TSO or jumbo packets.
11276 	 */
11277 	if (!tp->txq_req)
11278 		tp->txq_cnt = 1;
11279 
11280 	tp->irq_cnt = tg3_irq_count(tp);
11281 
11282 	for (i = 0; i < tp->irq_max; i++) {
11283 		msix_ent[i].entry  = i;
11284 		msix_ent[i].vector = 0;
11285 	}
11286 
11287 	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11288 	if (rc < 0) {
11289 		return false;
11290 	} else if (rc != 0) {
11291 		if (pci_enable_msix(tp->pdev, msix_ent, rc))
11292 			return false;
11293 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11294 			      tp->irq_cnt, rc);
11295 		tp->irq_cnt = rc;
11296 		tp->rxq_cnt = max(rc - 1, 1);
11297 		if (tp->txq_cnt)
11298 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11299 	}
11300 
11301 	for (i = 0; i < tp->irq_max; i++)
11302 		tp->napi[i].irq_vec = msix_ent[i].vector;
11303 
11304 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11305 		pci_disable_msix(tp->pdev);
11306 		return false;
11307 	}
11308 
11309 	if (tp->irq_cnt == 1)
11310 		return true;
11311 
11312 	tg3_flag_set(tp, ENABLE_RSS);
11313 
11314 	if (tp->txq_cnt > 1)
11315 		tg3_flag_set(tp, ENABLE_TSS);
11316 
11317 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11318 
11319 	return true;
11320 }
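
/* A note on the retry above: pci_enable_msix() in this era returns
 * 0 on success, a negative errno on hard failure, or a positive
 * count when fewer vectors are available than were requested.  In
 * the last case the allocation is retried with the reduced count,
 * and the rx/tx queue counts are shrunk to match it.
 */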
11321 
11322 static void tg3_ints_init(struct tg3 *tp)
11323 {
11324 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11325 	    !tg3_flag(tp, TAGGED_STATUS)) {
11326 		/* All MSI supporting chips should support tagged
11327 		 * status.  Warn and skip MSI if this is not the case.
11328 		 */
11329 		netdev_warn(tp->dev,
11330 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11331 		goto defcfg;
11332 	}
11333 
11334 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11335 		tg3_flag_set(tp, USING_MSIX);
11336 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11337 		tg3_flag_set(tp, USING_MSI);
11338 
11339 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11340 		u32 msi_mode = tr32(MSGINT_MODE);
11341 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11342 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11343 		if (!tg3_flag(tp, 1SHOT_MSI))
11344 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11345 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11346 	}
11347 defcfg:
11348 	if (!tg3_flag(tp, USING_MSIX)) {
11349 		tp->irq_cnt = 1;
11350 		tp->napi[0].irq_vec = tp->pdev->irq;
11351 	}
11352 
11353 	if (tp->irq_cnt == 1) {
11354 		tp->txq_cnt = 1;
11355 		tp->rxq_cnt = 1;
11356 		netif_set_real_num_tx_queues(tp->dev, 1);
11357 		netif_set_real_num_rx_queues(tp->dev, 1);
11358 	}
11359 }
11360 
11361 static void tg3_ints_fini(struct tg3 *tp)
11362 {
11363 	if (tg3_flag(tp, USING_MSIX))
11364 		pci_disable_msix(tp->pdev);
11365 	else if (tg3_flag(tp, USING_MSI))
11366 		pci_disable_msi(tp->pdev);
11367 	tg3_flag_clear(tp, USING_MSI);
11368 	tg3_flag_clear(tp, USING_MSIX);
11369 	tg3_flag_clear(tp, ENABLE_RSS);
11370 	tg3_flag_clear(tp, ENABLE_TSS);
11371 }
11372 
11373 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11374 		     bool init)
11375 {
11376 	struct net_device *dev = tp->dev;
11377 	int i, err;
11378 
11379 	/*
11380 	 * Setup interrupts first so we know how
11381 	 * many NAPI resources to allocate
11382 	 */
11383 	tg3_ints_init(tp);
11384 
11385 	tg3_rss_check_indir_tbl(tp);
11386 
11387 	/* The placement of this call is tied
11388 	 * to the setup and use of Host TX descriptors.
11389 	 */
11390 	err = tg3_alloc_consistent(tp);
11391 	if (err)
11392 		goto out_ints_fini;
11393 
11394 	tg3_napi_init(tp);
11395 
11396 	tg3_napi_enable(tp);
11397 
11398 	for (i = 0; i < tp->irq_cnt; i++) {
11399 		struct tg3_napi *tnapi = &tp->napi[i];
11400 		err = tg3_request_irq(tp, i);
11401 		if (err) {
11402 			for (i--; i >= 0; i--) {
11403 				tnapi = &tp->napi[i];
11404 				free_irq(tnapi->irq_vec, tnapi);
11405 			}
11406 			goto out_napi_fini;
11407 		}
11408 	}
11409 
11410 	tg3_full_lock(tp, 0);
11411 
11412 	if (init)
11413 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11414 
11415 	err = tg3_init_hw(tp, reset_phy);
11416 	if (err) {
11417 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11418 		tg3_free_rings(tp);
11419 	}
11420 
11421 	tg3_full_unlock(tp);
11422 
11423 	if (err)
11424 		goto out_free_irq;
11425 
11426 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11427 		err = tg3_test_msi(tp);
11428 
11429 		if (err) {
11430 			tg3_full_lock(tp, 0);
11431 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11432 			tg3_free_rings(tp);
11433 			tg3_full_unlock(tp);
11434 
11435 			goto out_napi_fini;
11436 		}
11437 
11438 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11439 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11440 
11441 			tw32(PCIE_TRANSACTION_CFG,
11442 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11443 		}
11444 	}
11445 
11446 	tg3_phy_start(tp);
11447 
11448 	tg3_hwmon_open(tp);
11449 
11450 	tg3_full_lock(tp, 0);
11451 
11452 	tg3_timer_start(tp);
11453 	tg3_flag_set(tp, INIT_COMPLETE);
11454 	tg3_enable_ints(tp);
11455 
11456 	if (init)
11457 		tg3_ptp_init(tp);
11458 	else
11459 		tg3_ptp_resume(tp);
11460 
11462 	tg3_full_unlock(tp);
11463 
11464 	netif_tx_start_all_queues(dev);
11465 
11466 	/*
11467 	 * Reset the loopback feature if it was turned on while the device
11468 	 * was down; make sure that it's installed properly now.
11469 	 */
11470 	if (dev->features & NETIF_F_LOOPBACK)
11471 		tg3_set_loopback(dev, dev->features);
11472 
11473 	return 0;
11474 
11475 out_free_irq:
11476 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11477 		struct tg3_napi *tnapi = &tp->napi[i];
11478 		free_irq(tnapi->irq_vec, tnapi);
11479 	}
11480 
11481 out_napi_fini:
11482 	tg3_napi_disable(tp);
11483 	tg3_napi_fini(tp);
11484 	tg3_free_consistent(tp);
11485 
11486 out_ints_fini:
11487 	tg3_ints_fini(tp);
11488 
11489 	return err;
11490 }
11491 
11492 static void tg3_stop(struct tg3 *tp)
11493 {
11494 	int i;
11495 
11496 	tg3_reset_task_cancel(tp);
11497 	tg3_netif_stop(tp);
11498 
11499 	tg3_timer_stop(tp);
11500 
11501 	tg3_hwmon_close(tp);
11502 
11503 	tg3_phy_stop(tp);
11504 
11505 	tg3_full_lock(tp, 1);
11506 
11507 	tg3_disable_ints(tp);
11508 
11509 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11510 	tg3_free_rings(tp);
11511 	tg3_flag_clear(tp, INIT_COMPLETE);
11512 
11513 	tg3_full_unlock(tp);
11514 
11515 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11516 		struct tg3_napi *tnapi = &tp->napi[i];
11517 		free_irq(tnapi->irq_vec, tnapi);
11518 	}
11519 
11520 	tg3_ints_fini(tp);
11521 
11522 	tg3_napi_fini(tp);
11523 
11524 	tg3_free_consistent(tp);
11525 }
11526 
11527 static int tg3_open(struct net_device *dev)
11528 {
11529 	struct tg3 *tp = netdev_priv(dev);
11530 	int err;
11531 
11532 	if (tp->fw_needed) {
11533 		err = tg3_request_firmware(tp);
11534 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11535 			if (err) {
11536 				netdev_warn(tp->dev, "EEE capability disabled\n");
11537 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11538 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11539 				netdev_warn(tp->dev, "EEE capability restored\n");
11540 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11541 			}
11542 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11543 			if (err)
11544 				return err;
11545 		} else if (err) {
11546 			netdev_warn(tp->dev, "TSO capability disabled\n");
11547 			tg3_flag_clear(tp, TSO_CAPABLE);
11548 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11549 			netdev_notice(tp->dev, "TSO capability restored\n");
11550 			tg3_flag_set(tp, TSO_CAPABLE);
11551 		}
11552 	}
11553 
11554 	tg3_carrier_off(tp);
11555 
11556 	err = tg3_power_up(tp);
11557 	if (err)
11558 		return err;
11559 
11560 	tg3_full_lock(tp, 0);
11561 
11562 	tg3_disable_ints(tp);
11563 	tg3_flag_clear(tp, INIT_COMPLETE);
11564 
11565 	tg3_full_unlock(tp);
11566 
11567 	err = tg3_start(tp,
11568 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11569 			true, true);
11570 	if (err) {
11571 		tg3_frob_aux_power(tp, false);
11572 		pci_set_power_state(tp->pdev, PCI_D3hot);
11573 	}
11574 
11575 	if (tg3_flag(tp, PTP_CAPABLE)) {
11576 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11577 						   &tp->pdev->dev);
11578 		if (IS_ERR(tp->ptp_clock))
11579 			tp->ptp_clock = NULL;
11580 	}
11581 
11582 	return err;
11583 }
11584 
11585 static int tg3_close(struct net_device *dev)
11586 {
11587 	struct tg3 *tp = netdev_priv(dev);
11588 
11589 	tg3_ptp_fini(tp);
11590 
11591 	tg3_stop(tp);
11592 
11593 	/* Clear stats across close / open calls */
11594 	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11595 	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11596 
11597 	tg3_power_down_prepare(tp);
11598 
11599 	tg3_carrier_off(tp);
11600 
11601 	return 0;
11602 }
11603 
11604 static inline u64 get_stat64(tg3_stat64_t *val)
11605 {
11606 	return ((u64)val->high << 32) | ((u64)val->low);
11607 }
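
/* The hardware keeps each counter as two 32-bit halves; e.g.
 * high = 0x1, low = 0x2 yields 0x100000002.
 */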
11608 
11609 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11610 {
11611 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11612 
11613 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11614 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11615 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11616 		u32 val;
11617 
11618 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11619 			tg3_writephy(tp, MII_TG3_TEST1,
11620 				     val | MII_TG3_TEST1_CRC_EN);
11621 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11622 		} else
11623 			val = 0;
11624 
11625 		tp->phy_crc_errors += val;
11626 
11627 		return tp->phy_crc_errors;
11628 	}
11629 
11630 	return get_stat64(&hw_stats->rx_fcs_errors);
11631 }
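
/* On 5700/5701 copper parts the CRC error count is kept by the PHY
 * rather than the MAC statistics block: setting MII_TG3_TEST1_CRC_EN
 * arms the counter and reading MII_TG3_RXR_COUNTERS fetches it.  The
 * counter appears to be clear-on-read, hence the accumulation into
 * phy_crc_errors above.
 */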
11632 
11633 #define ESTAT_ADD(member) \
11634 	estats->member =	old_estats->member + \
11635 				get_stat64(&hw_stats->member)
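
/* For example, ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the total saved across the last
 * reset (estats_prev) plus the live hardware count since then.
 */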
11636 
11637 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11638 {
11639 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11640 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11641 
11642 	ESTAT_ADD(rx_octets);
11643 	ESTAT_ADD(rx_fragments);
11644 	ESTAT_ADD(rx_ucast_packets);
11645 	ESTAT_ADD(rx_mcast_packets);
11646 	ESTAT_ADD(rx_bcast_packets);
11647 	ESTAT_ADD(rx_fcs_errors);
11648 	ESTAT_ADD(rx_align_errors);
11649 	ESTAT_ADD(rx_xon_pause_rcvd);
11650 	ESTAT_ADD(rx_xoff_pause_rcvd);
11651 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11652 	ESTAT_ADD(rx_xoff_entered);
11653 	ESTAT_ADD(rx_frame_too_long_errors);
11654 	ESTAT_ADD(rx_jabbers);
11655 	ESTAT_ADD(rx_undersize_packets);
11656 	ESTAT_ADD(rx_in_length_errors);
11657 	ESTAT_ADD(rx_out_length_errors);
11658 	ESTAT_ADD(rx_64_or_less_octet_packets);
11659 	ESTAT_ADD(rx_65_to_127_octet_packets);
11660 	ESTAT_ADD(rx_128_to_255_octet_packets);
11661 	ESTAT_ADD(rx_256_to_511_octet_packets);
11662 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11663 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11664 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11665 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11666 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11667 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11668 
11669 	ESTAT_ADD(tx_octets);
11670 	ESTAT_ADD(tx_collisions);
11671 	ESTAT_ADD(tx_xon_sent);
11672 	ESTAT_ADD(tx_xoff_sent);
11673 	ESTAT_ADD(tx_flow_control);
11674 	ESTAT_ADD(tx_mac_errors);
11675 	ESTAT_ADD(tx_single_collisions);
11676 	ESTAT_ADD(tx_mult_collisions);
11677 	ESTAT_ADD(tx_deferred);
11678 	ESTAT_ADD(tx_excessive_collisions);
11679 	ESTAT_ADD(tx_late_collisions);
11680 	ESTAT_ADD(tx_collide_2times);
11681 	ESTAT_ADD(tx_collide_3times);
11682 	ESTAT_ADD(tx_collide_4times);
11683 	ESTAT_ADD(tx_collide_5times);
11684 	ESTAT_ADD(tx_collide_6times);
11685 	ESTAT_ADD(tx_collide_7times);
11686 	ESTAT_ADD(tx_collide_8times);
11687 	ESTAT_ADD(tx_collide_9times);
11688 	ESTAT_ADD(tx_collide_10times);
11689 	ESTAT_ADD(tx_collide_11times);
11690 	ESTAT_ADD(tx_collide_12times);
11691 	ESTAT_ADD(tx_collide_13times);
11692 	ESTAT_ADD(tx_collide_14times);
11693 	ESTAT_ADD(tx_collide_15times);
11694 	ESTAT_ADD(tx_ucast_packets);
11695 	ESTAT_ADD(tx_mcast_packets);
11696 	ESTAT_ADD(tx_bcast_packets);
11697 	ESTAT_ADD(tx_carrier_sense_errors);
11698 	ESTAT_ADD(tx_discards);
11699 	ESTAT_ADD(tx_errors);
11700 
11701 	ESTAT_ADD(dma_writeq_full);
11702 	ESTAT_ADD(dma_write_prioq_full);
11703 	ESTAT_ADD(rxbds_empty);
11704 	ESTAT_ADD(rx_discards);
11705 	ESTAT_ADD(rx_errors);
11706 	ESTAT_ADD(rx_threshold_hit);
11707 
11708 	ESTAT_ADD(dma_readq_full);
11709 	ESTAT_ADD(dma_read_prioq_full);
11710 	ESTAT_ADD(tx_comp_queue_full);
11711 
11712 	ESTAT_ADD(ring_set_send_prod_index);
11713 	ESTAT_ADD(ring_status_update);
11714 	ESTAT_ADD(nic_irqs);
11715 	ESTAT_ADD(nic_avoided_irqs);
11716 	ESTAT_ADD(nic_tx_threshold_hit);
11717 
11718 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11719 }
11720 
11721 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11722 {
11723 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11724 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11725 
11726 	stats->rx_packets = old_stats->rx_packets +
11727 		get_stat64(&hw_stats->rx_ucast_packets) +
11728 		get_stat64(&hw_stats->rx_mcast_packets) +
11729 		get_stat64(&hw_stats->rx_bcast_packets);
11730 
11731 	stats->tx_packets = old_stats->tx_packets +
11732 		get_stat64(&hw_stats->tx_ucast_packets) +
11733 		get_stat64(&hw_stats->tx_mcast_packets) +
11734 		get_stat64(&hw_stats->tx_bcast_packets);
11735 
11736 	stats->rx_bytes = old_stats->rx_bytes +
11737 		get_stat64(&hw_stats->rx_octets);
11738 	stats->tx_bytes = old_stats->tx_bytes +
11739 		get_stat64(&hw_stats->tx_octets);
11740 
11741 	stats->rx_errors = old_stats->rx_errors +
11742 		get_stat64(&hw_stats->rx_errors);
11743 	stats->tx_errors = old_stats->tx_errors +
11744 		get_stat64(&hw_stats->tx_errors) +
11745 		get_stat64(&hw_stats->tx_mac_errors) +
11746 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11747 		get_stat64(&hw_stats->tx_discards);
11748 
11749 	stats->multicast = old_stats->multicast +
11750 		get_stat64(&hw_stats->rx_mcast_packets);
11751 	stats->collisions = old_stats->collisions +
11752 		get_stat64(&hw_stats->tx_collisions);
11753 
11754 	stats->rx_length_errors = old_stats->rx_length_errors +
11755 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11756 		get_stat64(&hw_stats->rx_undersize_packets);
11757 
11758 	stats->rx_over_errors = old_stats->rx_over_errors +
11759 		get_stat64(&hw_stats->rxbds_empty);
11760 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11761 		get_stat64(&hw_stats->rx_align_errors);
11762 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11763 		get_stat64(&hw_stats->tx_discards);
11764 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11765 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11766 
11767 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11768 		tg3_calc_crc_errors(tp);
11769 
11770 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11771 		get_stat64(&hw_stats->rx_discards);
11772 
11773 	stats->rx_dropped = tp->rx_dropped;
11774 	stats->tx_dropped = tp->tx_dropped;
11775 }
11776 
11777 static int tg3_get_regs_len(struct net_device *dev)
11778 {
11779 	return TG3_REG_BLK_SIZE;
11780 }
11781 
11782 static void tg3_get_regs(struct net_device *dev,
11783 		struct ethtool_regs *regs, void *_p)
11784 {
11785 	struct tg3 *tp = netdev_priv(dev);
11786 
11787 	regs->version = 0;
11788 
11789 	memset(_p, 0, TG3_REG_BLK_SIZE);
11790 
11791 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11792 		return;
11793 
11794 	tg3_full_lock(tp, 0);
11795 
11796 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11797 
11798 	tg3_full_unlock(tp);
11799 }
11800 
11801 static int tg3_get_eeprom_len(struct net_device *dev)
11802 {
11803 	struct tg3 *tp = netdev_priv(dev);
11804 
11805 	return tp->nvram_size;
11806 }
11807 
11808 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11809 {
11810 	struct tg3 *tp = netdev_priv(dev);
11811 	int ret;
11812 	u8  *pd;
11813 	u32 i, offset, len, b_offset, b_count;
11814 	__be32 val;
11815 
11816 	if (tg3_flag(tp, NO_NVRAM))
11817 		return -EINVAL;
11818 
11819 	offset = eeprom->offset;
11820 	len = eeprom->len;
11821 	eeprom->len = 0;
11822 
11823 	eeprom->magic = TG3_EEPROM_MAGIC;
11824 
11825 	if (offset & 3) {
11826 		/* adjustments to start on required 4 byte boundary */
11827 		b_offset = offset & 3;
11828 		b_count = 4 - b_offset;
11829 		if (b_count > len) {
11830 			/* i.e. offset=1 len=2 */
11831 			b_count = len;
11832 		}
11833 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11834 		if (ret)
11835 			return ret;
11836 		memcpy(data, ((char *)&val) + b_offset, b_count);
11837 		len -= b_count;
11838 		offset += b_count;
11839 		eeprom->len += b_count;
11840 	}
11841 
11842 	/* read bytes up to the last 4 byte boundary */
11843 	pd = &data[eeprom->len];
11844 	for (i = 0; i < (len - (len & 3)); i += 4) {
11845 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11846 		if (ret) {
11847 			eeprom->len += i;
11848 			return ret;
11849 		}
11850 		memcpy(pd + i, &val, 4);
11851 	}
11852 	eeprom->len += i;
11853 
11854 	if (len & 3) {
11855 		/* read last bytes not ending on 4 byte boundary */
11856 		pd = &data[eeprom->len];
11857 		b_count = len & 3;
11858 		b_offset = offset + len - b_count;
11859 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
11860 		if (ret)
11861 			return ret;
11862 		memcpy(pd, &val, b_count);
11863 		eeprom->len += b_count;
11864 	}
11865 	return 0;
11866 }
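
/* Worked example of the alignment handling above, for offset = 5 and
 * len = 10: the head read fetches the word at 4 and copies bytes 5..7
 * (b_offset = 1, b_count = 3); the middle loop then copies one whole
 * word, bytes 8..11 (len is now 7, so len - (len & 3) = 4); and the
 * tail read fetches the word at 12 and copies the final 3 bytes
 * (12..14).
 */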
11867 
11868 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11869 {
11870 	struct tg3 *tp = netdev_priv(dev);
11871 	int ret;
11872 	u32 offset, len, b_offset, odd_len;
11873 	u8 *buf;
11874 	__be32 start, end;
11875 
11876 	if (tg3_flag(tp, NO_NVRAM) ||
11877 	    eeprom->magic != TG3_EEPROM_MAGIC)
11878 		return -EINVAL;
11879 
11880 	offset = eeprom->offset;
11881 	len = eeprom->len;
11882 
11883 	if ((b_offset = (offset & 3))) {
11884 		/* adjustments to start on required 4 byte boundary */
11885 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11886 		if (ret)
11887 			return ret;
11888 		len += b_offset;
11889 		offset &= ~3;
11890 		if (len < 4)
11891 			len = 4;
11892 	}
11893 
11894 	odd_len = 0;
11895 	if (len & 3) {
11896 		/* adjustments to end on required 4 byte boundary */
11897 		odd_len = 1;
11898 		len = (len + 3) & ~3;
11899 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11900 		if (ret)
11901 			return ret;
11902 	}
11903 
11904 	buf = data;
11905 	if (b_offset || odd_len) {
11906 		buf = kmalloc(len, GFP_KERNEL);
11907 		if (!buf)
11908 			return -ENOMEM;
11909 		if (b_offset)
11910 			memcpy(buf, &start, 4);
11911 		if (odd_len)
11912 			memcpy(buf+len-4, &end, 4);
11913 		memcpy(buf + b_offset, data, eeprom->len);
11914 	}
11915 
11916 	ret = tg3_nvram_write_block(tp, offset, len, buf);
11917 
11918 	if (buf != data)
11919 		kfree(buf);
11920 
11921 	return ret;
11922 }
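
/* The write path widens an unaligned request to whole NVRAM words
 * with a read-modify-write.  E.g. for offset = 6, len = 3: the word
 * at 4 is read into 'start', offset becomes 4 and len 5; len is then
 * rounded up to 8 and the word at 8 is read into 'end'; both words
 * are copied into a scratch buffer, the caller's 3 bytes are laid
 * over bytes 2..4 of it, and the full 8-byte run is written back.
 */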
11923 
11924 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11925 {
11926 	struct tg3 *tp = netdev_priv(dev);
11927 
11928 	if (tg3_flag(tp, USE_PHYLIB)) {
11929 		struct phy_device *phydev;
11930 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11931 			return -EAGAIN;
11932 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11933 		return phy_ethtool_gset(phydev, cmd);
11934 	}
11935 
11936 	cmd->supported = (SUPPORTED_Autoneg);
11937 
11938 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11939 		cmd->supported |= (SUPPORTED_1000baseT_Half |
11940 				   SUPPORTED_1000baseT_Full);
11941 
11942 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11943 		cmd->supported |= (SUPPORTED_100baseT_Half |
11944 				  SUPPORTED_100baseT_Full |
11945 				  SUPPORTED_10baseT_Half |
11946 				  SUPPORTED_10baseT_Full |
11947 				  SUPPORTED_TP);
11948 		cmd->port = PORT_TP;
11949 	} else {
11950 		cmd->supported |= SUPPORTED_FIBRE;
11951 		cmd->port = PORT_FIBRE;
11952 	}
11953 
11954 	cmd->advertising = tp->link_config.advertising;
11955 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
11956 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11957 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11958 				cmd->advertising |= ADVERTISED_Pause;
11959 			} else {
11960 				cmd->advertising |= ADVERTISED_Pause |
11961 						    ADVERTISED_Asym_Pause;
11962 			}
11963 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11964 			cmd->advertising |= ADVERTISED_Asym_Pause;
11965 		}
11966 	}
11967 	if (netif_running(dev) && tp->link_up) {
11968 		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11969 		cmd->duplex = tp->link_config.active_duplex;
11970 		cmd->lp_advertising = tp->link_config.rmt_adv;
11971 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11972 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11973 				cmd->eth_tp_mdix = ETH_TP_MDI_X;
11974 			else
11975 				cmd->eth_tp_mdix = ETH_TP_MDI;
11976 		}
11977 	} else {
11978 		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11979 		cmd->duplex = DUPLEX_UNKNOWN;
11980 		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11981 	}
11982 	cmd->phy_address = tp->phy_addr;
11983 	cmd->transceiver = XCVR_INTERNAL;
11984 	cmd->autoneg = tp->link_config.autoneg;
11985 	cmd->maxtxpkt = 0;
11986 	cmd->maxrxpkt = 0;
11987 	return 0;
11988 }
11989 
11990 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11991 {
11992 	struct tg3 *tp = netdev_priv(dev);
11993 	u32 speed = ethtool_cmd_speed(cmd);
11994 
11995 	if (tg3_flag(tp, USE_PHYLIB)) {
11996 		struct phy_device *phydev;
11997 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11998 			return -EAGAIN;
11999 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12000 		return phy_ethtool_sset(phydev, cmd);
12001 	}
12002 
12003 	if (cmd->autoneg != AUTONEG_ENABLE &&
12004 	    cmd->autoneg != AUTONEG_DISABLE)
12005 		return -EINVAL;
12006 
12007 	if (cmd->autoneg == AUTONEG_DISABLE &&
12008 	    cmd->duplex != DUPLEX_FULL &&
12009 	    cmd->duplex != DUPLEX_HALF)
12010 		return -EINVAL;
12011 
12012 	if (cmd->autoneg == AUTONEG_ENABLE) {
12013 		u32 mask = ADVERTISED_Autoneg |
12014 			   ADVERTISED_Pause |
12015 			   ADVERTISED_Asym_Pause;
12016 
12017 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12018 			mask |= ADVERTISED_1000baseT_Half |
12019 				ADVERTISED_1000baseT_Full;
12020 
12021 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12022 			mask |= ADVERTISED_100baseT_Half |
12023 				ADVERTISED_100baseT_Full |
12024 				ADVERTISED_10baseT_Half |
12025 				ADVERTISED_10baseT_Full |
12026 				ADVERTISED_TP;
12027 		else
12028 			mask |= ADVERTISED_FIBRE;
12029 
12030 		if (cmd->advertising & ~mask)
12031 			return -EINVAL;
12032 
12033 		mask &= (ADVERTISED_1000baseT_Half |
12034 			 ADVERTISED_1000baseT_Full |
12035 			 ADVERTISED_100baseT_Half |
12036 			 ADVERTISED_100baseT_Full |
12037 			 ADVERTISED_10baseT_Half |
12038 			 ADVERTISED_10baseT_Full);
12039 
12040 		cmd->advertising &= mask;
12041 	} else {
12042 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12043 			if (speed != SPEED_1000)
12044 				return -EINVAL;
12045 
12046 			if (cmd->duplex != DUPLEX_FULL)
12047 				return -EINVAL;
12048 		} else {
12049 			if (speed != SPEED_100 &&
12050 			    speed != SPEED_10)
12051 				return -EINVAL;
12052 		}
12053 	}
12054 
12055 	tg3_full_lock(tp, 0);
12056 
12057 	tp->link_config.autoneg = cmd->autoneg;
12058 	if (cmd->autoneg == AUTONEG_ENABLE) {
12059 		tp->link_config.advertising = (cmd->advertising |
12060 					      ADVERTISED_Autoneg);
12061 		tp->link_config.speed = SPEED_UNKNOWN;
12062 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12063 	} else {
12064 		tp->link_config.advertising = 0;
12065 		tp->link_config.speed = speed;
12066 		tp->link_config.duplex = cmd->duplex;
12067 	}
12068 
12069 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12070 
12071 	tg3_warn_mgmt_link_flap(tp);
12072 
12073 	if (netif_running(dev))
12074 		tg3_setup_phy(tp, true);
12075 
12076 	tg3_full_unlock(tp);
12077 
12078 	return 0;
12079 }
12080 
12081 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12082 {
12083 	struct tg3 *tp = netdev_priv(dev);
12084 
12085 	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12086 	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12087 	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12088 	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12089 }
12090 
12091 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12092 {
12093 	struct tg3 *tp = netdev_priv(dev);
12094 
12095 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12096 		wol->supported = WAKE_MAGIC;
12097 	else
12098 		wol->supported = 0;
12099 	wol->wolopts = 0;
12100 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12101 		wol->wolopts = WAKE_MAGIC;
12102 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12103 }
12104 
12105 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12106 {
12107 	struct tg3 *tp = netdev_priv(dev);
12108 	struct device *dp = &tp->pdev->dev;
12109 
12110 	if (wol->wolopts & ~WAKE_MAGIC)
12111 		return -EINVAL;
12112 	if ((wol->wolopts & WAKE_MAGIC) &&
12113 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12114 		return -EINVAL;
12115 
12116 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12117 
12118 	if (device_may_wakeup(dp))
12119 		tg3_flag_set(tp, WOL_ENABLE);
12120 	else
12121 		tg3_flag_clear(tp, WOL_ENABLE);
12122 
12123 	return 0;
12124 }
12125 
12126 static u32 tg3_get_msglevel(struct net_device *dev)
12127 {
12128 	struct tg3 *tp = netdev_priv(dev);
12129 	return tp->msg_enable;
12130 }
12131 
12132 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12133 {
12134 	struct tg3 *tp = netdev_priv(dev);
12135 	tp->msg_enable = value;
12136 }
12137 
12138 static int tg3_nway_reset(struct net_device *dev)
12139 {
12140 	struct tg3 *tp = netdev_priv(dev);
12141 	int r;
12142 
12143 	if (!netif_running(dev))
12144 		return -EAGAIN;
12145 
12146 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12147 		return -EINVAL;
12148 
12149 	tg3_warn_mgmt_link_flap(tp);
12150 
12151 	if (tg3_flag(tp, USE_PHYLIB)) {
12152 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12153 			return -EAGAIN;
12154 		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12155 	} else {
12156 		u32 bmcr;
12157 
12158 		spin_lock_bh(&tp->lock);
12159 		r = -EINVAL;
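		/* BMCR is read twice here; the first result is discarded
		 * and only the second, presumably fresher, value is
		 * checked below.
		 */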
12160 		tg3_readphy(tp, MII_BMCR, &bmcr);
12161 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12162 		    ((bmcr & BMCR_ANENABLE) ||
12163 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12164 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12165 						   BMCR_ANENABLE);
12166 			r = 0;
12167 		}
12168 		spin_unlock_bh(&tp->lock);
12169 	}
12170 
12171 	return r;
12172 }
12173 
12174 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12175 {
12176 	struct tg3 *tp = netdev_priv(dev);
12177 
12178 	ering->rx_max_pending = tp->rx_std_ring_mask;
12179 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12180 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12181 	else
12182 		ering->rx_jumbo_max_pending = 0;
12183 
12184 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12185 
12186 	ering->rx_pending = tp->rx_pending;
12187 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12188 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12189 	else
12190 		ering->rx_jumbo_pending = 0;
12191 
12192 	ering->tx_pending = tp->napi[0].tx_pending;
12193 }
12194 
12195 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12196 {
12197 	struct tg3 *tp = netdev_priv(dev);
12198 	int i, irq_sync = 0, err = 0;
12199 
12200 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12201 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12202 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12203 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12204 	    (tg3_flag(tp, TSO_BUG) &&
12205 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12206 		return -EINVAL;
12207 
12208 	if (netif_running(dev)) {
12209 		tg3_phy_stop(tp);
12210 		tg3_netif_stop(tp);
12211 		irq_sync = 1;
12212 	}
12213 
12214 	tg3_full_lock(tp, irq_sync);
12215 
12216 	tp->rx_pending = ering->rx_pending;
12217 
12218 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12219 	    tp->rx_pending > 63)
12220 		tp->rx_pending = 63;
12221 	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12222 
12223 	for (i = 0; i < tp->irq_max; i++)
12224 		tp->napi[i].tx_pending = ering->tx_pending;
12225 
12226 	if (netif_running(dev)) {
12227 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12228 		err = tg3_restart_hw(tp, false);
12229 		if (!err)
12230 			tg3_netif_start(tp);
12231 	}
12232 
12233 	tg3_full_unlock(tp);
12234 
12235 	if (irq_sync && !err)
12236 		tg3_phy_start(tp);
12237 
12238 	return err;
12239 }
12240 
12241 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12242 {
12243 	struct tg3 *tp = netdev_priv(dev);
12244 
12245 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12246 
12247 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12248 		epause->rx_pause = 1;
12249 	else
12250 		epause->rx_pause = 0;
12251 
12252 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12253 		epause->tx_pause = 1;
12254 	else
12255 		epause->tx_pause = 0;
12256 }
12257 
12258 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12259 {
12260 	struct tg3 *tp = netdev_priv(dev);
12261 	int err = 0;
12262 
12263 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12264 		tg3_warn_mgmt_link_flap(tp);
12265 
12266 	if (tg3_flag(tp, USE_PHYLIB)) {
12267 		u32 newadv;
12268 		struct phy_device *phydev;
12269 
12270 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12271 
12272 		if (!(phydev->supported & SUPPORTED_Pause) ||
12273 		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12274 		     (epause->rx_pause != epause->tx_pause)))
12275 			return -EINVAL;
12276 
12277 		tp->link_config.flowctrl = 0;
12278 		if (epause->rx_pause) {
12279 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12280 
12281 			if (epause->tx_pause) {
12282 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12283 				newadv = ADVERTISED_Pause;
12284 			} else
12285 				newadv = ADVERTISED_Pause |
12286 					 ADVERTISED_Asym_Pause;
12287 		} else if (epause->tx_pause) {
12288 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12289 			newadv = ADVERTISED_Asym_Pause;
12290 		} else
12291 			newadv = 0;
12292 
12293 		if (epause->autoneg)
12294 			tg3_flag_set(tp, PAUSE_AUTONEG);
12295 		else
12296 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12297 
12298 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12299 			u32 oldadv = phydev->advertising &
12300 				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12301 			if (oldadv != newadv) {
12302 				phydev->advertising &=
12303 					~(ADVERTISED_Pause |
12304 					  ADVERTISED_Asym_Pause);
12305 				phydev->advertising |= newadv;
12306 				if (phydev->autoneg) {
12307 					/*
12308 					 * Always renegotiate the link to
12309 					 * inform our link partner of our
12310 					 * flow control settings, even if the
12311 					 * flow control is forced.  Let
12312 					 * tg3_adjust_link() do the final
12313 					 * flow control setup.
12314 					 */
12315 					return phy_start_aneg(phydev);
12316 				}
12317 			}
12318 
12319 			if (!epause->autoneg)
12320 				tg3_setup_flow_control(tp, 0, 0);
12321 		} else {
12322 			tp->link_config.advertising &=
12323 					~(ADVERTISED_Pause |
12324 					  ADVERTISED_Asym_Pause);
12325 			tp->link_config.advertising |= newadv;
12326 		}
12327 	} else {
12328 		int irq_sync = 0;
12329 
12330 		if (netif_running(dev)) {
12331 			tg3_netif_stop(tp);
12332 			irq_sync = 1;
12333 		}
12334 
12335 		tg3_full_lock(tp, irq_sync);
12336 
12337 		if (epause->autoneg)
12338 			tg3_flag_set(tp, PAUSE_AUTONEG);
12339 		else
12340 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12341 		if (epause->rx_pause)
12342 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12343 		else
12344 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12345 		if (epause->tx_pause)
12346 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12347 		else
12348 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12349 
12350 		if (netif_running(dev)) {
12351 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12352 			err = tg3_restart_hw(tp, false);
12353 			if (!err)
12354 				tg3_netif_start(tp);
12355 		}
12356 
12357 		tg3_full_unlock(tp);
12358 	}
12359 
12360 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12361 
12362 	return err;
12363 }
12364 
12365 static int tg3_get_sset_count(struct net_device *dev, int sset)
12366 {
12367 	switch (sset) {
12368 	case ETH_SS_TEST:
12369 		return TG3_NUM_TEST;
12370 	case ETH_SS_STATS:
12371 		return TG3_NUM_STATS;
12372 	default:
12373 		return -EOPNOTSUPP;
12374 	}
12375 }
12376 
12377 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12378 			 u32 *rules __always_unused)
12379 {
12380 	struct tg3 *tp = netdev_priv(dev);
12381 
12382 	if (!tg3_flag(tp, SUPPORT_MSIX))
12383 		return -EOPNOTSUPP;
12384 
12385 	switch (info->cmd) {
12386 	case ETHTOOL_GRXRINGS:
12387 		if (netif_running(tp->dev))
12388 			info->data = tp->rxq_cnt;
12389 		else {
12390 			info->data = num_online_cpus();
12391 			if (info->data > TG3_RSS_MAX_NUM_QS)
12392 				info->data = TG3_RSS_MAX_NUM_QS;
12393 		}
12394 
12395 		/* The first interrupt vector only
12396 		 * handles link interrupts.
12397 		 */
12398 		info->data -= 1;
12399 		return 0;
12400 
12401 	default:
12402 		return -EOPNOTSUPP;
12403 	}
12404 }
12405 
12406 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12407 {
12408 	u32 size = 0;
12409 	struct tg3 *tp = netdev_priv(dev);
12410 
12411 	if (tg3_flag(tp, SUPPORT_MSIX))
12412 		size = TG3_RSS_INDIR_TBL_SIZE;
12413 
12414 	return size;
12415 }
12416 
12417 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12418 {
12419 	struct tg3 *tp = netdev_priv(dev);
12420 	int i;
12421 
12422 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12423 		indir[i] = tp->rss_ind_tbl[i];
12424 
12425 	return 0;
12426 }
12427 
12428 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12429 {
12430 	struct tg3 *tp = netdev_priv(dev);
12431 	size_t i;
12432 
12433 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12434 		tp->rss_ind_tbl[i] = indir[i];
12435 
12436 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12437 		return 0;
12438 
12439 	/* It is legal to write the indirection
12440 	 * table while the device is running.
12441 	 */
12442 	tg3_full_lock(tp, 0);
12443 	tg3_rss_write_indir_tbl(tp);
12444 	tg3_full_unlock(tp);
12445 
12446 	return 0;
12447 }
12448 
12449 static void tg3_get_channels(struct net_device *dev,
12450 			     struct ethtool_channels *channel)
12451 {
12452 	struct tg3 *tp = netdev_priv(dev);
12453 	u32 deflt_qs = netif_get_num_default_rss_queues();
12454 
12455 	channel->max_rx = tp->rxq_max;
12456 	channel->max_tx = tp->txq_max;
12457 
12458 	if (netif_running(dev)) {
12459 		channel->rx_count = tp->rxq_cnt;
12460 		channel->tx_count = tp->txq_cnt;
12461 	} else {
12462 		if (tp->rxq_req)
12463 			channel->rx_count = tp->rxq_req;
12464 		else
12465 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12466 
12467 		if (tp->txq_req)
12468 			channel->tx_count = tp->txq_req;
12469 		else
12470 			channel->tx_count = min(deflt_qs, tp->txq_max);
12471 	}
12472 }
12473 
12474 static int tg3_set_channels(struct net_device *dev,
12475 			    struct ethtool_channels *channel)
12476 {
12477 	struct tg3 *tp = netdev_priv(dev);
12478 
12479 	if (!tg3_flag(tp, SUPPORT_MSIX))
12480 		return -EOPNOTSUPP;
12481 
12482 	if (channel->rx_count > tp->rxq_max ||
12483 	    channel->tx_count > tp->txq_max)
12484 		return -EINVAL;
12485 
12486 	tp->rxq_req = channel->rx_count;
12487 	tp->txq_req = channel->tx_count;
12488 
12489 	if (!netif_running(dev))
12490 		return 0;
12491 
12492 	tg3_stop(tp);
12493 
12494 	tg3_carrier_off(tp);
12495 
12496 	tg3_start(tp, true, false, false);
12497 
12498 	return 0;
12499 }
12500 
12501 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12502 {
12503 	switch (stringset) {
12504 	case ETH_SS_STATS:
12505 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12506 		break;
12507 	case ETH_SS_TEST:
12508 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12509 		break;
12510 	default:
12511 		WARN_ON(1);	/* we need a WARN() */
12512 		break;
12513 	}
12514 }
12515 
12516 static int tg3_set_phys_id(struct net_device *dev,
12517 			    enum ethtool_phys_id_state state)
12518 {
12519 	struct tg3 *tp = netdev_priv(dev);
12520 
12521 	if (!netif_running(tp->dev))
12522 		return -EAGAIN;
12523 
12524 	switch (state) {
12525 	case ETHTOOL_ID_ACTIVE:
12526 		return 1;	/* cycle on/off once per second */
12527 
12528 	case ETHTOOL_ID_ON:
12529 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12530 		     LED_CTRL_1000MBPS_ON |
12531 		     LED_CTRL_100MBPS_ON |
12532 		     LED_CTRL_10MBPS_ON |
12533 		     LED_CTRL_TRAFFIC_OVERRIDE |
12534 		     LED_CTRL_TRAFFIC_BLINK |
12535 		     LED_CTRL_TRAFFIC_LED);
12536 		break;
12537 
12538 	case ETHTOOL_ID_OFF:
12539 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12540 		     LED_CTRL_TRAFFIC_OVERRIDE);
12541 		break;
12542 
12543 	case ETHTOOL_ID_INACTIVE:
12544 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12545 		break;
12546 	}
12547 
12548 	return 0;
12549 }
12550 
12551 static void tg3_get_ethtool_stats(struct net_device *dev,
12552 				   struct ethtool_stats *estats, u64 *tmp_stats)
12553 {
12554 	struct tg3 *tp = netdev_priv(dev);
12555 
12556 	if (tp->hw_stats)
12557 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12558 	else
12559 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12560 }
12561 
12562 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12563 {
12564 	int i;
12565 	__be32 *buf;
12566 	u32 offset = 0, len = 0;
12567 	u32 magic, val;
12568 
12569 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12570 		return NULL;
12571 
12572 	if (magic == TG3_EEPROM_MAGIC) {
12573 		for (offset = TG3_NVM_DIR_START;
12574 		     offset < TG3_NVM_DIR_END;
12575 		     offset += TG3_NVM_DIRENT_SIZE) {
12576 			if (tg3_nvram_read(tp, offset, &val))
12577 				return NULL;
12578 
12579 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12580 			    TG3_NVM_DIRTYPE_EXTVPD)
12581 				break;
12582 		}
12583 
12584 		if (offset != TG3_NVM_DIR_END) {
12585 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12586 			if (tg3_nvram_read(tp, offset + 4, &offset))
12587 				return NULL;
12588 
12589 			offset = tg3_nvram_logical_addr(tp, offset);
12590 		}
12591 	}
12592 
12593 	if (!offset || !len) {
12594 		offset = TG3_NVM_VPD_OFF;
12595 		len = TG3_NVM_VPD_LEN;
12596 	}
12597 
12598 	buf = kmalloc(len, GFP_KERNEL);
12599 	if (buf == NULL)
12600 		return NULL;
12601 
12602 	if (magic == TG3_EEPROM_MAGIC) {
12603 		for (i = 0; i < len; i += 4) {
12604 			/* The data is in little-endian format in NVRAM.
12605 			 * Use the big-endian read routines to preserve
12606 			 * the byte order as it exists in NVRAM.
12607 			 */
12608 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12609 				goto error;
12610 		}
12611 	} else {
12612 		u8 *ptr;
12613 		ssize_t cnt;
12614 		unsigned int pos = 0;
12615 
12616 		ptr = (u8 *)&buf[0];
12617 		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12618 			cnt = pci_read_vpd(tp->pdev, pos,
12619 					   len - pos, ptr);
12620 			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12621 				cnt = 0;
12622 			else if (cnt < 0)
12623 				goto error;
12624 		}
12625 		if (pos != len)
12626 			goto error;
12627 	}
12628 
12629 	*vpdlen = len;
12630 
12631 	return buf;
12632 
12633 error:
12634 	kfree(buf);
12635 	return NULL;
12636 }
12637 
12638 #define NVRAM_TEST_SIZE 0x100
12639 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12640 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12641 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12642 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12643 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12644 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12645 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12646 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12647 
12648 static int tg3_test_nvram(struct tg3 *tp)
12649 {
12650 	u32 csum, magic, len;
12651 	__be32 *buf;
12652 	int i, j, k, err = 0, size;
12653 
12654 	if (tg3_flag(tp, NO_NVRAM))
12655 		return 0;
12656 
12657 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12658 		return -EIO;
12659 
12660 	if (magic == TG3_EEPROM_MAGIC)
12661 		size = NVRAM_TEST_SIZE;
12662 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12663 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12664 		    TG3_EEPROM_SB_FORMAT_1) {
12665 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12666 			case TG3_EEPROM_SB_REVISION_0:
12667 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12668 				break;
12669 			case TG3_EEPROM_SB_REVISION_2:
12670 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12671 				break;
12672 			case TG3_EEPROM_SB_REVISION_3:
12673 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12674 				break;
12675 			case TG3_EEPROM_SB_REVISION_4:
12676 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12677 				break;
12678 			case TG3_EEPROM_SB_REVISION_5:
12679 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12680 				break;
12681 			case TG3_EEPROM_SB_REVISION_6:
12682 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12683 				break;
12684 			default:
12685 				return -EIO;
12686 			}
12687 		} else
12688 			return 0;
12689 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12690 		size = NVRAM_SELFBOOT_HW_SIZE;
12691 	else
12692 		return -EIO;
12693 
12694 	buf = kmalloc(size, GFP_KERNEL);
12695 	if (buf == NULL)
12696 		return -ENOMEM;
12697 
12698 	err = -EIO;
12699 	for (i = 0, j = 0; i < size; i += 4, j++) {
12700 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12701 		if (err)
12702 			break;
12703 	}
12704 	if (i < size)
12705 		goto out;
12706 
12707 	/* Selfboot format */
12708 	magic = be32_to_cpu(buf[0]);
12709 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12710 	    TG3_EEPROM_MAGIC_FW) {
12711 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12712 
12713 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12714 		    TG3_EEPROM_SB_REVISION_2) {
12715 			/* For rev 2, the csum doesn't include the MBA. */
12716 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12717 				csum8 += buf8[i];
12718 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12719 				csum8 += buf8[i];
12720 		} else {
12721 			for (i = 0; i < size; i++)
12722 				csum8 += buf8[i];
12723 		}
12724 
12725 		if (csum8 == 0) {
12726 			err = 0;
12727 			goto out;
12728 		}
12729 
12730 		err = -EIO;
12731 		goto out;
12732 	}
12733 
12734 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12735 	    TG3_EEPROM_MAGIC_HW) {
12736 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12737 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12738 		u8 *buf8 = (u8 *) buf;
12739 
12740 		/* Separate the parity bits and the data bytes.  */
12741 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12742 			if ((i == 0) || (i == 8)) {
12743 				int l;
12744 				u8 msk;
12745 
12746 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12747 					parity[k++] = buf8[i] & msk;
12748 				i++;
12749 			} else if (i == 16) {
12750 				int l;
12751 				u8 msk;
12752 
12753 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12754 					parity[k++] = buf8[i] & msk;
12755 				i++;
12756 
12757 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12758 					parity[k++] = buf8[i] & msk;
12759 				i++;
12760 			}
12761 			data[j++] = buf8[i];
12762 		}
12763 
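		/* The selfboot HW format uses odd parity: a byte's parity
		 * bit should be set exactly when the byte has an even
		 * number of 1 bits.  The loop below fails the test on
		 * either mismatch -- an odd-weight byte with its parity
		 * bit set, or an even-weight byte with it clear.
		 */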
12764 		err = -EIO;
12765 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12766 			u8 hw8 = hweight8(data[i]);
12767 
12768 			if ((hw8 & 0x1) && parity[i])
12769 				goto out;
12770 			else if (!(hw8 & 0x1) && !parity[i])
12771 				goto out;
12772 		}
12773 		err = 0;
12774 		goto out;
12775 	}
12776 
12777 	err = -EIO;
12778 
12779 	/* Bootstrap checksum at offset 0x10 */
12780 	csum = calc_crc((unsigned char *) buf, 0x10);
12781 	if (csum != le32_to_cpu(buf[0x10/4]))
12782 		goto out;
12783 
12784 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12785 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12786 	if (csum != le32_to_cpu(buf[0xfc/4]))
12787 		goto out;
12788 
12789 	kfree(buf);
12790 
12791 	buf = tg3_vpd_readblock(tp, &len);
12792 	if (!buf)
12793 		return -ENOMEM;
12794 
12795 	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12796 	if (i > 0) {
12797 		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12798 		if (j < 0)
12799 			goto out;
12800 
12801 		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12802 			goto out;
12803 
12804 		i += PCI_VPD_LRDT_TAG_SIZE;
12805 		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12806 					      PCI_VPD_RO_KEYWORD_CHKSUM);
12807 		if (j > 0) {
12808 			u8 csum8 = 0;
12809 
12810 			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12811 
12812 			for (i = 0; i <= j; i++)
12813 				csum8 += ((u8 *)buf)[i];
12814 
12815 			if (csum8)
12816 				goto out;
12817 		}
12818 	}
12819 
12820 	err = 0;
12821 
12822 out:
12823 	kfree(buf);
12824 	return err;
12825 }
12826 
12827 #define TG3_SERDES_TIMEOUT_SEC	2
12828 #define TG3_COPPER_TIMEOUT_SEC	6
12829 
12830 static int tg3_test_link(struct tg3 *tp)
12831 {
12832 	int i, max;
12833 
12834 	if (!netif_running(tp->dev))
12835 		return -ENODEV;
12836 
12837 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12838 		max = TG3_SERDES_TIMEOUT_SEC;
12839 	else
12840 		max = TG3_COPPER_TIMEOUT_SEC;
12841 
12842 	for (i = 0; i < max; i++) {
12843 		if (tp->link_up)
12844 			return 0;
12845 
12846 		if (msleep_interruptible(1000))
12847 			break;
12848 	}
12849 
12850 	return -EIO;
12851 }
12852 
12853 /* Only test the commonly used registers */
12854 static int tg3_test_registers(struct tg3 *tp)
12855 {
12856 	int i, is_5705, is_5750;
12857 	u32 offset, read_mask, write_mask, val, save_val, read_val;
12858 	static struct {
12859 		u16 offset;
12860 		u16 flags;
12861 #define TG3_FL_5705	0x1
12862 #define TG3_FL_NOT_5705	0x2
12863 #define TG3_FL_NOT_5788	0x4
12864 #define TG3_FL_NOT_5750	0x8
12865 		u32 read_mask;
12866 		u32 write_mask;
12867 	} reg_tbl[] = {
12868 		/* MAC Control Registers */
12869 		{ MAC_MODE, TG3_FL_NOT_5705,
12870 			0x00000000, 0x00ef6f8c },
12871 		{ MAC_MODE, TG3_FL_5705,
12872 			0x00000000, 0x01ef6b8c },
12873 		{ MAC_STATUS, TG3_FL_NOT_5705,
12874 			0x03800107, 0x00000000 },
12875 		{ MAC_STATUS, TG3_FL_5705,
12876 			0x03800100, 0x00000000 },
12877 		{ MAC_ADDR_0_HIGH, 0x0000,
12878 			0x00000000, 0x0000ffff },
12879 		{ MAC_ADDR_0_LOW, 0x0000,
12880 			0x00000000, 0xffffffff },
12881 		{ MAC_RX_MTU_SIZE, 0x0000,
12882 			0x00000000, 0x0000ffff },
12883 		{ MAC_TX_MODE, 0x0000,
12884 			0x00000000, 0x00000070 },
12885 		{ MAC_TX_LENGTHS, 0x0000,
12886 			0x00000000, 0x00003fff },
12887 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
12888 			0x00000000, 0x000007fc },
12889 		{ MAC_RX_MODE, TG3_FL_5705,
12890 			0x00000000, 0x000007dc },
12891 		{ MAC_HASH_REG_0, 0x0000,
12892 			0x00000000, 0xffffffff },
12893 		{ MAC_HASH_REG_1, 0x0000,
12894 			0x00000000, 0xffffffff },
12895 		{ MAC_HASH_REG_2, 0x0000,
12896 			0x00000000, 0xffffffff },
12897 		{ MAC_HASH_REG_3, 0x0000,
12898 			0x00000000, 0xffffffff },
12899 
12900 		/* Receive Data and Receive BD Initiator Control Registers. */
12901 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12902 			0x00000000, 0xffffffff },
12903 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12904 			0x00000000, 0xffffffff },
12905 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12906 			0x00000000, 0x00000003 },
12907 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12908 			0x00000000, 0xffffffff },
12909 		{ RCVDBDI_STD_BD+0, 0x0000,
12910 			0x00000000, 0xffffffff },
12911 		{ RCVDBDI_STD_BD+4, 0x0000,
12912 			0x00000000, 0xffffffff },
12913 		{ RCVDBDI_STD_BD+8, 0x0000,
12914 			0x00000000, 0xffff0002 },
12915 		{ RCVDBDI_STD_BD+0xc, 0x0000,
12916 			0x00000000, 0xffffffff },
12917 
12918 		/* Receive BD Initiator Control Registers. */
12919 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12920 			0x00000000, 0xffffffff },
12921 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
12922 			0x00000000, 0x000003ff },
12923 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12924 			0x00000000, 0xffffffff },
12925 
12926 		/* Host Coalescing Control Registers. */
12927 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
12928 			0x00000000, 0x00000004 },
12929 		{ HOSTCC_MODE, TG3_FL_5705,
12930 			0x00000000, 0x000000f6 },
12931 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12932 			0x00000000, 0xffffffff },
12933 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12934 			0x00000000, 0x000003ff },
12935 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12936 			0x00000000, 0xffffffff },
12937 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12938 			0x00000000, 0x000003ff },
12939 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12940 			0x00000000, 0xffffffff },
12941 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12942 			0x00000000, 0x000000ff },
12943 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12944 			0x00000000, 0xffffffff },
12945 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12946 			0x00000000, 0x000000ff },
12947 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12948 			0x00000000, 0xffffffff },
12949 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12950 			0x00000000, 0xffffffff },
12951 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12952 			0x00000000, 0xffffffff },
12953 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12954 			0x00000000, 0x000000ff },
12955 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12956 			0x00000000, 0xffffffff },
12957 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12958 			0x00000000, 0x000000ff },
12959 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12960 			0x00000000, 0xffffffff },
12961 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12962 			0x00000000, 0xffffffff },
12963 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12964 			0x00000000, 0xffffffff },
12965 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12966 			0x00000000, 0xffffffff },
12967 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12968 			0x00000000, 0xffffffff },
12969 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12970 			0xffffffff, 0x00000000 },
12971 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12972 			0xffffffff, 0x00000000 },
12973 
12974 		/* Buffer Manager Control Registers. */
12975 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12976 			0x00000000, 0x007fff80 },
12977 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12978 			0x00000000, 0x007fffff },
12979 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12980 			0x00000000, 0x0000003f },
12981 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12982 			0x00000000, 0x000001ff },
12983 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
12984 			0x00000000, 0x000001ff },
12985 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12986 			0xffffffff, 0x00000000 },
12987 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12988 			0xffffffff, 0x00000000 },
12989 
12990 		/* Mailbox Registers */
12991 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12992 			0x00000000, 0x000001ff },
12993 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12994 			0x00000000, 0x000001ff },
12995 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12996 			0x00000000, 0x000007ff },
12997 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12998 			0x00000000, 0x000001ff },
12999 
13000 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13001 	};
13002 
13003 	is_5705 = is_5750 = 0;
13004 	if (tg3_flag(tp, 5705_PLUS)) {
13005 		is_5705 = 1;
13006 		if (tg3_flag(tp, 5750_PLUS))
13007 			is_5750 = 1;
13008 	}
13009 
13010 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13011 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13012 			continue;
13013 
13014 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13015 			continue;
13016 
13017 		if (tg3_flag(tp, IS_5788) &&
13018 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13019 			continue;
13020 
13021 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13022 			continue;
13023 
13024 		offset = (u32) reg_tbl[i].offset;
13025 		read_mask = reg_tbl[i].read_mask;
13026 		write_mask = reg_tbl[i].write_mask;
13027 
13028 		/* Save the original register content */
13029 		save_val = tr32(offset);
13030 
13031 		/* Determine the read-only value. */
13032 		read_val = save_val & read_mask;
13033 
13034 		/* Write zero to the register, then make sure the read-only bits
13035 		 * are not changed and the read/write bits are all zeros.
13036 		 */
13037 		tw32(offset, 0);
13038 
13039 		val = tr32(offset);
13040 
13041 		/* Test the read-only and read/write bits. */
13042 		if (((val & read_mask) != read_val) || (val & write_mask))
13043 			goto out;
13044 
13045 		/* Write ones to all the bits defined by RdMask and WrMask, then
13046 		 * make sure the read-only bits are not changed and the
13047 		 * read/write bits are all ones.
13048 		 */
13049 		tw32(offset, read_mask | write_mask);
13050 
13051 		val = tr32(offset);
13052 
13053 		/* Test the read-only bits. */
13054 		if ((val & read_mask) != read_val)
13055 			goto out;
13056 
13057 		/* Test the read/write bits. */
13058 		if ((val & write_mask) != write_mask)
13059 			goto out;
13060 
13061 		tw32(offset, save_val);
13062 	}
13063 
13064 	return 0;
13065 
13066 out:
13067 	if (netif_msg_hw(tp))
13068 		netdev_err(tp->dev,
13069 			   "Register test failed at offset %x\n", offset);
13070 	tw32(offset, save_val);
13071 	return -EIO;
13072 }
13073 
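/* Write each test pattern to every 32-bit word in the window
 * [offset, offset + len) of on-chip memory and read it back, failing
 * with -EIO on the first mismatch.
 */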
13074 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13075 {
13076 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13077 	int i;
13078 	u32 j;
13079 
13080 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13081 		for (j = 0; j < len; j += 4) {
13082 			u32 val;
13083 
13084 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13085 			tg3_read_mem(tp, offset + j, &val);
13086 			if (val != test_pattern[i])
13087 				return -EIO;
13088 		}
13089 	}
13090 	return 0;
13091 }
13092 
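/* Pick the offset/length table matching this ASIC's internal memory
 * map (the testable SRAM regions differ per chip family) and run the
 * pattern test over each region.
 */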
13093 static int tg3_test_memory(struct tg3 *tp)
13094 {
13095 	static struct mem_entry {
13096 		u32 offset;
13097 		u32 len;
13098 	} mem_tbl_570x[] = {
13099 		{ 0x00000000, 0x00b50},
13100 		{ 0x00002000, 0x1c000},
13101 		{ 0xffffffff, 0x00000}
13102 	}, mem_tbl_5705[] = {
13103 		{ 0x00000100, 0x0000c},
13104 		{ 0x00000200, 0x00008},
13105 		{ 0x00004000, 0x00800},
13106 		{ 0x00006000, 0x01000},
13107 		{ 0x00008000, 0x02000},
13108 		{ 0x00010000, 0x0e000},
13109 		{ 0xffffffff, 0x00000}
13110 	}, mem_tbl_5755[] = {
13111 		{ 0x00000200, 0x00008},
13112 		{ 0x00004000, 0x00800},
13113 		{ 0x00006000, 0x00800},
13114 		{ 0x00008000, 0x02000},
13115 		{ 0x00010000, 0x0c000},
13116 		{ 0xffffffff, 0x00000}
13117 	}, mem_tbl_5906[] = {
13118 		{ 0x00000200, 0x00008},
13119 		{ 0x00004000, 0x00400},
13120 		{ 0x00006000, 0x00400},
13121 		{ 0x00008000, 0x01000},
13122 		{ 0x00010000, 0x01000},
13123 		{ 0xffffffff, 0x00000}
13124 	}, mem_tbl_5717[] = {
13125 		{ 0x00000200, 0x00008},
13126 		{ 0x00010000, 0x0a000},
13127 		{ 0x00020000, 0x13c00},
13128 		{ 0xffffffff, 0x00000}
13129 	}, mem_tbl_57765[] = {
13130 		{ 0x00000200, 0x00008},
13131 		{ 0x00004000, 0x00800},
13132 		{ 0x00006000, 0x09800},
13133 		{ 0x00010000, 0x0a000},
13134 		{ 0xffffffff, 0x00000}
13135 	};
13136 	struct mem_entry *mem_tbl;
13137 	int err = 0;
13138 	int i;
13139 
13140 	if (tg3_flag(tp, 5717_PLUS))
13141 		mem_tbl = mem_tbl_5717;
13142 	else if (tg3_flag(tp, 57765_CLASS) ||
13143 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13144 		mem_tbl = mem_tbl_57765;
13145 	else if (tg3_flag(tp, 5755_PLUS))
13146 		mem_tbl = mem_tbl_5755;
13147 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13148 		mem_tbl = mem_tbl_5906;
13149 	else if (tg3_flag(tp, 5705_PLUS))
13150 		mem_tbl = mem_tbl_5705;
13151 	else
13152 		mem_tbl = mem_tbl_570x;
13153 
13154 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13155 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13156 		if (err)
13157 			break;
13158 	}
13159 
13160 	return err;
13161 }
13162 
13163 #define TG3_TSO_MSS		500
13164 
13165 #define TG3_TSO_IP_HDR_LEN	20
13166 #define TG3_TSO_TCP_HDR_LEN	20
13167 #define TG3_TSO_TCP_OPT_LEN	12
13168 
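/* Canned frame used by the TSO loopback test: an IPv4 ethertype
 * followed by a 20-byte IP header and a 20-byte TCP header with 12
 * bytes of options (two NOPs plus a timestamp).  The IP total-length
 * field is filled in at run time and the TCP checksum is handled
 * according to the TSO mode in use.
 */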
13169 static const u8 tg3_tso_header[] = {
13170 0x08, 0x00,
13171 0x45, 0x00, 0x00, 0x00,
13172 0x00, 0x00, 0x40, 0x00,
13173 0x40, 0x06, 0x00, 0x00,
13174 0x0a, 0x00, 0x00, 0x01,
13175 0x0a, 0x00, 0x00, 0x02,
13176 0x0d, 0x00, 0xe0, 0x00,
13177 0x00, 0x00, 0x01, 0x00,
13178 0x00, 0x00, 0x02, 0x00,
13179 0x80, 0x10, 0x10, 0x00,
13180 0x14, 0x09, 0x00, 0x00,
13181 0x01, 0x01, 0x08, 0x0a,
13182 0x11, 0x11, 0x11, 0x11,
13183 0x11, 0x11, 0x11, 0x11,
13184 };
13185 
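/* Transmit a single frame (or a TSO burst when tso_loopback is set)
 * while the MAC or PHY is in loopback mode, then verify that it
 * arrives on the expected RX ring with the expected length and
 * payload.
 */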
13186 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13187 {
13188 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13189 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13190 	u32 budget;
13191 	struct sk_buff *skb;
13192 	u8 *tx_data, *rx_data;
13193 	dma_addr_t map;
13194 	int num_pkts, tx_len, rx_len, i, err;
13195 	struct tg3_rx_buffer_desc *desc;
13196 	struct tg3_napi *tnapi, *rnapi;
13197 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13198 
13199 	tnapi = &tp->napi[0];
13200 	rnapi = &tp->napi[0];
13201 	if (tp->irq_cnt > 1) {
13202 		if (tg3_flag(tp, ENABLE_RSS))
13203 			rnapi = &tp->napi[1];
13204 		if (tg3_flag(tp, ENABLE_TSS))
13205 			tnapi = &tp->napi[1];
13206 	}
13207 	coal_now = tnapi->coal_now | rnapi->coal_now;
13208 
13209 	err = -EIO;
13210 
13211 	tx_len = pktsz;
13212 	skb = netdev_alloc_skb(tp->dev, tx_len);
13213 	if (!skb)
13214 		return -ENOMEM;
13215 
13216 	tx_data = skb_put(skb, tx_len);
13217 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13218 	memset(tx_data + ETH_ALEN, 0x0, 8);
13219 
13220 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13221 
13222 	if (tso_loopback) {
13223 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13224 
13225 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13226 			      TG3_TSO_TCP_OPT_LEN;
13227 
13228 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13229 		       sizeof(tg3_tso_header));
13230 		mss = TG3_TSO_MSS;
13231 
13232 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13233 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13234 
13235 		/* Set the total length field in the IP header */
13236 		iph->tot_len = htons((u16)(mss + hdr_len));
13237 
13238 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13239 			      TXD_FLAG_CPU_POST_DMA);
13240 
13241 		if (tg3_flag(tp, HW_TSO_1) ||
13242 		    tg3_flag(tp, HW_TSO_2) ||
13243 		    tg3_flag(tp, HW_TSO_3)) {
13244 			struct tcphdr *th;
13245 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13246 			th = (struct tcphdr *)&tx_data[val];
13247 			th->check = 0;
13248 		} else
13249 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13250 
13251 		if (tg3_flag(tp, HW_TSO_3)) {
13252 			mss |= (hdr_len & 0xc) << 12;
13253 			if (hdr_len & 0x10)
13254 				base_flags |= 0x00000010;
13255 			base_flags |= (hdr_len & 0x3e0) << 5;
13256 		} else if (tg3_flag(tp, HW_TSO_2))
13257 			mss |= hdr_len << 9;
13258 		else if (tg3_flag(tp, HW_TSO_1) ||
13259 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13260 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13261 		} else {
13262 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13263 		}
13264 
13265 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13266 	} else {
13267 		num_pkts = 1;
13268 		data_off = ETH_HLEN;
13269 
13270 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13271 		    tx_len > VLAN_ETH_FRAME_LEN)
13272 			base_flags |= TXD_FLAG_JMB_PKT;
13273 	}
13274 
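	/* Fill the payload with an incrementing byte pattern so the
	 * receive check below can verify it byte-for-byte.
	 */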
13275 	for (i = data_off; i < tx_len; i++)
13276 		tx_data[i] = (u8) (i & 0xff);
13277 
13278 	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13279 	if (pci_dma_mapping_error(tp->pdev, map)) {
13280 		dev_kfree_skb(skb);
13281 		return -EIO;
13282 	}
13283 
13284 	val = tnapi->tx_prod;
13285 	tnapi->tx_buffers[val].skb = skb;
13286 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13287 
13288 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13289 	       rnapi->coal_now);
13290 
13291 	udelay(10);
13292 
13293 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13294 
13295 	budget = tg3_tx_avail(tnapi);
13296 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13297 			    base_flags | TXD_FLAG_END, mss, 0)) {
13298 		tnapi->tx_buffers[val].skb = NULL;
13299 		dev_kfree_skb(skb);
13300 		return -EIO;
13301 	}
13302 
13303 	tnapi->tx_prod++;
13304 
13305 	/* Sync BD data before updating mailbox */
13306 	wmb();
13307 
13308 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13309 	tr32_mailbox(tnapi->prodmbox);
13310 
13311 	udelay(10);
13312 
13313 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13314 	for (i = 0; i < 35; i++) {
13315 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13316 		       coal_now);
13317 
13318 		udelay(10);
13319 
13320 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13321 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13322 		if ((tx_idx == tnapi->tx_prod) &&
13323 		    (rx_idx == (rx_start_idx + num_pkts)))
13324 			break;
13325 	}
13326 
13327 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13328 	dev_kfree_skb(skb);
13329 
13330 	if (tx_idx != tnapi->tx_prod)
13331 		goto out;
13332 
13333 	if (rx_idx != rx_start_idx + num_pkts)
13334 		goto out;
13335 
13336 	val = data_off;
13337 	while (rx_idx != rx_start_idx) {
13338 		desc = &rnapi->rx_rcb[rx_start_idx++];
13339 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13340 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13341 
13342 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13343 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13344 			goto out;
13345 
13346 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13347 			 - ETH_FCS_LEN;
13348 
13349 		if (!tso_loopback) {
13350 			if (rx_len != tx_len)
13351 				goto out;
13352 
13353 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13354 				if (opaque_key != RXD_OPAQUE_RING_STD)
13355 					goto out;
13356 			} else {
13357 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13358 					goto out;
13359 			}
13360 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13361 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13362 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13363 			goto out;
13364 		}
13365 
13366 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13367 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13368 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13369 					     mapping);
13370 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13371 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13372 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13373 					     mapping);
13374 		} else
13375 			goto out;
13376 
13377 		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13378 					    PCI_DMA_FROMDEVICE);
13379 
13380 		rx_data += TG3_RX_OFFSET(tp);
13381 		for (i = data_off; i < rx_len; i++, val++) {
13382 			if (*(rx_data + i) != (u8) (val & 0xff))
13383 				goto out;
13384 		}
13385 	}
13386 
13387 	err = 0;
13388 
13389 	/* tg3_free_rings will unmap and free the rx_data */
13390 out:
13391 	return err;
13392 }
13393 
13394 #define TG3_STD_LOOPBACK_FAILED		1
13395 #define TG3_JMB_LOOPBACK_FAILED		2
13396 #define TG3_TSO_LOOPBACK_FAILED		4
13397 #define TG3_LOOPBACK_FAILED \
13398 	(TG3_STD_LOOPBACK_FAILED | \
13399 	 TG3_JMB_LOOPBACK_FAILED | \
13400 	 TG3_TSO_LOOPBACK_FAILED)
13401 
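/* Run the standard, jumbo and TSO loopback variants in MAC loopback,
 * internal PHY loopback and (optionally) external loopback modes.
 * Per-mode failure bits are OR'd into data[]; the return value is
 * -EIO if anything failed, 0 otherwise.
 */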
13402 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13403 {
13404 	int err = -EIO;
13405 	u32 eee_cap;
13406 	u32 jmb_pkt_sz = 9000;
13407 
13408 	if (tp->dma_limit)
13409 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13410 
13411 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13412 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13413 
13414 	if (!netif_running(tp->dev)) {
13415 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13416 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13417 		if (do_extlpbk)
13418 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13419 		goto done;
13420 	}
13421 
13422 	err = tg3_reset_hw(tp, true);
13423 	if (err) {
13424 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13425 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13426 		if (do_extlpbk)
13427 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13428 		goto done;
13429 	}
13430 
13431 	if (tg3_flag(tp, ENABLE_RSS)) {
13432 		int i;
13433 
13434 		/* Reroute all rx packets to the 1st queue */
13435 		for (i = MAC_RSS_INDIR_TBL_0;
13436 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13437 			tw32(i, 0x0);
13438 	}
13439 
13440 	/* HW errata - mac loopback fails in some cases on 5780.
13441 	 * Normal traffic and PHY loopback are not affected by
13442 	 * errata.  Also, the MAC loopback test is deprecated for
13443 	 * all newer ASIC revisions.
13444 	 */
13445 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13446 	    !tg3_flag(tp, CPMU_PRESENT)) {
13447 		tg3_mac_loopback(tp, true);
13448 
13449 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13450 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13451 
13452 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13453 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13454 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13455 
13456 		tg3_mac_loopback(tp, false);
13457 	}
13458 
13459 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13460 	    !tg3_flag(tp, USE_PHYLIB)) {
13461 		int i;
13462 
13463 		tg3_phy_lpbk_set(tp, 0, false);
13464 
13465 		/* Wait for link */
13466 		for (i = 0; i < 100; i++) {
13467 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13468 				break;
13469 			mdelay(1);
13470 		}
13471 
13472 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13473 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13474 		if (tg3_flag(tp, TSO_CAPABLE) &&
13475 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13476 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13477 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13478 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13479 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13480 
13481 		if (do_extlpbk) {
13482 			tg3_phy_lpbk_set(tp, 0, true);
13483 
13484 			/* All link indications report up, but the hardware
13485 			 * isn't really ready for about 20 msec.  Double it
13486 			 * to be sure.
13487 			 */
13488 			mdelay(40);
13489 
13490 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13491 				data[TG3_EXT_LOOPB_TEST] |=
13492 							TG3_STD_LOOPBACK_FAILED;
13493 			if (tg3_flag(tp, TSO_CAPABLE) &&
13494 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13495 				data[TG3_EXT_LOOPB_TEST] |=
13496 							TG3_TSO_LOOPBACK_FAILED;
13497 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13498 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13499 				data[TG3_EXT_LOOPB_TEST] |=
13500 							TG3_JMB_LOOPBACK_FAILED;
13501 		}
13502 
13503 		/* Re-enable gphy autopowerdown. */
13504 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13505 			tg3_phy_toggle_apd(tp, true);
13506 	}
13507 
13508 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13509 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13510 
13511 done:
13512 	tp->phy_flags |= eee_cap;
13513 
13514 	return err;
13515 }
13516 
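/* ethtool self-test entry point (e.g. "ethtool -t ethX").  The NVRAM
 * test always runs, the link test runs unless external loopback was
 * requested, and the destructive register, memory, loopback and
 * interrupt tests run only when ETH_TEST_FL_OFFLINE is set.
 */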
13517 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13518 			  u64 *data)
13519 {
13520 	struct tg3 *tp = netdev_priv(dev);
13521 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13522 
13523 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13524 		if (tg3_power_up(tp)) {
13525 			etest->flags |= ETH_TEST_FL_FAILED;
13526 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13527 			return;
13528 		}
13529 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13530 	}
13531 
13532 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13533 
13534 	if (tg3_test_nvram(tp) != 0) {
13535 		etest->flags |= ETH_TEST_FL_FAILED;
13536 		data[TG3_NVRAM_TEST] = 1;
13537 	}
13538 	if (!doextlpbk && tg3_test_link(tp)) {
13539 		etest->flags |= ETH_TEST_FL_FAILED;
13540 		data[TG3_LINK_TEST] = 1;
13541 	}
13542 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13543 		int err, err2 = 0, irq_sync = 0;
13544 
13545 		if (netif_running(dev)) {
13546 			tg3_phy_stop(tp);
13547 			tg3_netif_stop(tp);
13548 			irq_sync = 1;
13549 		}
13550 
13551 		tg3_full_lock(tp, irq_sync);
13552 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13553 		err = tg3_nvram_lock(tp);
13554 		tg3_halt_cpu(tp, RX_CPU_BASE);
13555 		if (!tg3_flag(tp, 5705_PLUS))
13556 			tg3_halt_cpu(tp, TX_CPU_BASE);
13557 		if (!err)
13558 			tg3_nvram_unlock(tp);
13559 
13560 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13561 			tg3_phy_reset(tp);
13562 
13563 		if (tg3_test_registers(tp) != 0) {
13564 			etest->flags |= ETH_TEST_FL_FAILED;
13565 			data[TG3_REGISTER_TEST] = 1;
13566 		}
13567 
13568 		if (tg3_test_memory(tp) != 0) {
13569 			etest->flags |= ETH_TEST_FL_FAILED;
13570 			data[TG3_MEMORY_TEST] = 1;
13571 		}
13572 
13573 		if (doextlpbk)
13574 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13575 
13576 		if (tg3_test_loopback(tp, data, doextlpbk))
13577 			etest->flags |= ETH_TEST_FL_FAILED;
13578 
13579 		tg3_full_unlock(tp);
13580 
13581 		if (tg3_test_interrupt(tp) != 0) {
13582 			etest->flags |= ETH_TEST_FL_FAILED;
13583 			data[TG3_INTERRUPT_TEST] = 1;
13584 		}
13585 
13586 		tg3_full_lock(tp, 0);
13587 
13588 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13589 		if (netif_running(dev)) {
13590 			tg3_flag_set(tp, INIT_COMPLETE);
13591 			err2 = tg3_restart_hw(tp, true);
13592 			if (!err2)
13593 				tg3_netif_start(tp);
13594 		}
13595 
13596 		tg3_full_unlock(tp);
13597 
13598 		if (irq_sync && !err2)
13599 			tg3_phy_start(tp);
13600 	}
13601 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13602 		tg3_power_down_prepare(tp);
13604 }
13605 
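/* SIOCSHWTSTAMP handler: translate the requested hwtstamp_config
 * rx_filter into TG3_RX_PTP_CTL bits, program them if the device is
 * running, and enable or disable TX timestamping to match tx_type.
 */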
13606 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13607 			      struct ifreq *ifr, int cmd)
13608 {
13609 	struct tg3 *tp = netdev_priv(dev);
13610 	struct hwtstamp_config stmpconf;
13611 
13612 	if (!tg3_flag(tp, PTP_CAPABLE))
13613 		return -EINVAL;
13614 
13615 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13616 		return -EFAULT;
13617 
13618 	if (stmpconf.flags)
13619 		return -EINVAL;
13620 
13621 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13622 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13623 		return -ERANGE;
13624 
13625 	switch (stmpconf.rx_filter) {
13626 	case HWTSTAMP_FILTER_NONE:
13627 		tp->rxptpctl = 0;
13628 		break;
13629 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13630 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13631 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13632 		break;
13633 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13634 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13635 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13636 		break;
13637 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13638 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13639 			       TG3_RX_PTP_CTL_DELAY_REQ;
13640 		break;
13641 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13642 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13643 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13644 		break;
13645 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13646 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13647 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13648 		break;
13649 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13650 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13651 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13652 		break;
13653 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13654 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13655 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13656 		break;
13657 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13658 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13659 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13660 		break;
13661 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13662 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13663 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13664 		break;
13665 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13666 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13667 			       TG3_RX_PTP_CTL_DELAY_REQ;
13668 		break;
13669 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13670 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13671 			       TG3_RX_PTP_CTL_DELAY_REQ;
13672 		break;
13673 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13674 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13675 			       TG3_RX_PTP_CTL_DELAY_REQ;
13676 		break;
13677 	default:
13678 		return -ERANGE;
13679 	}
13680 
13681 	if (netif_running(dev) && tp->rxptpctl)
13682 		tw32(TG3_RX_PTP_CTL,
13683 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13684 
13685 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13686 		tg3_flag_set(tp, TX_TSTAMP_EN);
13687 	else
13688 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13689 
13690 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13691 		-EFAULT : 0;
13692 }
13693 
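/* Generic ioctl handler.  When phylib is in use, MII requests are
 * forwarded to the PHY driver; otherwise SIOCGMIIPHY, SIOCGMIIREG and
 * SIOCSMIIREG are serviced directly via __tg3_readphy() and
 * __tg3_writephy() under tp->lock.
 */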
13694 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13695 {
13696 	struct mii_ioctl_data *data = if_mii(ifr);
13697 	struct tg3 *tp = netdev_priv(dev);
13698 	int err;
13699 
13700 	if (tg3_flag(tp, USE_PHYLIB)) {
13701 		struct phy_device *phydev;
13702 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13703 			return -EAGAIN;
13704 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13705 		return phy_mii_ioctl(phydev, ifr, cmd);
13706 	}
13707 
13708 	switch (cmd) {
13709 	case SIOCGMIIPHY:
13710 		data->phy_id = tp->phy_addr;
13711 
13712 		/* fallthru */
13713 	case SIOCGMIIREG: {
13714 		u32 mii_regval;
13715 
13716 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13717 			break;			/* We have no PHY */
13718 
13719 		if (!netif_running(dev))
13720 			return -EAGAIN;
13721 
13722 		spin_lock_bh(&tp->lock);
13723 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13724 				    data->reg_num & 0x1f, &mii_regval);
13725 		spin_unlock_bh(&tp->lock);
13726 
13727 		data->val_out = mii_regval;
13728 
13729 		return err;
13730 	}
13731 
13732 	case SIOCSMIIREG:
13733 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13734 			break;			/* We have no PHY */
13735 
13736 		if (!netif_running(dev))
13737 			return -EAGAIN;
13738 
13739 		spin_lock_bh(&tp->lock);
13740 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13741 				     data->reg_num & 0x1f, data->val_in);
13742 		spin_unlock_bh(&tp->lock);
13743 
13744 		return err;
13745 
13746 	case SIOCSHWTSTAMP:
13747 		return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13748 
13749 	default:
13750 		/* do nothing */
13751 		break;
13752 	}
13753 	return -EOPNOTSUPP;
13754 }
13755 
13756 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13757 {
13758 	struct tg3 *tp = netdev_priv(dev);
13759 
13760 	memcpy(ec, &tp->coal, sizeof(*ec));
13761 	return 0;
13762 }
13763 
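/* Validate and apply interrupt coalescing parameters, e.g. from
 * "ethtool -C ethX rx-usecs 20".  On 5705 and newer parts the "_irq"
 * and stats-block timers do not exist, so their limits stay at zero
 * and any nonzero request for them is rejected.
 */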
13764 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13765 {
13766 	struct tg3 *tp = netdev_priv(dev);
13767 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13768 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13769 
13770 	if (!tg3_flag(tp, 5705_PLUS)) {
13771 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13772 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13773 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13774 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13775 	}
13776 
13777 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13778 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13779 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13780 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13781 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13782 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13783 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13784 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13785 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13786 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13787 		return -EINVAL;
13788 
13789 	/* No rx interrupts will be generated if both are zero */
13790 	if ((ec->rx_coalesce_usecs == 0) &&
13791 	    (ec->rx_max_coalesced_frames == 0))
13792 		return -EINVAL;
13793 
13794 	/* No tx interrupts will be generated if both are zero */
13795 	if ((ec->tx_coalesce_usecs == 0) &&
13796 	    (ec->tx_max_coalesced_frames == 0))
13797 		return -EINVAL;
13798 
13799 	/* Only copy relevant parameters, ignore all others. */
13800 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13801 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13802 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13803 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13804 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13805 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13806 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13807 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13808 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13809 
13810 	if (netif_running(dev)) {
13811 		tg3_full_lock(tp, 0);
13812 		__tg3_set_coalesce(tp, &tp->coal);
13813 		tg3_full_unlock(tp);
13814 	}
13815 	return 0;
13816 }
13817 
13818 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13819 {
13820 	struct tg3 *tp = netdev_priv(dev);
13821 
13822 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13823 		netdev_warn(tp->dev, "Board does not support EEE!\n");
13824 		return -EOPNOTSUPP;
13825 	}
13826 
13827 	if (edata->advertised != tp->eee.advertised) {
13828 		netdev_warn(tp->dev,
13829 			    "Direct manipulation of EEE advertisement is not supported\n");
13830 		return -EINVAL;
13831 	}
13832 
13833 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13834 		netdev_warn(tp->dev,
13835 			    "Maximum supported Tx LPI timer is %#x\n",
13836 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13837 		return -EINVAL;
13838 	}
13839 
13840 	tp->eee = *edata;
13841 
13842 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13843 	tg3_warn_mgmt_link_flap(tp);
13844 
13845 	if (netif_running(tp->dev)) {
13846 		tg3_full_lock(tp, 0);
13847 		tg3_setup_eee(tp);
13848 		tg3_phy_reset(tp);
13849 		tg3_full_unlock(tp);
13850 	}
13851 
13852 	return 0;
13853 }
13854 
13855 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13856 {
13857 	struct tg3 *tp = netdev_priv(dev);
13858 
13859 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13860 		netdev_warn(tp->dev,
13861 			    "Board does not support EEE!\n");
13862 		return -EOPNOTSUPP;
13863 	}
13864 
13865 	*edata = tp->eee;
13866 	return 0;
13867 }
13868 
13869 static const struct ethtool_ops tg3_ethtool_ops = {
13870 	.get_settings		= tg3_get_settings,
13871 	.set_settings		= tg3_set_settings,
13872 	.get_drvinfo		= tg3_get_drvinfo,
13873 	.get_regs_len		= tg3_get_regs_len,
13874 	.get_regs		= tg3_get_regs,
13875 	.get_wol		= tg3_get_wol,
13876 	.set_wol		= tg3_set_wol,
13877 	.get_msglevel		= tg3_get_msglevel,
13878 	.set_msglevel		= tg3_set_msglevel,
13879 	.nway_reset		= tg3_nway_reset,
13880 	.get_link		= ethtool_op_get_link,
13881 	.get_eeprom_len		= tg3_get_eeprom_len,
13882 	.get_eeprom		= tg3_get_eeprom,
13883 	.set_eeprom		= tg3_set_eeprom,
13884 	.get_ringparam		= tg3_get_ringparam,
13885 	.set_ringparam		= tg3_set_ringparam,
13886 	.get_pauseparam		= tg3_get_pauseparam,
13887 	.set_pauseparam		= tg3_set_pauseparam,
13888 	.self_test		= tg3_self_test,
13889 	.get_strings		= tg3_get_strings,
13890 	.set_phys_id		= tg3_set_phys_id,
13891 	.get_ethtool_stats	= tg3_get_ethtool_stats,
13892 	.get_coalesce		= tg3_get_coalesce,
13893 	.set_coalesce		= tg3_set_coalesce,
13894 	.get_sset_count		= tg3_get_sset_count,
13895 	.get_rxnfc		= tg3_get_rxnfc,
13896 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13897 	.get_rxfh_indir		= tg3_get_rxfh_indir,
13898 	.set_rxfh_indir		= tg3_set_rxfh_indir,
13899 	.get_channels		= tg3_get_channels,
13900 	.set_channels		= tg3_set_channels,
13901 	.get_ts_info		= tg3_get_ts_info,
13902 	.get_eee		= tg3_get_eee,
13903 	.set_eee		= tg3_set_eee,
13904 };
13905 
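/* Return live counters gathered under tp->lock.  Once the hardware
 * stats block is gone (e.g. after the device is closed), fall back to
 * the last snapshot saved in net_stats_prev.
 */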
13906 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13907 						struct rtnl_link_stats64 *stats)
13908 {
13909 	struct tg3 *tp = netdev_priv(dev);
13910 
13911 	spin_lock_bh(&tp->lock);
13912 	if (!tp->hw_stats) {
13913 		spin_unlock_bh(&tp->lock);
13914 		return &tp->net_stats_prev;
13915 	}
13916 
13917 	tg3_get_nstats(tp, stats);
13918 	spin_unlock_bh(&tp->lock);
13919 
13920 	return stats;
13921 }
13922 
13923 static void tg3_set_rx_mode(struct net_device *dev)
13924 {
13925 	struct tg3 *tp = netdev_priv(dev);
13926 
13927 	if (!netif_running(dev))
13928 		return;
13929 
13930 	tg3_full_lock(tp, 0);
13931 	__tg3_set_rx_mode(dev);
13932 	tg3_full_unlock(tp);
13933 }
13934 
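/* Update dev->mtu and the flags that depend on it.  5780-class chips
 * cannot do TSO on jumbo frames, so TSO capability is toggled as the
 * MTU crosses ETH_DATA_LEN; all other chips just switch the jumbo
 * ring on or off.
 */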
13935 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13936 			       int new_mtu)
13937 {
13938 	dev->mtu = new_mtu;
13939 
13940 	if (new_mtu > ETH_DATA_LEN) {
13941 		if (tg3_flag(tp, 5780_CLASS)) {
13942 			netdev_update_features(dev);
13943 			tg3_flag_clear(tp, TSO_CAPABLE);
13944 		} else {
13945 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
13946 		}
13947 	} else {
13948 		if (tg3_flag(tp, 5780_CLASS)) {
13949 			tg3_flag_set(tp, TSO_CAPABLE);
13950 			netdev_update_features(dev);
13951 		}
13952 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13953 	}
13954 }
13955 
13956 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13957 {
13958 	struct tg3 *tp = netdev_priv(dev);
13959 	int err;
13960 	bool reset_phy = false;
13961 
13962 	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13963 		return -EINVAL;
13964 
13965 	if (!netif_running(dev)) {
13966 		/* The new MTU will simply take effect the next
13967 		 * time the device is brought up.
13968 		 */
13969 		tg3_set_mtu(dev, tp, new_mtu);
13970 		return 0;
13971 	}
13972 
13973 	tg3_phy_stop(tp);
13974 
13975 	tg3_netif_stop(tp);
13976 
13977 	tg3_full_lock(tp, 1);
13978 
13979 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13980 
13981 	tg3_set_mtu(dev, tp, new_mtu);
13982 
13983 	/* Reset the PHY, otherwise the read DMA engine will be left in a
13984 	 * mode that breaks all requests up into 256-byte chunks.
13985 	 */
13986 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
13987 		reset_phy = true;
13988 
13989 	err = tg3_restart_hw(tp, reset_phy);
13990 
13991 	if (!err)
13992 		tg3_netif_start(tp);
13993 
13994 	tg3_full_unlock(tp);
13995 
13996 	if (!err)
13997 		tg3_phy_start(tp);
13998 
13999 	return err;
14000 }
14001 
14002 static const struct net_device_ops tg3_netdev_ops = {
14003 	.ndo_open		= tg3_open,
14004 	.ndo_stop		= tg3_close,
14005 	.ndo_start_xmit		= tg3_start_xmit,
14006 	.ndo_get_stats64	= tg3_get_stats64,
14007 	.ndo_validate_addr	= eth_validate_addr,
14008 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14009 	.ndo_set_mac_address	= tg3_set_mac_addr,
14010 	.ndo_do_ioctl		= tg3_ioctl,
14011 	.ndo_tx_timeout		= tg3_tx_timeout,
14012 	.ndo_change_mtu		= tg3_change_mtu,
14013 	.ndo_fix_features	= tg3_fix_features,
14014 	.ndo_set_features	= tg3_set_features,
14015 #ifdef CONFIG_NET_POLL_CONTROLLER
14016 	.ndo_poll_controller	= tg3_poll_controller,
14017 #endif
14018 };
14019 
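/* Size a legacy SEEPROM part by reading at doubling offsets until the
 * magic signature from offset 0 reappears, which means the address
 * lines have wrapped around.
 */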
14020 static void tg3_get_eeprom_size(struct tg3 *tp)
14021 {
14022 	u32 cursize, val, magic;
14023 
14024 	tp->nvram_size = EEPROM_CHIP_SIZE;
14025 
14026 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14027 		return;
14028 
14029 	if ((magic != TG3_EEPROM_MAGIC) &&
14030 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14031 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14032 		return;
14033 
14034 	/*
14035 	 * Size the chip by reading offsets at increasing powers of two.
14036 	 * When we encounter our validation signature, we know the addressing
14037 	 * has wrapped around, and thus have our chip size.
14038 	 */
14039 	cursize = 0x10;
14040 
14041 	while (cursize < tp->nvram_size) {
14042 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14043 			return;
14044 
14045 		if (val == magic)
14046 			break;
14047 
14048 		cursize <<= 1;
14049 	}
14050 
14051 	tp->nvram_size = cursize;
14052 }
14053 
14054 static void tg3_get_nvram_size(struct tg3 *tp)
14055 {
14056 	u32 val;
14057 
14058 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14059 		return;
14060 
14061 	/* Selfboot format */
14062 	if (val != TG3_EEPROM_MAGIC) {
14063 		tg3_get_eeprom_size(tp);
14064 		return;
14065 	}
14066 
14067 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14068 		if (val != 0) {
14069 			/* This is confusing.  We want to operate on the
14070 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14071 			 * call will read from NVRAM and byteswap the data
14072 			 * according to the byteswapping settings for all
14073 			 * other register accesses.  This ensures the data we
14074 			 * want will always reside in the lower 16-bits.
14075 			 * However, the data in NVRAM is in LE format, which
14076 			 * means the data from the NVRAM read will always be
14077 			 * opposite the endianness of the CPU.  The 16-bit
14078 			 * byteswap then brings the data to CPU endianness.
14079 			 */
14080 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14081 			return;
14082 		}
14083 	}
14084 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14085 }
14086 
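/* The tg3_get_*_nvram_info() helpers decode the NVRAM_CFG1 strapping
 * for one chip family each: flash vendor, buffered vs. unbuffered
 * access, page size and (where encoded) total device size.
 */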
14087 static void tg3_get_nvram_info(struct tg3 *tp)
14088 {
14089 	u32 nvcfg1;
14090 
14091 	nvcfg1 = tr32(NVRAM_CFG1);
14092 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14093 		tg3_flag_set(tp, FLASH);
14094 	} else {
14095 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14096 		tw32(NVRAM_CFG1, nvcfg1);
14097 	}
14098 
14099 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14100 	    tg3_flag(tp, 5780_CLASS)) {
14101 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14102 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14103 			tp->nvram_jedecnum = JEDEC_ATMEL;
14104 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14105 			tg3_flag_set(tp, NVRAM_BUFFERED);
14106 			break;
14107 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14108 			tp->nvram_jedecnum = JEDEC_ATMEL;
14109 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14110 			break;
14111 		case FLASH_VENDOR_ATMEL_EEPROM:
14112 			tp->nvram_jedecnum = JEDEC_ATMEL;
14113 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14114 			tg3_flag_set(tp, NVRAM_BUFFERED);
14115 			break;
14116 		case FLASH_VENDOR_ST:
14117 			tp->nvram_jedecnum = JEDEC_ST;
14118 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14119 			tg3_flag_set(tp, NVRAM_BUFFERED);
14120 			break;
14121 		case FLASH_VENDOR_SAIFUN:
14122 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14123 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14124 			break;
14125 		case FLASH_VENDOR_SST_SMALL:
14126 		case FLASH_VENDOR_SST_LARGE:
14127 			tp->nvram_jedecnum = JEDEC_SST;
14128 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14129 			break;
14130 		}
14131 	} else {
14132 		tp->nvram_jedecnum = JEDEC_ATMEL;
14133 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14134 		tg3_flag_set(tp, NVRAM_BUFFERED);
14135 	}
14136 }
14137 
14138 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14139 {
14140 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14141 	case FLASH_5752PAGE_SIZE_256:
14142 		tp->nvram_pagesize = 256;
14143 		break;
14144 	case FLASH_5752PAGE_SIZE_512:
14145 		tp->nvram_pagesize = 512;
14146 		break;
14147 	case FLASH_5752PAGE_SIZE_1K:
14148 		tp->nvram_pagesize = 1024;
14149 		break;
14150 	case FLASH_5752PAGE_SIZE_2K:
14151 		tp->nvram_pagesize = 2048;
14152 		break;
14153 	case FLASH_5752PAGE_SIZE_4K:
14154 		tp->nvram_pagesize = 4096;
14155 		break;
14156 	case FLASH_5752PAGE_SIZE_264:
14157 		tp->nvram_pagesize = 264;
14158 		break;
14159 	case FLASH_5752PAGE_SIZE_528:
14160 		tp->nvram_pagesize = 528;
14161 		break;
14162 	}
14163 }
14164 
14165 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14166 {
14167 	u32 nvcfg1;
14168 
14169 	nvcfg1 = tr32(NVRAM_CFG1);
14170 
14171 	/* NVRAM protection for TPM */
14172 	if (nvcfg1 & (1 << 27))
14173 		tg3_flag_set(tp, PROTECTED_NVRAM);
14174 
14175 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14176 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14177 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14178 		tp->nvram_jedecnum = JEDEC_ATMEL;
14179 		tg3_flag_set(tp, NVRAM_BUFFERED);
14180 		break;
14181 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14182 		tp->nvram_jedecnum = JEDEC_ATMEL;
14183 		tg3_flag_set(tp, NVRAM_BUFFERED);
14184 		tg3_flag_set(tp, FLASH);
14185 		break;
14186 	case FLASH_5752VENDOR_ST_M45PE10:
14187 	case FLASH_5752VENDOR_ST_M45PE20:
14188 	case FLASH_5752VENDOR_ST_M45PE40:
14189 		tp->nvram_jedecnum = JEDEC_ST;
14190 		tg3_flag_set(tp, NVRAM_BUFFERED);
14191 		tg3_flag_set(tp, FLASH);
14192 		break;
14193 	}
14194 
14195 	if (tg3_flag(tp, FLASH)) {
14196 		tg3_nvram_get_pagesize(tp, nvcfg1);
14197 	} else {
14198 		/* For eeprom, set pagesize to maximum eeprom size */
14199 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14200 
14201 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14202 		tw32(NVRAM_CFG1, nvcfg1);
14203 	}
14204 }
14205 
14206 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14207 {
14208 	u32 nvcfg1, protect = 0;
14209 
14210 	nvcfg1 = tr32(NVRAM_CFG1);
14211 
14212 	/* NVRAM protection for TPM */
14213 	if (nvcfg1 & (1 << 27)) {
14214 		tg3_flag_set(tp, PROTECTED_NVRAM);
14215 		protect = 1;
14216 	}
14217 
14218 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14219 	switch (nvcfg1) {
14220 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14221 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14222 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14223 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14224 		tp->nvram_jedecnum = JEDEC_ATMEL;
14225 		tg3_flag_set(tp, NVRAM_BUFFERED);
14226 		tg3_flag_set(tp, FLASH);
14227 		tp->nvram_pagesize = 264;
14228 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14229 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14230 			tp->nvram_size = (protect ? 0x3e200 :
14231 					  TG3_NVRAM_SIZE_512KB);
14232 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14233 			tp->nvram_size = (protect ? 0x1f200 :
14234 					  TG3_NVRAM_SIZE_256KB);
14235 		else
14236 			tp->nvram_size = (protect ? 0x1f200 :
14237 					  TG3_NVRAM_SIZE_128KB);
14238 		break;
14239 	case FLASH_5752VENDOR_ST_M45PE10:
14240 	case FLASH_5752VENDOR_ST_M45PE20:
14241 	case FLASH_5752VENDOR_ST_M45PE40:
14242 		tp->nvram_jedecnum = JEDEC_ST;
14243 		tg3_flag_set(tp, NVRAM_BUFFERED);
14244 		tg3_flag_set(tp, FLASH);
14245 		tp->nvram_pagesize = 256;
14246 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14247 			tp->nvram_size = (protect ?
14248 					  TG3_NVRAM_SIZE_64KB :
14249 					  TG3_NVRAM_SIZE_128KB);
14250 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14251 			tp->nvram_size = (protect ?
14252 					  TG3_NVRAM_SIZE_64KB :
14253 					  TG3_NVRAM_SIZE_256KB);
14254 		else
14255 			tp->nvram_size = (protect ?
14256 					  TG3_NVRAM_SIZE_128KB :
14257 					  TG3_NVRAM_SIZE_512KB);
14258 		break;
14259 	}
14260 }
14261 
14262 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14263 {
14264 	u32 nvcfg1;
14265 
14266 	nvcfg1 = tr32(NVRAM_CFG1);
14267 
14268 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14269 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14270 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14271 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14272 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14273 		tp->nvram_jedecnum = JEDEC_ATMEL;
14274 		tg3_flag_set(tp, NVRAM_BUFFERED);
14275 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14276 
14277 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14278 		tw32(NVRAM_CFG1, nvcfg1);
14279 		break;
14280 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14281 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14282 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14283 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14284 		tp->nvram_jedecnum = JEDEC_ATMEL;
14285 		tg3_flag_set(tp, NVRAM_BUFFERED);
14286 		tg3_flag_set(tp, FLASH);
14287 		tp->nvram_pagesize = 264;
14288 		break;
14289 	case FLASH_5752VENDOR_ST_M45PE10:
14290 	case FLASH_5752VENDOR_ST_M45PE20:
14291 	case FLASH_5752VENDOR_ST_M45PE40:
14292 		tp->nvram_jedecnum = JEDEC_ST;
14293 		tg3_flag_set(tp, NVRAM_BUFFERED);
14294 		tg3_flag_set(tp, FLASH);
14295 		tp->nvram_pagesize = 256;
14296 		break;
14297 	}
14298 }
14299 
14300 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14301 {
14302 	u32 nvcfg1, protect = 0;
14303 
14304 	nvcfg1 = tr32(NVRAM_CFG1);
14305 
14306 	/* NVRAM protection for TPM */
14307 	if (nvcfg1 & (1 << 27)) {
14308 		tg3_flag_set(tp, PROTECTED_NVRAM);
14309 		protect = 1;
14310 	}
14311 
14312 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14313 	switch (nvcfg1) {
14314 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14315 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14316 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14317 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14318 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14319 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14320 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14321 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14322 		tp->nvram_jedecnum = JEDEC_ATMEL;
14323 		tg3_flag_set(tp, NVRAM_BUFFERED);
14324 		tg3_flag_set(tp, FLASH);
14325 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14326 		tp->nvram_pagesize = 256;
14327 		break;
14328 	case FLASH_5761VENDOR_ST_A_M45PE20:
14329 	case FLASH_5761VENDOR_ST_A_M45PE40:
14330 	case FLASH_5761VENDOR_ST_A_M45PE80:
14331 	case FLASH_5761VENDOR_ST_A_M45PE16:
14332 	case FLASH_5761VENDOR_ST_M_M45PE20:
14333 	case FLASH_5761VENDOR_ST_M_M45PE40:
14334 	case FLASH_5761VENDOR_ST_M_M45PE80:
14335 	case FLASH_5761VENDOR_ST_M_M45PE16:
14336 		tp->nvram_jedecnum = JEDEC_ST;
14337 		tg3_flag_set(tp, NVRAM_BUFFERED);
14338 		tg3_flag_set(tp, FLASH);
14339 		tp->nvram_pagesize = 256;
14340 		break;
14341 	}
14342 
14343 	if (protect) {
14344 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14345 	} else {
14346 		switch (nvcfg1) {
14347 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14348 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14349 		case FLASH_5761VENDOR_ST_A_M45PE16:
14350 		case FLASH_5761VENDOR_ST_M_M45PE16:
14351 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14352 			break;
14353 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14354 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14355 		case FLASH_5761VENDOR_ST_A_M45PE80:
14356 		case FLASH_5761VENDOR_ST_M_M45PE80:
14357 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14358 			break;
14359 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14360 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14361 		case FLASH_5761VENDOR_ST_A_M45PE40:
14362 		case FLASH_5761VENDOR_ST_M_M45PE40:
14363 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14364 			break;
14365 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14366 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14367 		case FLASH_5761VENDOR_ST_A_M45PE20:
14368 		case FLASH_5761VENDOR_ST_M_M45PE20:
14369 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14370 			break;
14371 		}
14372 	}
14373 }
14374 
14375 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14376 {
14377 	tp->nvram_jedecnum = JEDEC_ATMEL;
14378 	tg3_flag_set(tp, NVRAM_BUFFERED);
14379 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14380 }
14381 
14382 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14383 {
14384 	u32 nvcfg1;
14385 
14386 	nvcfg1 = tr32(NVRAM_CFG1);
14387 
14388 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14389 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14390 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14391 		tp->nvram_jedecnum = JEDEC_ATMEL;
14392 		tg3_flag_set(tp, NVRAM_BUFFERED);
14393 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14394 
14395 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14396 		tw32(NVRAM_CFG1, nvcfg1);
14397 		return;
14398 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14399 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14400 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14401 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14402 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14403 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14404 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14405 		tp->nvram_jedecnum = JEDEC_ATMEL;
14406 		tg3_flag_set(tp, NVRAM_BUFFERED);
14407 		tg3_flag_set(tp, FLASH);
14408 
14409 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14410 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14411 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14412 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14413 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14414 			break;
14415 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14416 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14417 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14418 			break;
14419 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14420 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14421 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14422 			break;
14423 		}
14424 		break;
14425 	case FLASH_5752VENDOR_ST_M45PE10:
14426 	case FLASH_5752VENDOR_ST_M45PE20:
14427 	case FLASH_5752VENDOR_ST_M45PE40:
14428 		tp->nvram_jedecnum = JEDEC_ST;
14429 		tg3_flag_set(tp, NVRAM_BUFFERED);
14430 		tg3_flag_set(tp, FLASH);
14431 
14432 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14433 		case FLASH_5752VENDOR_ST_M45PE10:
14434 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14435 			break;
14436 		case FLASH_5752VENDOR_ST_M45PE20:
14437 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14438 			break;
14439 		case FLASH_5752VENDOR_ST_M45PE40:
14440 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14441 			break;
14442 		}
14443 		break;
14444 	default:
14445 		tg3_flag_set(tp, NO_NVRAM);
14446 		return;
14447 	}
14448 
14449 	tg3_nvram_get_pagesize(tp, nvcfg1);
14450 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14451 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14452 }
14453 
14455 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14456 {
14457 	u32 nvcfg1;
14458 
14459 	nvcfg1 = tr32(NVRAM_CFG1);
14460 
14461 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14462 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14463 	case FLASH_5717VENDOR_MICRO_EEPROM:
14464 		tp->nvram_jedecnum = JEDEC_ATMEL;
14465 		tg3_flag_set(tp, NVRAM_BUFFERED);
14466 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14467 
14468 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14469 		tw32(NVRAM_CFG1, nvcfg1);
14470 		return;
14471 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14472 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14473 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14474 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14475 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14476 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14477 	case FLASH_5717VENDOR_ATMEL_45USPT:
14478 		tp->nvram_jedecnum = JEDEC_ATMEL;
14479 		tg3_flag_set(tp, NVRAM_BUFFERED);
14480 		tg3_flag_set(tp, FLASH);
14481 
14482 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14483 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14484 			/* Detect size with tg3_nvram_get_size() */
14485 			break;
14486 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14487 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14488 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14489 			break;
14490 		default:
14491 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14492 			break;
14493 		}
14494 		break;
14495 	case FLASH_5717VENDOR_ST_M_M25PE10:
14496 	case FLASH_5717VENDOR_ST_A_M25PE10:
14497 	case FLASH_5717VENDOR_ST_M_M45PE10:
14498 	case FLASH_5717VENDOR_ST_A_M45PE10:
14499 	case FLASH_5717VENDOR_ST_M_M25PE20:
14500 	case FLASH_5717VENDOR_ST_A_M25PE20:
14501 	case FLASH_5717VENDOR_ST_M_M45PE20:
14502 	case FLASH_5717VENDOR_ST_A_M45PE20:
14503 	case FLASH_5717VENDOR_ST_25USPT:
14504 	case FLASH_5717VENDOR_ST_45USPT:
14505 		tp->nvram_jedecnum = JEDEC_ST;
14506 		tg3_flag_set(tp, NVRAM_BUFFERED);
14507 		tg3_flag_set(tp, FLASH);
14508 
14509 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14510 		case FLASH_5717VENDOR_ST_M_M25PE20:
14511 		case FLASH_5717VENDOR_ST_M_M45PE20:
14512 			/* Detect size with tg3_nvram_get_size() */
14513 			break;
14514 		case FLASH_5717VENDOR_ST_A_M25PE20:
14515 		case FLASH_5717VENDOR_ST_A_M45PE20:
14516 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14517 			break;
14518 		default:
14519 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14520 			break;
14521 		}
14522 		break;
14523 	default:
14524 		tg3_flag_set(tp, NO_NVRAM);
14525 		return;
14526 	}
14527 
14528 	tg3_nvram_get_pagesize(tp, nvcfg1);
14529 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14530 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14531 }
14532 
14533 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14534 {
14535 	u32 nvcfg1, nvmpinstrp;
14536 
14537 	nvcfg1 = tr32(NVRAM_CFG1);
14538 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14539 
14540 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14541 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14542 			tg3_flag_set(tp, NO_NVRAM);
14543 			return;
14544 		}
14545 
14546 		switch (nvmpinstrp) {
14547 		case FLASH_5762_EEPROM_HD:
14548 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14549 			break;
14550 		case FLASH_5762_EEPROM_LD:
14551 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14552 			break;
14553 		case FLASH_5720VENDOR_M_ST_M45PE20:
14554 			/* This pinstrap supports multiple sizes, so force it
14555 			 * to read the actual size from location 0xf0.
14556 			 */
14557 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14558 			break;
14559 		}
14560 	}
14561 
14562 	switch (nvmpinstrp) {
14563 	case FLASH_5720_EEPROM_HD:
14564 	case FLASH_5720_EEPROM_LD:
14565 		tp->nvram_jedecnum = JEDEC_ATMEL;
14566 		tg3_flag_set(tp, NVRAM_BUFFERED);
14567 
14568 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14569 		tw32(NVRAM_CFG1, nvcfg1);
14570 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14571 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14572 		else
14573 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14574 		return;
14575 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14576 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14577 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14578 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14579 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14580 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14581 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14582 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14583 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14584 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14585 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14586 	case FLASH_5720VENDOR_ATMEL_45USPT:
14587 		tp->nvram_jedecnum = JEDEC_ATMEL;
14588 		tg3_flag_set(tp, NVRAM_BUFFERED);
14589 		tg3_flag_set(tp, FLASH);
14590 
14591 		switch (nvmpinstrp) {
14592 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14593 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14594 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14595 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14596 			break;
14597 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14598 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14599 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14600 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14601 			break;
14602 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14603 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14604 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14605 			break;
14606 		default:
14607 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14608 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14609 			break;
14610 		}
14611 		break;
14612 	case FLASH_5720VENDOR_M_ST_M25PE10:
14613 	case FLASH_5720VENDOR_M_ST_M45PE10:
14614 	case FLASH_5720VENDOR_A_ST_M25PE10:
14615 	case FLASH_5720VENDOR_A_ST_M45PE10:
14616 	case FLASH_5720VENDOR_M_ST_M25PE20:
14617 	case FLASH_5720VENDOR_M_ST_M45PE20:
14618 	case FLASH_5720VENDOR_A_ST_M25PE20:
14619 	case FLASH_5720VENDOR_A_ST_M45PE20:
14620 	case FLASH_5720VENDOR_M_ST_M25PE40:
14621 	case FLASH_5720VENDOR_M_ST_M45PE40:
14622 	case FLASH_5720VENDOR_A_ST_M25PE40:
14623 	case FLASH_5720VENDOR_A_ST_M45PE40:
14624 	case FLASH_5720VENDOR_M_ST_M25PE80:
14625 	case FLASH_5720VENDOR_M_ST_M45PE80:
14626 	case FLASH_5720VENDOR_A_ST_M25PE80:
14627 	case FLASH_5720VENDOR_A_ST_M45PE80:
14628 	case FLASH_5720VENDOR_ST_25USPT:
14629 	case FLASH_5720VENDOR_ST_45USPT:
14630 		tp->nvram_jedecnum = JEDEC_ST;
14631 		tg3_flag_set(tp, NVRAM_BUFFERED);
14632 		tg3_flag_set(tp, FLASH);
14633 
14634 		switch (nvmpinstrp) {
14635 		case FLASH_5720VENDOR_M_ST_M25PE20:
14636 		case FLASH_5720VENDOR_M_ST_M45PE20:
14637 		case FLASH_5720VENDOR_A_ST_M25PE20:
14638 		case FLASH_5720VENDOR_A_ST_M45PE20:
14639 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14640 			break;
14641 		case FLASH_5720VENDOR_M_ST_M25PE40:
14642 		case FLASH_5720VENDOR_M_ST_M45PE40:
14643 		case FLASH_5720VENDOR_A_ST_M25PE40:
14644 		case FLASH_5720VENDOR_A_ST_M45PE40:
14645 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14646 			break;
14647 		case FLASH_5720VENDOR_M_ST_M25PE80:
14648 		case FLASH_5720VENDOR_M_ST_M45PE80:
14649 		case FLASH_5720VENDOR_A_ST_M25PE80:
14650 		case FLASH_5720VENDOR_A_ST_M45PE80:
14651 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14652 			break;
14653 		default:
14654 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14655 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14656 			break;
14657 		}
14658 		break;
14659 	default:
14660 		tg3_flag_set(tp, NO_NVRAM);
14661 		return;
14662 	}
14663 
14664 	tg3_nvram_get_pagesize(tp, nvcfg1);
14665 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14666 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14667 
14668 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14669 		u32 val;
14670 
14671 		if (tg3_nvram_read(tp, 0, &val))
14672 			return;
14673 
14674 		if (val != TG3_EEPROM_MAGIC &&
14675 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14676 			tg3_flag_set(tp, NO_NVRAM);
14677 	}
14678 }
14679 
14680 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14681 static void tg3_nvram_init(struct tg3 *tp)
14682 {
14683 	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
14685 		tg3_flag_clear(tp, NVRAM);
14686 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14687 		tg3_flag_set(tp, NO_NVRAM);
14688 		return;
14689 	}
14690 
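	/* Reset the EEPROM address state machine and program the
	 * default serial clock period before any EEPROM access.
	 */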
14691 	tw32_f(GRC_EEPROM_ADDR,
14692 	     (EEPROM_ADDR_FSM_RESET |
14693 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14694 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14695 
14696 	msleep(1);
14697 
14698 	/* Enable seeprom accesses. */
14699 	tw32_f(GRC_LOCAL_CTRL,
14700 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14701 	udelay(100);
14702 
14703 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14704 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14705 		tg3_flag_set(tp, NVRAM);
14706 
14707 		if (tg3_nvram_lock(tp)) {
14708 			netdev_warn(tp->dev,
14709 				    "Cannot get nvram lock, %s failed\n",
14710 				    __func__);
14711 			return;
14712 		}
14713 		tg3_enable_nvram_access(tp);
14714 
14715 		tp->nvram_size = 0;
14716 
14717 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14718 			tg3_get_5752_nvram_info(tp);
14719 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14720 			tg3_get_5755_nvram_info(tp);
14721 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14722 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14723 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14724 			tg3_get_5787_nvram_info(tp);
14725 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14726 			tg3_get_5761_nvram_info(tp);
14727 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14728 			tg3_get_5906_nvram_info(tp);
14729 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14730 			 tg3_flag(tp, 57765_CLASS))
14731 			tg3_get_57780_nvram_info(tp);
14732 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14733 			 tg3_asic_rev(tp) == ASIC_REV_5719)
14734 			tg3_get_5717_nvram_info(tp);
14735 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14736 			 tg3_asic_rev(tp) == ASIC_REV_5762)
14737 			tg3_get_5720_nvram_info(tp);
14738 		else
14739 			tg3_get_nvram_info(tp);
14740 
14741 		if (tp->nvram_size == 0)
14742 			tg3_get_nvram_size(tp);
14743 
14744 		tg3_disable_nvram_access(tp);
14745 		tg3_nvram_unlock(tp);
14746 
14747 	} else {
14748 		tg3_flag_clear(tp, NVRAM);
14749 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14750 
14751 		tg3_get_eeprom_size(tp);
14752 	}
14753 }
14754 
14755 struct subsys_tbl_ent {
14756 	u16 subsys_vendor, subsys_devid;
14757 	u32 phy_id;
14758 };
14759 
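/* Map PCI subsystem IDs to PHY IDs for boards whose NVRAM carries no
 * valid signature; consulted as a last resort by tg3_phy_probe().
 */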
14760 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14761 	/* Broadcom boards. */
14762 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14763 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14764 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14765 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14766 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14767 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14768 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14769 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14770 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14771 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14772 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14773 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14774 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14775 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14776 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14777 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14778 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14779 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14780 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14781 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14782 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
14783 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14784 
14785 	/* 3com boards. */
14786 	{ TG3PCI_SUBVENDOR_ID_3COM,
14787 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14788 	{ TG3PCI_SUBVENDOR_ID_3COM,
14789 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14790 	{ TG3PCI_SUBVENDOR_ID_3COM,
14791 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14792 	{ TG3PCI_SUBVENDOR_ID_3COM,
14793 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14794 	{ TG3PCI_SUBVENDOR_ID_3COM,
14795 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14796 
14797 	/* DELL boards. */
14798 	{ TG3PCI_SUBVENDOR_ID_DELL,
14799 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14800 	{ TG3PCI_SUBVENDOR_ID_DELL,
14801 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14802 	{ TG3PCI_SUBVENDOR_ID_DELL,
14803 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14804 	{ TG3PCI_SUBVENDOR_ID_DELL,
14805 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14806 
14807 	/* Compaq boards. */
14808 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14809 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14810 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14811 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14812 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14813 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14814 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14815 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14816 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
14817 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14818 
14819 	/* IBM boards. */
14820 	{ TG3PCI_SUBVENDOR_ID_IBM,
14821 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14822 };
14823 
14824 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14825 {
14826 	int i;
14827 
14828 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14829 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
14830 		     tp->pdev->subsystem_vendor) &&
14831 		    (subsys_id_to_phy_id[i].subsys_devid ==
14832 		     tp->pdev->subsystem_device))
14833 			return &subsys_id_to_phy_id[i];
14834 	}
14835 	return NULL;
14836 }
14837 
14838 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14839 {
14840 	u32 val;
14841 
14842 	tp->phy_id = TG3_PHY_ID_INVALID;
14843 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14844 
	/* Assume an onboard device that is WOL-capable by default. */
14846 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
14847 	tg3_flag_set(tp, WOL_CAP);
14848 
14849 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14850 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14851 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14852 			tg3_flag_set(tp, IS_NIC);
14853 		}
14854 		val = tr32(VCPU_CFGSHDW);
14855 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
14856 			tg3_flag_set(tp, ASPM_WORKAROUND);
14857 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14858 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14859 			tg3_flag_set(tp, WOL_ENABLE);
14860 			device_set_wakeup_enable(&tp->pdev->dev, true);
14861 		}
14862 		goto done;
14863 	}
14864 
14865 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14866 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14867 		u32 nic_cfg, led_cfg;
14868 		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14869 		int eeprom_phy_serdes = 0;
14870 
14871 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14872 		tp->nic_sram_data_cfg = nic_cfg;
14873 
14874 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14875 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
14876 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14877 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
14878 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
14879 		    (ver > 0) && (ver < 0x100))
14880 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14881 
14882 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
14883 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14884 
14885 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14886 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14887 			eeprom_phy_serdes = 1;
14888 
14889 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14890 		if (nic_phy_id != 0) {
14891 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14892 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14893 
14894 			eeprom_phy_id  = (id1 >> 16) << 10;
14895 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
14896 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
14897 		} else
14898 			eeprom_phy_id = 0;
14899 
14900 		tp->phy_id = eeprom_phy_id;
14901 		if (eeprom_phy_serdes) {
14902 			if (!tg3_flag(tp, 5705_PLUS))
14903 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14904 			else
14905 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14906 		}
14907 
14908 		if (tg3_flag(tp, 5750_PLUS))
14909 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14910 				    SHASTA_EXT_LED_MODE_MASK);
14911 		else
14912 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14913 
14914 		switch (led_cfg) {
14915 		default:
14916 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14917 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14918 			break;
14919 
14920 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14921 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14922 			break;
14923 
14924 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14925 			tp->led_ctrl = LED_CTRL_MODE_MAC;
14926 
			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
			 * as happens with some older 5700/5701 bootcode.
14929 			 */
14930 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14931 			    tg3_asic_rev(tp) == ASIC_REV_5701)
14932 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14933 
14934 			break;
14935 
14936 		case SHASTA_EXT_LED_SHARED:
14937 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
14938 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14939 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14940 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14941 						 LED_CTRL_MODE_PHY_2);
14942 
14943 			if (tg3_flag(tp, 5717_PLUS) ||
14944 			    tg3_asic_rev(tp) == ASIC_REV_5762)
14945 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
14946 						LED_CTRL_BLINK_RATE_MASK;
14947 
14948 			break;
14949 
14950 		case SHASTA_EXT_LED_MAC:
14951 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14952 			break;
14953 
14954 		case SHASTA_EXT_LED_COMBO:
14955 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
14956 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14957 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14958 						 LED_CTRL_MODE_PHY_2);
14959 			break;
14960 
14961 		}
14962 
14963 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14964 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
14965 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14966 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14967 
14968 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14969 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14970 
14971 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14972 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
14973 			if ((tp->pdev->subsystem_vendor ==
14974 			     PCI_VENDOR_ID_ARIMA) &&
14975 			    (tp->pdev->subsystem_device == 0x205a ||
14976 			     tp->pdev->subsystem_device == 0x2063))
14977 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14978 		} else {
14979 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14980 			tg3_flag_set(tp, IS_NIC);
14981 		}
14982 
14983 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14984 			tg3_flag_set(tp, ENABLE_ASF);
14985 			if (tg3_flag(tp, 5750_PLUS))
14986 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14987 		}
14988 
14989 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14990 		    tg3_flag(tp, 5750_PLUS))
14991 			tg3_flag_set(tp, ENABLE_APE);
14992 
14993 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14994 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14995 			tg3_flag_clear(tp, WOL_CAP);
14996 
14997 		if (tg3_flag(tp, WOL_CAP) &&
14998 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14999 			tg3_flag_set(tp, WOL_ENABLE);
15000 			device_set_wakeup_enable(&tp->pdev->dev, true);
15001 		}
15002 
15003 		if (cfg2 & (1 << 17))
15004 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15005 
		/* SerDes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set. */
15008 		if (cfg2 & (1 << 18))
15009 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15010 
15011 		if ((tg3_flag(tp, 57765_PLUS) ||
15012 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15013 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15014 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15015 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15016 
15017 		if (tg3_flag(tp, PCI_EXPRESS)) {
15018 			u32 cfg3;
15019 
15020 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15021 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15022 			    !tg3_flag(tp, 57765_PLUS) &&
15023 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15024 				tg3_flag_set(tp, ASPM_WORKAROUND);
15025 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15026 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15027 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15028 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15029 		}
15030 
15031 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15032 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15033 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15034 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15035 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15036 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15037 	}
15038 done:
15039 	if (tg3_flag(tp, WOL_CAP))
15040 		device_set_wakeup_enable(&tp->pdev->dev,
15041 					 tg3_flag(tp, WOL_ENABLE));
15042 	else
15043 		device_set_wakeup_capable(&tp->pdev->dev, false);
15044 }
15045 
15046 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15047 {
15048 	int i, err;
15049 	u32 val2, off = offset * 8;
15050 
15051 	err = tg3_nvram_lock(tp);
15052 	if (err)
15053 		return err;
15054 
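	/* Each 32-bit OTP word sits at an 8-byte stride (hence the
	 * offset * 8 above).  Program the address, issue a read
	 * command, then poll the status register for completion for
	 * up to ~1 ms (100 polls x 10 us).
	 */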
15055 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15056 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15057 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15058 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15059 	udelay(10);
15060 
15061 	for (i = 0; i < 100; i++) {
15062 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15063 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15064 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15065 			break;
15066 		}
15067 		udelay(10);
15068 	}
15069 
15070 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15071 
15072 	tg3_nvram_unlock(tp);
15073 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15074 		return 0;
15075 
15076 	return -EBUSY;
15077 }
15078 
15079 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15080 {
15081 	int i;
15082 	u32 val;
15083 
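	/* Latch the command by writing it with the START bit set and
	 * then deasserting START.
	 */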
15084 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15085 	tw32(OTP_CTRL, cmd);
15086 
15087 	/* Wait for up to 1 ms for command to execute. */
15088 	for (i = 0; i < 100; i++) {
15089 		val = tr32(OTP_STATUS);
15090 		if (val & OTP_STATUS_CMD_DONE)
15091 			break;
15092 		udelay(10);
15093 	}
15094 
15095 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15096 }
15097 
15098 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15099  * configuration is a 32-bit value that straddles the alignment boundary.
15100  * We do two 32-bit reads and then shift and merge the results.
15101  */
15102 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15103 {
15104 	u32 bhalf_otp, thalf_otp;
15105 
15106 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15107 
15108 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15109 		return 0;
15110 
15111 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15112 
15113 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15114 		return 0;
15115 
15116 	thalf_otp = tr32(OTP_READ_DATA);
15117 
15118 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15119 
15120 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15121 		return 0;
15122 
15123 	bhalf_otp = tr32(OTP_READ_DATA);
15124 
15125 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15126 }
15127 
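/* Reset link_config to advertise everything the PHY type supports and
 * restart from an unknown link state.
 */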
15128 static void tg3_phy_init_link_config(struct tg3 *tp)
15129 {
15130 	u32 adv = ADVERTISED_Autoneg;
15131 
15132 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15133 		adv |= ADVERTISED_1000baseT_Half |
15134 		       ADVERTISED_1000baseT_Full;
15135 
15136 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15137 		adv |= ADVERTISED_100baseT_Half |
15138 		       ADVERTISED_100baseT_Full |
15139 		       ADVERTISED_10baseT_Half |
15140 		       ADVERTISED_10baseT_Full |
15141 		       ADVERTISED_TP;
15142 	else
15143 		adv |= ADVERTISED_FIBRE;
15144 
15145 	tp->link_config.advertising = adv;
15146 	tp->link_config.speed = SPEED_UNKNOWN;
15147 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15148 	tp->link_config.autoneg = AUTONEG_ENABLE;
15149 	tp->link_config.active_speed = SPEED_UNKNOWN;
15150 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15151 
15152 	tp->old_link = -1;
15153 }
15154 
15155 static int tg3_phy_probe(struct tg3 *tp)
15156 {
15157 	u32 hw_phy_id_1, hw_phy_id_2;
15158 	u32 hw_phy_id, hw_phy_id_masked;
15159 	int err;
15160 
	/* Flow control autonegotiation is the default behavior. */
15162 	tg3_flag_set(tp, PAUSE_AUTONEG);
15163 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15164 
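	/* With APE firmware present, PHY accesses must be arbitrated
	 * through a per-function APE lock.
	 */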
15165 	if (tg3_flag(tp, ENABLE_APE)) {
15166 		switch (tp->pci_fn) {
15167 		case 0:
15168 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15169 			break;
15170 		case 1:
15171 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15172 			break;
15173 		case 2:
15174 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15175 			break;
15176 		case 3:
15177 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15178 			break;
15179 		}
15180 	}
15181 
15182 	if (!tg3_flag(tp, ENABLE_ASF) &&
15183 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15184 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15185 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15186 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15187 
15188 	if (tg3_flag(tp, USE_PHYLIB))
15189 		return tg3_phy_init(tp);
15190 
15191 	/* Reading the PHY ID register can conflict with ASF
15192 	 * firmware access to the PHY hardware.
15193 	 */
15194 	err = 0;
15195 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15196 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15197 	} else {
15198 		/* Now read the physical PHY_ID from the chip and verify
15199 		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY_ID found in the eeprom area and, failing
		 * that, the hard-coded subsystem-ID table.
15202 		 */
15203 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15204 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15205 
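		/* Fold the two MII PHYSID registers into the driver's
		 * internal PHY ID layout so the result can be compared
		 * against the TG3_PHY_ID_* constants.
		 */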
15206 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15207 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15208 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15209 
15210 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15211 	}
15212 
15213 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15214 		tp->phy_id = hw_phy_id;
15215 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15216 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15217 		else
15218 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15219 	} else {
15220 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15221 			/* Do nothing, phy ID already set up in
15222 			 * tg3_get_eeprom_hw_cfg().
15223 			 */
15224 		} else {
15225 			struct subsys_tbl_ent *p;
15226 
15227 			/* No eeprom signature?  Try the hardcoded
15228 			 * subsys device table.
15229 			 */
15230 			p = tg3_lookup_by_subsys(tp);
15231 			if (p) {
15232 				tp->phy_id = p->phy_id;
15233 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more. For now, just assume that
				 * the PHY is supported when it is connected
				 * to an SSB core.
				 */
15241 				return -ENODEV;
15242 			}
15243 
15244 			if (!tp->phy_id ||
15245 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15246 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15247 		}
15248 	}
15249 
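	/* EEE is supported by the copper PHYs on 5719, 5720, 57766 and
	 * 5762, and on 5717/57765 in all but the A0 revisions.
	 */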
15250 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15251 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15252 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15253 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15254 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15255 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15256 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15257 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15258 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15259 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15260 
15261 		tp->eee.supported = SUPPORTED_100baseT_Full |
15262 				    SUPPORTED_1000baseT_Full;
15263 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15264 				     ADVERTISED_1000baseT_Full;
15265 		tp->eee.eee_enabled = 1;
15266 		tp->eee.tx_lpi_enabled = 1;
15267 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15268 	}
15269 
15270 	tg3_phy_init_link_config(tp);
15271 
15272 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15273 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15274 	    !tg3_flag(tp, ENABLE_APE) &&
15275 	    !tg3_flag(tp, ENABLE_ASF)) {
15276 		u32 bmsr, dummy;
15277 
15278 		tg3_readphy(tp, MII_BMSR, &bmsr);
15279 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15280 		    (bmsr & BMSR_LSTATUS))
15281 			goto skip_phy_reset;
15282 
15283 		err = tg3_phy_reset(tp);
15284 		if (err)
15285 			return err;
15286 
15287 		tg3_phy_set_wirespeed(tp);
15288 
15289 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15290 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15291 					    tp->link_config.flowctrl);
15292 
15293 			tg3_writephy(tp, MII_BMCR,
15294 				     BMCR_ANENABLE | BMCR_ANRESTART);
15295 		}
15296 	}
15297 
15298 skip_phy_reset:
15299 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15300 		err = tg3_init_5401phy_dsp(tp);
15301 		if (err)
15302 			return err;
15303 
15304 		err = tg3_init_5401phy_dsp(tp);
15305 	}
15306 
15307 	return err;
15308 }
15309 
15310 static void tg3_read_vpd(struct tg3 *tp)
15311 {
15312 	u8 *vpd_data;
15313 	unsigned int block_end, rosize, len;
15314 	u32 vpdlen;
15315 	int j, i = 0;
15316 
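	/* Pull the board part number out of the VPD read-only section.
	 * On Dell boards (manufacturer ID "1028") the VENDOR0 keyword
	 * also seeds the firmware version string.
	 */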
15317 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15318 	if (!vpd_data)
15319 		goto out_no_vpd;
15320 
15321 	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15322 	if (i < 0)
15323 		goto out_not_found;
15324 
15325 	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15326 	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15327 	i += PCI_VPD_LRDT_TAG_SIZE;
15328 
15329 	if (block_end > vpdlen)
15330 		goto out_not_found;
15331 
15332 	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15333 				      PCI_VPD_RO_KEYWORD_MFR_ID);
15334 	if (j > 0) {
15335 		len = pci_vpd_info_field_size(&vpd_data[j]);
15336 
15337 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15338 		if (j + len > block_end || len != 4 ||
15339 		    memcmp(&vpd_data[j], "1028", 4))
15340 			goto partno;
15341 
15342 		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15343 					      PCI_VPD_RO_KEYWORD_VENDOR0);
15344 		if (j < 0)
15345 			goto partno;
15346 
15347 		len = pci_vpd_info_field_size(&vpd_data[j]);
15348 
15349 		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15350 		if (j + len > block_end)
15351 			goto partno;
15352 
15353 		if (len >= sizeof(tp->fw_ver))
15354 			len = sizeof(tp->fw_ver) - 1;
15355 		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15356 		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15357 			 &vpd_data[j]);
15358 	}
15359 
15360 partno:
15361 	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15362 				      PCI_VPD_RO_KEYWORD_PARTNO);
15363 	if (i < 0)
15364 		goto out_not_found;
15365 
15366 	len = pci_vpd_info_field_size(&vpd_data[i]);
15367 
15368 	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15369 	if (len > TG3_BPN_SIZE ||
15370 	    (len + i) > vpdlen)
15371 		goto out_not_found;
15372 
15373 	memcpy(tp->board_part_number, &vpd_data[i], len);
15374 
15375 out_not_found:
15376 	kfree(vpd_data);
15377 	if (tp->board_part_number[0])
15378 		return;
15379 
15380 out_no_vpd:
15381 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15382 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15383 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15384 			strcpy(tp->board_part_number, "BCM5717");
15385 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15386 			strcpy(tp->board_part_number, "BCM5718");
15387 		else
15388 			goto nomatch;
15389 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15390 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15391 			strcpy(tp->board_part_number, "BCM57780");
15392 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15393 			strcpy(tp->board_part_number, "BCM57760");
15394 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15395 			strcpy(tp->board_part_number, "BCM57790");
15396 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15397 			strcpy(tp->board_part_number, "BCM57788");
15398 		else
15399 			goto nomatch;
15400 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15401 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15402 			strcpy(tp->board_part_number, "BCM57761");
15403 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15404 			strcpy(tp->board_part_number, "BCM57765");
15405 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15406 			strcpy(tp->board_part_number, "BCM57781");
15407 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15408 			strcpy(tp->board_part_number, "BCM57785");
15409 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15410 			strcpy(tp->board_part_number, "BCM57791");
15411 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15412 			strcpy(tp->board_part_number, "BCM57795");
15413 		else
15414 			goto nomatch;
15415 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15416 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15417 			strcpy(tp->board_part_number, "BCM57762");
15418 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15419 			strcpy(tp->board_part_number, "BCM57766");
15420 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15421 			strcpy(tp->board_part_number, "BCM57782");
15422 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15423 			strcpy(tp->board_part_number, "BCM57786");
15424 		else
15425 			goto nomatch;
15426 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15427 		strcpy(tp->board_part_number, "BCM95906");
15428 	} else {
15429 nomatch:
15430 		strcpy(tp->board_part_number, "none");
15431 	}
15432 }
15433 
15434 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15435 {
15436 	u32 val;
15437 
15438 	if (tg3_nvram_read(tp, offset, &val) ||
15439 	    (val & 0xfc000000) != 0x0c000000 ||
15440 	    tg3_nvram_read(tp, offset + 4, &val) ||
15441 	    val != 0)
15442 		return 0;
15443 
15444 	return 1;
15445 }
15446 
15447 static void tg3_read_bc_ver(struct tg3 *tp)
15448 {
15449 	u32 val, offset, start, ver_offset;
15450 	int i, dst_off;
15451 	bool newver = false;
15452 
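	/* Word 0xc points at the bootcode image and word 0x4 holds its
	 * load address; version offsets read from the image are rebased
	 * against the latter.
	 */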
15453 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15454 	    tg3_nvram_read(tp, 0x4, &start))
15455 		return;
15456 
15457 	offset = tg3_nvram_logical_addr(tp, offset);
15458 
15459 	if (tg3_nvram_read(tp, offset, &val))
15460 		return;
15461 
15462 	if ((val & 0xfc000000) == 0x0c000000) {
15463 		if (tg3_nvram_read(tp, offset + 4, &val))
15464 			return;
15465 
15466 		if (val == 0)
15467 			newver = true;
15468 	}
15469 
15470 	dst_off = strlen(tp->fw_ver);
15471 
15472 	if (newver) {
15473 		if (TG3_VER_SIZE - dst_off < 16 ||
15474 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15475 			return;
15476 
15477 		offset = offset + ver_offset - start;
15478 		for (i = 0; i < 16; i += 4) {
15479 			__be32 v;
15480 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15481 				return;
15482 
15483 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15484 		}
15485 	} else {
15486 		u32 major, minor;
15487 
15488 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15489 			return;
15490 
15491 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15492 			TG3_NVM_BCVER_MAJSFT;
15493 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15494 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15495 			 "v%d.%02d", major, minor);
15496 	}
15497 }
15498 
15499 static void tg3_read_hwsb_ver(struct tg3 *tp)
15500 {
15501 	u32 val, major, minor;
15502 
15503 	/* Use native endian representation */
15504 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15505 		return;
15506 
15507 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15508 		TG3_NVM_HWSB_CFG1_MAJSFT;
15509 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15510 		TG3_NVM_HWSB_CFG1_MINSFT;
15511 
15512 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15513 }
15514 
15515 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15516 {
15517 	u32 offset, major, minor, build;
15518 
15519 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15520 
15521 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15522 		return;
15523 
15524 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15525 	case TG3_EEPROM_SB_REVISION_0:
15526 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15527 		break;
15528 	case TG3_EEPROM_SB_REVISION_2:
15529 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15530 		break;
15531 	case TG3_EEPROM_SB_REVISION_3:
15532 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15533 		break;
15534 	case TG3_EEPROM_SB_REVISION_4:
15535 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15536 		break;
15537 	case TG3_EEPROM_SB_REVISION_5:
15538 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15539 		break;
15540 	case TG3_EEPROM_SB_REVISION_6:
15541 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15542 		break;
15543 	default:
15544 		return;
15545 	}
15546 
15547 	if (tg3_nvram_read(tp, offset, &val))
15548 		return;
15549 
15550 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15551 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15552 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15553 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15554 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15555 
15556 	if (minor > 99 || build > 26)
15557 		return;
15558 
15559 	offset = strlen(tp->fw_ver);
15560 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15561 		 " v%d.%02d", major, minor);
15562 
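	/* A non-zero build number becomes a trailing letter ('a' for
	 * build 1); the build > 26 check above keeps it within a-z.
	 */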
15563 	if (build > 0) {
15564 		offset = strlen(tp->fw_ver);
15565 		if (offset < TG3_VER_SIZE - 1)
15566 			tp->fw_ver[offset] = 'a' + build - 1;
15567 	}
15568 }
15569 
15570 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15571 {
15572 	u32 val, offset, start;
15573 	int i, vlen;
15574 
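	/* Walk the NVRAM directory for the ASF init image entry; the
	 * word at image offset 8 holds the address of the version
	 * bytes, which is rebased against the load address.
	 */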
15575 	for (offset = TG3_NVM_DIR_START;
15576 	     offset < TG3_NVM_DIR_END;
15577 	     offset += TG3_NVM_DIRENT_SIZE) {
15578 		if (tg3_nvram_read(tp, offset, &val))
15579 			return;
15580 
15581 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15582 			break;
15583 	}
15584 
15585 	if (offset == TG3_NVM_DIR_END)
15586 		return;
15587 
15588 	if (!tg3_flag(tp, 5705_PLUS))
15589 		start = 0x08000000;
15590 	else if (tg3_nvram_read(tp, offset - 4, &start))
15591 		return;
15592 
15593 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15594 	    !tg3_fw_img_is_valid(tp, offset) ||
15595 	    tg3_nvram_read(tp, offset + 8, &val))
15596 		return;
15597 
15598 	offset += val - start;
15599 
15600 	vlen = strlen(tp->fw_ver);
15601 
15602 	tp->fw_ver[vlen++] = ',';
15603 	tp->fw_ver[vlen++] = ' ';
15604 
15605 	for (i = 0; i < 4; i++) {
15606 		__be32 v;
15607 		if (tg3_nvram_read_be32(tp, offset, &v))
15608 			return;
15609 
15610 		offset += sizeof(v);
15611 
15612 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15613 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15614 			break;
15615 		}
15616 
15617 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15618 		vlen += sizeof(v);
15619 	}
15620 }
15621 
15622 static void tg3_probe_ncsi(struct tg3 *tp)
15623 {
15624 	u32 apedata;
15625 
15626 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15627 	if (apedata != APE_SEG_SIG_MAGIC)
15628 		return;
15629 
15630 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15631 	if (!(apedata & APE_FW_STATUS_READY))
15632 		return;
15633 
15634 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15635 		tg3_flag_set(tp, APE_HAS_NCSI);
15636 }
15637 
15638 static void tg3_read_dash_ver(struct tg3 *tp)
15639 {
15640 	int vlen;
15641 	u32 apedata;
15642 	char *fwtype;
15643 
15644 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15645 
15646 	if (tg3_flag(tp, APE_HAS_NCSI))
15647 		fwtype = "NCSI";
15648 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15649 		fwtype = "SMASH";
15650 	else
15651 		fwtype = "DASH";
15652 
15653 	vlen = strlen(tp->fw_ver);
15654 
15655 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15656 		 fwtype,
15657 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15658 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15659 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15660 		 (apedata & APE_FW_VERSION_BLDMSK));
15661 }
15662 
15663 static void tg3_read_otp_ver(struct tg3 *tp)
15664 {
15665 	u32 val, val2;
15666 
15667 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15668 		return;
15669 
15670 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15671 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15672 	    TG3_OTP_MAGIC0_VALID(val)) {
15673 		u64 val64 = (u64) val << 32 | val2;
15674 		u32 ver = 0;
15675 		int i, vlen;
15676 
15677 		for (i = 0; i < 7; i++) {
15678 			if ((val64 & 0xff) == 0)
15679 				break;
15680 			ver = val64 & 0xff;
15681 			val64 >>= 8;
15682 		}
15683 		vlen = strlen(tp->fw_ver);
15684 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15685 	}
15686 }
15687 
15688 static void tg3_read_fw_ver(struct tg3 *tp)
15689 {
15690 	u32 val;
15691 	bool vpd_vers = false;
15692 
15693 	if (tp->fw_ver[0] != 0)
15694 		vpd_vers = true;
15695 
15696 	if (tg3_flag(tp, NO_NVRAM)) {
15697 		strcat(tp->fw_ver, "sb");
15698 		tg3_read_otp_ver(tp);
15699 		return;
15700 	}
15701 
15702 	if (tg3_nvram_read(tp, 0, &val))
15703 		return;
15704 
15705 	if (val == TG3_EEPROM_MAGIC)
15706 		tg3_read_bc_ver(tp);
15707 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15708 		tg3_read_sb_ver(tp, val);
15709 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15710 		tg3_read_hwsb_ver(tp);
15711 
15712 	if (tg3_flag(tp, ENABLE_ASF)) {
15713 		if (tg3_flag(tp, ENABLE_APE)) {
15714 			tg3_probe_ncsi(tp);
15715 			if (!vpd_vers)
15716 				tg3_read_dash_ver(tp);
15717 		} else if (!vpd_vers) {
15718 			tg3_read_mgmtfw_ver(tp);
15719 		}
15720 	}
15721 
15722 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15723 }
15724 
15725 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15726 {
15727 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15728 		return TG3_RX_RET_MAX_SIZE_5717;
15729 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15730 		return TG3_RX_RET_MAX_SIZE_5700;
15731 	else
15732 		return TG3_RX_RET_MAX_SIZE_5705;
15733 }
15734 
15735 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15736 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15737 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15738 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15739 	{ },
15740 };
15741 
15742 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15743 {
15744 	struct pci_dev *peer;
15745 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15746 
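	/* Scan the other PCI functions in this slot for the second
	 * port of a dual-port device.
	 */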
15747 	for (func = 0; func < 8; func++) {
15748 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
15749 		if (peer && peer != tp->pdev)
15750 			break;
15751 		pci_dev_put(peer);
15752 	}
	/* 5704 can be configured in single-port mode; set peer to
15754 	 * tp->pdev in that case.
15755 	 */
15756 	if (!peer) {
15757 		peer = tp->pdev;
15758 		return peer;
15759 	}
15760 
15761 	/*
15762 	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
15764 	 */
15765 	pci_dev_put(peer);
15766 
15767 	return peer;
15768 }
15769 
15770 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15771 {
15772 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15773 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15774 		u32 reg;
15775 
15776 		/* All devices that use the alternate
15777 		 * ASIC REV location have a CPMU.
15778 		 */
15779 		tg3_flag_set(tp, CPMU_PRESENT);
15780 
15781 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15782 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15783 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15784 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15785 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15786 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15787 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15788 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15789 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15790 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15791 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15792 			reg = TG3PCI_GEN2_PRODID_ASICREV;
15793 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15794 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15795 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15796 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15797 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15798 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15799 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15800 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15801 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15802 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15803 			reg = TG3PCI_GEN15_PRODID_ASICREV;
15804 		else
15805 			reg = TG3PCI_PRODID_ASICREV;
15806 
15807 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15808 	}
15809 
15810 	/* Wrong chip ID in 5752 A0. This code can be removed later
15811 	 * as A0 is not in production.
15812 	 */
15813 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15814 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15815 
15816 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15817 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15818 
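	/* Build up the chip-class flags from newest to oldest; each
	 * *_PLUS flag set here feeds the checks for the older classes
	 * below it.
	 */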
15819 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15820 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15821 	    tg3_asic_rev(tp) == ASIC_REV_5720)
15822 		tg3_flag_set(tp, 5717_PLUS);
15823 
15824 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15825 	    tg3_asic_rev(tp) == ASIC_REV_57766)
15826 		tg3_flag_set(tp, 57765_CLASS);
15827 
15828 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15829 	     tg3_asic_rev(tp) == ASIC_REV_5762)
15830 		tg3_flag_set(tp, 57765_PLUS);
15831 
15832 	/* Intentionally exclude ASIC_REV_5906 */
15833 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15834 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
15835 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
15836 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
15837 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
15838 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
15839 	    tg3_flag(tp, 57765_PLUS))
15840 		tg3_flag_set(tp, 5755_PLUS);
15841 
15842 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15843 	    tg3_asic_rev(tp) == ASIC_REV_5714)
15844 		tg3_flag_set(tp, 5780_CLASS);
15845 
15846 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15847 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
15848 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
15849 	    tg3_flag(tp, 5755_PLUS) ||
15850 	    tg3_flag(tp, 5780_CLASS))
15851 		tg3_flag_set(tp, 5750_PLUS);
15852 
15853 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15854 	    tg3_flag(tp, 5750_PLUS))
15855 		tg3_flag_set(tp, 5705_PLUS);
15856 }
15857 
15858 static bool tg3_10_100_only_device(struct tg3 *tp,
15859 				   const struct pci_device_id *ent)
15860 {
15861 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15862 
15863 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15864 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15865 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
15866 		return true;
15867 
15868 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15869 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15870 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15871 				return true;
15872 		} else {
15873 			return true;
15874 		}
15875 	}
15876 
15877 	return false;
15878 }
15879 
15880 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15881 {
15882 	u32 misc_ctrl_reg;
15883 	u32 pci_state_reg, grc_misc_cfg;
15884 	u32 val;
15885 	u16 pci_cmd;
15886 	int err;
15887 
15888 	/* Force memory write invalidate off.  If we leave it on,
15889 	 * then on 5700_BX chips we have to enable a workaround.
15890 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time and so never uses
15893 	 * it.  This seems to suggest that the workaround is insufficient.
15894 	 */
15895 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15896 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15897 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15898 
15899 	/* Important! -- Make sure register accesses are byteswapped
15900 	 * correctly.  Also, for those chips that require it, make
15901 	 * sure that indirect register accesses are enabled before
15902 	 * the first operation.
15903 	 */
15904 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15905 			      &misc_ctrl_reg);
15906 	tp->misc_host_ctrl |= (misc_ctrl_reg &
15907 			       MISC_HOST_CTRL_CHIPREV);
15908 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15909 			       tp->misc_host_ctrl);
15910 
15911 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
15912 
15913 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15914 	 * we need to disable memory and use config. cycles
15915 	 * only to access all registers. The 5702/03 chips
15916 	 * can mistakenly decode the special cycles from the
15917 	 * ICH chipsets as memory write cycles, causing corruption
15918 	 * of register and memory space. Only certain ICH bridges
15919 	 * will drive special cycles with non-zero data during the
15920 	 * address phase which can fall within the 5703's address
15921 	 * range. This is not an ICH bug as the PCI spec allows
15922 	 * non-zero address during special cycles. However, only
15923 	 * these ICH bridges are known to drive non-zero addresses
15924 	 * during special cycles.
15925 	 *
15926 	 * Since special cycles do not cross PCI bridges, we only
15927 	 * enable this workaround if the 5703 is on the secondary
15928 	 * bus of these ICH bridges.
15929 	 */
15930 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15931 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15932 		static struct tg3_dev_id {
15933 			u32	vendor;
15934 			u32	device;
15935 			u32	rev;
15936 		} ich_chipsets[] = {
15937 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15938 			  PCI_ANY_ID },
15939 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15940 			  PCI_ANY_ID },
15941 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15942 			  0xa },
15943 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15944 			  PCI_ANY_ID },
15945 			{ },
15946 		};
15947 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
15948 		struct pci_dev *bridge = NULL;
15949 
15950 		while (pci_id->vendor != 0) {
15951 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
15952 						bridge);
15953 			if (!bridge) {
15954 				pci_id++;
15955 				continue;
15956 			}
15957 			if (pci_id->rev != PCI_ANY_ID) {
15958 				if (bridge->revision > pci_id->rev)
15959 					continue;
15960 			}
15961 			if (bridge->subordinate &&
15962 			    (bridge->subordinate->number ==
15963 			     tp->pdev->bus->number)) {
15964 				tg3_flag_set(tp, ICH_WORKAROUND);
15965 				pci_dev_put(bridge);
15966 				break;
15967 			}
15968 		}
15969 	}
15970 
15971 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15972 		static struct tg3_dev_id {
15973 			u32	vendor;
15974 			u32	device;
15975 		} bridge_chipsets[] = {
15976 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15977 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15978 			{ },
15979 		};
15980 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15981 		struct pci_dev *bridge = NULL;
15982 
15983 		while (pci_id->vendor != 0) {
15984 			bridge = pci_get_device(pci_id->vendor,
15985 						pci_id->device,
15986 						bridge);
15987 			if (!bridge) {
15988 				pci_id++;
15989 				continue;
15990 			}
15991 			if (bridge->subordinate &&
15992 			    (bridge->subordinate->number <=
15993 			     tp->pdev->bus->number) &&
15994 			    (bridge->subordinate->busn_res.end >=
15995 			     tp->pdev->bus->number)) {
15996 				tg3_flag_set(tp, 5701_DMA_BUG);
15997 				pci_dev_put(bridge);
15998 				break;
15999 			}
16000 		}
16001 	}
16002 
16003 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40 bits. This bridge may have additional
	 * 57xx devices behind it, in some 4-port NIC designs for example.
16006 	 * Any tg3 device found behind the bridge will also need the 40-bit
16007 	 * DMA workaround.
16008 	 */
16009 	if (tg3_flag(tp, 5780_CLASS)) {
16010 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16011 		tp->msi_cap = tp->pdev->msi_cap;
16012 	} else {
16013 		struct pci_dev *bridge = NULL;
16014 
16015 		do {
16016 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16017 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16018 						bridge);
16019 			if (bridge && bridge->subordinate &&
16020 			    (bridge->subordinate->number <=
16021 			     tp->pdev->bus->number) &&
16022 			    (bridge->subordinate->busn_res.end >=
16023 			     tp->pdev->bus->number)) {
16024 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16025 				pci_dev_put(bridge);
16026 				break;
16027 			}
16028 		} while (bridge);
16029 	}
16030 
16031 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16032 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16033 		tp->pdev_peer = tg3_find_peer(tp);
16034 
16035 	/* Determine TSO capabilities */
16036 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16037 		; /* Do nothing. HW bug. */
16038 	else if (tg3_flag(tp, 57765_PLUS))
16039 		tg3_flag_set(tp, HW_TSO_3);
16040 	else if (tg3_flag(tp, 5755_PLUS) ||
16041 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16042 		tg3_flag_set(tp, HW_TSO_2);
16043 	else if (tg3_flag(tp, 5750_PLUS)) {
16044 		tg3_flag_set(tp, HW_TSO_1);
16045 		tg3_flag_set(tp, TSO_BUG);
16046 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16047 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16048 			tg3_flag_clear(tp, TSO_BUG);
16049 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16050 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16051 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16052 		tg3_flag_set(tp, FW_TSO);
16053 		tg3_flag_set(tp, TSO_BUG);
16054 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16055 			tp->fw_needed = FIRMWARE_TG3TSO5;
16056 		else
16057 			tp->fw_needed = FIRMWARE_TG3TSO;
16058 	}
16059 
16060 	/* Selectively allow TSO based on operating conditions */
16061 	if (tg3_flag(tp, HW_TSO_1) ||
16062 	    tg3_flag(tp, HW_TSO_2) ||
16063 	    tg3_flag(tp, HW_TSO_3) ||
16064 	    tg3_flag(tp, FW_TSO)) {
16065 		/* For firmware TSO, assume ASF is disabled.
16066 		 * We'll disable TSO later if we discover ASF
16067 		 * is enabled in tg3_get_eeprom_hw_cfg().
16068 		 */
16069 		tg3_flag_set(tp, TSO_CAPABLE);
16070 	} else {
16071 		tg3_flag_clear(tp, TSO_CAPABLE);
16072 		tg3_flag_clear(tp, TSO_BUG);
16073 		tp->fw_needed = NULL;
16074 	}
16075 
16076 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16077 		tp->fw_needed = FIRMWARE_TG3;
16078 
16079 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16080 		tp->fw_needed = FIRMWARE_TG357766;
16081 
16082 	tp->irq_max = 1;
16083 
16084 	if (tg3_flag(tp, 5750_PLUS)) {
16085 		tg3_flag_set(tp, SUPPORT_MSI);
16086 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16087 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16088 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16089 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16090 		     tp->pdev_peer == tp->pdev))
16091 			tg3_flag_clear(tp, SUPPORT_MSI);
16092 
16093 		if (tg3_flag(tp, 5755_PLUS) ||
16094 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16095 			tg3_flag_set(tp, 1SHOT_MSI);
16096 		}
16097 
16098 		if (tg3_flag(tp, 57765_PLUS)) {
16099 			tg3_flag_set(tp, SUPPORT_MSIX);
16100 			tp->irq_max = TG3_IRQ_MAX_VECS;
16101 		}
16102 	}
16103 
16104 	tp->txq_max = 1;
16105 	tp->rxq_max = 1;
16106 	if (tp->irq_max > 1) {
16107 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16108 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16109 
16110 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16111 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16112 			tp->txq_max = tp->irq_max - 1;
16113 	}
16114 
16115 	if (tg3_flag(tp, 5755_PLUS) ||
16116 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16117 		tg3_flag_set(tp, SHORT_DMA_BUG);
16118 
16119 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16120 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16121 
16122 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16123 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16124 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16125 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16126 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16127 
16128 	if (tg3_flag(tp, 57765_PLUS) &&
16129 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16130 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16131 
16132 	if (!tg3_flag(tp, 5705_PLUS) ||
16133 	    tg3_flag(tp, 5780_CLASS) ||
16134 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16135 		tg3_flag_set(tp, JUMBO_CAPABLE);
16136 
16137 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16138 			      &pci_state_reg);
16139 
16140 	if (pci_is_pcie(tp->pdev)) {
16141 		u16 lnkctl;
16142 
16143 		tg3_flag_set(tp, PCI_EXPRESS);
16144 
16145 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16146 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16147 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16148 				tg3_flag_clear(tp, HW_TSO_2);
16149 				tg3_flag_clear(tp, TSO_CAPABLE);
16150 			}
16151 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16152 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16153 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16154 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16155 				tg3_flag_set(tp, CLKREQ_BUG);
16156 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16157 			tg3_flag_set(tp, L1PLLPD_EN);
16158 		}
16159 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16160 		/* BCM5785 devices are effectively PCIe devices, and should
16161 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16162 		 * section.
16163 		 */
16164 		tg3_flag_set(tp, PCI_EXPRESS);
16165 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16166 		   tg3_flag(tp, 5780_CLASS)) {
16167 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16168 		if (!tp->pcix_cap) {
16169 			dev_err(&tp->pdev->dev,
16170 				"Cannot find PCI-X capability, aborting\n");
16171 			return -EIO;
16172 		}
16173 
16174 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16175 			tg3_flag_set(tp, PCIX_MODE);
16176 	}
16177 
16178 	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering of mailbox register writes by the host
	 * controller can cause major trouble.  We read back after
16181 	 * every mailbox register write to force the writes to be
16182 	 * posted to the chip in order.
16183 	 */
16184 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16185 	    !tg3_flag(tp, PCI_EXPRESS))
16186 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16187 
16188 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16189 			     &tp->pci_cacheline_sz);
16190 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16191 			     &tp->pci_lat_timer);
16192 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16193 	    tp->pci_lat_timer < 64) {
16194 		tp->pci_lat_timer = 64;
16195 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16196 				      tp->pci_lat_timer);
16197 	}
16198 
16199 	/* Important! -- It is critical that the PCI-X hw workaround
16200 	 * situation is decided before the first MMIO register access.
16201 	 */
16202 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16203 		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to work around a bug.
16205 		 */
16206 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16207 
16208 		/* If we are in PCI-X mode, enable register write workaround.
16209 		 *
16210 		 * The workaround is to use indirect register accesses
16211 		 * for all chip writes not to mailbox registers.
16212 		 */
16213 		if (tg3_flag(tp, PCIX_MODE)) {
16214 			u32 pm_reg;
16215 
16216 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16217 
			/* The chip can have its power management PCI config
16219 			 * space registers clobbered due to this bug.
16220 			 * So explicitly force the chip into D0 here.
16221 			 */
16222 			pci_read_config_dword(tp->pdev,
16223 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16224 					      &pm_reg);
16225 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16226 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16227 			pci_write_config_dword(tp->pdev,
16228 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16229 					       pm_reg);
16230 
16231 			/* Also, force SERR#/PERR# in PCI command. */
16232 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16233 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16234 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16235 		}
16236 	}
16237 
16238 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16239 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16240 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16241 		tg3_flag_set(tp, PCI_32BIT);
16242 
16243 	/* Chip-specific fixup from Broadcom driver */
16244 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16245 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16246 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16247 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16248 	}
16249 
16250 	/* Default fast path register access methods */
16251 	tp->read32 = tg3_read32;
16252 	tp->write32 = tg3_write32;
16253 	tp->read32_mbox = tg3_read32;
16254 	tp->write32_mbox = tg3_write32;
16255 	tp->write32_tx_mbox = tg3_write32;
16256 	tp->write32_rx_mbox = tg3_write32;
16257 
16258 	/* Various workaround register access methods */
16259 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16260 		tp->write32 = tg3_write_indirect_reg32;
16261 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16262 		 (tg3_flag(tp, PCI_EXPRESS) &&
16263 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16264 		/*
		 * Back-to-back register writes can cause problems on these
		 * chips; the workaround is to read back all reg writes
16267 		 * except those to mailbox regs.
16268 		 *
16269 		 * See tg3_write_indirect_reg32().
16270 		 */
16271 		tp->write32 = tg3_write_flush_reg32;
16272 	}
16273 
16274 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16275 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16276 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16277 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16278 	}
16279 
16280 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16281 		tp->read32 = tg3_read_indirect_reg32;
16282 		tp->write32 = tg3_write_indirect_reg32;
16283 		tp->read32_mbox = tg3_read_indirect_mbox;
16284 		tp->write32_mbox = tg3_write_indirect_mbox;
16285 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16286 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16287 
16288 		iounmap(tp->regs);
16289 		tp->regs = NULL;
16290 
16291 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16292 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16293 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16294 	}
16295 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16296 		tp->read32_mbox = tg3_read32_mbox_5906;
16297 		tp->write32_mbox = tg3_write32_mbox_5906;
16298 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16299 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16300 	}
16301 
16302 	if (tp->write32 == tg3_write_indirect_reg32 ||
16303 	    (tg3_flag(tp, PCIX_MODE) &&
16304 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16305 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16306 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16307 
16308 	/* The memory arbiter has to be enabled in order for SRAM accesses
16309 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16310 	 * sure it is enabled, but other entities such as system netboot
16311 	 * code might disable it.
16312 	 */
16313 	val = tr32(MEMARB_MODE);
16314 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16315 
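	/* Work out which hardware function this device is.  The PCI
	 * function number is the default, but PCI-X 5704/5780-class
	 * parts report it in PCI_X_STATUS and 5717/5719/5720 in the
	 * CPMU status.
	 */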
16316 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16317 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16318 	    tg3_flag(tp, 5780_CLASS)) {
16319 		if (tg3_flag(tp, PCIX_MODE)) {
16320 			pci_read_config_dword(tp->pdev,
16321 					      tp->pcix_cap + PCI_X_STATUS,
16322 					      &val);
16323 			tp->pci_fn = val & 0x7;
16324 		}
16325 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16326 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16327 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16328 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16329 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16330 			val = tr32(TG3_CPMU_STATUS);
16331 
16332 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16333 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16334 		else
16335 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16336 				     TG3_CPMU_STATUS_FSHFT_5719;
16337 	}
16338 
16339 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16340 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16341 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16342 	}
16343 
16344 	/* Get eeprom hw config before calling tg3_set_power_state().
16345 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16346 	 * determined before calling tg3_set_power_state() so that
16347 	 * we know whether or not to switch out of Vaux power.
16348 	 * When the flag is set, it means that GPIO1 is used for eeprom
16349 	 * write protect and also implies that it is a LOM where GPIOs
16350 	 * are not used to switch power.
16351 	 */
16352 	tg3_get_eeprom_hw_cfg(tp);
16353 
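	/* Firmware-based TSO and the ASF management firmware both run on
	 * the chip's internal CPU, so the two cannot be active at once;
	 * keep ASF and disable firmware TSO.
	 */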
16354 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16355 		tg3_flag_clear(tp, TSO_CAPABLE);
16356 		tg3_flag_clear(tp, TSO_BUG);
16357 		tp->fw_needed = NULL;
16358 	}
16359 
16360 	if (tg3_flag(tp, ENABLE_APE)) {
16361 		/* Allow reads and writes to the
16362 		 * APE register and memory space.
16363 		 */
16364 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16365 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16366 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16367 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16368 				       pci_state_reg);
16369 
16370 		tg3_ape_lock_init(tp);
16371 	}
16372 
16373 	/* Set up tp->grc_local_ctrl before calling
16374 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16375 	 * will bring 5700's external PHY out of reset.
16376 	 * It is also used as eeprom write protect on LOMs.
16377 	 */
16378 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16379 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16380 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16381 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16382 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16383 	/* Unused GPIO3 must be driven as output on 5752 because there
16384 	 * are no pull-up resistors on unused GPIO pins.
16385 	 */
16386 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16387 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16388 
16389 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16390 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16391 	    tg3_flag(tp, 57765_CLASS))
16392 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16393 
16394 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16395 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16396 		/* Turn off the debug UART. */
16397 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16398 		if (tg3_flag(tp, IS_NIC))
16399 			/* Keep VMain power. */
16400 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16401 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16402 	}
16403 
16404 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16405 		tp->grc_local_ctrl |=
16406 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16407 
16408 	/* Switch out of Vaux if it is a NIC */
16409 	tg3_pwrsrc_switch_to_vmain(tp);
16410 
16411 	/* Derive initial jumbo mode from MTU assigned in
16412 	 * ether_setup() via the alloc_etherdev() call
16413 	 */
16414 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16415 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16416 
16417 	/* Determine WakeOnLan speed to use. */
16418 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16419 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16420 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16421 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16422 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16423 	} else {
16424 		tg3_flag_set(tp, WOL_SPEED_100MB);
16425 	}
16426 
16427 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16428 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16429 
16430 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16431 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16432 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16433 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16434 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16435 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16436 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16437 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16438 
16439 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16440 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16441 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16442 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16443 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16444 
16445 	if (tg3_flag(tp, 5705_PLUS) &&
16446 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16447 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16448 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16449 	    !tg3_flag(tp, 57765_PLUS)) {
16450 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16451 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16452 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16453 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16454 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16455 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16456 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16457 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16458 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16459 		} else
16460 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16461 	}
16462 
16463 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16464 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16465 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16466 		if (tp->phy_otp == 0)
16467 			tp->phy_otp = TG3_OTP_DEFAULT;
16468 	}
16469 
16470 	if (tg3_flag(tp, CPMU_PRESENT))
16471 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16472 	else
16473 		tp->mi_mode = MAC_MI_MODE_BASE;
16474 
16475 	tp->coalesce_mode = 0;
16476 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16477 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16478 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16479 
16480 	/* Set these bits to enable statistics workaround. */
16481 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16482 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16483 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16484 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16485 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16486 	}
16487 
16488 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16489 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16490 		tg3_flag_set(tp, USE_PHYLIB);
16491 
16492 	err = tg3_mdio_init(tp);
16493 	if (err)
16494 		return err;
16495 
16496 	/* Initialize data/descriptor byte/word swapping. */
16497 	val = tr32(GRC_MODE);
16498 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16499 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16500 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16501 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16502 			GRC_MODE_B2HRX_ENABLE |
16503 			GRC_MODE_HTX2B_ENABLE |
16504 			GRC_MODE_HOST_STACKUP);
16505 	else
16506 		val &= GRC_MODE_HOST_STACKUP;
16507 
16508 	tw32(GRC_MODE, val | tp->grc_mode);
16509 
16510 	tg3_switch_clocks(tp);
16511 
16512 	/* Clear this out for sanity. */
16513 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16514 
16515 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16516 			      &pci_state_reg);
16517 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16518 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16519 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16520 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16521 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16522 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16523 			void __iomem *sram_base;
16524 
16525 			/* Write some dummy words into the SRAM status block
16526 			 * area and see if it reads back correctly.  If the return
16527 			 * value is bad, force enable the PCIX workaround.
16528 			 */
16529 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16530 
16531 			writel(0x00000000, sram_base);
16532 			writel(0x00000000, sram_base + 4);
16533 			writel(0xffffffff, sram_base + 4);
16534 			if (readl(sram_base) != 0x00000000)
16535 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16536 		}
16537 	}
16538 
16539 	udelay(50);
16540 	tg3_nvram_init(tp);
16541 
16542 	/* If the device has an NVRAM, no need to load patch firmware */
16543 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16544 	    !tg3_flag(tp, NO_NVRAM))
16545 		tp->fw_needed = NULL;
16546 
16547 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16548 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16549 
16550 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16551 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16552 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16553 		tg3_flag_set(tp, IS_5788);
16554 
16555 	if (!tg3_flag(tp, IS_5788) &&
16556 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16557 		tg3_flag_set(tp, TAGGED_STATUS);
16558 	if (tg3_flag(tp, TAGGED_STATUS)) {
16559 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16560 				      HOSTCC_MODE_CLRTICK_TXBD);
16561 
16562 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16563 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16564 				       tp->misc_host_ctrl);
16565 	}
16566 
16567 	/* Preserve the APE MAC_MODE bits */
16568 	if (tg3_flag(tp, ENABLE_APE))
16569 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16570 	else
16571 		tp->mac_mode = 0;
16572 
16573 	if (tg3_10_100_only_device(tp, ent))
16574 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16575 
16576 	err = tg3_phy_probe(tp);
16577 	if (err) {
16578 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16579 		/* ... but do not return immediately ... */
16580 		tg3_mdio_fini(tp);
16581 	}
16582 
16583 	tg3_read_vpd(tp);
16584 	tg3_read_fw_ver(tp);
16585 
16586 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16587 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16588 	} else {
16589 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16590 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16591 		else
16592 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16593 	}
16594 
16595 	/* 5700 {AX,BX} chips have a broken status block link
16596 	 * change bit implementation, so we must use the
16597 	 * status register in those cases.
16598 	 */
16599 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16600 		tg3_flag_set(tp, USE_LINKCHG_REG);
16601 	else
16602 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16603 
16604 	/* The led_ctrl is set during tg3_phy_probe; here we might
16605 	 * have to force the link status polling mechanism based
16606 	 * upon subsystem IDs.
16607 	 */
16608 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16609 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16610 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16611 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16612 		tg3_flag_set(tp, USE_LINKCHG_REG);
16613 	}
16614 
16615 	/* For all SERDES we poll the MAC status register. */
16616 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16617 		tg3_flag_set(tp, POLL_SERDES);
16618 	else
16619 		tg3_flag_clear(tp, POLL_SERDES);
16620 
16621 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16622 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16623 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16624 	    tg3_flag(tp, PCIX_MODE)) {
16625 		tp->rx_offset = NET_SKB_PAD;
16626 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16627 		tp->rx_copy_thresh = ~(u16)0;
16628 #endif
16629 	}
16630 
16631 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16632 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16633 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16634 
16635 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16636 
16637 	/* Increment the rx prod index on the rx std ring by at most
16638 	 * 8 for these chips to work around hw errata.
16639 	 */
16640 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16641 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16642 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16643 		tp->rx_std_max_post = 8;
16644 
16645 	if (tg3_flag(tp, ASPM_WORKAROUND))
16646 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16647 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16648 
16649 	return err;
16650 }
16651 
16652 #ifdef CONFIG_SPARC
16653 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16654 {
16655 	struct net_device *dev = tp->dev;
16656 	struct pci_dev *pdev = tp->pdev;
16657 	struct device_node *dp = pci_device_to_OF_node(pdev);
16658 	const unsigned char *addr;
16659 	int len;
16660 
16661 	addr = of_get_property(dp, "local-mac-address", &len);
16662 	if (addr && len == ETH_ALEN) {
16663 		memcpy(dev->dev_addr, addr, ETH_ALEN);
16664 		return 0;
16665 	}
16666 	return -ENODEV;
16667 }
16668 
16669 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16670 {
16671 	struct net_device *dev = tp->dev;
16672 
16673 	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16674 	return 0;
16675 }
16676 #endif
16677 
16678 static int tg3_get_device_address(struct tg3 *tp)
16679 {
16680 	struct net_device *dev = tp->dev;
16681 	u32 hi, lo, mac_offset;
16682 	int addr_ok = 0;
16683 	int err;
16684 
16685 #ifdef CONFIG_SPARC
16686 	if (!tg3_get_macaddr_sparc(tp))
16687 		return 0;
16688 #endif
16689 
16690 	if (tg3_flag(tp, IS_SSB_CORE)) {
16691 		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16692 		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16693 			return 0;
16694 	}
16695 
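	/* Default NVRAM offset of the MAC address.  Dual-MAC and
	 * multi-function chips keep per-function copies at other offsets,
	 * selected below.
	 */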
16696 	mac_offset = 0x7c;
16697 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16698 	    tg3_flag(tp, 5780_CLASS)) {
16699 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16700 			mac_offset = 0xcc;
16701 		if (tg3_nvram_lock(tp))
16702 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16703 		else
16704 			tg3_nvram_unlock(tp);
16705 	} else if (tg3_flag(tp, 5717_PLUS)) {
16706 		if (tp->pci_fn & 1)
16707 			mac_offset = 0xcc;
16708 		if (tp->pci_fn > 1)
16709 			mac_offset += 0x18c;
16710 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16711 		mac_offset = 0x10;
16712 
16713 	/* First try to get it from MAC address mailbox. */
16714 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
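	/* Bootcode leaves an ASCII "HK" (0x484b) signature in the upper
	 * half of the mailbox when it has stored a valid address there.
	 */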
16715 	if ((hi >> 16) == 0x484b) {
16716 		dev->dev_addr[0] = (hi >>  8) & 0xff;
16717 		dev->dev_addr[1] = (hi >>  0) & 0xff;
16718 
16719 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16720 		dev->dev_addr[2] = (lo >> 24) & 0xff;
16721 		dev->dev_addr[3] = (lo >> 16) & 0xff;
16722 		dev->dev_addr[4] = (lo >>  8) & 0xff;
16723 		dev->dev_addr[5] = (lo >>  0) & 0xff;
16724 
16725 		/* Some old bootcode may report a 0 MAC address in SRAM */
16726 		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16727 	}
16728 	if (!addr_ok) {
16729 		/* Next, try NVRAM. */
16730 		if (!tg3_flag(tp, NO_NVRAM) &&
16731 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16732 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16733 			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16734 			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16735 		}
16736 		/* Finally just fetch it out of the MAC control regs. */
16737 		else {
16738 			hi = tr32(MAC_ADDR_0_HIGH);
16739 			lo = tr32(MAC_ADDR_0_LOW);
16740 
16741 			dev->dev_addr[5] = lo & 0xff;
16742 			dev->dev_addr[4] = (lo >> 8) & 0xff;
16743 			dev->dev_addr[3] = (lo >> 16) & 0xff;
16744 			dev->dev_addr[2] = (lo >> 24) & 0xff;
16745 			dev->dev_addr[1] = hi & 0xff;
16746 			dev->dev_addr[0] = (hi >> 8) & 0xff;
16747 		}
16748 	}
16749 
16750 	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16751 #ifdef CONFIG_SPARC
16752 		if (!tg3_get_default_macaddr_sparc(tp))
16753 			return 0;
16754 #endif
16755 		return -EINVAL;
16756 	}
16757 	return 0;
16758 }
16759 
16760 #define BOUNDARY_SINGLE_CACHELINE	1
16761 #define BOUNDARY_MULTI_CACHELINE	2
16762 
16763 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16764 {
16765 	int cacheline_size;
16766 	u8 byte;
16767 	int goal;
16768 
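	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, so convert it
	 * to bytes.  Zero typically means it was never programmed; assume
	 * the largest boundary handled below (1024 bytes).
	 */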
16769 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16770 	if (byte == 0)
16771 		cacheline_size = 1024;
16772 	else
16773 		cacheline_size = (int) byte * 4;
16774 
16775 	/* On 5703 and later chips, the boundary bits have no
16776 	 * effect.
16777 	 */
16778 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16779 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16780 	    !tg3_flag(tp, PCI_EXPRESS))
16781 		goto out;
16782 
16783 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16784 	goal = BOUNDARY_MULTI_CACHELINE;
16785 #else
16786 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16787 	goal = BOUNDARY_SINGLE_CACHELINE;
16788 #else
16789 	goal = 0;
16790 #endif
16791 #endif
16792 
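	/* A goal of zero leaves the chip's default boundary behavior
	 * alone; otherwise DMA bursts are broken at every cache line
	 * (SINGLE) or only at larger multiples of it (MULTI).
	 */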
16793 	if (tg3_flag(tp, 57765_PLUS)) {
16794 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16795 		goto out;
16796 	}
16797 
16798 	if (!goal)
16799 		goto out;
16800 
16801 	/* PCI controllers on most RISC systems tend to disconnect
16802 	 * when a device tries to burst across a cache-line boundary.
16803 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16804 	 *
16805 	 * Unfortunately, for PCI-E there are only limited
16806 	 * write-side controls for this, and thus for reads
16807 	 * we will still get the disconnects.  We'll also waste
16808 	 * these PCI cycles for both read and write for chips
16809 	 * other than 5700 and 5701 which do not implement the
16810 	 * boundary bits.
16811 	 */
16812 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16813 		switch (cacheline_size) {
16814 		case 16:
16815 		case 32:
16816 		case 64:
16817 		case 128:
16818 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16819 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16820 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16821 			} else {
16822 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16823 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16824 			}
16825 			break;
16826 
16827 		case 256:
16828 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16829 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16830 			break;
16831 
16832 		default:
16833 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16834 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16835 			break;
16836 		}
16837 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
16838 		switch (cacheline_size) {
16839 		case 16:
16840 		case 32:
16841 		case 64:
16842 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16843 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16844 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16845 				break;
16846 			}
16847 			/* fallthrough */
16848 		case 128:
16849 		default:
16850 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16851 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16852 			break;
16853 		}
16854 	} else {
16855 		switch (cacheline_size) {
16856 		case 16:
16857 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16858 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
16859 					DMA_RWCTRL_WRITE_BNDRY_16);
16860 				break;
16861 			}
16862 			/* fallthrough */
16863 		case 32:
16864 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16865 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
16866 					DMA_RWCTRL_WRITE_BNDRY_32);
16867 				break;
16868 			}
16869 			/* fallthrough */
16870 		case 64:
16871 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16872 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
16873 					DMA_RWCTRL_WRITE_BNDRY_64);
16874 				break;
16875 			}
16876 			/* fallthrough */
16877 		case 128:
16878 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
16879 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
16880 					DMA_RWCTRL_WRITE_BNDRY_128);
16881 				break;
16882 			}
16883 			/* fallthrough */
16884 		case 256:
16885 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
16886 				DMA_RWCTRL_WRITE_BNDRY_256);
16887 			break;
16888 		case 512:
16889 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
16890 				DMA_RWCTRL_WRITE_BNDRY_512);
16891 			break;
16892 		case 1024:
16893 		default:
16894 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16895 				DMA_RWCTRL_WRITE_BNDRY_1024);
16896 			break;
16897 		}
16898 	}
16899 
16900 out:
16901 	return val;
16902 }
16903 
16904 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16905 			   int size, bool to_device)
16906 {
16907 	struct tg3_internal_buffer_desc test_desc;
16908 	u32 sram_dma_descs;
16909 	int i, ret;
16910 
16911 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16912 
16913 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16914 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16915 	tw32(RDMAC_STATUS, 0);
16916 	tw32(WDMAC_STATUS, 0);
16917 
16918 	tw32(BUFMGR_MODE, 0);
16919 	tw32(FTQ_RESET, 0);
16920 
16921 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
16922 	test_desc.addr_lo = buf_dma & 0xffffffff;
16923 	test_desc.nic_mbuf = 0x00002100;
16924 	test_desc.len = size;
16925 
16926 	/*
16927 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
16928 	 * the *second* time the tg3 driver was getting loaded after an
16929 	 * initial scan.
16930 	 *
16931 	 * Broadcom tells me:
16932 	 *   ...the DMA engine is connected to the GRC block and a DMA
16933 	 *   reset may affect the GRC block in some unpredictable way...
16934 	 *   The behavior of resets to individual blocks has not been tested.
16935 	 *
16936 	 * Broadcom noted the GRC reset will also reset all sub-components.
16937 	 */
16938 	if (to_device) {
16939 		test_desc.cqid_sqid = (13 << 8) | 2;
16940 
16941 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16942 		udelay(40);
16943 	} else {
16944 		test_desc.cqid_sqid = (16 << 8) | 7;
16945 
16946 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16947 		udelay(40);
16948 	}
16949 	test_desc.flags = 0x00000005;
16950 
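	/* Copy the test descriptor into NIC SRAM one word at a time
	 * through the PCI memory window config registers.
	 */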
16951 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16952 		u32 val;
16953 
16954 		val = *(((u32 *)&test_desc) + i);
16955 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16956 				       sram_dma_descs + (i * sizeof(u32)));
16957 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16958 	}
16959 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16960 
16961 	if (to_device)
16962 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16963 	else
16964 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16965 
16966 	ret = -ENODEV;
16967 	for (i = 0; i < 40; i++) {
16968 		u32 val;
16969 
16970 		if (to_device)
16971 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16972 		else
16973 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16974 		if ((val & 0xffff) == sram_dma_descs) {
16975 			ret = 0;
16976 			break;
16977 		}
16978 
16979 		udelay(100);
16980 	}
16981 
16982 	return ret;
16983 }
16984 
16985 #define TEST_BUFFER_SIZE	0x2000
16986 
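/* Hosts known to expose the 5700/5701 write DMA bug even when the test
 * below passes; force the 16-byte write boundary on these.
 */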
16987 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16988 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16989 	{ },
16990 };
16991 
16992 static int tg3_test_dma(struct tg3 *tp)
16993 {
16994 	dma_addr_t buf_dma;
16995 	u32 *buf, saved_dma_rwctrl;
16996 	int ret = 0;
16997 
16998 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16999 				 &buf_dma, GFP_KERNEL);
17000 	if (!buf) {
17001 		ret = -ENOMEM;
17002 		goto out_nofree;
17003 	}
17004 
17005 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17006 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17007 
17008 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17009 
17010 	if (tg3_flag(tp, 57765_PLUS))
17011 		goto out;
17012 
17013 	if (tg3_flag(tp, PCI_EXPRESS)) {
17014 		/* DMA read watermark not used on PCIE */
17015 		tp->dma_rwctrl |= 0x00180000;
17016 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17017 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17018 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17019 			tp->dma_rwctrl |= 0x003f0000;
17020 		else
17021 			tp->dma_rwctrl |= 0x003f000f;
17022 	} else {
17023 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17024 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17025 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17026 			u32 read_water = 0x7;
17027 
17028 			/* If the 5704 is behind the EPB bridge, we can
17029 			 * do the less restrictive ONE_DMA workaround for
17030 			 * better performance.
17031 			 */
17032 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17033 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17034 				tp->dma_rwctrl |= 0x8000;
17035 			else if (ccval == 0x6 || ccval == 0x7)
17036 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17037 
17038 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17039 				read_water = 4;
17040 			/* Set bit 23 to enable PCIX hw bug fix */
17041 			tp->dma_rwctrl |=
17042 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17043 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17044 				(1 << 23);
17045 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17046 			/* 5780 always in PCIX mode */
17047 			tp->dma_rwctrl |= 0x00144000;
17048 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17049 			/* 5714 always in PCIX mode */
17050 			tp->dma_rwctrl |= 0x00148000;
17051 		} else {
17052 			tp->dma_rwctrl |= 0x001b000f;
17053 		}
17054 	}
17055 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17056 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17057 
17058 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17059 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17060 		tp->dma_rwctrl &= 0xfffffff0;
17061 
17062 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17063 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17064 		/* Remove this if it causes problems for some boards. */
17065 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17066 
17067 		/* On 5700/5701 chips, we need to set this bit.
17068 		 * Otherwise the chip will issue cacheline transactions
17069 		 * to streamable DMA memory without all of the byte
17070 		 * enables turned on.  This is an error on several
17071 		 * RISC PCI controllers, in particular sparc64.
17072 		 *
17073 		 * On 5703/5704 chips, this bit has been reassigned
17074 		 * a different meaning.  In particular, it is used
17075 		 * on those chips to enable a PCI-X workaround.
17076 		 */
17077 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17078 	}
17079 
17080 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17081 
17083 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17084 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17085 		goto out;
17086 
17087 	/* It is best to perform the DMA test with the maximum write burst size
17088 	 * to expose the 5700/5701 write DMA bug.
17089 	 */
17090 	saved_dma_rwctrl = tp->dma_rwctrl;
17091 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17092 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17093 
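	/* Fill the buffer with an incrementing pattern, DMA it to the
	 * chip and back, then verify it.  On corruption, clamp the write
	 * boundary to 16 bytes and retry once before giving up.
	 */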
17094 	while (1) {
17095 		u32 *p = buf, i;
17096 
17097 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17098 			p[i] = i;
17099 
17100 		/* Send the buffer to the chip. */
17101 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17102 		if (ret) {
17103 			dev_err(&tp->pdev->dev,
17104 				"%s: Buffer write failed. err = %d\n",
17105 				__func__, ret);
17106 			break;
17107 		}
17108 
17109 		/* Now read it back. */
17110 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17111 		if (ret) {
17112 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17113 				"err = %d\n", __func__, ret);
17114 			break;
17115 		}
17116 
17117 		/* Verify it. */
17118 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17119 			if (p[i] == i)
17120 				continue;
17121 
17122 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17123 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17124 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17125 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17126 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17127 				break;
17128 			} else {
17129 				dev_err(&tp->pdev->dev,
17130 					"%s: Buffer corrupted on read back! "
17131 					"(%d != %d)\n", __func__, p[i], i);
17132 				ret = -ENODEV;
17133 				goto out;
17134 			}
17135 		}
17136 
17137 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17138 			/* Success. */
17139 			ret = 0;
17140 			break;
17141 		}
17142 	}
17143 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17144 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17145 		/* DMA test passed without adjusting DMA boundary;
17146 		 * now look for chipsets that are known to expose the
17147 		 * DMA bug without failing the test.
17148 		 */
17149 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17150 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17151 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17152 		} else {
17153 			/* Safe to use the calculated DMA boundary. */
17154 			tp->dma_rwctrl = saved_dma_rwctrl;
17155 		}
17156 
17157 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17158 	}
17159 
17160 out:
17161 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17162 out_nofree:
17163 	return ret;
17164 }
17165 
17166 static void tg3_init_bufmgr_config(struct tg3 *tp)
17167 {
17168 	if (tg3_flag(tp, 57765_PLUS)) {
17169 		tp->bufmgr_config.mbuf_read_dma_low_water =
17170 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17171 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17172 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17173 		tp->bufmgr_config.mbuf_high_water =
17174 			DEFAULT_MB_HIGH_WATER_57765;
17175 
17176 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17177 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17178 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17179 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17180 		tp->bufmgr_config.mbuf_high_water_jumbo =
17181 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17182 	} else if (tg3_flag(tp, 5705_PLUS)) {
17183 		tp->bufmgr_config.mbuf_read_dma_low_water =
17184 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17185 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17186 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17187 		tp->bufmgr_config.mbuf_high_water =
17188 			DEFAULT_MB_HIGH_WATER_5705;
17189 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17190 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17191 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17192 			tp->bufmgr_config.mbuf_high_water =
17193 				DEFAULT_MB_HIGH_WATER_5906;
17194 		}
17195 
17196 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17197 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17198 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17199 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17200 		tp->bufmgr_config.mbuf_high_water_jumbo =
17201 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17202 	} else {
17203 		tp->bufmgr_config.mbuf_read_dma_low_water =
17204 			DEFAULT_MB_RDMA_LOW_WATER;
17205 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17206 			DEFAULT_MB_MACRX_LOW_WATER;
17207 		tp->bufmgr_config.mbuf_high_water =
17208 			DEFAULT_MB_HIGH_WATER;
17209 
17210 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17211 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17212 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17213 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17214 		tp->bufmgr_config.mbuf_high_water_jumbo =
17215 			DEFAULT_MB_HIGH_WATER_JUMBO;
17216 	}
17217 
17218 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17219 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17220 }
17221 
17222 static char *tg3_phy_string(struct tg3 *tp)
17223 {
17224 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17225 	case TG3_PHY_ID_BCM5400:	return "5400";
17226 	case TG3_PHY_ID_BCM5401:	return "5401";
17227 	case TG3_PHY_ID_BCM5411:	return "5411";
17228 	case TG3_PHY_ID_BCM5701:	return "5701";
17229 	case TG3_PHY_ID_BCM5703:	return "5703";
17230 	case TG3_PHY_ID_BCM5704:	return "5704";
17231 	case TG3_PHY_ID_BCM5705:	return "5705";
17232 	case TG3_PHY_ID_BCM5750:	return "5750";
17233 	case TG3_PHY_ID_BCM5752:	return "5752";
17234 	case TG3_PHY_ID_BCM5714:	return "5714";
17235 	case TG3_PHY_ID_BCM5780:	return "5780";
17236 	case TG3_PHY_ID_BCM5755:	return "5755";
17237 	case TG3_PHY_ID_BCM5787:	return "5787";
17238 	case TG3_PHY_ID_BCM5784:	return "5784";
17239 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17240 	case TG3_PHY_ID_BCM5906:	return "5906";
17241 	case TG3_PHY_ID_BCM5761:	return "5761";
17242 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17243 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17244 	case TG3_PHY_ID_BCM57765:	return "57765";
17245 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17246 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17247 	case TG3_PHY_ID_BCM5762:	return "5762C";
17248 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17249 	case 0:			return "serdes";
17250 	default:		return "unknown";
17251 	}
17252 }
17253 
17254 static char *tg3_bus_string(struct tg3 *tp, char *str)
17255 {
17256 	if (tg3_flag(tp, PCI_EXPRESS)) {
17257 		strcpy(str, "PCI Express");
17258 		return str;
17259 	} else if (tg3_flag(tp, PCIX_MODE)) {
17260 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17261 
17262 		strcpy(str, "PCIX:");
17263 
17264 		if ((clock_ctrl == 7) ||
17265 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17266 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17267 			strcat(str, "133MHz");
17268 		else if (clock_ctrl == 0)
17269 			strcat(str, "33MHz");
17270 		else if (clock_ctrl == 2)
17271 			strcat(str, "50MHz");
17272 		else if (clock_ctrl == 4)
17273 			strcat(str, "66MHz");
17274 		else if (clock_ctrl == 6)
17275 			strcat(str, "100MHz");
17276 	} else {
17277 		strcpy(str, "PCI:");
17278 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17279 			strcat(str, "66MHz");
17280 		else
17281 			strcat(str, "33MHz");
17282 	}
17283 	if (tg3_flag(tp, PCI_32BIT))
17284 		strcat(str, ":32-bit");
17285 	else
17286 		strcat(str, ":64-bit");
17287 	return str;
17288 }
17289 
17290 static void tg3_init_coal(struct tg3 *tp)
17291 {
17292 	struct ethtool_coalesce *ec = &tp->coal;
17293 
17294 	memset(ec, 0, sizeof(*ec));
17295 	ec->cmd = ETHTOOL_GCOALESCE;
17296 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17297 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17298 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17299 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17300 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17301 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17302 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17303 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17304 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17305 
17306 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17307 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17308 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17309 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17310 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17311 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17312 	}
17313 
17314 	if (tg3_flag(tp, 5705_PLUS)) {
17315 		ec->rx_coalesce_usecs_irq = 0;
17316 		ec->tx_coalesce_usecs_irq = 0;
17317 		ec->stats_block_coalesce_usecs = 0;
17318 	}
17319 }
17320 
17321 static int tg3_init_one(struct pci_dev *pdev,
17322 				  const struct pci_device_id *ent)
17323 {
17324 	struct net_device *dev;
17325 	struct tg3 *tp;
17326 	int i, err;
17327 	u32 sndmbx, rcvmbx, intmbx;
17328 	char str[40];
17329 	u64 dma_mask, persist_dma_mask;
17330 	netdev_features_t features = 0;
17331 
17332 	printk_once(KERN_INFO "%s\n", version);
17333 
17334 	err = pci_enable_device(pdev);
17335 	if (err) {
17336 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17337 		return err;
17338 	}
17339 
17340 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17341 	if (err) {
17342 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17343 		goto err_out_disable_pdev;
17344 	}
17345 
17346 	pci_set_master(pdev);
17347 
17348 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17349 	if (!dev) {
17350 		err = -ENOMEM;
17351 		goto err_out_free_res;
17352 	}
17353 
17354 	SET_NETDEV_DEV(dev, &pdev->dev);
17355 
17356 	tp = netdev_priv(dev);
17357 	tp->pdev = pdev;
17358 	tp->dev = dev;
17359 	tp->rx_mode = TG3_DEF_RX_MODE;
17360 	tp->tx_mode = TG3_DEF_TX_MODE;
17361 	tp->irq_sync = 1;
17362 
17363 	if (tg3_debug > 0)
17364 		tp->msg_enable = tg3_debug;
17365 	else
17366 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17367 
17368 	if (pdev_is_ssb_gige_core(pdev)) {
17369 		tg3_flag_set(tp, IS_SSB_CORE);
17370 		if (ssb_gige_must_flush_posted_writes(pdev))
17371 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17372 		if (ssb_gige_one_dma_at_once(pdev))
17373 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17374 		if (ssb_gige_have_roboswitch(pdev)) {
17375 			tg3_flag_set(tp, USE_PHYLIB);
17376 			tg3_flag_set(tp, ROBOSWITCH);
17377 		}
17378 		if (ssb_gige_is_rgmii(pdev))
17379 			tg3_flag_set(tp, RGMII_MODE);
17380 	}
17381 
17382 	/* The word/byte swap controls here control register access byte
17383 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17384 	 * setting below.
17385 	 */
17386 	tp->misc_host_ctrl =
17387 		MISC_HOST_CTRL_MASK_PCI_INT |
17388 		MISC_HOST_CTRL_WORD_SWAP |
17389 		MISC_HOST_CTRL_INDIR_ACCESS |
17390 		MISC_HOST_CTRL_PCISTATE_RW;
17391 
17392 	/* The NONFRM (non-frame) byte/word swap controls take effect
17393 	 * on descriptor entries, i.e. anything which isn't packet data.
17394 	 *
17395 	 * The StrongARM chips on the board (one for tx, one for rx)
17396 	 * are running in big-endian mode.
17397 	 */
17398 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17399 			GRC_MODE_WSWAP_NONFRM_DATA);
17400 #ifdef __BIG_ENDIAN
17401 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17402 #endif
17403 	spin_lock_init(&tp->lock);
17404 	spin_lock_init(&tp->indirect_lock);
17405 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17406 
17407 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17408 	if (!tp->regs) {
17409 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17410 		err = -ENOMEM;
17411 		goto err_out_free_dev;
17412 	}
17413 
17414 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17415 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17416 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17417 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17418 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17419 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17420 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17421 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17422 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17423 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17424 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17425 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17426 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17427 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17428 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17429 		tg3_flag_set(tp, ENABLE_APE);
17430 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17431 		if (!tp->aperegs) {
17432 			dev_err(&pdev->dev,
17433 				"Cannot map APE registers, aborting\n");
17434 			err = -ENOMEM;
17435 			goto err_out_iounmap;
17436 		}
17437 	}
17438 
17439 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17440 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17441 
17442 	dev->ethtool_ops = &tg3_ethtool_ops;
17443 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17444 	dev->netdev_ops = &tg3_netdev_ops;
17445 	dev->irq = pdev->irq;
17446 
17447 	err = tg3_get_invariants(tp, ent);
17448 	if (err) {
17449 		dev_err(&pdev->dev,
17450 			"Problem fetching invariants of chip, aborting\n");
17451 		goto err_out_apeunmap;
17452 	}
17453 
17454 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17455 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17456 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17457 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17458 	 * do DMA address check in tg3_start_xmit().
17459 	 */
17460 	if (tg3_flag(tp, IS_5788))
17461 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17462 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17463 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17464 #ifdef CONFIG_HIGHMEM
17465 		dma_mask = DMA_BIT_MASK(64);
17466 #endif
17467 	} else
17468 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17469 
17470 	/* Configure DMA attributes. */
17471 	if (dma_mask > DMA_BIT_MASK(32)) {
17472 		err = pci_set_dma_mask(pdev, dma_mask);
17473 		if (!err) {
17474 			features |= NETIF_F_HIGHDMA;
17475 			err = pci_set_consistent_dma_mask(pdev,
17476 							  persist_dma_mask);
17477 			if (err < 0) {
17478 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17479 					"DMA for consistent allocations\n");
17480 				goto err_out_apeunmap;
17481 			}
17482 		}
17483 	}
17484 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17485 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17486 		if (err) {
17487 			dev_err(&pdev->dev,
17488 				"No usable DMA configuration, aborting\n");
17489 			goto err_out_apeunmap;
17490 		}
17491 	}
17492 
17493 	tg3_init_bufmgr_config(tp);
17494 
17495 	features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17496 
17497 	/* 5700 B0 chips do not support checksumming correctly due
17498 	 * to hardware bugs.
17499 	 */
17500 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17501 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17502 
17503 		if (tg3_flag(tp, 5755_PLUS))
17504 			features |= NETIF_F_IPV6_CSUM;
17505 	}
17506 
17507 	/* TSO is on by default on chips that support hardware TSO.
17508 	 * Firmware TSO on older chips gives lower performance, so it
17509 	 * is off by default, but can be enabled using ethtool.
17510 	 */
17511 	if ((tg3_flag(tp, HW_TSO_1) ||
17512 	     tg3_flag(tp, HW_TSO_2) ||
17513 	     tg3_flag(tp, HW_TSO_3)) &&
17514 	    (features & NETIF_F_IP_CSUM))
17515 		features |= NETIF_F_TSO;
17516 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17517 		if (features & NETIF_F_IPV6_CSUM)
17518 			features |= NETIF_F_TSO6;
17519 		if (tg3_flag(tp, HW_TSO_3) ||
17520 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17521 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17522 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17523 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17524 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17525 			features |= NETIF_F_TSO_ECN;
17526 	}
17527 
17528 	dev->features |= features;
17529 	dev->vlan_features |= features;
17530 
17531 	/*
17532 	 * Add loopback capability only for a subset of devices that support
17533 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17534 	 * loopback for the remaining devices.
17535 	 */
17536 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17537 	    !tg3_flag(tp, CPMU_PRESENT))
17538 		/* Add the loopback capability */
17539 		features |= NETIF_F_LOOPBACK;
17540 
17541 	dev->hw_features |= features;
17542 
17543 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17544 	    !tg3_flag(tp, TSO_CAPABLE) &&
17545 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17546 		tg3_flag_set(tp, MAX_RXPEND_64);
17547 		tp->rx_pending = 63;
17548 	}
17549 
17550 	err = tg3_get_device_address(tp);
17551 	if (err) {
17552 		dev_err(&pdev->dev,
17553 			"Could not obtain valid ethernet address, aborting\n");
17554 		goto err_out_apeunmap;
17555 	}
17556 
17557 	/*
17558 	 * Reset the chip in case the UNDI or EFI driver did not shut it
17559 	 * down cleanly; otherwise the DMA self test will enable WDMAC and
17560 	 * we'll see (spurious) pending DMA on the PCI bus at that point.
17561 	 */
17562 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17563 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17564 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17565 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17566 	}
17567 
17568 	err = tg3_test_dma(tp);
17569 	if (err) {
17570 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17571 		goto err_out_apeunmap;
17572 	}
17573 
17574 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17575 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17576 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17577 	for (i = 0; i < tp->irq_max; i++) {
17578 		struct tg3_napi *tnapi = &tp->napi[i];
17579 
17580 		tnapi->tp = tp;
17581 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17582 
17583 		tnapi->int_mbox = intmbx;
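		/* The first interrupt mailboxes are full 64-bit registers
		 * spaced 8 bytes apart; later vectors appear to map onto
		 * packed 32-bit mailboxes, hence the smaller stride.
		 */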
17584 		if (i <= 4)
17585 			intmbx += 0x8;
17586 		else
17587 			intmbx += 0x4;
17588 
17589 		tnapi->consmbox = rcvmbx;
17590 		tnapi->prodmbox = sndmbx;
17591 
17592 		if (i)
17593 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17594 		else
17595 			tnapi->coal_now = HOSTCC_MODE_NOW;
17596 
17597 		if (!tg3_flag(tp, SUPPORT_MSIX))
17598 			break;
17599 
17600 		/*
17601 		 * If we support MSIX, we'll be using RSS.  If we're using
17602 		 * RSS, the first vector only handles link interrupts and the
17603 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17604 		 * mailbox values for the next iteration.  The values we set up
17605 		 * above are still useful for the single vectored mode.
17606 		 */
17607 		if (!i)
17608 			continue;
17609 
17610 		rcvmbx += 0x8;
17611 
17612 		if (sndmbx & 0x4)
17613 			sndmbx -= 0x4;
17614 		else
17615 			sndmbx += 0xc;
17616 	}
17617 
17618 	tg3_init_coal(tp);
17619 
17620 	pci_set_drvdata(pdev, dev);
17621 
17622 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17623 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17624 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17625 		tg3_flag_set(tp, PTP_CAPABLE);
17626 
17627 	tg3_timer_init(tp);
17628 
17629 	tg3_carrier_off(tp);
17630 
17631 	err = register_netdev(dev);
17632 	if (err) {
17633 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17634 		goto err_out_apeunmap;
17635 	}
17636 
17637 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17638 		    tp->board_part_number,
17639 		    tg3_chip_rev_id(tp),
17640 		    tg3_bus_string(tp, str),
17641 		    dev->dev_addr);
17642 
17643 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17644 		struct phy_device *phydev;
17645 		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17646 		netdev_info(dev,
17647 			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17648 			    phydev->drv->name, dev_name(&phydev->dev));
17649 	} else {
17650 		char *ethtype;
17651 
17652 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17653 			ethtype = "10/100Base-TX";
17654 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17655 			ethtype = "1000Base-SX";
17656 		else
17657 			ethtype = "10/100/1000Base-T";
17658 
17659 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17660 			    "(WireSpeed[%d], EEE[%d])\n",
17661 			    tg3_phy_string(tp), ethtype,
17662 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17663 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17664 	}
17665 
17666 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17667 		    (dev->features & NETIF_F_RXCSUM) != 0,
17668 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17669 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17670 		    tg3_flag(tp, ENABLE_ASF) != 0,
17671 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17672 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17673 		    tp->dma_rwctrl,
17674 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17675 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17676 
17677 	pci_save_state(pdev);
17678 
17679 	return 0;
17680 
17681 err_out_apeunmap:
17682 	if (tp->aperegs) {
17683 		iounmap(tp->aperegs);
17684 		tp->aperegs = NULL;
17685 	}
17686 
17687 err_out_iounmap:
17688 	if (tp->regs) {
17689 		iounmap(tp->regs);
17690 		tp->regs = NULL;
17691 	}
17692 
17693 err_out_free_dev:
17694 	free_netdev(dev);
17695 
17696 err_out_free_res:
17697 	pci_release_regions(pdev);
17698 
17699 err_out_disable_pdev:
17700 	if (pci_is_enabled(pdev))
17701 		pci_disable_device(pdev);
17702 	return err;
17703 }
17704 
17705 static void tg3_remove_one(struct pci_dev *pdev)
17706 {
17707 	struct net_device *dev = pci_get_drvdata(pdev);
17708 
17709 	if (dev) {
17710 		struct tg3 *tp = netdev_priv(dev);
17711 
17712 		release_firmware(tp->fw);
17713 
17714 		tg3_reset_task_cancel(tp);
17715 
17716 		if (tg3_flag(tp, USE_PHYLIB)) {
17717 			tg3_phy_fini(tp);
17718 			tg3_mdio_fini(tp);
17719 		}
17720 
17721 		unregister_netdev(dev);
17722 		if (tp->aperegs) {
17723 			iounmap(tp->aperegs);
17724 			tp->aperegs = NULL;
17725 		}
17726 		if (tp->regs) {
17727 			iounmap(tp->regs);
17728 			tp->regs = NULL;
17729 		}
17730 		free_netdev(dev);
17731 		pci_release_regions(pdev);
17732 		pci_disable_device(pdev);
17733 	}
17734 }
17735 
17736 #ifdef CONFIG_PM_SLEEP
17737 static int tg3_suspend(struct device *device)
17738 {
17739 	struct pci_dev *pdev = to_pci_dev(device);
17740 	struct net_device *dev = pci_get_drvdata(pdev);
17741 	struct tg3 *tp = netdev_priv(dev);
17742 	int err;
17743 
17744 	if (!netif_running(dev))
17745 		return 0;
17746 
17747 	tg3_reset_task_cancel(tp);
17748 	tg3_phy_stop(tp);
17749 	tg3_netif_stop(tp);
17750 
17751 	tg3_timer_stop(tp);
17752 
17753 	tg3_full_lock(tp, 1);
17754 	tg3_disable_ints(tp);
17755 	tg3_full_unlock(tp);
17756 
17757 	netif_device_detach(dev);
17758 
17759 	tg3_full_lock(tp, 0);
17760 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17761 	tg3_flag_clear(tp, INIT_COMPLETE);
17762 	tg3_full_unlock(tp);
17763 
17764 	err = tg3_power_down_prepare(tp);
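	/* If preparing for power-down failed, bring the hardware back up
	 * so the device is left in a usable state.
	 */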
17765 	if (err) {
17766 		int err2;
17767 
17768 		tg3_full_lock(tp, 0);
17769 
17770 		tg3_flag_set(tp, INIT_COMPLETE);
17771 		err2 = tg3_restart_hw(tp, true);
17772 		if (err2)
17773 			goto out;
17774 
17775 		tg3_timer_start(tp);
17776 
17777 		netif_device_attach(dev);
17778 		tg3_netif_start(tp);
17779 
17780 out:
17781 		tg3_full_unlock(tp);
17782 
17783 		if (!err2)
17784 			tg3_phy_start(tp);
17785 	}
17786 
17787 	return err;
17788 }
17789 
17790 static int tg3_resume(struct device *device)
17791 {
17792 	struct pci_dev *pdev = to_pci_dev(device);
17793 	struct net_device *dev = pci_get_drvdata(pdev);
17794 	struct tg3 *tp = netdev_priv(dev);
17795 	int err;
17796 
17797 	if (!netif_running(dev))
17798 		return 0;
17799 
17800 	netif_device_attach(dev);
17801 
17802 	tg3_full_lock(tp, 0);
17803 
17804 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17805 
17806 	tg3_flag_set(tp, INIT_COMPLETE);
17807 	err = tg3_restart_hw(tp,
17808 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17809 	if (err)
17810 		goto out;
17811 
17812 	tg3_timer_start(tp);
17813 
17814 	tg3_netif_start(tp);
17815 
17816 out:
17817 	tg3_full_unlock(tp);
17818 
17819 	if (!err)
17820 		tg3_phy_start(tp);
17821 
17822 	return err;
17823 }
17824 #endif /* CONFIG_PM_SLEEP */
17825 
17826 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17827 
17828 static void tg3_shutdown(struct pci_dev *pdev)
17829 {
17830 	struct net_device *dev = pci_get_drvdata(pdev);
17831 	struct tg3 *tp = netdev_priv(dev);
17832 
17833 	rtnl_lock();
17834 	netif_device_detach(dev);
17835 
17836 	if (netif_running(dev))
17837 		dev_close(dev);
17838 
17839 	if (system_state == SYSTEM_POWER_OFF)
17840 		tg3_power_down(tp);
17841 
17842 	rtnl_unlock();
17843 }
17844 
17845 /**
17846  * tg3_io_error_detected - called when PCI error is detected
17847  * @pdev: Pointer to PCI device
17848  * @state: The current pci connection state
17849  *
17850  * This function is called after a PCI bus error affecting
17851  * this device has been detected.
17852  */
17853 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17854 					      pci_channel_state_t state)
17855 {
17856 	struct net_device *netdev = pci_get_drvdata(pdev);
17857 	struct tg3 *tp = netdev_priv(netdev);
17858 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17859 
17860 	netdev_info(netdev, "PCI I/O error detected\n");
17861 
17862 	rtnl_lock();
17863 
17864 	/* We probably don't have netdev yet */
17865 	if (!netdev || !netif_running(netdev))
17866 		goto done;
17867 
17868 	tg3_phy_stop(tp);
17869 
17870 	tg3_netif_stop(tp);
17871 
17872 	tg3_timer_stop(tp);
17873 
17874 	/* Want to make sure that the reset task doesn't run */
17875 	tg3_reset_task_cancel(tp);
17876 
17877 	netif_device_detach(netdev);
17878 
17879 	/* Clean up software state, even if MMIO is blocked */
17880 	tg3_full_lock(tp, 0);
17881 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17882 	tg3_full_unlock(tp);
17883 
17884 done:
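	/* A permanent failure means the device will not come back; close
	 * it.  Otherwise disable it and wait for the slot reset callback.
	 */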
17885 	if (state == pci_channel_io_perm_failure) {
17886 		if (netdev) {
17887 			tg3_napi_enable(tp);
17888 			dev_close(netdev);
17889 		}
17890 		err = PCI_ERS_RESULT_DISCONNECT;
17891 	} else {
17892 		pci_disable_device(pdev);
17893 	}
17894 
17895 	rtnl_unlock();
17896 
17897 	return err;
17898 }
17899 
17900 /**
17901  * tg3_io_slot_reset - called after the pci bus has been reset.
17902  * @pdev: Pointer to PCI device
17903  *
17904  * Restart the card from scratch, as if from a cold-boot.
17905  * At this point, the card has experienced a hard reset,
17906  * followed by fixups by BIOS, and has its config space
17907  * set up identically to what it was at cold boot.
17908  */
17909 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17910 {
17911 	struct net_device *netdev = pci_get_drvdata(pdev);
17912 	struct tg3 *tp = netdev_priv(netdev);
17913 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17914 	int err;
17915 
17916 	rtnl_lock();
17917 
17918 	if (pci_enable_device(pdev)) {
17919 		dev_err(&pdev->dev,
17920 			"Cannot re-enable PCI device after reset.\n");
17921 		goto done;
17922 	}
17923 
17924 	pci_set_master(pdev);
17925 	pci_restore_state(pdev);
17926 	pci_save_state(pdev);
17927 
17928 	if (!netdev || !netif_running(netdev)) {
17929 		rc = PCI_ERS_RESULT_RECOVERED;
17930 		goto done;
17931 	}
17932 
17933 	err = tg3_power_up(tp);
17934 	if (err)
17935 		goto done;
17936 
17937 	rc = PCI_ERS_RESULT_RECOVERED;
17938 
17939 done:
17940 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
17941 		tg3_napi_enable(tp);
17942 		dev_close(netdev);
17943 	}
17944 	rtnl_unlock();
17945 
17946 	return rc;
17947 }
17948 
17949 /**
17950  * tg3_io_resume - called when traffic can start flowing again.
17951  * @pdev: Pointer to PCI device
17952  *
17953  * This callback is called when the error recovery driver tells
17954  * us that it's OK to resume normal operation.
17955  */
17956 static void tg3_io_resume(struct pci_dev *pdev)
17957 {
17958 	struct net_device *netdev = pci_get_drvdata(pdev);
17959 	struct tg3 *tp = netdev_priv(netdev);
17960 	int err;
17961 
17962 	rtnl_lock();
17963 
17964 	if (!netif_running(netdev))
17965 		goto done;
17966 
17967 	tg3_full_lock(tp, 0);
17968 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17969 	tg3_flag_set(tp, INIT_COMPLETE);
17970 	err = tg3_restart_hw(tp, true);
17971 	if (err) {
17972 		tg3_full_unlock(tp);
17973 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
17974 		goto done;
17975 	}
17976 
17977 	netif_device_attach(netdev);
17978 
17979 	tg3_timer_start(tp);
17980 
17981 	tg3_netif_start(tp);
17982 
17983 	tg3_full_unlock(tp);
17984 
17985 	tg3_phy_start(tp);
17986 
17987 done:
17988 	rtnl_unlock();
17989 }
17990 
17991 static const struct pci_error_handlers tg3_err_handler = {
17992 	.error_detected	= tg3_io_error_detected,
17993 	.slot_reset	= tg3_io_slot_reset,
17994 	.resume		= tg3_io_resume
17995 };
17996 
17997 static struct pci_driver tg3_driver = {
17998 	.name		= DRV_MODULE_NAME,
17999 	.id_table	= tg3_pci_tbl,
18000 	.probe		= tg3_init_one,
18001 	.remove		= tg3_remove_one,
18002 	.err_handler	= &tg3_err_handler,
18003 	.driver.pm	= &tg3_pm_ops,
18004 	.shutdown	= tg3_shutdown,
18005 };
18006 
18007 module_pci_driver(tg3_driver);
18008