1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 
59 #include <net/checksum.h>
60 #include <net/gso.h>
61 #include <net/ip.h>
62 
63 #include <linux/io.h>
64 #include <asm/byteorder.h>
65 #include <linux/uaccess.h>
66 
67 #include <uapi/linux/net_tstamp.h>
68 #include <linux/ptp_clock_kernel.h>
69 
70 #define BAR_0	0
71 #define BAR_2	2
72 
73 #include "tg3.h"
74 
75 /* Inline helpers & macros giving type-checked access to the TG3_FLAGS bits */
76 
77 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79 	return test_bit(flag, bits);
80 }
81 
82 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84 	set_bit(flag, bits);
85 }
86 
87 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 {
89 	clear_bit(flag, bits);
90 }
91 
92 #define tg3_flag(tp, flag)				\
93 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define tg3_flag_set(tp, flag)				\
95 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
96 #define tg3_flag_clear(tp, flag)			\
97 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
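/* A minimal usage sketch for the flag helpers above (illustrative call
 * sites, not code taken from this driver):
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *	else
 *		tg3_flag_clear(tp, MDIOBUS_INITED);
 *
 * The token pasting expands the short name to the full TG3_FLAG_*
 * enumerator, and the inline helpers route it through the generic
 * test_bit()/set_bit()/clear_bit() bitmap operations on tp->tg3_flags.
 */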
98 
99 #define DRV_MODULE_NAME		"tg3"
100 /* DO NOT UPDATE TG3_*_NUM defines */
101 #define TG3_MAJ_NUM			3
102 #define TG3_MIN_NUM			137
103 
104 #define RESET_KIND_SHUTDOWN	0
105 #define RESET_KIND_INIT		1
106 #define RESET_KIND_SUSPEND	2
107 
108 #define TG3_DEF_RX_MODE		0
109 #define TG3_DEF_TX_MODE		0
110 #define TG3_DEF_MSG_ENABLE	  \
111 	(NETIF_MSG_DRV		| \
112 	 NETIF_MSG_PROBE	| \
113 	 NETIF_MSG_LINK		| \
114 	 NETIF_MSG_TIMER	| \
115 	 NETIF_MSG_IFDOWN	| \
116 	 NETIF_MSG_IFUP		| \
117 	 NETIF_MSG_RX_ERR	| \
118 	 NETIF_MSG_TX_ERR)
119 
120 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
121 
122 /* Length of time before we decide the hardware is borked,
123  * and dev->tx_timeout() should be called to fix the problem.
124  */
125 
126 #define TG3_TX_TIMEOUT			(5 * HZ)
127 
128 /* hardware minimum and maximum for a single frame's data payload */
129 #define TG3_MIN_MTU			ETH_ZLEN
130 #define TG3_MAX_MTU(tp)	\
131 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
132 
133 /* These numbers seem to be hard coded in the NIC firmware somehow.
134  * You can't change the ring sizes, but you can change where you place
135  * them in the NIC onboard memory.
136  */
137 #define TG3_RX_STD_RING_SIZE(tp) \
138 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
140 #define TG3_DEF_RX_RING_PENDING		200
141 #define TG3_RX_JMB_RING_SIZE(tp) \
142 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
143 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
144 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
145 
146 /* Do not place this n-ring-entries value into the tp struct itself;
147  * we really want to expose these constants to GCC so that modulo et
148  * al. operations are done with shifts and masks instead of with
149  * hw multiply/modulo instructions.  Another solution would be to
150  * replace things like '% foo' with '& (foo - 1)'.
151  */
152 
153 #define TG3_TX_RING_SIZE		512
154 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
155 
156 #define TG3_RX_STD_RING_BYTES(tp) \
157 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
158 #define TG3_RX_JMB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
160 #define TG3_RX_RCB_RING_BYTES(tp) \
161 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
162 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
163 				 TG3_TX_RING_SIZE)
164 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
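/* NEXT_TX() relies on TG3_TX_RING_SIZE being a power of two, so the wrap
 * is a single AND with (size - 1) rather than a '%' operation.  A small
 * illustrative equivalence (not driver code):
 *
 *	(511 + 1) % 512         == 0	// may need a hw divide/modulo
 *	(511 + 1) & (512 - 1)   == 0	// always a single AND
 *
 * The two forms agree for any index when the ring size is a power of
 * two, which is why the constant is kept visible to the compiler.
 */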
165 
166 #define TG3_DMA_BYTE_ENAB		64
167 
168 #define TG3_RX_STD_DMA_SZ		1536
169 #define TG3_RX_JMB_DMA_SZ		9046
170 
171 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
172 
173 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
174 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
175 
176 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
177 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
178 
179 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
180 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
181 
182 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
183  * that are at least dword aligned when used in PCIX mode.  The driver
184  * works around this bug by double copying the packet.  This workaround
185  * is built into the normal double copy length check for efficiency.
186  *
187  * However, the double copy is only necessary on those architectures
188  * where unaligned memory accesses are inefficient.  For those architectures
189  * where unaligned memory accesses incur little penalty, we can reintegrate
190  * the 5701 into the normal rx path.  Doing so saves a device structure
191  * dereference by hardcoding the double copy threshold in place.
192  */
193 #define TG3_RX_COPY_THRESHOLD		256
194 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
195 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
196 #else
197 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
198 #endif
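/* A hedged sketch of how a copy threshold like this is typically applied
 * in an rx path (an assumption for illustration, not this driver's code):
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp)) {
 *		// small packet: copy into a fresh skb and recycle the
 *		// DMA buffer in place
 *	} else {
 *		// large packet: unmap the buffer and pass it up whole
 *	}
 *
 * When unaligned accesses are cheap, the macro collapses to the
 * compile-time constant and the check costs no extra load from tp.
 */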
199 
200 #if (NET_IP_ALIGN != 0)
201 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
202 #else
203 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
204 #endif
205 
206 /* minimum number of free TX descriptors required to wake up TX process */
207 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
208 #define TG3_TX_BD_DMA_MAX_2K		2048
209 #define TG3_TX_BD_DMA_MAX_4K		4096
210 
211 #define TG3_RAW_IP_ALIGN 2
212 
213 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
214 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
215 
216 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
217 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
218 
219 #define FIRMWARE_TG3		"tigon/tg3.bin"
220 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
221 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
222 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
223 
224 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
225 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
226 MODULE_LICENSE("GPL");
227 MODULE_FIRMWARE(FIRMWARE_TG3);
228 MODULE_FIRMWARE(FIRMWARE_TG357766);
229 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
231 
232 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
233 module_param(tg3_debug, int, 0);
234 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
235 
236 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
237 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
238 
239 static const struct pci_device_id tg3_pci_tbl[] = {
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
259 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 			TG3_DRV_DATA_FLAG_5705_10_100},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
262 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 			TG3_DRV_DATA_FLAG_5705_10_100},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
266 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 			TG3_DRV_DATA_FLAG_5705_10_100},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
274 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
280 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
288 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
289 			PCI_VENDOR_ID_LENOVO,
290 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
291 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
294 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
313 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
315 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
317 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
318 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
322 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
334 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
354 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
355 	{}
356 };
357 
358 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
359 
360 static const struct {
361 	const char string[ETH_GSTRING_LEN];
362 } ethtool_stats_keys[] = {
363 	{ "rx_octets" },
364 	{ "rx_fragments" },
365 	{ "rx_ucast_packets" },
366 	{ "rx_mcast_packets" },
367 	{ "rx_bcast_packets" },
368 	{ "rx_fcs_errors" },
369 	{ "rx_align_errors" },
370 	{ "rx_xon_pause_rcvd" },
371 	{ "rx_xoff_pause_rcvd" },
372 	{ "rx_mac_ctrl_rcvd" },
373 	{ "rx_xoff_entered" },
374 	{ "rx_frame_too_long_errors" },
375 	{ "rx_jabbers" },
376 	{ "rx_undersize_packets" },
377 	{ "rx_in_length_errors" },
378 	{ "rx_out_length_errors" },
379 	{ "rx_64_or_less_octet_packets" },
380 	{ "rx_65_to_127_octet_packets" },
381 	{ "rx_128_to_255_octet_packets" },
382 	{ "rx_256_to_511_octet_packets" },
383 	{ "rx_512_to_1023_octet_packets" },
384 	{ "rx_1024_to_1522_octet_packets" },
385 	{ "rx_1523_to_2047_octet_packets" },
386 	{ "rx_2048_to_4095_octet_packets" },
387 	{ "rx_4096_to_8191_octet_packets" },
388 	{ "rx_8192_to_9022_octet_packets" },
389 
390 	{ "tx_octets" },
391 	{ "tx_collisions" },
392 
393 	{ "tx_xon_sent" },
394 	{ "tx_xoff_sent" },
395 	{ "tx_flow_control" },
396 	{ "tx_mac_errors" },
397 	{ "tx_single_collisions" },
398 	{ "tx_mult_collisions" },
399 	{ "tx_deferred" },
400 	{ "tx_excessive_collisions" },
401 	{ "tx_late_collisions" },
402 	{ "tx_collide_2times" },
403 	{ "tx_collide_3times" },
404 	{ "tx_collide_4times" },
405 	{ "tx_collide_5times" },
406 	{ "tx_collide_6times" },
407 	{ "tx_collide_7times" },
408 	{ "tx_collide_8times" },
409 	{ "tx_collide_9times" },
410 	{ "tx_collide_10times" },
411 	{ "tx_collide_11times" },
412 	{ "tx_collide_12times" },
413 	{ "tx_collide_13times" },
414 	{ "tx_collide_14times" },
415 	{ "tx_collide_15times" },
416 	{ "tx_ucast_packets" },
417 	{ "tx_mcast_packets" },
418 	{ "tx_bcast_packets" },
419 	{ "tx_carrier_sense_errors" },
420 	{ "tx_discards" },
421 	{ "tx_errors" },
422 
423 	{ "dma_writeq_full" },
424 	{ "dma_write_prioq_full" },
425 	{ "rxbds_empty" },
426 	{ "rx_discards" },
427 	{ "rx_errors" },
428 	{ "rx_threshold_hit" },
429 
430 	{ "dma_readq_full" },
431 	{ "dma_read_prioq_full" },
432 	{ "tx_comp_queue_full" },
433 
434 	{ "ring_set_send_prod_index" },
435 	{ "ring_status_update" },
436 	{ "nic_irqs" },
437 	{ "nic_avoided_irqs" },
438 	{ "nic_tx_threshold_hit" },
439 
440 	{ "mbuf_lwm_thresh_hit" },
441 };
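/* These strings are what ethtool -S reports for the device.  Their order
 * is expected to line up with the order in which the driver copies out
 * the corresponding hardware counters, so entries should not be
 * reordered in isolation.
 */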
442 
443 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
444 #define TG3_NVRAM_TEST		0
445 #define TG3_LINK_TEST		1
446 #define TG3_REGISTER_TEST	2
447 #define TG3_MEMORY_TEST		3
448 #define TG3_MAC_LOOPB_TEST	4
449 #define TG3_PHY_LOOPB_TEST	5
450 #define TG3_EXT_LOOPB_TEST	6
451 #define TG3_INTERRUPT_TEST	7
452 
453 
454 static const struct {
455 	const char string[ETH_GSTRING_LEN];
456 } ethtool_test_keys[] = {
457 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
458 	[TG3_LINK_TEST]		= { "link test         (online) " },
459 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
460 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
461 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
462 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
463 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
464 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
465 };
466 
467 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
468 
469 
470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
471 {
472 	writel(val, tp->regs + off);
473 }
474 
475 static u32 tg3_read32(struct tg3 *tp, u32 off)
476 {
477 	return readl(tp->regs + off);
478 }
479 
480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
481 {
482 	writel(val, tp->aperegs + off);
483 }
484 
485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
486 {
487 	return readl(tp->aperegs + off);
488 }
489 
490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
491 {
492 	unsigned long flags;
493 
494 	spin_lock_irqsave(&tp->indirect_lock, flags);
495 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
496 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
497 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
498 }
499 
500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
501 {
502 	writel(val, tp->regs + off);
503 	readl(tp->regs + off);
504 }
505 
506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
507 {
508 	unsigned long flags;
509 	u32 val;
510 
511 	spin_lock_irqsave(&tp->indirect_lock, flags);
512 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
513 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
514 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 	return val;
516 }
517 
518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
519 {
520 	unsigned long flags;
521 
522 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
523 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
524 				       TG3_64BIT_REG_LOW, val);
525 		return;
526 	}
527 	if (off == TG3_RX_STD_PROD_IDX_REG) {
528 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
529 				       TG3_64BIT_REG_LOW, val);
530 		return;
531 	}
532 
533 	spin_lock_irqsave(&tp->indirect_lock, flags);
534 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
535 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
536 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
537 
538 	/* In indirect mode when disabling interrupts, we also need
539 	 * to clear the interrupt bit in the GRC local ctrl register.
540 	 */
541 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
542 	    (val == 0x1)) {
543 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
544 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
545 	}
546 }
547 
548 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
549 {
550 	unsigned long flags;
551 	u32 val;
552 
553 	spin_lock_irqsave(&tp->indirect_lock, flags);
554 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
555 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
556 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 	return val;
558 }
559 
560 /* usec_wait specifies the wait time in usec when writing to certain registers
561  * where it is unsafe to read back the register without some delay.
562  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
563  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
564  */
565 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
566 {
567 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
568 		/* Non-posted methods */
569 		tp->write32(tp, off, val);
570 	else {
571 		/* Posted method */
572 		tg3_write32(tp, off, val);
573 		if (usec_wait)
574 			udelay(usec_wait);
575 		tp->read32(tp, off);
576 	}
577 	/* Wait again after the read for the posted method to guarantee that
578 	 * the wait time is met.
579 	 */
580 	if (usec_wait)
581 		udelay(usec_wait);
582 }
583 
584 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
585 {
586 	tp->write32_mbox(tp, off, val);
587 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
588 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
589 	     !tg3_flag(tp, ICH_WORKAROUND)))
590 		tp->read32_mbox(tp, off);
591 }
592 
593 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
594 {
595 	void __iomem *mbox = tp->regs + off;
596 	writel(val, mbox);
597 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
598 		writel(val, mbox);
599 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
600 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
601 		readl(mbox);
602 }
603 
604 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
605 {
606 	return readl(tp->regs + off + GRCMBOX_BASE);
607 }
608 
609 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
610 {
611 	writel(val, tp->regs + off + GRCMBOX_BASE);
612 }
613 
614 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
615 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
616 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
617 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
618 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
619 
620 #define tw32(reg, val)			tp->write32(tp, reg, val)
621 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
622 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
623 #define tr32(reg)			tp->read32(tp, reg)
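/* Usage sketch for the accessor macros above (register chosen only for
 * illustration; a 'struct tg3 *tp' must be in scope):
 *
 *	u32 val = tr32(GRC_MODE);		// plain read
 *	tw32(GRC_MODE, val);			// posted write
 *	tw32_f(GRC_MODE, val);			// write, then read back to flush
 *	tw32_wait_f(GRC_MODE, val, 40);		// flush and wait 40 usec
 *
 * Routing everything through tp->read32/tp->write32 lets probe-time code
 * substitute the indirect config-space accessors on chips with PCI-X or
 * ICH posting bugs.
 */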
624 
625 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
626 {
627 	unsigned long flags;
628 
629 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
630 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
631 		return;
632 
633 	spin_lock_irqsave(&tp->indirect_lock, flags);
634 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
635 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
636 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
637 
638 		/* Always leave this as zero. */
639 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 	} else {
641 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
642 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
643 
644 		/* Always leave this as zero. */
645 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
646 	}
647 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
648 }
649 
650 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
651 {
652 	unsigned long flags;
653 
654 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
655 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
656 		*val = 0;
657 		return;
658 	}
659 
660 	spin_lock_irqsave(&tp->indirect_lock, flags);
661 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
662 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
663 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
664 
665 		/* Always leave this as zero. */
666 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 	} else {
668 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
669 		*val = tr32(TG3PCI_MEM_WIN_DATA);
670 
671 		/* Always leave this as zero. */
672 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
673 	}
674 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
675 }
676 
677 static void tg3_ape_lock_init(struct tg3 *tp)
678 {
679 	int i;
680 	u32 regbase, bit;
681 
682 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
683 		regbase = TG3_APE_LOCK_GRANT;
684 	else
685 		regbase = TG3_APE_PER_LOCK_GRANT;
686 
687 	/* Make sure the driver doesn't hold any stale locks. */
688 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
689 		switch (i) {
690 		case TG3_APE_LOCK_PHY0:
691 		case TG3_APE_LOCK_PHY1:
692 		case TG3_APE_LOCK_PHY2:
693 		case TG3_APE_LOCK_PHY3:
694 			bit = APE_LOCK_GRANT_DRIVER;
695 			break;
696 		default:
697 			if (!tp->pci_fn)
698 				bit = APE_LOCK_GRANT_DRIVER;
699 			else
700 				bit = 1 << tp->pci_fn;
701 		}
702 		tg3_ape_write32(tp, regbase + 4 * i, bit);
703 	}
704 
705 }
706 
707 static int tg3_ape_lock(struct tg3 *tp, int locknum)
708 {
709 	int i, off;
710 	int ret = 0;
711 	u32 status, req, gnt, bit;
712 
713 	if (!tg3_flag(tp, ENABLE_APE))
714 		return 0;
715 
716 	switch (locknum) {
717 	case TG3_APE_LOCK_GPIO:
718 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
719 			return 0;
720 		fallthrough;
721 	case TG3_APE_LOCK_GRC:
722 	case TG3_APE_LOCK_MEM:
723 		if (!tp->pci_fn)
724 			bit = APE_LOCK_REQ_DRIVER;
725 		else
726 			bit = 1 << tp->pci_fn;
727 		break;
728 	case TG3_APE_LOCK_PHY0:
729 	case TG3_APE_LOCK_PHY1:
730 	case TG3_APE_LOCK_PHY2:
731 	case TG3_APE_LOCK_PHY3:
732 		bit = APE_LOCK_REQ_DRIVER;
733 		break;
734 	default:
735 		return -EINVAL;
736 	}
737 
738 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739 		req = TG3_APE_LOCK_REQ;
740 		gnt = TG3_APE_LOCK_GRANT;
741 	} else {
742 		req = TG3_APE_PER_LOCK_REQ;
743 		gnt = TG3_APE_PER_LOCK_GRANT;
744 	}
745 
746 	off = 4 * locknum;
747 
748 	tg3_ape_write32(tp, req + off, bit);
749 
750 	/* Wait for up to 1 millisecond to acquire lock. */
751 	for (i = 0; i < 100; i++) {
752 		status = tg3_ape_read32(tp, gnt + off);
753 		if (status == bit)
754 			break;
755 		if (pci_channel_offline(tp->pdev))
756 			break;
757 
758 		udelay(10);
759 	}
760 
761 	if (status != bit) {
762 		/* Revoke the lock request. */
763 		tg3_ape_write32(tp, gnt + off, bit);
764 		ret = -EBUSY;
765 	}
766 
767 	return ret;
768 }
769 
770 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771 {
772 	u32 gnt, bit;
773 
774 	if (!tg3_flag(tp, ENABLE_APE))
775 		return;
776 
777 	switch (locknum) {
778 	case TG3_APE_LOCK_GPIO:
779 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
780 			return;
781 		fallthrough;
782 	case TG3_APE_LOCK_GRC:
783 	case TG3_APE_LOCK_MEM:
784 		if (!tp->pci_fn)
785 			bit = APE_LOCK_GRANT_DRIVER;
786 		else
787 			bit = 1 << tp->pci_fn;
788 		break;
789 	case TG3_APE_LOCK_PHY0:
790 	case TG3_APE_LOCK_PHY1:
791 	case TG3_APE_LOCK_PHY2:
792 	case TG3_APE_LOCK_PHY3:
793 		bit = APE_LOCK_GRANT_DRIVER;
794 		break;
795 	default:
796 		return;
797 	}
798 
799 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
800 		gnt = TG3_APE_LOCK_GRANT;
801 	else
802 		gnt = TG3_APE_PER_LOCK_GRANT;
803 
804 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
805 }
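/* The APE locks are acquire/release pairs.  A minimal sketch of the
 * expected calling pattern (illustrative only):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;		// not granted within ~1 ms
 *	// ... access APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * Each PCI function requests its own grant bit, so the same mechanism
 * arbitrates between driver instances and the APE firmware.
 */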
806 
807 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
808 {
809 	u32 apedata;
810 
811 	while (timeout_us) {
812 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
813 			return -EBUSY;
814 
815 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
816 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817 			break;
818 
819 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
820 
821 		udelay(10);
822 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
823 	}
824 
825 	return timeout_us ? 0 : -EBUSY;
826 }
827 
828 #ifdef CONFIG_TIGON3_HWMON
829 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
830 {
831 	u32 i, apedata;
832 
833 	for (i = 0; i < timeout_us / 10; i++) {
834 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
835 
836 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
837 			break;
838 
839 		udelay(10);
840 	}
841 
842 	return i == timeout_us / 10;
843 }
844 
845 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
846 				   u32 len)
847 {
848 	int err;
849 	u32 i, bufoff, msgoff, maxlen, apedata;
850 
851 	if (!tg3_flag(tp, APE_HAS_NCSI))
852 		return 0;
853 
854 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 	if (apedata != APE_SEG_SIG_MAGIC)
856 		return -ENODEV;
857 
858 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 	if (!(apedata & APE_FW_STATUS_READY))
860 		return -EAGAIN;
861 
862 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
863 		 TG3_APE_SHMEM_BASE;
864 	msgoff = bufoff + 2 * sizeof(u32);
865 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
866 
867 	while (len) {
868 		u32 length;
869 
870 		/* Cap xfer sizes to scratchpad limits. */
871 		length = (len > maxlen) ? maxlen : len;
872 		len -= length;
873 
874 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
875 		if (!(apedata & APE_FW_STATUS_READY))
876 			return -EAGAIN;
877 
878 		/* Wait for up to 1 msec for APE to service previous event. */
879 		err = tg3_ape_event_lock(tp, 1000);
880 		if (err)
881 			return err;
882 
883 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
884 			  APE_EVENT_STATUS_SCRTCHPD_READ |
885 			  APE_EVENT_STATUS_EVENT_PENDING;
886 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
887 
888 		tg3_ape_write32(tp, bufoff, base_off);
889 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
890 
891 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
892 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
893 
894 		base_off += length;
895 
896 		if (tg3_ape_wait_for_event(tp, 30000))
897 			return -EAGAIN;
898 
899 		for (i = 0; length; i += 4, length -= 4) {
900 			u32 val = tg3_ape_read32(tp, msgoff + i);
901 			memcpy(data, &val, sizeof(u32));
902 			data++;
903 		}
904 	}
905 
906 	return 0;
907 }
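/* Summary of the driver <-> APE handshake used above: acquire
 * TG3_APE_LOCK_MEM, post a driver event with the offset and length in
 * the shared message buffer, ring TG3_APE_EVENT, then wait for the APE
 * to clear APE_EVENT_STATUS_EVENT_PENDING before copying the reply
 * words out of the message area.
 */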
908 #endif
909 
910 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
911 {
912 	int err;
913 	u32 apedata;
914 
915 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
916 	if (apedata != APE_SEG_SIG_MAGIC)
917 		return -EAGAIN;
918 
919 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
920 	if (!(apedata & APE_FW_STATUS_READY))
921 		return -EAGAIN;
922 
923 	/* Wait for up to 20 milliseconds for APE to service previous event. */
924 	err = tg3_ape_event_lock(tp, 20000);
925 	if (err)
926 		return err;
927 
928 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
929 			event | APE_EVENT_STATUS_EVENT_PENDING);
930 
931 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
932 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
933 
934 	return 0;
935 }
936 
937 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
938 {
939 	u32 event;
940 	u32 apedata;
941 
942 	if (!tg3_flag(tp, ENABLE_APE))
943 		return;
944 
945 	switch (kind) {
946 	case RESET_KIND_INIT:
947 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
948 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
949 				APE_HOST_SEG_SIG_MAGIC);
950 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
951 				APE_HOST_SEG_LEN_MAGIC);
952 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
953 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
954 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
955 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
956 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
957 				APE_HOST_BEHAV_NO_PHYLOCK);
958 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
959 				    TG3_APE_HOST_DRVR_STATE_START);
960 
961 		event = APE_EVENT_STATUS_STATE_START;
962 		break;
963 	case RESET_KIND_SHUTDOWN:
964 		if (device_may_wakeup(&tp->pdev->dev) &&
965 		    tg3_flag(tp, WOL_ENABLE)) {
966 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
967 					    TG3_APE_HOST_WOL_SPEED_AUTO);
968 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
969 		} else
970 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
971 
972 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
973 
974 		event = APE_EVENT_STATUS_STATE_UNLOAD;
975 		break;
976 	default:
977 		return;
978 	}
979 
980 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
981 
982 	tg3_ape_send_event(tp, event);
983 }
984 
985 static void tg3_send_ape_heartbeat(struct tg3 *tp,
986 				   unsigned long interval)
987 {
988 	/* Check if the heartbeat interval has elapsed */
989 	if (!tg3_flag(tp, ENABLE_APE) ||
990 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
991 		return;
992 
993 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
994 	tp->ape_hb_jiffies = jiffies;
995 }
996 
997 static void tg3_disable_ints(struct tg3 *tp)
998 {
999 	int i;
1000 
1001 	tw32(TG3PCI_MISC_HOST_CTRL,
1002 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1003 	for (i = 0; i < tp->irq_max; i++)
1004 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1005 }
1006 
1007 static void tg3_enable_ints(struct tg3 *tp)
1008 {
1009 	int i;
1010 
1011 	tp->irq_sync = 0;
1012 	wmb();
1013 
1014 	tw32(TG3PCI_MISC_HOST_CTRL,
1015 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1016 
1017 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1018 	for (i = 0; i < tp->irq_cnt; i++) {
1019 		struct tg3_napi *tnapi = &tp->napi[i];
1020 
1021 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 		if (tg3_flag(tp, 1SHOT_MSI))
1023 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024 
1025 		tp->coal_now |= tnapi->coal_now;
1026 	}
1027 
1028 	/* Force an initial interrupt */
1029 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1030 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1031 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1032 	else
1033 		tw32(HOSTCC_MODE, tp->coal_now);
1034 
1035 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1036 }
1037 
1038 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1039 {
1040 	struct tg3 *tp = tnapi->tp;
1041 	struct tg3_hw_status *sblk = tnapi->hw_status;
1042 	unsigned int work_exists = 0;
1043 
1044 	/* check for phy events */
1045 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1046 		if (sblk->status & SD_STATUS_LINK_CHG)
1047 			work_exists = 1;
1048 	}
1049 
1050 	/* check for TX work to do */
1051 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1052 		work_exists = 1;
1053 
1054 	/* check for RX work to do */
1055 	if (tnapi->rx_rcb_prod_idx &&
1056 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1057 		work_exists = 1;
1058 
1059 	return work_exists;
1060 }
1061 
1062 /* tg3_int_reenable
1063  *  similar to tg3_enable_ints, but it accurately determines whether there
1064  *  is new work pending and can return without flushing the PIO write
1065  *  which reenables interrupts
1066  */
1067 static void tg3_int_reenable(struct tg3_napi *tnapi)
1068 {
1069 	struct tg3 *tp = tnapi->tp;
1070 
1071 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1072 
1073 	/* When doing tagged status, this work check is unnecessary.
1074 	 * The last_tag we write above tells the chip which piece of
1075 	 * work we've completed.
1076 	 */
1077 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1078 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1079 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1080 }
1081 
1082 static void tg3_switch_clocks(struct tg3 *tp)
1083 {
1084 	u32 clock_ctrl;
1085 	u32 orig_clock_ctrl;
1086 
1087 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1088 		return;
1089 
1090 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1091 
1092 	orig_clock_ctrl = clock_ctrl;
1093 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1094 		       CLOCK_CTRL_CLKRUN_OENABLE |
1095 		       0x1f);
1096 	tp->pci_clock_ctrl = clock_ctrl;
1097 
1098 	if (tg3_flag(tp, 5705_PLUS)) {
1099 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1100 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1102 		}
1103 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1104 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 			    clock_ctrl |
1106 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1107 			    40);
1108 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1110 			    40);
1111 	}
1112 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1113 }
1114 
1115 #define PHY_BUSY_LOOPS	5000
1116 
1117 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1118 			 u32 *val)
1119 {
1120 	u32 frame_val;
1121 	unsigned int loops;
1122 	int ret;
1123 
1124 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1125 		tw32_f(MAC_MI_MODE,
1126 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1127 		udelay(80);
1128 	}
1129 
1130 	tg3_ape_lock(tp, tp->phy_ape_lock);
1131 
1132 	*val = 0x0;
1133 
1134 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1135 		      MI_COM_PHY_ADDR_MASK);
1136 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1137 		      MI_COM_REG_ADDR_MASK);
1138 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1139 
1140 	tw32_f(MAC_MI_COM, frame_val);
1141 
1142 	loops = PHY_BUSY_LOOPS;
1143 	while (loops != 0) {
1144 		udelay(10);
1145 		frame_val = tr32(MAC_MI_COM);
1146 
1147 		if ((frame_val & MI_COM_BUSY) == 0) {
1148 			udelay(5);
1149 			frame_val = tr32(MAC_MI_COM);
1150 			break;
1151 		}
1152 		loops -= 1;
1153 	}
1154 
1155 	ret = -EBUSY;
1156 	if (loops != 0) {
1157 		*val = frame_val & MI_COM_DATA_MASK;
1158 		ret = 0;
1159 	}
1160 
1161 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1162 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1163 		udelay(80);
1164 	}
1165 
1166 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1167 
1168 	return ret;
1169 }
1170 
1171 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1172 {
1173 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1174 }
1175 
1176 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1177 			  u32 val)
1178 {
1179 	u32 frame_val;
1180 	unsigned int loops;
1181 	int ret;
1182 
1183 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1184 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1185 		return 0;
1186 
1187 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188 		tw32_f(MAC_MI_MODE,
1189 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1190 		udelay(80);
1191 	}
1192 
1193 	tg3_ape_lock(tp, tp->phy_ape_lock);
1194 
1195 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1196 		      MI_COM_PHY_ADDR_MASK);
1197 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1198 		      MI_COM_REG_ADDR_MASK);
1199 	frame_val |= (val & MI_COM_DATA_MASK);
1200 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1201 
1202 	tw32_f(MAC_MI_COM, frame_val);
1203 
1204 	loops = PHY_BUSY_LOOPS;
1205 	while (loops != 0) {
1206 		udelay(10);
1207 		frame_val = tr32(MAC_MI_COM);
1208 		if ((frame_val & MI_COM_BUSY) == 0) {
1209 			udelay(5);
1210 			frame_val = tr32(MAC_MI_COM);
1211 			break;
1212 		}
1213 		loops -= 1;
1214 	}
1215 
1216 	ret = -EBUSY;
1217 	if (loops != 0)
1218 		ret = 0;
1219 
1220 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1221 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1222 		udelay(80);
1223 	}
1224 
1225 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1226 
1227 	return ret;
1228 }
1229 
1230 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1231 {
1232 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1233 }
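/* A sketch of the usual read-modify-write pattern built on the helpers
 * above (BMCR picked only as an example register):
 *
 *	u32 val;
 *
 *	if (!tg3_readphy(tp, MII_BMCR, &val)) {
 *		val |= BMCR_LOOPBACK;
 *		tg3_writephy(tp, MII_BMCR, val);
 *	}
 *
 * Both helpers drive the MI_COM interface and take the PHY APE lock
 * internally, so callers need no extra serialization beyond tp->lock.
 */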
1234 
1235 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1236 {
1237 	int err;
1238 
1239 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1240 	if (err)
1241 		goto done;
1242 
1243 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1244 	if (err)
1245 		goto done;
1246 
1247 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1248 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1249 	if (err)
1250 		goto done;
1251 
1252 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1253 
1254 done:
1255 	return err;
1256 }
1257 
1258 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1259 {
1260 	int err;
1261 
1262 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1263 	if (err)
1264 		goto done;
1265 
1266 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1267 	if (err)
1268 		goto done;
1269 
1270 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1271 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1272 	if (err)
1273 		goto done;
1274 
1275 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1276 
1277 done:
1278 	return err;
1279 }
1280 
1281 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1282 {
1283 	int err;
1284 
1285 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286 	if (!err)
1287 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1288 
1289 	return err;
1290 }
1291 
1292 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1293 {
1294 	int err;
1295 
1296 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1297 	if (!err)
1298 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1299 
1300 	return err;
1301 }
1302 
1303 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1304 {
1305 	int err;
1306 
1307 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1308 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1309 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1310 	if (!err)
1311 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1312 
1313 	return err;
1314 }
1315 
1316 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1317 {
1318 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1319 		set |= MII_TG3_AUXCTL_MISC_WREN;
1320 
1321 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 }
1323 
1324 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1325 {
1326 	u32 val;
1327 	int err;
1328 
1329 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1330 
1331 	if (err)
1332 		return err;
1333 
1334 	if (enable)
1335 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336 	else
1337 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1338 
1339 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1340 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1341 
1342 	return err;
1343 }
1344 
1345 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1346 {
1347 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1348 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1349 }
1350 
1351 static int tg3_bmcr_reset(struct tg3 *tp)
1352 {
1353 	u32 phy_control;
1354 	int limit, err;
1355 
1356 	/* OK, reset it, and poll the BMCR_RESET bit until it
1357 	 * clears or we time out.
1358 	 */
1359 	phy_control = BMCR_RESET;
1360 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1361 	if (err != 0)
1362 		return -EBUSY;
1363 
1364 	limit = 5000;
1365 	while (limit--) {
1366 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1367 		if (err != 0)
1368 			return -EBUSY;
1369 
1370 		if ((phy_control & BMCR_RESET) == 0) {
1371 			udelay(40);
1372 			break;
1373 		}
1374 		udelay(10);
1375 	}
1376 	if (limit < 0)
1377 		return -EBUSY;
1378 
1379 	return 0;
1380 }
1381 
1382 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1383 {
1384 	struct tg3 *tp = bp->priv;
1385 	u32 val;
1386 
1387 	spin_lock_bh(&tp->lock);
1388 
1389 	if (__tg3_readphy(tp, mii_id, reg, &val))
1390 		val = -EIO;
1391 
1392 	spin_unlock_bh(&tp->lock);
1393 
1394 	return val;
1395 }
1396 
1397 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1398 {
1399 	struct tg3 *tp = bp->priv;
1400 	u32 ret = 0;
1401 
1402 	spin_lock_bh(&tp->lock);
1403 
1404 	if (__tg3_writephy(tp, mii_id, reg, val))
1405 		ret = -EIO;
1406 
1407 	spin_unlock_bh(&tp->lock);
1408 
1409 	return ret;
1410 }
1411 
1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414 	u32 val;
1415 	struct phy_device *phydev;
1416 
1417 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419 	case PHY_ID_BCM50610:
1420 	case PHY_ID_BCM50610M:
1421 		val = MAC_PHYCFG2_50610_LED_MODES;
1422 		break;
1423 	case PHY_ID_BCMAC131:
1424 		val = MAC_PHYCFG2_AC131_LED_MODES;
1425 		break;
1426 	case PHY_ID_RTL8211C:
1427 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428 		break;
1429 	case PHY_ID_RTL8201E:
1430 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431 		break;
1432 	default:
1433 		return;
1434 	}
1435 
1436 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437 		tw32(MAC_PHYCFG2, val);
1438 
1439 		val = tr32(MAC_PHYCFG1);
1440 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1441 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443 		tw32(MAC_PHYCFG1, val);
1444 
1445 		return;
1446 	}
1447 
1448 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1451 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1452 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1453 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1454 		       MAC_PHYCFG2_INBAND_ENABLE;
1455 
1456 	tw32(MAC_PHYCFG2, val);
1457 
1458 	val = tr32(MAC_PHYCFG1);
1459 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466 	}
1467 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469 	tw32(MAC_PHYCFG1, val);
1470 
1471 	val = tr32(MAC_EXT_RGMII_MODE);
1472 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473 		 MAC_RGMII_MODE_RX_QUALITY |
1474 		 MAC_RGMII_MODE_RX_ACTIVITY |
1475 		 MAC_RGMII_MODE_RX_ENG_DET |
1476 		 MAC_RGMII_MODE_TX_ENABLE |
1477 		 MAC_RGMII_MODE_TX_LOWPWR |
1478 		 MAC_RGMII_MODE_TX_RESET);
1479 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481 			val |= MAC_RGMII_MODE_RX_INT_B |
1482 			       MAC_RGMII_MODE_RX_QUALITY |
1483 			       MAC_RGMII_MODE_RX_ACTIVITY |
1484 			       MAC_RGMII_MODE_RX_ENG_DET;
1485 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486 			val |= MAC_RGMII_MODE_TX_ENABLE |
1487 			       MAC_RGMII_MODE_TX_LOWPWR |
1488 			       MAC_RGMII_MODE_TX_RESET;
1489 	}
1490 	tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492 
1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 	udelay(80);
1498 
1499 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1500 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1501 		tg3_mdio_config_5785(tp);
1502 }
1503 
1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506 	int i;
1507 	u32 reg;
1508 	struct phy_device *phydev;
1509 
1510 	if (tg3_flag(tp, 5717_PLUS)) {
1511 		u32 is_serdes;
1512 
1513 		tp->phy_addr = tp->pci_fn + 1;
1514 
1515 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517 		else
1518 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 		if (is_serdes)
1521 			tp->phy_addr += 7;
1522 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 		int addr;
1524 
1525 		addr = ssb_gige_get_phyaddr(tp->pdev);
1526 		if (addr < 0)
1527 			return addr;
1528 		tp->phy_addr = addr;
1529 	} else
1530 		tp->phy_addr = TG3_PHY_MII_ADDR;
1531 
1532 	tg3_mdio_start(tp);
1533 
1534 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 		return 0;
1536 
1537 	tp->mdio_bus = mdiobus_alloc();
1538 	if (tp->mdio_bus == NULL)
1539 		return -ENOMEM;
1540 
1541 	tp->mdio_bus->name     = "tg3 mdio bus";
1542 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1543 		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1544 	tp->mdio_bus->priv     = tp;
1545 	tp->mdio_bus->parent   = &tp->pdev->dev;
1546 	tp->mdio_bus->read     = &tg3_mdio_read;
1547 	tp->mdio_bus->write    = &tg3_mdio_write;
1548 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1549 
1550 	/* The bus registration will look for all the PHYs on the mdio bus.
1551 	 * Unfortunately, it does not ensure the PHY is powered up before
1552 	 * accessing the PHY ID registers.  A chip reset is the
1553 	 * quickest way to bring the device back to an operational state.
1554 	 */
1555 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1556 		tg3_bmcr_reset(tp);
1557 
1558 	i = mdiobus_register(tp->mdio_bus);
1559 	if (i) {
1560 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1561 		mdiobus_free(tp->mdio_bus);
1562 		return i;
1563 	}
1564 
1565 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1566 
1567 	if (!phydev || !phydev->drv) {
1568 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1569 		mdiobus_unregister(tp->mdio_bus);
1570 		mdiobus_free(tp->mdio_bus);
1571 		return -ENODEV;
1572 	}
1573 
1574 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1575 	case PHY_ID_BCM57780:
1576 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1577 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1578 		break;
1579 	case PHY_ID_BCM50610:
1580 	case PHY_ID_BCM50610M:
1581 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1582 				     PHY_BRCM_RX_REFCLK_UNUSED |
1583 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1584 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585 		fallthrough;
1586 	case PHY_ID_RTL8211C:
1587 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1588 		break;
1589 	case PHY_ID_RTL8201E:
1590 	case PHY_ID_BCMAC131:
1591 		phydev->interface = PHY_INTERFACE_MODE_MII;
1592 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1593 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1594 		break;
1595 	}
1596 
1597 	tg3_flag_set(tp, MDIOBUS_INITED);
1598 
1599 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1600 		tg3_mdio_config_5785(tp);
1601 
1602 	return 0;
1603 }
1604 
1605 static void tg3_mdio_fini(struct tg3 *tp)
1606 {
1607 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1608 		tg3_flag_clear(tp, MDIOBUS_INITED);
1609 		mdiobus_unregister(tp->mdio_bus);
1610 		mdiobus_free(tp->mdio_bus);
1611 	}
1612 }
1613 
1614 /* tp->lock is held. */
1615 static inline void tg3_generate_fw_event(struct tg3 *tp)
1616 {
1617 	u32 val;
1618 
1619 	val = tr32(GRC_RX_CPU_EVENT);
1620 	val |= GRC_RX_CPU_DRIVER_EVENT;
1621 	tw32_f(GRC_RX_CPU_EVENT, val);
1622 
1623 	tp->last_event_jiffies = jiffies;
1624 }
1625 
1626 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1627 
1628 /* tp->lock is held. */
1629 static void tg3_wait_for_event_ack(struct tg3 *tp)
1630 {
1631 	int i;
1632 	unsigned int delay_cnt;
1633 	long time_remain;
1634 
1635 	/* If enough time has passed, no wait is necessary. */
1636 	time_remain = (long)(tp->last_event_jiffies + 1 +
1637 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1638 		      (long)jiffies;
1639 	if (time_remain < 0)
1640 		return;
1641 
1642 	/* Check if we can shorten the wait time. */
1643 	delay_cnt = jiffies_to_usecs(time_remain);
1644 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1645 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1646 	delay_cnt = (delay_cnt >> 3) + 1;
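	/* Each iteration of the loop below waits 8 usec, so sizing the
	 * loop count to one eighth of the remaining time (rounded up)
	 * covers the full timeout window.
	 */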
1647 
1648 	for (i = 0; i < delay_cnt; i++) {
1649 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1650 			break;
1651 		if (pci_channel_offline(tp->pdev))
1652 			break;
1653 
1654 		udelay(8);
1655 	}
1656 }
1657 
1658 /* tp->lock is held. */
1659 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1660 {
1661 	u32 reg, val;
1662 
1663 	val = 0;
1664 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1665 		val = reg << 16;
1666 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1667 		val |= (reg & 0xffff);
1668 	*data++ = val;
1669 
1670 	val = 0;
1671 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1672 		val = reg << 16;
1673 	if (!tg3_readphy(tp, MII_LPA, &reg))
1674 		val |= (reg & 0xffff);
1675 	*data++ = val;
1676 
1677 	val = 0;
1678 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1679 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1680 			val = reg << 16;
1681 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1682 			val |= (reg & 0xffff);
1683 	}
1684 	*data++ = val;
1685 
1686 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1687 		val = reg << 16;
1688 	else
1689 		val = 0;
1690 	*data++ = val;
1691 }
1692 
1693 /* tp->lock is held. */
1694 static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696 	u32 data[4];
1697 
1698 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 		return;
1700 
1701 	tg3_phy_gather_ump_data(tp, data);
1702 
1703 	tg3_wait_for_event_ack(tp);
1704 
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711 
1712 	tg3_generate_fw_event(tp);
1713 }
1714 
1715 /* tp->lock is held. */
1716 static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719 		/* Wait for RX cpu to ACK the previous event. */
1720 		tg3_wait_for_event_ack(tp);
1721 
1722 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723 
1724 		tg3_generate_fw_event(tp);
1725 
1726 		/* Wait for RX cpu to ACK this event. */
1727 		tg3_wait_for_event_ack(tp);
1728 	}
1729 }
1730 
1731 /* tp->lock is held. */
1732 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736 
1737 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738 		switch (kind) {
1739 		case RESET_KIND_INIT:
1740 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 				      DRV_STATE_START);
1742 			break;
1743 
1744 		case RESET_KIND_SHUTDOWN:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_UNLOAD);
1747 			break;
1748 
1749 		case RESET_KIND_SUSPEND:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_SUSPEND);
1752 			break;
1753 
1754 		default:
1755 			break;
1756 		}
1757 	}
1758 }
1759 
1760 /* tp->lock is held. */
1761 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764 		switch (kind) {
1765 		case RESET_KIND_INIT:
1766 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 				      DRV_STATE_START_DONE);
1768 			break;
1769 
1770 		case RESET_KIND_SHUTDOWN:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_UNLOAD_DONE);
1773 			break;
1774 
1775 		default:
1776 			break;
1777 		}
1778 	}
1779 }
1780 
1781 /* tp->lock is held. */
1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784 	if (tg3_flag(tp, ENABLE_ASF)) {
1785 		switch (kind) {
1786 		case RESET_KIND_INIT:
1787 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 				      DRV_STATE_START);
1789 			break;
1790 
1791 		case RESET_KIND_SHUTDOWN:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_UNLOAD);
1794 			break;
1795 
1796 		case RESET_KIND_SUSPEND:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_SUSPEND);
1799 			break;
1800 
1801 		default:
1802 			break;
1803 		}
1804 	}
1805 }
1806 
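/* Wait for bootcode to finish initializing.  The 5906 signals
 * completion through VCPU_STATUS; other chips write back the one's
 * complement of the firmware mailbox magic value.  Missing firmware
 * is reported once but is not an error, since some Sun onboard parts
 * ship without it.
 */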
1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809 	int i;
1810 	u32 val;
1811 
1812 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 		return 0;
1814 
1815 	if (tg3_flag(tp, IS_SSB_CORE)) {
1816 		/* We don't use firmware. */
1817 		return 0;
1818 	}
1819 
1820 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821 		/* Wait up to 20ms for init done. */
1822 		for (i = 0; i < 200; i++) {
1823 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824 				return 0;
1825 			if (pci_channel_offline(tp->pdev))
1826 				return -ENODEV;
1827 
1828 			udelay(100);
1829 		}
1830 		return -ENODEV;
1831 	}
1832 
1833 	/* Wait for firmware initialization to complete. */
1834 	for (i = 0; i < 100000; i++) {
1835 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837 			break;
1838 		if (pci_channel_offline(tp->pdev)) {
1839 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1841 				netdev_info(tp->dev, "No firmware running\n");
1842 			}
1843 
1844 			break;
1845 		}
1846 
1847 		udelay(10);
1848 	}
1849 
1850 	/* Chip might not be fitted with firmware.  Some Sun onboard
1851 	 * parts are configured like that.  So don't signal the timeout
1852 	 * of the above loop as an error, but do report the lack of
1853 	 * running firmware once.
1854 	 */
1855 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 
1858 		netdev_info(tp->dev, "No firmware running\n");
1859 	}
1860 
1861 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862 		/* The 57765 A0 needs a little more
1863 		 * time to do some important work.
1864 		 */
1865 		mdelay(10);
1866 	}
1867 
1868 	return 0;
1869 }
1870 
1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873 	if (!netif_carrier_ok(tp->dev)) {
1874 		netif_info(tp, link, tp->dev, "Link is down\n");
1875 		tg3_ump_link_report(tp);
1876 	} else if (netif_msg_link(tp)) {
1877 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878 			    (tp->link_config.active_speed == SPEED_1000 ?
1879 			     1000 :
1880 			     (tp->link_config.active_speed == SPEED_100 ?
1881 			      100 : 10)),
1882 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 			     "full" : "half"));
1884 
1885 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887 			    "on" : "off",
1888 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 			    "on" : "off");
1890 
1891 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892 			netdev_info(tp->dev, "EEE is %s\n",
1893 				    tp->setlpicnt ? "enabled" : "disabled");
1894 
1895 		tg3_ump_link_report(tp);
1896 	}
1897 
1898 	tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900 
1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903 	u32 flowctrl = 0;
1904 
1905 	if (adv & ADVERTISE_PAUSE_CAP) {
1906 		flowctrl |= FLOW_CTRL_RX;
1907 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1908 			flowctrl |= FLOW_CTRL_TX;
1909 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1910 		flowctrl |= FLOW_CTRL_TX;
1911 
1912 	return flowctrl;
1913 }
1914 
1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917 	u16 miireg;
1918 
1919 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920 		miireg = ADVERTISE_1000XPAUSE;
1921 	else if (flow_ctrl & FLOW_CTRL_TX)
1922 		miireg = ADVERTISE_1000XPSE_ASYM;
1923 	else if (flow_ctrl & FLOW_CTRL_RX)
1924 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925 	else
1926 		miireg = 0;
1927 
1928 	return miireg;
1929 }
1930 
1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933 	u32 flowctrl = 0;
1934 
1935 	if (adv & ADVERTISE_1000XPAUSE) {
1936 		flowctrl |= FLOW_CTRL_RX;
1937 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938 			flowctrl |= FLOW_CTRL_TX;
1939 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1940 		flowctrl |= FLOW_CTRL_TX;
1941 
1942 	return flowctrl;
1943 }
1944 
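/* Resolve the 1000BASE-X pause outcome per IEEE 802.3 Annex 28B:
 * symmetric PAUSE advertised by both ends enables flow control in
 * both directions.  Otherwise, with ASYM_PAUSE on both ends, PAUSE on
 * the local end yields FLOW_CTRL_RX and PAUSE on the remote end
 * yields FLOW_CTRL_TX.
 */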
1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947 	u8 cap = 0;
1948 
1949 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952 		if (lcladv & ADVERTISE_1000XPAUSE)
1953 			cap = FLOW_CTRL_RX;
1954 		if (rmtadv & ADVERTISE_1000XPAUSE)
1955 			cap = FLOW_CTRL_TX;
1956 	}
1957 
1958 	return cap;
1959 }
1960 
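/* Program the negotiated (or, without pause autoneg, the configured)
 * flow control into the MAC, flushing MAC_RX_MODE/MAC_TX_MODE only
 * when the value actually changes.
 */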
1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963 	u8 autoneg;
1964 	u8 flowctrl = 0;
1965 	u32 old_rx_mode = tp->rx_mode;
1966 	u32 old_tx_mode = tp->tx_mode;
1967 
1968 	if (tg3_flag(tp, USE_PHYLIB))
1969 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970 	else
1971 		autoneg = tp->link_config.autoneg;
1972 
1973 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976 		else
1977 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978 	} else
1979 		flowctrl = tp->link_config.flowctrl;
1980 
1981 	tp->link_config.active_flowctrl = flowctrl;
1982 
1983 	if (flowctrl & FLOW_CTRL_RX)
1984 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985 	else
1986 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987 
1988 	if (old_rx_mode != tp->rx_mode)
1989 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1990 
1991 	if (flowctrl & FLOW_CTRL_TX)
1992 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993 	else
1994 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995 
1996 	if (old_tx_mode != tp->tx_mode)
1997 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999 
2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002 	u8 oldflowctrl, linkmesg = 0;
2003 	u32 mac_mode, lcl_adv, rmt_adv;
2004 	struct tg3 *tp = netdev_priv(dev);
2005 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006 
2007 	spin_lock_bh(&tp->lock);
2008 
2009 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010 				    MAC_MODE_HALF_DUPLEX);
2011 
2012 	oldflowctrl = tp->link_config.active_flowctrl;
2013 
2014 	if (phydev->link) {
2015 		lcl_adv = 0;
2016 		rmt_adv = 0;
2017 
2018 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2020 		else if (phydev->speed == SPEED_1000 ||
2021 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2022 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023 		else
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 
2026 		if (phydev->duplex == DUPLEX_HALF)
2027 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2028 		else {
2029 			lcl_adv = mii_advertise_flowctrl(
2030 				  tp->link_config.flowctrl);
2031 
2032 			if (phydev->pause)
2033 				rmt_adv = LPA_PAUSE_CAP;
2034 			if (phydev->asym_pause)
2035 				rmt_adv |= LPA_PAUSE_ASYM;
2036 		}
2037 
2038 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039 	} else
2040 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041 
2042 	if (mac_mode != tp->mac_mode) {
2043 		tp->mac_mode = mac_mode;
2044 		tw32_f(MAC_MODE, tp->mac_mode);
2045 		udelay(40);
2046 	}
2047 
2048 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049 		if (phydev->speed == SPEED_10)
2050 			tw32(MAC_MI_STAT,
2051 			     MAC_MI_STAT_10MBPS_MODE |
2052 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 		else
2054 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 	}
2056 
2057 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058 		tw32(MAC_TX_LENGTHS,
2059 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2061 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062 	else
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 
2068 	if (phydev->link != tp->old_link ||
2069 	    phydev->speed != tp->link_config.active_speed ||
2070 	    phydev->duplex != tp->link_config.active_duplex ||
2071 	    oldflowctrl != tp->link_config.active_flowctrl)
2072 		linkmesg = 1;
2073 
2074 	tp->old_link = phydev->link;
2075 	tp->link_config.active_speed = phydev->speed;
2076 	tp->link_config.active_duplex = phydev->duplex;
2077 
2078 	spin_unlock_bh(&tp->lock);
2079 
2080 	if (linkmesg)
2081 		tg3_link_report(tp);
2082 }
2083 
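/* Connect the MAC to its phylib PHY device and clamp the supported
 * speeds to what the MAC/PHY pairing can do (10/100 only for MII
 * interfaces and 10/100-only PHYs).
 */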
2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086 	struct phy_device *phydev;
2087 
2088 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 		return 0;
2090 
2091 	/* Bring the PHY back to a known state. */
2092 	tg3_bmcr_reset(tp);
2093 
2094 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095 
2096 	/* Attach the MAC to the PHY. */
2097 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2098 			     tg3_adjust_link, phydev->interface);
2099 	if (IS_ERR(phydev)) {
2100 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101 		return PTR_ERR(phydev);
2102 	}
2103 
2104 	/* Mask with MAC supported features. */
2105 	switch (phydev->interface) {
2106 	case PHY_INTERFACE_MODE_GMII:
2107 	case PHY_INTERFACE_MODE_RGMII:
2108 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109 			phy_set_max_speed(phydev, SPEED_1000);
2110 			phy_support_asym_pause(phydev);
2111 			break;
2112 		}
2113 		fallthrough;
2114 	case PHY_INTERFACE_MODE_MII:
2115 		phy_set_max_speed(phydev, SPEED_100);
2116 		phy_support_asym_pause(phydev);
2117 		break;
2118 	default:
2119 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 		return -EINVAL;
2121 	}
2122 
2123 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124 
2125 	phy_attached_info(phydev);
2126 
2127 	return 0;
2128 }
2129 
2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132 	struct phy_device *phydev;
2133 
2134 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 		return;
2136 
2137 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138 
2139 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141 		phydev->speed = tp->link_config.speed;
2142 		phydev->duplex = tp->link_config.duplex;
2143 		phydev->autoneg = tp->link_config.autoneg;
2144 		ethtool_convert_legacy_u32_to_link_mode(
2145 			phydev->advertising, tp->link_config.advertising);
2146 	}
2147 
2148 	phy_start(phydev);
2149 
2150 	phy_start_aneg(phydev);
2151 }
2152 
2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 		return;
2157 
2158 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160 
2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166 	}
2167 }
2168 
2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171 	int err;
2172 	u32 val;
2173 
2174 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 		return 0;
2176 
2177 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178 		/* Cannot do read-modify-write on 5401 */
2179 		err = tg3_phy_auxctl_write(tp,
2180 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182 					   0x4c20);
2183 		goto done;
2184 	}
2185 
2186 	err = tg3_phy_auxctl_read(tp,
2187 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188 	if (err)
2189 		return err;
2190 
2191 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192 	err = tg3_phy_auxctl_write(tp,
2193 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194 
2195 done:
2196 	return err;
2197 }
2198 
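/* Toggle auto power-down on FET-style PHYs.  The APD bit lives in a
 * shadow register, so shadow access is enabled via MII_TG3_FET_TEST
 * around the read-modify-write and restored afterwards.
 */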
2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201 	u32 phytest;
2202 
2203 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 		u32 phy;
2205 
2206 		tg3_writephy(tp, MII_TG3_FET_TEST,
2207 			     phytest | MII_TG3_FET_SHADOW_EN);
2208 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209 			if (enable)
2210 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 			else
2212 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214 		}
2215 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216 	}
2217 }
2218 
2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221 	u32 reg;
2222 
2223 	if (!tg3_flag(tp, 5705_PLUS) ||
2224 	    (tg3_flag(tp, 5717_PLUS) &&
2225 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 		return;
2227 
2228 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229 		tg3_phy_fet_toggle_apd(tp, enable);
2230 		return;
2231 	}
2232 
2233 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239 
	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

2243 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244 	if (enable)
2245 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246 
2247 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249 
2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252 	u32 phy;
2253 
2254 	if (!tg3_flag(tp, 5705_PLUS) ||
2255 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 		return;
2257 
2258 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 		u32 ephy;
2260 
2261 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263 
2264 			tg3_writephy(tp, MII_TG3_FET_TEST,
2265 				     ephy | MII_TG3_FET_SHADOW_EN);
2266 			if (!tg3_readphy(tp, reg, &phy)) {
2267 				if (enable)
2268 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 				else
2270 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 				tg3_writephy(tp, reg, phy);
2272 			}
2273 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274 		}
2275 	} else {
2276 		int ret;
2277 
2278 		ret = tg3_phy_auxctl_read(tp,
2279 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280 		if (!ret) {
2281 			if (enable)
2282 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 			else
2284 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 			tg3_phy_auxctl_write(tp,
2286 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287 		}
2288 	}
2289 }
2290 
2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293 	int ret;
2294 	u32 val;
2295 
2296 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 		return;
2298 
2299 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 	if (!ret)
2301 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304 
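/* Program PHY DSP tap and trim values derived from the chip's
 * one-time-programmable (OTP) data, if any was fused in.
 */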
2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307 	u32 otp, phy;
2308 
2309 	if (!tp->phy_otp)
2310 		return;
2311 
2312 	otp = tp->phy_otp;
2313 
2314 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 		return;
2316 
2317 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320 
2321 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324 
2325 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328 
2329 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331 
2332 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334 
2335 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338 
2339 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
2341 
2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2343 {
2344 	u32 val;
2345 	struct ethtool_eee *dest = &tp->eee;
2346 
2347 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 		return;
2349 
2350 	if (eee)
2351 		dest = eee;
2352 
2353 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 		return;
2355 
2356 	/* Pull eee_active */
2357 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359 		dest->eee_active = 1;
2360 	} else
2361 		dest->eee_active = 0;
2362 
2363 	/* Pull lp advertised settings */
2364 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 		return;
2366 	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2367 
2368 	/* Pull advertised and eee_enabled settings */
2369 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 		return;
2371 	dest->eee_enabled = !!val;
2372 	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2373 
2374 	/* Pull tx_lpi_enabled */
2375 	val = tr32(TG3_CPMU_EEE_MODE);
2376 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377 
2378 	/* Pull lpi timer value */
2379 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381 
2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384 	u32 val;
2385 
2386 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387 		return;
2388 
2389 	tp->setlpicnt = 0;
2390 
2391 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 	    current_link_up &&
2393 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2394 	    (tp->link_config.active_speed == SPEED_100 ||
2395 	     tp->link_config.active_speed == SPEED_1000)) {
2396 		u32 eeectl;
2397 
2398 		if (tp->link_config.active_speed == SPEED_1000)
2399 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 		else
2401 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402 
2403 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404 
2405 		tg3_eee_pull_config(tp, NULL);
2406 		if (tp->eee.eee_active)
2407 			tp->setlpicnt = 2;
2408 	}
2409 
2410 	if (!tp->setlpicnt) {
2411 		if (current_link_up &&
2412 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 		}
2416 
2417 		val = tr32(TG3_CPMU_EEE_MODE);
2418 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419 	}
2420 }
2421 
2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424 	u32 val;
2425 
2426 	if (tp->link_config.active_speed == SPEED_1000 &&
2427 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 	     tg3_flag(tp, 57765_CLASS)) &&
2430 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 		val = MII_TG3_DSP_TAP26_ALNOKO |
2432 		      MII_TG3_DSP_TAP26_RMRXSTO;
2433 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 	}
2436 
2437 	val = tr32(TG3_CPMU_EEE_MODE);
2438 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440 
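/* Poll for completion of a DSP macro operation: busy is bit 12 of
 * MII_TG3_DSP_CONTROL.  Returns -EBUSY after 100 attempts.
 */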
2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443 	int limit = 100;
2444 
2445 	while (limit--) {
2446 		u32 tmp32;
2447 
2448 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 			if ((tmp32 & 0x1000) == 0)
2450 				break;
2451 		}
2452 	}
2453 	if (limit < 0)
2454 		return -EBUSY;
2455 
2456 	return 0;
2457 }
2458 
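/* Write a known test pattern into each of the four DSP channels and
 * read it back through the macro interface.  A macro timeout asks the
 * caller (via *resetp) for a fresh PHY reset; a readback mismatch
 * just returns -EBUSY so the caller can retry.
 */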
2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461 	static const u32 test_pat[4][6] = {
2462 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466 	};
2467 	int chan;
2468 
2469 	for (chan = 0; chan < 4; chan++) {
2470 		int i;
2471 
2472 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 			     (chan * 0x2000) | 0x0200);
2474 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475 
2476 		for (i = 0; i < 6; i++)
2477 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 				     test_pat[chan][i]);
2479 
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 		if (tg3_wait_macro_done(tp)) {
2482 			*resetp = 1;
2483 			return -EBUSY;
2484 		}
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 			     (chan * 0x2000) | 0x0200);
2488 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 		if (tg3_wait_macro_done(tp)) {
2490 			*resetp = 1;
2491 			return -EBUSY;
2492 		}
2493 
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		for (i = 0; i < 6; i += 2) {
2501 			u32 low, high;
2502 
2503 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 			    tg3_wait_macro_done(tp)) {
2506 				*resetp = 1;
2507 				return -EBUSY;
2508 			}
2509 			low &= 0x7fff;
2510 			high &= 0x000f;
2511 			if (low != test_pat[chan][i] ||
2512 			    high != test_pat[chan][i+1]) {
2513 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516 
2517 				return -EBUSY;
2518 			}
2519 		}
2520 	}
2521 
2522 	return 0;
2523 }
2524 
2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527 	int chan;
2528 
2529 	for (chan = 0; chan < 4; chan++) {
2530 		int i;
2531 
2532 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 			     (chan * 0x2000) | 0x0200);
2534 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 		for (i = 0; i < 6; i++)
2536 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 		if (tg3_wait_macro_done(tp))
2539 			return -EBUSY;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
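/* PHY workaround for 5703/5704/5705: force 1000Mbps full-duplex
 * master mode, verify the DSP test patterns (retrying with fresh PHY
 * resets up to 10 times), then restore the original transmitter,
 * interrupt and master/slave settings.
 */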
2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547 	u32 reg32, phy9_orig;
2548 	int retries, do_phy_reset, err;
2549 
2550 	retries = 10;
2551 	do_phy_reset = 1;
2552 	do {
2553 		if (do_phy_reset) {
2554 			err = tg3_bmcr_reset(tp);
2555 			if (err)
2556 				return err;
2557 			do_phy_reset = 0;
2558 		}
2559 
2560 		/* Disable transmitter and interrupt.  */
2561 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562 			continue;
2563 
2564 		reg32 |= 0x3000;
2565 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566 
2567 		/* Set full-duplex, 1000 mbps.  */
2568 		tg3_writephy(tp, MII_BMCR,
2569 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2570 
2571 		/* Set to master mode.  */
2572 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 			continue;
2574 
2575 		tg3_writephy(tp, MII_CTRL1000,
2576 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577 
2578 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579 		if (err)
2580 			return err;
2581 
2582 		/* Block the PHY control access.  */
2583 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2584 
2585 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 		if (!err)
2587 			break;
2588 	} while (--retries);
2589 
2590 	err = tg3_phy_reset_chanpat(tp);
2591 	if (err)
2592 		return err;
2593 
2594 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2595 
2596 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598 
2599 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2600 
2601 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602 
2603 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604 	if (err)
2605 		return err;
2606 
2607 	reg32 &= ~0x3000;
2608 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609 
2610 	return 0;
2611 }
2612 
2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615 	netif_carrier_off(tp->dev);
2616 	tp->link_up = false;
2617 }
2618 
2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621 	if (tg3_flag(tp, ENABLE_ASF))
2622 		netdev_warn(tp->dev,
2623 			    "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625 
/* Reset the tigon3 PHY and reapply the chip- and PHY-specific
 * workarounds that the reset clears.
 */
2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631 	u32 val, cpmuctrl;
2632 	int err;
2633 
2634 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 		val = tr32(GRC_MISC_CFG);
2636 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 		udelay(40);
2638 	}
2639 	err  = tg3_readphy(tp, MII_BMSR, &val);
2640 	err |= tg3_readphy(tp, MII_BMSR, &val);
2641 	if (err != 0)
2642 		return -EBUSY;
2643 
2644 	if (netif_running(tp->dev) && tp->link_up) {
2645 		netif_carrier_off(tp->dev);
2646 		tg3_link_report(tp);
2647 	}
2648 
2649 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 		err = tg3_phy_reset_5703_4_5(tp);
2653 		if (err)
2654 			return err;
2655 		goto out;
2656 	}
2657 
2658 	cpmuctrl = 0;
2659 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 			tw32(TG3_CPMU_CTRL,
2664 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 	}
2666 
2667 	err = tg3_bmcr_reset(tp);
2668 	if (err)
2669 		return err;
2670 
2671 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674 
2675 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 	}
2677 
2678 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 			udelay(40);
2685 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686 		}
2687 	}
2688 
2689 	if (tg3_flag(tp, 5717_PLUS) &&
2690 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 		return 0;
2692 
2693 	tg3_phy_apply_otp(tp);
2694 
2695 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 		tg3_phy_toggle_apd(tp, true);
2697 	else
2698 		tg3_phy_toggle_apd(tp, false);
2699 
2700 out:
2701 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 	}
2707 
2708 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 		}
2720 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 				tg3_writephy(tp, MII_TG3_TEST1,
2726 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2727 			} else
2728 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729 
2730 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2731 		}
2732 	}
2733 
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
2736 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 		/* Cannot do read-modify-write on 5401 */
2738 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 		/* Set bit 14 with read-modify-write to preserve other bits */
2741 		err = tg3_phy_auxctl_read(tp,
2742 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 		if (!err)
2744 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 	}
2747 
2748 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2749 	 * jumbo frames transmission.
2750 	 */
2751 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 	}
2756 
2757 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 		/* adjust output voltage */
2759 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 	}
2761 
2762 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2764 
2765 	tg3_phy_toggle_automdix(tp, true);
2766 	tg3_phy_set_wirespeed(tp);
2767 	return 0;
2768 }
2769 
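/* Each PCI function owns a 4-bit nibble in the shared GPIO message
 * word: bit 0 announces a loaded driver, bit 1 a request to keep
 * auxiliary (Vaux) power.
 */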
2770 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2772 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2773 					  TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779 
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2785 
2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788 	u32 status, shift;
2789 
2790 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2792 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 	else
2794 		status = tr32(TG3_CPMU_DRV_STATUS);
2795 
2796 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 	status |= (newstat << shift);
2799 
2800 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2802 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 	else
2804 		tw32(TG3_CPMU_DRV_STATUS, status);
2805 
2806 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
2808 
2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811 	if (!tg3_flag(tp, IS_NIC))
2812 		return 0;
2813 
2814 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 			return -EIO;
2819 
2820 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821 
2822 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2824 
2825 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 	} else {
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 	}
2830 
2831 	return 0;
2832 }
2833 
2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836 	u32 grc_local_ctrl;
2837 
2838 	if (!tg3_flag(tp, IS_NIC) ||
2839 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2841 		return;
2842 
2843 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844 
2845 	tw32_wait_f(GRC_LOCAL_CTRL,
2846 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 
2849 	tw32_wait_f(GRC_LOCAL_CTRL,
2850 		    grc_local_ctrl,
2851 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 
2853 	tw32_wait_f(GRC_LOCAL_CTRL,
2854 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857 
2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860 	if (!tg3_flag(tp, IS_NIC))
2861 		return;
2862 
2863 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 			    (GRC_LCLCTRL_GPIO_OE0 |
2867 			     GRC_LCLCTRL_GPIO_OE1 |
2868 			     GRC_LCLCTRL_GPIO_OE2 |
2869 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2871 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 				     GRC_LCLCTRL_GPIO_OE1 |
2877 				     GRC_LCLCTRL_GPIO_OE2 |
2878 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 				     tp->grc_local_ctrl;
2881 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 
2884 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 
2888 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 	} else {
2892 		u32 no_gpio2;
2893 		u32 grc_local_ctrl = 0;
2894 
		/* Workaround to prevent overdrawing current. */
2896 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 				    grc_local_ctrl,
2900 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 		}
2902 
2903 		/* On 5753 and variants, GPIO2 cannot be used. */
2904 		no_gpio2 = tp->nic_sram_data_cfg &
2905 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2906 
2907 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 				  GRC_LCLCTRL_GPIO_OE1 |
2909 				  GRC_LCLCTRL_GPIO_OE2 |
2910 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2912 		if (no_gpio2) {
2913 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2915 		}
2916 		tw32_wait_f(GRC_LOCAL_CTRL,
2917 			    tp->grc_local_ctrl | grc_local_ctrl,
2918 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 
2920 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921 
2922 		tw32_wait_f(GRC_LOCAL_CTRL,
2923 			    tp->grc_local_ctrl | grc_local_ctrl,
2924 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 
2926 		if (!no_gpio2) {
2927 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 			tw32_wait_f(GRC_LOCAL_CTRL,
2929 				    tp->grc_local_ctrl | grc_local_ctrl,
2930 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 		}
2932 	}
2933 }
2934 
2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937 	u32 msg = 0;
2938 
2939 	/* Serialize power state transitions */
2940 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 		return;
2942 
2943 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 		msg = TG3_GPIO_MSG_NEED_VAUX;
2945 
2946 	msg = tg3_set_function_status(tp, msg);
2947 
2948 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 		goto done;
2950 
2951 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 		tg3_pwrsrc_switch_to_vaux(tp);
2953 	else
2954 		tg3_pwrsrc_die_with_vmain(tp);
2955 
2956 done:
2957 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959 
2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962 	bool need_vaux = false;
2963 
2964 	/* The GPIOs do something completely different on 57765. */
2965 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 		return;
2967 
2968 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 		tg3_frob_aux_power_5717(tp, include_wol ?
2972 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973 		return;
2974 	}
2975 
2976 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 		struct net_device *dev_peer;
2978 
2979 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2980 
2981 		/* remove_one() may have been run on the peer. */
2982 		if (dev_peer) {
2983 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2984 
2985 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 				return;
2987 
2988 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 			    tg3_flag(tp_peer, ENABLE_ASF))
2990 				need_vaux = true;
2991 		}
2992 	}
2993 
2994 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 	    tg3_flag(tp, ENABLE_ASF))
2996 		need_vaux = true;
2997 
2998 	if (need_vaux)
2999 		tg3_pwrsrc_switch_to_vaux(tp);
3000 	else
3001 		tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003 
3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 		return 1;
3008 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 		if (speed != SPEED_10)
3010 			return 1;
3011 	} else if (speed == SPEED_10)
3012 		return 1;
3013 
3014 	return 0;
3015 }
3016 
3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019 	switch (tg3_asic_rev(tp)) {
3020 	case ASIC_REV_5700:
3021 	case ASIC_REV_5704:
3022 		return true;
3023 	case ASIC_REV_5780:
3024 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025 			return true;
3026 		return false;
3027 	case ASIC_REV_5717:
3028 		if (!tp->pci_fn)
3029 			return true;
3030 		return false;
3031 	case ASIC_REV_5719:
3032 	case ASIC_REV_5720:
3033 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034 		    !tp->pci_fn)
3035 			return true;
3036 		return false;
3037 	}
3038 
3039 	return false;
3040 }
3041 
3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044 	switch (tg3_asic_rev(tp)) {
3045 	case ASIC_REV_5719:
3046 	case ASIC_REV_5720:
3047 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048 		    !tp->pci_fn)
3049 			return true;
3050 		return false;
3051 	}
3052 
3053 	return false;
3054 }
3055 
3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058 	u32 val;
3059 
3060 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 		return;
3062 
3063 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067 
3068 			sg_dig_ctrl |=
3069 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072 		}
3073 		return;
3074 	}
3075 
3076 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 		tg3_bmcr_reset(tp);
3078 		val = tr32(GRC_MISC_CFG);
3079 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 		udelay(40);
3081 		return;
3082 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 		u32 phytest;
3084 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 			u32 phy;
3086 
3087 			tg3_writephy(tp, MII_ADVERTISE, 0);
3088 			tg3_writephy(tp, MII_BMCR,
3089 				     BMCR_ANENABLE | BMCR_ANRESTART);
3090 
3091 			tg3_writephy(tp, MII_TG3_FET_TEST,
3092 				     phytest | MII_TG3_FET_SHADOW_EN);
3093 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 				tg3_writephy(tp,
3096 					     MII_TG3_FET_SHDW_AUXMODE4,
3097 					     phy);
3098 			}
3099 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 		}
3101 		return;
3102 	} else if (do_low_power) {
3103 		if (!tg3_phy_led_bug(tp))
3104 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106 
3107 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 	}
3112 
3113 	/* The PHY should not be powered down on some chips because
3114 	 * of bugs.
3115 	 */
3116 	if (tg3_phy_power_bug(tp))
3117 		return;
3118 
3119 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 	}
3126 
3127 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129 
3130 /* tp->lock is held. */
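/* Acquire the NVRAM hardware arbitration grant (SWARB REQ1/GNT1).
 * The lock nests via tp->nvram_lock_cnt; only the outermost
 * acquisition touches the hardware.
 */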
3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133 	if (tg3_flag(tp, NVRAM)) {
3134 		int i;
3135 
3136 		if (tp->nvram_lock_cnt == 0) {
3137 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 			for (i = 0; i < 8000; i++) {
3139 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140 					break;
3141 				udelay(20);
3142 			}
3143 			if (i == 8000) {
3144 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145 				return -ENODEV;
3146 			}
3147 		}
3148 		tp->nvram_lock_cnt++;
3149 	}
3150 	return 0;
3151 }
3152 
3153 /* tp->lock is held. */
3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156 	if (tg3_flag(tp, NVRAM)) {
3157 		if (tp->nvram_lock_cnt > 0)
3158 			tp->nvram_lock_cnt--;
3159 		if (tp->nvram_lock_cnt == 0)
3160 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 	}
3162 }
3163 
3164 /* tp->lock is held. */
3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 		u32 nvaccess = tr32(NVRAM_ACCESS);
3169 
3170 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171 	}
3172 }
3173 
3174 /* tp->lock is held. */
3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 		u32 nvaccess = tr32(NVRAM_ACCESS);
3179 
3180 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181 	}
3182 }
3183 
3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 					u32 offset, u32 *val)
3186 {
3187 	u32 tmp;
3188 	int i;
3189 
3190 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 		return -EINVAL;
3192 
3193 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 					EEPROM_ADDR_DEVID_MASK |
3195 					EEPROM_ADDR_READ);
3196 	tw32(GRC_EEPROM_ADDR,
3197 	     tmp |
3198 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 	      EEPROM_ADDR_ADDR_MASK) |
3201 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202 
3203 	for (i = 0; i < 1000; i++) {
3204 		tmp = tr32(GRC_EEPROM_ADDR);
3205 
3206 		if (tmp & EEPROM_ADDR_COMPLETE)
3207 			break;
3208 		msleep(1);
3209 	}
3210 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 		return -EBUSY;
3212 
3213 	tmp = tr32(GRC_EEPROM_DATA);
3214 
3215 	/*
3216 	 * The data will always be opposite the native endian
3217 	 * format.  Perform a blind byteswap to compensate.
3218 	 */
3219 	*val = swab32(tmp);
3220 
3221 	return 0;
3222 }
3223 
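/* Poll limit for NVRAM_CMD_DONE: 10000 iterations of 10-40 us each,
 * i.e. a worst case of roughly 100-400 ms.
 */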
3224 #define NVRAM_CMD_TIMEOUT 10000
3225 
3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228 	int i;
3229 
3230 	tw32(NVRAM_CMD, nvram_cmd);
3231 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 		usleep_range(10, 40);
3233 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 			udelay(10);
3235 			break;
3236 		}
3237 	}
3238 
3239 	if (i == NVRAM_CMD_TIMEOUT)
3240 		return -EBUSY;
3241 
3242 	return 0;
3243 }
3244 
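/* Translate a linear NVRAM offset into the physical address format of
 * paged Atmel AT45DB0x1B flashes, where the page number sits above
 * ATMEL_AT45DB0X1B_PAGE_POS.  For example, with 264-byte pages and a
 * page position of 9, linear offset 528 (page 2, byte 0) becomes
 * (2 << 9) + 0 = 0x400.
 */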
3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247 	if (tg3_flag(tp, NVRAM) &&
3248 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3249 	    tg3_flag(tp, FLASH) &&
3250 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3252 
3253 		addr = ((addr / tp->nvram_pagesize) <<
3254 			ATMEL_AT45DB0X1B_PAGE_POS) +
3255 		       (addr % tp->nvram_pagesize);
3256 
3257 	return addr;
3258 }
3259 
3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262 	if (tg3_flag(tp, NVRAM) &&
3263 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3264 	    tg3_flag(tp, FLASH) &&
3265 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 
3268 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 			tp->nvram_pagesize) +
3270 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271 
3272 	return addr;
3273 }
3274 
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276  * the byteswapping settings for all other register accesses.
3277  * tg3 devices are BE devices, so on a BE machine, the data
3278  * returned will be exactly as it is seen in NVRAM.  On a LE
3279  * machine, the 32-bit value will be byteswapped.
3280  */
3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283 	int ret;
3284 
3285 	if (!tg3_flag(tp, NVRAM))
3286 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3287 
3288 	offset = tg3_nvram_phys_addr(tp, offset);
3289 
3290 	if (offset > NVRAM_ADDR_MSK)
3291 		return -EINVAL;
3292 
3293 	ret = tg3_nvram_lock(tp);
3294 	if (ret)
3295 		return ret;
3296 
3297 	tg3_enable_nvram_access(tp);
3298 
3299 	tw32(NVRAM_ADDR, offset);
3300 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 
3303 	if (ret == 0)
3304 		*val = tr32(NVRAM_RDDATA);
3305 
3306 	tg3_disable_nvram_access(tp);
3307 
3308 	tg3_nvram_unlock(tp);
3309 
3310 	return ret;
3311 }
3312 
3313 /* Ensures NVRAM data is in bytestream format. */
3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316 	u32 v;
3317 	int res = tg3_nvram_read(tp, offset, &v);
3318 	if (!res)
3319 		*val = cpu_to_be32(v);
3320 	return res;
3321 }
3322 
3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 				    u32 offset, u32 len, u8 *buf)
3325 {
3326 	int i, j, rc = 0;
3327 	u32 val;
3328 
3329 	for (i = 0; i < len; i += 4) {
3330 		u32 addr;
3331 		__be32 data;
3332 
3333 		addr = offset + i;
3334 
3335 		memcpy(&data, buf + i, 4);
3336 
3337 		/*
3338 		 * The SEEPROM interface expects the data to always be opposite
3339 		 * the native endian format.  We accomplish this by reversing
3340 		 * all the operations that would have been performed on the
3341 		 * data from a call to tg3_nvram_read_be32().
3342 		 */
3343 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344 
3345 		val = tr32(GRC_EEPROM_ADDR);
3346 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347 
3348 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 			EEPROM_ADDR_READ);
3350 		tw32(GRC_EEPROM_ADDR, val |
3351 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 			(addr & EEPROM_ADDR_ADDR_MASK) |
3353 			EEPROM_ADDR_START |
3354 			EEPROM_ADDR_WRITE);
3355 
3356 		for (j = 0; j < 1000; j++) {
3357 			val = tr32(GRC_EEPROM_ADDR);
3358 
3359 			if (val & EEPROM_ADDR_COMPLETE)
3360 				break;
3361 			msleep(1);
3362 		}
3363 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3364 			rc = -EBUSY;
3365 			break;
3366 		}
3367 	}
3368 
3369 	return rc;
3370 }
3371 
3372 /* offset and length are dword aligned */
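/* Unbuffered (page-erase) flash parts can only be rewritten a full
 * page at a time: read the enclosing page, merge in the new data,
 * issue a write-enable, erase the page, write-enable again, then
 * stream the page back with FIRST/LAST framing on the first and final
 * words.  A write-disable is issued when all pages are done.
 */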
3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374 		u8 *buf)
3375 {
3376 	int ret = 0;
3377 	u32 pagesize = tp->nvram_pagesize;
3378 	u32 pagemask = pagesize - 1;
3379 	u32 nvram_cmd;
3380 	u8 *tmp;
3381 
3382 	tmp = kmalloc(pagesize, GFP_KERNEL);
3383 	if (tmp == NULL)
3384 		return -ENOMEM;
3385 
3386 	while (len) {
3387 		int j;
3388 		u32 phy_addr, page_off, size;
3389 
3390 		phy_addr = offset & ~pagemask;
3391 
3392 		for (j = 0; j < pagesize; j += 4) {
3393 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 						  (__be32 *) (tmp + j));
3395 			if (ret)
3396 				break;
3397 		}
3398 		if (ret)
3399 			break;
3400 
3401 		page_off = offset & pagemask;
3402 		size = pagesize;
3403 		if (len < size)
3404 			size = len;
3405 
3406 		len -= size;
3407 
3408 		memcpy(tmp + page_off, buf, size);
3409 
3410 		offset = offset + (pagesize - page_off);
3411 
3412 		tg3_enable_nvram_access(tp);
3413 
3414 		/*
3415 		 * Before we can erase the flash page, we need
3416 		 * to issue a special "write enable" command.
3417 		 */
3418 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 
3420 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 			break;
3422 
3423 		/* Erase the target page */
3424 		tw32(NVRAM_ADDR, phy_addr);
3425 
3426 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428 
3429 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 			break;
3431 
3432 		/* Issue another write enable to start the write. */
3433 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 
3435 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 			break;
3437 
3438 		for (j = 0; j < pagesize; j += 4) {
3439 			__be32 data;
3440 
3441 			data = *((__be32 *) (tmp + j));
3442 
3443 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444 
3445 			tw32(NVRAM_ADDR, phy_addr + j);
3446 
3447 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448 				NVRAM_CMD_WR;
3449 
3450 			if (j == 0)
3451 				nvram_cmd |= NVRAM_CMD_FIRST;
3452 			else if (j == (pagesize - 4))
3453 				nvram_cmd |= NVRAM_CMD_LAST;
3454 
3455 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 			if (ret)
3457 				break;
3458 		}
3459 		if (ret)
3460 			break;
3461 	}
3462 
3463 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3465 
3466 	kfree(tmp);
3467 
3468 	return ret;
3469 }
3470 
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473 		u8 *buf)
3474 {
3475 	int i, ret = 0;
3476 
3477 	for (i = 0; i < len; i += 4, offset += 4) {
3478 		u32 page_off, phy_addr, nvram_cmd;
3479 		__be32 data;
3480 
3481 		memcpy(&data, buf + i, 4);
3482 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483 
3484 		page_off = offset % tp->nvram_pagesize;
3485 
3486 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3487 
3488 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489 
3490 		if (page_off == 0 || i == 0)
3491 			nvram_cmd |= NVRAM_CMD_FIRST;
3492 		if (page_off == (tp->nvram_pagesize - 4))
3493 			nvram_cmd |= NVRAM_CMD_LAST;
3494 
3495 		if (i == (len - 4))
3496 			nvram_cmd |= NVRAM_CMD_LAST;
3497 
3498 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 		    !tg3_flag(tp, FLASH) ||
3500 		    !tg3_flag(tp, 57765_PLUS))
3501 			tw32(NVRAM_ADDR, phy_addr);
3502 
3503 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 		    !tg3_flag(tp, 5755_PLUS) &&
3505 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3506 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 			u32 cmd;
3508 
3509 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 			ret = tg3_nvram_exec_cmd(tp, cmd);
3511 			if (ret)
3512 				break;
3513 		}
3514 		if (!tg3_flag(tp, FLASH)) {
3515 			/* We always do complete word writes to eeprom. */
3516 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 		}
3518 
3519 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520 		if (ret)
3521 			break;
3522 	}
3523 	return ret;
3524 }
3525 
3526 /* offset and length are dword aligned */
3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529 	int ret;
3530 
3531 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534 		udelay(40);
3535 	}
3536 
3537 	if (!tg3_flag(tp, NVRAM)) {
3538 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539 	} else {
3540 		u32 grc_mode;
3541 
3542 		ret = tg3_nvram_lock(tp);
3543 		if (ret)
3544 			return ret;
3545 
3546 		tg3_enable_nvram_access(tp);
3547 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 			tw32(NVRAM_WRITE1, 0x406);
3549 
3550 		grc_mode = tr32(GRC_MODE);
3551 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552 
3553 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 				buf);
3556 		} else {
3557 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558 				buf);
3559 		}
3560 
3561 		grc_mode = tr32(GRC_MODE);
3562 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563 
3564 		tg3_disable_nvram_access(tp);
3565 		tg3_nvram_unlock(tp);
3566 	}
3567 
3568 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570 		udelay(40);
3571 	}
3572 
3573 	return ret;
3574 }
3575 
3576 #define RX_CPU_SCRATCH_BASE	0x30000
3577 #define RX_CPU_SCRATCH_SIZE	0x04000
3578 #define TX_CPU_SCRATCH_BASE	0x34000
3579 #define TX_CPU_SCRATCH_SIZE	0x04000
3580 
3581 /* tp->lock is held. */
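/* Halt the embedded RX or TX CPU by repeatedly asserting
 * CPU_MODE_HALT until the halt state latches, bailing out early if
 * the PCI channel has died.
 */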
3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584 	int i;
3585 	const int iters = 10000;
3586 
3587 	for (i = 0; i < iters; i++) {
3588 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3590 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 			break;
3592 		if (pci_channel_offline(tp->pdev))
3593 			return -EBUSY;
3594 	}
3595 
3596 	return (i == iters) ? -EBUSY : 0;
3597 }
3598 
3599 /* tp->lock is held. */
3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603 
3604 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3606 	udelay(10);
3607 
3608 	return rc;
3609 }
3610 
3611 /* tp->lock is held. */
3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616 
3617 /* tp->lock is held. */
3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3622 }
3623 
3624 /* tp->lock is held. */
3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627 	tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629 
3630 /* tp->lock is held. */
3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 	int rc;
3634 
3635 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636 
3637 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639 
3640 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 		return 0;
3642 	}
3643 	if (cpu_base == RX_CPU_BASE) {
3644 		rc = tg3_rxcpu_pause(tp);
3645 	} else {
		/*
		 * The 5750 derivative in the BCM4785 has only an Rx CPU,
		 * so there is no Tx CPU to pause here.
		 */
3650 		if (tg3_flag(tp, IS_SSB_CORE))
3651 			return 0;
3652 
3653 		rc = tg3_txcpu_pause(tp);
3654 	}
3655 
3656 	if (rc) {
3657 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659 		return -ENODEV;
3660 	}
3661 
3662 	/* Clear firmware's nvram arbitration. */
3663 	if (tg3_flag(tp, NVRAM))
3664 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665 	return 0;
3666 }
3667 
3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 			   const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671 	int fw_len;
3672 
	/* Non-fragmented firmware has one firmware header followed by a
	 * contiguous chunk of data to be written. The length field in that
	 * header is not the length of data to be written but the complete
	 * length of the bss. The data length is determined based on
	 * tp->fw->size minus headers.
	 *
	 * Fragmented firmware has a main header followed by multiple
	 * fragments. Each fragment is identical to non-fragmented firmware
	 * with a firmware header followed by a contiguous chunk of data. In
	 * the main header, the length field is unused and set to 0xffffffff.
	 * In each fragment header the length is the entire size of that
	 * fragment, i.e. fragment data + header length. Data length is
	 * therefore the length field in the header minus TG3_FW_HDR_LEN.
	 */
3687 	if (tp->fw_len == 0xffffffff)
3688 		fw_len = be32_to_cpu(fw_hdr->len);
3689 	else
3690 		fw_len = tp->fw->size;
3691 
3692 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
3694 
3695 /* tp->lock is held. */
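/* Copy firmware into a CPU's scratch memory.  Most chips are halted
 * first (taking the NVRAM lock in case bootcode is still running);
 * the 57766 is not halted, and its extra main header is skipped so
 * each fragment is written at its own base address.
 */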
3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 				 u32 cpu_scratch_base, int cpu_scratch_size,
3698 				 const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700 	int err, i;
3701 	void (*write_op)(struct tg3 *, u32, u32);
3702 	int total_len = tp->fw->size;
3703 
3704 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware on a 5705 or later chip that has no TX cpu\n",
3707 			   __func__);
3708 		return -EINVAL;
3709 	}
3710 
3711 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 		write_op = tg3_write_mem;
3713 	else
3714 		write_op = tg3_write_indirect_reg32;
3715 
3716 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 		/* It is possible that bootcode is still loading at this point.
3718 		 * Get the nvram lock first before halting the cpu.
3719 		 */
3720 		int lock_err = tg3_nvram_lock(tp);
3721 		err = tg3_halt_cpu(tp, cpu_base);
3722 		if (!lock_err)
3723 			tg3_nvram_unlock(tp);
3724 		if (err)
3725 			goto out;
3726 
3727 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 			write_op(tp, cpu_scratch_base + i, 0);
3729 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 		tw32(cpu_base + CPU_MODE,
3731 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 	} else {
		/* Subtract the additional main header for fragmented
		 * firmware and advance to the first fragment.
		 */
3736 		total_len -= TG3_FW_HDR_LEN;
3737 		fw_hdr++;
3738 	}
3739 
3740 	do {
3741 		u32 *fw_data = (u32 *)(fw_hdr + 1);
3742 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 			write_op(tp, cpu_scratch_base +
3744 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 				     (i * sizeof(u32)),
3746 				 be32_to_cpu(fw_data[i]));
3747 
3748 		total_len -= be32_to_cpu(fw_hdr->len);
3749 
3750 		/* Advance to next fragment */
3751 		fw_hdr = (struct tg3_firmware_hdr *)
3752 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 	} while (total_len > 0);
3754 
3755 	err = 0;
3756 
3757 out:
3758 	return err;
3759 }
3760 
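/* Write the new program counter and verify it by reading it back; retry
 * up to five times (re-halting the CPU each time) with a 1 ms delay
 * between attempts before giving up with -EBUSY.
 */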
3761 /* tp->lock is held. */
3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764 	int i;
3765 	const int iters = 5;
3766 
3767 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 	tw32_f(cpu_base + CPU_PC, pc);
3769 
3770 	for (i = 0; i < iters; i++) {
3771 		if (tr32(cpu_base + CPU_PC) == pc)
3772 			break;
3773 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3775 		tw32_f(cpu_base + CPU_PC, pc);
3776 		udelay(1000);
3777 	}
3778 
3779 	return (i == iters) ? -EBUSY : 0;
3780 }
3781 
3782 /* tp->lock is held. */
3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785 	const struct tg3_firmware_hdr *fw_hdr;
3786 	int err;
3787 
3788 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789 
	/* The firmware blob starts with version numbers, followed by
	 * start address and length. The length field holds the complete
	 * length: end_address_of_bss - start_address_of_text. The
	 * remainder is the blob to be loaded contiguously from the start
	 * address.
	 */
3795 
3796 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798 				    fw_hdr);
3799 	if (err)
3800 		return err;
3801 
3802 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804 				    fw_hdr);
3805 	if (err)
3806 		return err;
3807 
	/* Now start up only the RX CPU. */
3809 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 				       be32_to_cpu(fw_hdr->base_addr));
3811 	if (err) {
		netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC),
			   be32_to_cpu(fw_hdr->base_addr));
3816 		return -ENODEV;
3817 	}
3818 
3819 	tg3_rxcpu_resume(tp);
3820 
3821 	return 0;
3822 }
3823 
3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826 	const int iters = 1000;
3827 	int i;
3828 	u32 val;
3829 
	/* Wait for the boot code to complete initialization and enter its
	 * service loop. It is then safe to download service patches.
	 */
3833 	for (i = 0; i < iters; i++) {
3834 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835 			break;
3836 
3837 		udelay(10);
3838 	}
3839 
3840 	if (i == iters) {
3841 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842 		return -EBUSY;
3843 	}
3844 
3845 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 	if (val & 0xff) {
3847 		netdev_warn(tp->dev,
3848 			    "Other patches exist. Not downloading EEE patch\n");
3849 		return -EEXIST;
3850 	}
3851 
3852 	return 0;
3853 }
3854 
3855 /* tp->lock is held. */
3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858 	struct tg3_firmware_hdr *fw_hdr;
3859 
3860 	if (!tg3_flag(tp, NO_NVRAM))
3861 		return;
3862 
3863 	if (tg3_validate_rxcpu_state(tp))
3864 		return;
3865 
3866 	if (!tp->fw)
3867 		return;
3868 
	/* This firmware blob has a different format than older firmware
	 * releases, as described below. The main difference is that the
	 * data is fragmented and written to non-contiguous locations.
	 *
	 * The blob begins with a firmware header identical to other
	 * firmware, consisting of version, base address and length. The
	 * length here is unused and set to 0xffffffff.
	 *
	 * This is followed by a series of firmware fragments, each
	 * individually identical to older firmware, i.e. a firmware header
	 * followed by the data for that fragment. The version field of
	 * each fragment header is unused.
	 */
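
	/* Sketch of the 57766 blob layout described above (fragment sizes
	 * are illustrative):
	 *
	 *   +--------------------------+  main header, len = 0xffffffff
	 *   | version | base | len     |
	 *   +--------------------------+  fragment 1, len = hdr + data
	 *   | hdr | data ...           |
	 *   +--------------------------+  fragment 2, len = hdr + data
	 *   | hdr | data ...           |
	 *   +--------------------------+  ... until tp->fw->size is consumed
	 */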
3882 
3883 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 		return;
3886 
3887 	if (tg3_rxcpu_pause(tp))
3888 		return;
3889 
3890 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892 
3893 	tg3_rxcpu_resume(tp);
3894 }
3895 
3896 /* tp->lock is held. */
3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899 	const struct tg3_firmware_hdr *fw_hdr;
3900 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 	int err;
3902 
3903 	if (!tg3_flag(tp, FW_TSO))
3904 		return 0;
3905 
3906 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907 
	/* The firmware blob starts with version numbers, followed by
	 * start address and length. The length field holds the complete
	 * length: end_address_of_bss - start_address_of_text. The
	 * remainder is the blob to be loaded contiguously from the start
	 * address.
	 */
3913 
3914 	cpu_scratch_size = tp->fw_len;
3915 
3916 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 		cpu_base = RX_CPU_BASE;
3918 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 	} else {
3920 		cpu_base = TX_CPU_BASE;
3921 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 	}
3924 
3925 	err = tg3_load_firmware_cpu(tp, cpu_base,
3926 				    cpu_scratch_base, cpu_scratch_size,
3927 				    fw_hdr);
3928 	if (err)
3929 		return err;
3930 
	/* Now start up the CPU. */
3932 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 				       be32_to_cpu(fw_hdr->base_addr));
3934 	if (err) {
3935 		netdev_err(tp->dev,
			   "%s failed to set CPU PC: is %08x, should be %08x\n",
3937 			   __func__, tr32(cpu_base + CPU_PC),
3938 			   be32_to_cpu(fw_hdr->base_addr));
3939 		return -ENODEV;
3940 	}
3941 
3942 	tg3_resume_cpu(tp, cpu_base);
3943 	return 0;
3944 }
3945 
3946 /* tp->lock is held. */
3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 				   int index)
3949 {
3950 	u32 addr_high, addr_low;
3951 
3952 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 		    (mac_addr[4] <<  8) | mac_addr[5]);
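
	/* e.g. for MAC address 00:10:18:aa:bb:cc (illustrative):
	 *   addr_high = 0x00000010, addr_low = 0x18aabbcc
	 */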
3955 
3956 	if (index < 4) {
3957 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 	} else {
3960 		index -= 4;
3961 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 	}
3964 }
3965 
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 	u32 addr_high;
3970 	int i;
3971 
3972 	for (i = 0; i < 4; i++) {
3973 		if (i == 1 && skip_mac_1)
3974 			continue;
3975 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 	}
3977 
3978 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 		for (i = 4; i < 16; i++)
3981 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 	}
3983 
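	/* Seed the transmit backoff generator with the byte sum of the MAC
	 * address (e.g. 00:10:18:aa:bb:cc sums to 0x259), masked down by
	 * TX_BACKOFF_SEED_MASK.
	 */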
3984 	addr_high = (tp->dev->dev_addr[0] +
3985 		     tp->dev->dev_addr[1] +
3986 		     tp->dev->dev_addr[2] +
3987 		     tp->dev->dev_addr[3] +
3988 		     tp->dev->dev_addr[4] +
3989 		     tp->dev->dev_addr[5]) &
3990 		TX_BACKOFF_SEED_MASK;
3991 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
3993 
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 	/*
3997 	 * Make sure register accesses (indirect or otherwise) will function
3998 	 * correctly.
3999 	 */
4000 	pci_write_config_dword(tp->pdev,
4001 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003 
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 	int err;
4007 
4008 	tg3_enable_register_access(tp);
4009 
4010 	err = pci_set_power_state(tp->pdev, PCI_D0);
4011 	if (!err) {
4012 		/* Switch out of Vaux if it is a NIC */
4013 		tg3_pwrsrc_switch_to_vmain(tp);
4014 	} else {
4015 		netdev_err(tp->dev, "Transition to D0 failed\n");
4016 	}
4017 
4018 	return err;
4019 }
4020 
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022 
4023 static int tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 	u32 misc_host_ctrl;
4026 	bool device_should_wake, do_low_power;
4027 
4028 	tg3_enable_register_access(tp);
4029 
4030 	/* Restore the CLKREQ setting. */
4031 	if (tg3_flag(tp, CLKREQ_BUG))
4032 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4034 
4035 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 	tw32(TG3PCI_MISC_HOST_CTRL,
4037 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038 
4039 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 			     tg3_flag(tp, WOL_ENABLE);
4041 
4042 	if (tg3_flag(tp, USE_PHYLIB)) {
4043 		do_low_power = false;
4044 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 			struct phy_device *phydev;
4048 			u32 phyid;
4049 
4050 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 
4052 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 
4054 			tp->link_config.speed = phydev->speed;
4055 			tp->link_config.duplex = phydev->duplex;
4056 			tp->link_config.autoneg = phydev->autoneg;
4057 			ethtool_convert_link_mode_to_legacy_u32(
4058 				&tp->link_config.advertising,
4059 				phydev->advertising);
4060 
4061 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 					 advertising);
4064 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 					 advertising);
4066 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 					 advertising);
4068 
4069 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 							 advertising);
4073 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 							 advertising);
4075 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 							 advertising);
4077 				} else {
4078 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 							 advertising);
4080 				}
4081 			}
4082 
4083 			linkmode_copy(phydev->advertising, advertising);
4084 			phy_start_aneg(phydev);
4085 
4086 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 			if (phyid != PHY_ID_BCMAC131) {
4088 				phyid &= PHY_BCM_OUI_MASK;
4089 				if (phyid == PHY_BCM_OUI_1 ||
4090 				    phyid == PHY_BCM_OUI_2 ||
4091 				    phyid == PHY_BCM_OUI_3)
4092 					do_low_power = true;
4093 			}
4094 		}
4095 	} else {
4096 		do_low_power = true;
4097 
4098 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100 
4101 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 			tg3_setup_phy(tp, false);
4103 	}
4104 
4105 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 		u32 val;
4107 
4108 		val = tr32(GRC_VCPU_EXT_CTRL);
4109 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 		int i;
4112 		u32 val;
4113 
4114 		for (i = 0; i < 200; i++) {
4115 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117 				break;
4118 			msleep(1);
4119 		}
4120 	}
4121 	if (tg3_flag(tp, WOL_CAP))
4122 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 						     WOL_DRV_STATE_SHUTDOWN |
4124 						     WOL_DRV_WOL |
4125 						     WOL_SET_MAGIC_PKT);
4126 
4127 	if (device_should_wake) {
4128 		u32 mac_mode;
4129 
4130 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 			if (do_low_power &&
4132 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 				tg3_phy_auxctl_write(tp,
4134 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 				udelay(40);
4139 			}
4140 
4141 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 			else if (tp->phy_flags &
4144 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 				if (tp->link_config.active_speed == SPEED_1000)
4146 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 				else
4148 					mac_mode = MAC_MODE_PORT_MODE_MII;
4149 			} else
4150 				mac_mode = MAC_MODE_PORT_MODE_MII;
4151 
4152 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 					     SPEED_100 : SPEED_10;
4156 				if (tg3_5700_link_polarity(tp, speed))
4157 					mac_mode |= MAC_MODE_LINK_POLARITY;
4158 				else
4159 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 			}
4161 		} else {
4162 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 		}
4164 
4165 		if (!tg3_flag(tp, 5750_PLUS))
4166 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4167 
4168 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172 
4173 		if (tg3_flag(tp, ENABLE_APE))
4174 			mac_mode |= MAC_MODE_APE_TX_EN |
4175 				    MAC_MODE_APE_RX_EN |
4176 				    MAC_MODE_TDE_ENABLE;
4177 
4178 		tw32_f(MAC_MODE, mac_mode);
4179 		udelay(100);
4180 
4181 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 		udelay(10);
4183 	}
4184 
4185 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 		u32 base_val;
4189 
4190 		base_val = tp->pci_clock_ctrl;
4191 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 			     CLOCK_CTRL_TXCLK_DISABLE);
4193 
4194 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 	} else if (tg3_flag(tp, 5780_CLASS) ||
4197 		   tg3_flag(tp, CPMU_PRESENT) ||
4198 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 		/* do nothing */
4200 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 		u32 newbits1, newbits2;
4202 
4203 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 				    CLOCK_CTRL_TXCLK_DISABLE |
4207 				    CLOCK_CTRL_ALTCLK);
4208 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 		} else if (tg3_flag(tp, 5705_PLUS)) {
4210 			newbits1 = CLOCK_CTRL_625_CORE;
4211 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 		} else {
4213 			newbits1 = CLOCK_CTRL_ALTCLK;
4214 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 		}
4216 
4217 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 			    40);
4219 
4220 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 			    40);
4222 
4223 		if (!tg3_flag(tp, 5705_PLUS)) {
4224 			u32 newbits3;
4225 
4226 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 					    CLOCK_CTRL_TXCLK_DISABLE |
4230 					    CLOCK_CTRL_44MHZ_CORE);
4231 			} else {
4232 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 			}
4234 
4235 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 				    tp->pci_clock_ctrl | newbits3, 40);
4237 		}
4238 	}
4239 
4240 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 		tg3_power_down_phy(tp, do_low_power);
4242 
4243 	tg3_frob_aux_power(tp, true);
4244 
4245 	/* Workaround for unstable PLL clock */
4246 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 		u32 val = tr32(0x7d00);
4250 
4251 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 		tw32(0x7d00, val);
4253 		if (!tg3_flag(tp, ENABLE_ASF)) {
4254 			int err;
4255 
4256 			err = tg3_nvram_lock(tp);
4257 			tg3_halt_cpu(tp, RX_CPU_BASE);
4258 			if (!err)
4259 				tg3_nvram_unlock(tp);
4260 		}
4261 	}
4262 
4263 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264 
4265 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266 
4267 	return 0;
4268 }
4269 
4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 	pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275 
4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 	case MII_TG3_AUX_STAT_10HALF:
4280 		*speed = SPEED_10;
4281 		*duplex = DUPLEX_HALF;
4282 		break;
4283 
4284 	case MII_TG3_AUX_STAT_10FULL:
4285 		*speed = SPEED_10;
4286 		*duplex = DUPLEX_FULL;
4287 		break;
4288 
4289 	case MII_TG3_AUX_STAT_100HALF:
4290 		*speed = SPEED_100;
4291 		*duplex = DUPLEX_HALF;
4292 		break;
4293 
4294 	case MII_TG3_AUX_STAT_100FULL:
4295 		*speed = SPEED_100;
4296 		*duplex = DUPLEX_FULL;
4297 		break;
4298 
4299 	case MII_TG3_AUX_STAT_1000HALF:
4300 		*speed = SPEED_1000;
4301 		*duplex = DUPLEX_HALF;
4302 		break;
4303 
4304 	case MII_TG3_AUX_STAT_1000FULL:
4305 		*speed = SPEED_1000;
4306 		*duplex = DUPLEX_FULL;
4307 		break;
4308 
4309 	default:
4310 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 				 SPEED_10;
4313 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314 				  DUPLEX_HALF;
4315 			break;
4316 		}
4317 		*speed = SPEED_UNKNOWN;
4318 		*duplex = DUPLEX_UNKNOWN;
4319 		break;
4320 	}
4321 }
4322 
4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325 	int err = 0;
4326 	u32 val, new_adv;
4327 
4328 	new_adv = ADVERTISE_CSMA;
4329 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 	new_adv |= mii_advertise_flowctrl(flowctrl);
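
	/* e.g. (illustrative) advertise = ADVERTISED_100baseT_Full |
	 * ADVERTISED_1000baseT_Full with flowctrl = FLOW_CTRL_TX |
	 * FLOW_CTRL_RX yields ADVERTISE_CSMA | ADVERTISE_100FULL |
	 * ADVERTISE_PAUSE_CAP here; the 1000baseT bit is handled via
	 * MII_CTRL1000 below.
	 */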
4331 
4332 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 	if (err)
4334 		goto done;
4335 
4336 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338 
4339 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342 
4343 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344 		if (err)
4345 			goto done;
4346 	}
4347 
4348 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 		goto done;
4350 
4351 	tw32(TG3_CPMU_EEE_MODE,
4352 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353 
4354 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355 	if (!err) {
4356 		u32 err2;
4357 
4358 		val = 0;
		/* Advertise 100BASE-TX EEE ability */
4360 		if (advertise & ADVERTISED_100baseT_Full)
4361 			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000BASE-T EEE ability */
4363 		if (advertise & ADVERTISED_1000baseT_Full)
4364 			val |= MDIO_AN_EEE_ADV_1000T;
4365 
4366 		if (!tp->eee.eee_enabled) {
4367 			val = 0;
4368 			tp->eee.advertised = 0;
4369 		} else {
4370 			tp->eee.advertised = advertise &
4371 					     (ADVERTISED_100baseT_Full |
4372 					      ADVERTISED_1000baseT_Full);
4373 		}
4374 
4375 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4376 		if (err)
4377 			val = 0;
4378 
4379 		switch (tg3_asic_rev(tp)) {
4380 		case ASIC_REV_5717:
4381 		case ASIC_REV_57765:
4382 		case ASIC_REV_57766:
4383 		case ASIC_REV_5719:
			/* If we advertised any EEE link modes above... */
4385 			if (val)
4386 				val = MII_TG3_DSP_TAP26_ALNOKO |
4387 				      MII_TG3_DSP_TAP26_RMRXSTO |
4388 				      MII_TG3_DSP_TAP26_OPCSINPT;
4389 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4390 			fallthrough;
4391 		case ASIC_REV_5720:
4392 		case ASIC_REV_5762:
4393 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4394 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4395 						 MII_TG3_DSP_CH34TP2_HIBW01);
4396 		}
4397 
4398 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4399 		if (!err)
4400 			err = err2;
4401 	}
4402 
4403 done:
4404 	return err;
4405 }
4406 
4407 static void tg3_phy_copper_begin(struct tg3 *tp)
4408 {
4409 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4410 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4411 		u32 adv, fc;
4412 
4413 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4414 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4415 			adv = ADVERTISED_10baseT_Half |
4416 			      ADVERTISED_10baseT_Full;
4417 			if (tg3_flag(tp, WOL_SPEED_100MB))
4418 				adv |= ADVERTISED_100baseT_Half |
4419 				       ADVERTISED_100baseT_Full;
4420 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4421 				if (!(tp->phy_flags &
4422 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4423 					adv |= ADVERTISED_1000baseT_Half;
4424 				adv |= ADVERTISED_1000baseT_Full;
4425 			}
4426 
4427 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4428 		} else {
4429 			adv = tp->link_config.advertising;
4430 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4431 				adv &= ~(ADVERTISED_1000baseT_Half |
4432 					 ADVERTISED_1000baseT_Full);
4433 
4434 			fc = tp->link_config.flowctrl;
4435 		}
4436 
4437 		tg3_phy_autoneg_cfg(tp, adv, fc);
4438 
4439 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4440 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4441 			/* Normally during power down we want to autonegotiate
4442 			 * the lowest possible speed for WOL. However, to avoid
4443 			 * link flap, we leave it untouched.
4444 			 */
4445 			return;
4446 		}
4447 
4448 		tg3_writephy(tp, MII_BMCR,
4449 			     BMCR_ANENABLE | BMCR_ANRESTART);
4450 	} else {
4451 		int i;
4452 		u32 bmcr, orig_bmcr;
4453 
4454 		tp->link_config.active_speed = tp->link_config.speed;
4455 		tp->link_config.active_duplex = tp->link_config.duplex;
4456 
4457 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4458 			/* With autoneg disabled, 5715 only links up when the
4459 			 * advertisement register has the configured speed
4460 			 * enabled.
4461 			 */
4462 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4463 		}
4464 
4465 		bmcr = 0;
4466 		switch (tp->link_config.speed) {
4467 		default:
4468 		case SPEED_10:
4469 			break;
4470 
4471 		case SPEED_100:
4472 			bmcr |= BMCR_SPEED100;
4473 			break;
4474 
4475 		case SPEED_1000:
4476 			bmcr |= BMCR_SPEED1000;
4477 			break;
4478 		}
4479 
4480 		if (tp->link_config.duplex == DUPLEX_FULL)
4481 			bmcr |= BMCR_FULLDPLX;
4482 
4483 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4484 		    (bmcr != orig_bmcr)) {
4485 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4486 			for (i = 0; i < 1500; i++) {
4487 				u32 tmp;
4488 
4489 				udelay(10);
4490 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4491 				    tg3_readphy(tp, MII_BMSR, &tmp))
4492 					continue;
4493 				if (!(tmp & BMSR_LSTATUS)) {
4494 					udelay(40);
4495 					break;
4496 				}
4497 			}
4498 			tg3_writephy(tp, MII_BMCR, bmcr);
4499 			udelay(40);
4500 		}
4501 	}
4502 }
4503 
4504 static int tg3_phy_pull_config(struct tg3 *tp)
4505 {
4506 	int err;
4507 	u32 val;
4508 
4509 	err = tg3_readphy(tp, MII_BMCR, &val);
4510 	if (err)
4511 		goto done;
4512 
4513 	if (!(val & BMCR_ANENABLE)) {
4514 		tp->link_config.autoneg = AUTONEG_DISABLE;
4515 		tp->link_config.advertising = 0;
4516 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4517 
4518 		err = -EIO;
4519 
4520 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4521 		case 0:
4522 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523 				goto done;
4524 
4525 			tp->link_config.speed = SPEED_10;
4526 			break;
4527 		case BMCR_SPEED100:
4528 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4529 				goto done;
4530 
4531 			tp->link_config.speed = SPEED_100;
4532 			break;
4533 		case BMCR_SPEED1000:
4534 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4535 				tp->link_config.speed = SPEED_1000;
4536 				break;
4537 			}
4538 			fallthrough;
4539 		default:
4540 			goto done;
4541 		}
4542 
4543 		if (val & BMCR_FULLDPLX)
4544 			tp->link_config.duplex = DUPLEX_FULL;
4545 		else
4546 			tp->link_config.duplex = DUPLEX_HALF;
4547 
4548 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4549 
4550 		err = 0;
4551 		goto done;
4552 	}
4553 
4554 	tp->link_config.autoneg = AUTONEG_ENABLE;
4555 	tp->link_config.advertising = ADVERTISED_Autoneg;
4556 	tg3_flag_set(tp, PAUSE_AUTONEG);
4557 
4558 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4559 		u32 adv;
4560 
4561 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4562 		if (err)
4563 			goto done;
4564 
4565 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4566 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4567 
4568 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4569 	} else {
4570 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4571 	}
4572 
4573 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4574 		u32 adv;
4575 
4576 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4577 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4578 			if (err)
4579 				goto done;
4580 
4581 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4582 		} else {
4583 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4584 			if (err)
4585 				goto done;
4586 
4587 			adv = tg3_decode_flowctrl_1000X(val);
4588 			tp->link_config.flowctrl = adv;
4589 
4590 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4591 			adv = mii_adv_to_ethtool_adv_x(val);
4592 		}
4593 
4594 		tp->link_config.advertising |= adv;
4595 	}
4596 
4597 done:
4598 	return err;
4599 }
4600 
4601 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4602 {
4603 	int err;
4604 
	/* Turn off tap power management and set the extended packet
	 * length bit.
	 */
4607 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4608 
4609 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4610 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4611 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4612 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4613 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4614 
4615 	udelay(40);
4616 
4617 	return err;
4618 }
4619 
4620 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4621 {
4622 	struct ethtool_eee eee;
4623 
4624 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4625 		return true;
4626 
4627 	tg3_eee_pull_config(tp, &eee);
4628 
4629 	if (tp->eee.eee_enabled) {
4630 		if (tp->eee.advertised != eee.advertised ||
4631 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4632 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4633 			return false;
4634 	} else {
4635 		/* EEE is disabled but we're advertising */
4636 		if (eee.advertised)
4637 			return false;
4638 	}
4639 
4640 	return true;
4641 }
4642 
4643 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4644 {
4645 	u32 advmsk, tgtadv, advertising;
4646 
4647 	advertising = tp->link_config.advertising;
4648 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4649 
4650 	advmsk = ADVERTISE_ALL;
4651 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4652 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4653 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4654 	}
4655 
4656 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4657 		return false;
4658 
4659 	if ((*lcladv & advmsk) != tgtadv)
4660 		return false;
4661 
4662 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4663 		u32 tg3_ctrl;
4664 
4665 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4666 
4667 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4668 			return false;
4669 
4670 		if (tgtadv &&
4671 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4672 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4673 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4674 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4675 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4676 		} else {
4677 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4678 		}
4679 
4680 		if (tg3_ctrl != tgtadv)
4681 			return false;
4682 	}
4683 
4684 	return true;
4685 }
4686 
4687 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4688 {
4689 	u32 lpeth = 0;
4690 
4691 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4692 		u32 val;
4693 
4694 		if (tg3_readphy(tp, MII_STAT1000, &val))
4695 			return false;
4696 
4697 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4698 	}
4699 
4700 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4701 		return false;
4702 
4703 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4704 	tp->link_config.rmt_adv = lpeth;
4705 
4706 	return true;
4707 }
4708 
4709 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4710 {
4711 	if (curr_link_up != tp->link_up) {
4712 		if (curr_link_up) {
4713 			netif_carrier_on(tp->dev);
4714 		} else {
4715 			netif_carrier_off(tp->dev);
4716 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4717 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4718 		}
4719 
4720 		tg3_link_report(tp);
4721 		return true;
4722 	}
4723 
4724 	return false;
4725 }
4726 
4727 static void tg3_clear_mac_status(struct tg3 *tp)
4728 {
4729 	tw32(MAC_EVENT, 0);
4730 
4731 	tw32_f(MAC_STATUS,
4732 	       MAC_STATUS_SYNC_CHANGED |
4733 	       MAC_STATUS_CFG_CHANGED |
4734 	       MAC_STATUS_MI_COMPLETION |
4735 	       MAC_STATUS_LNKSTATE_CHANGED);
4736 	udelay(40);
4737 }
4738 
4739 static void tg3_setup_eee(struct tg3 *tp)
4740 {
4741 	u32 val;
4742 
4743 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4744 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4745 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4746 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4747 
4748 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4749 
4750 	tw32_f(TG3_CPMU_EEE_CTRL,
4751 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4752 
4753 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4754 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4755 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4756 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4757 
4758 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4759 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4760 
4761 	if (tg3_flag(tp, ENABLE_APE))
4762 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4763 
4764 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4765 
4766 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4767 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4768 	       (tp->eee.tx_lpi_timer & 0xffff));
4769 
4770 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4771 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4772 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4773 }
4774 
4775 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4776 {
4777 	bool current_link_up;
4778 	u32 bmsr, val;
4779 	u32 lcl_adv, rmt_adv;
4780 	u32 current_speed;
4781 	u8 current_duplex;
4782 	int i, err;
4783 
4784 	tg3_clear_mac_status(tp);
4785 
4786 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4787 		tw32_f(MAC_MI_MODE,
4788 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4789 		udelay(80);
4790 	}
4791 
4792 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4793 
4794 	/* Some third-party PHYs need to be reset on link going
4795 	 * down.
4796 	 */
4797 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4798 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4799 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4800 	    tp->link_up) {
4801 		tg3_readphy(tp, MII_BMSR, &bmsr);
4802 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4803 		    !(bmsr & BMSR_LSTATUS))
4804 			force_reset = true;
4805 	}
4806 	if (force_reset)
4807 		tg3_phy_reset(tp);
4808 
4809 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4810 		tg3_readphy(tp, MII_BMSR, &bmsr);
4811 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4812 		    !tg3_flag(tp, INIT_COMPLETE))
4813 			bmsr = 0;
4814 
4815 		if (!(bmsr & BMSR_LSTATUS)) {
4816 			err = tg3_init_5401phy_dsp(tp);
4817 			if (err)
4818 				return err;
4819 
4820 			tg3_readphy(tp, MII_BMSR, &bmsr);
4821 			for (i = 0; i < 1000; i++) {
4822 				udelay(10);
4823 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4824 				    (bmsr & BMSR_LSTATUS)) {
4825 					udelay(40);
4826 					break;
4827 				}
4828 			}
4829 
4830 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4831 			    TG3_PHY_REV_BCM5401_B0 &&
4832 			    !(bmsr & BMSR_LSTATUS) &&
4833 			    tp->link_config.active_speed == SPEED_1000) {
4834 				err = tg3_phy_reset(tp);
4835 				if (!err)
4836 					err = tg3_init_5401phy_dsp(tp);
4837 				if (err)
4838 					return err;
4839 			}
4840 		}
4841 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4842 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4843 		/* 5701 {A0,B0} CRC bug workaround */
4844 		tg3_writephy(tp, 0x15, 0x0a75);
4845 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4846 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4847 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4848 	}
4849 
4850 	/* Clear pending interrupts... */
4851 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4852 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4853 
4854 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4855 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4856 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4857 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4858 
4859 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4860 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4861 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4862 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4863 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4864 		else
4865 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4866 	}
4867 
4868 	current_link_up = false;
4869 	current_speed = SPEED_UNKNOWN;
4870 	current_duplex = DUPLEX_UNKNOWN;
4871 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4872 	tp->link_config.rmt_adv = 0;
4873 
4874 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4875 		err = tg3_phy_auxctl_read(tp,
4876 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4877 					  &val);
4878 		if (!err && !(val & (1 << 10))) {
4879 			tg3_phy_auxctl_write(tp,
4880 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4881 					     val | (1 << 10));
4882 			goto relink;
4883 		}
4884 	}
4885 
4886 	bmsr = 0;
4887 	for (i = 0; i < 100; i++) {
4888 		tg3_readphy(tp, MII_BMSR, &bmsr);
4889 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4890 		    (bmsr & BMSR_LSTATUS))
4891 			break;
4892 		udelay(40);
4893 	}
4894 
4895 	if (bmsr & BMSR_LSTATUS) {
4896 		u32 aux_stat, bmcr;
4897 
4898 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4899 		for (i = 0; i < 2000; i++) {
4900 			udelay(10);
4901 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4902 			    aux_stat)
4903 				break;
4904 		}
4905 
4906 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4907 					     &current_speed,
4908 					     &current_duplex);
4909 
4910 		bmcr = 0;
4911 		for (i = 0; i < 200; i++) {
4912 			tg3_readphy(tp, MII_BMCR, &bmcr);
4913 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4914 				continue;
4915 			if (bmcr && bmcr != 0x7fff)
4916 				break;
4917 			udelay(10);
4918 		}
4919 
4920 		lcl_adv = 0;
4921 		rmt_adv = 0;
4922 
4923 		tp->link_config.active_speed = current_speed;
4924 		tp->link_config.active_duplex = current_duplex;
4925 
4926 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4927 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4928 
4929 			if ((bmcr & BMCR_ANENABLE) &&
4930 			    eee_config_ok &&
4931 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4932 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4933 				current_link_up = true;
4934 
			/* EEE setting changes take effect only after a PHY
			 * reset.  If we have skipped a reset due to Link Flap
			 * Avoidance being enabled, do it now.
			 */
4939 			if (!eee_config_ok &&
4940 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4941 			    !force_reset) {
4942 				tg3_setup_eee(tp);
4943 				tg3_phy_reset(tp);
4944 			}
4945 		} else {
4946 			if (!(bmcr & BMCR_ANENABLE) &&
4947 			    tp->link_config.speed == current_speed &&
4948 			    tp->link_config.duplex == current_duplex) {
4949 				current_link_up = true;
4950 			}
4951 		}
4952 
4953 		if (current_link_up &&
4954 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4955 			u32 reg, bit;
4956 
4957 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4958 				reg = MII_TG3_FET_GEN_STAT;
4959 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4960 			} else {
4961 				reg = MII_TG3_EXT_STAT;
4962 				bit = MII_TG3_EXT_STAT_MDIX;
4963 			}
4964 
4965 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4966 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4967 
4968 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4969 		}
4970 	}
4971 
4972 relink:
4973 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4974 		tg3_phy_copper_begin(tp);
4975 
4976 		if (tg3_flag(tp, ROBOSWITCH)) {
4977 			current_link_up = true;
			/* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4979 			current_speed = SPEED_1000;
4980 			current_duplex = DUPLEX_FULL;
4981 			tp->link_config.active_speed = current_speed;
4982 			tp->link_config.active_duplex = current_duplex;
4983 		}
4984 
4985 		tg3_readphy(tp, MII_BMSR, &bmsr);
4986 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4987 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4988 			current_link_up = true;
4989 	}
4990 
4991 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4992 	if (current_link_up) {
4993 		if (tp->link_config.active_speed == SPEED_100 ||
4994 		    tp->link_config.active_speed == SPEED_10)
4995 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996 		else
4997 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4998 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4999 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5000 	else
5001 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5002 
	/* In order for the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
5006 	if (tg3_flag(tp, RGMII_MODE)) {
5007 		u32 led_ctrl = tr32(MAC_LED_CTRL);
5008 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5009 
5010 		if (tp->link_config.active_speed == SPEED_10)
5011 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5012 		else if (tp->link_config.active_speed == SPEED_100)
5013 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014 				     LED_CTRL_100MBPS_ON);
5015 		else if (tp->link_config.active_speed == SPEED_1000)
5016 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017 				     LED_CTRL_1000MBPS_ON);
5018 
5019 		tw32(MAC_LED_CTRL, led_ctrl);
5020 		udelay(40);
5021 	}
5022 
5023 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5024 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5025 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5026 
5027 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5028 		if (current_link_up &&
5029 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5030 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5031 		else
5032 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5033 	}
5034 
5035 	/* ??? Without this setting Netgear GA302T PHY does not
5036 	 * ??? send/receive packets...
5037 	 */
5038 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5039 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5040 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5041 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5042 		udelay(80);
5043 	}
5044 
5045 	tw32_f(MAC_MODE, tp->mac_mode);
5046 	udelay(40);
5047 
5048 	tg3_phy_eee_adjust(tp, current_link_up);
5049 
5050 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5051 		/* Polled via timer. */
5052 		tw32_f(MAC_EVENT, 0);
5053 	} else {
5054 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5055 	}
5056 	udelay(40);
5057 
5058 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5059 	    current_link_up &&
5060 	    tp->link_config.active_speed == SPEED_1000 &&
5061 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5062 		udelay(120);
5063 		tw32_f(MAC_STATUS,
5064 		     (MAC_STATUS_SYNC_CHANGED |
5065 		      MAC_STATUS_CFG_CHANGED));
5066 		udelay(40);
5067 		tg3_write_mem(tp,
5068 			      NIC_SRAM_FIRMWARE_MBOX,
5069 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5070 	}
5071 
5072 	/* Prevent send BD corruption. */
5073 	if (tg3_flag(tp, CLKREQ_BUG)) {
5074 		if (tp->link_config.active_speed == SPEED_100 ||
5075 		    tp->link_config.active_speed == SPEED_10)
5076 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5077 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5078 		else
5079 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5080 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5081 	}
5082 
5083 	tg3_test_and_report_link_chg(tp, current_link_up);
5084 
5085 	return 0;
5086 }
5087 
5088 struct tg3_fiber_aneginfo {
5089 	int state;
5090 #define ANEG_STATE_UNKNOWN		0
5091 #define ANEG_STATE_AN_ENABLE		1
5092 #define ANEG_STATE_RESTART_INIT		2
5093 #define ANEG_STATE_RESTART		3
5094 #define ANEG_STATE_DISABLE_LINK_OK	4
5095 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5096 #define ANEG_STATE_ABILITY_DETECT	6
5097 #define ANEG_STATE_ACK_DETECT_INIT	7
5098 #define ANEG_STATE_ACK_DETECT		8
5099 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5100 #define ANEG_STATE_COMPLETE_ACK		10
5101 #define ANEG_STATE_IDLE_DETECT_INIT	11
5102 #define ANEG_STATE_IDLE_DETECT		12
5103 #define ANEG_STATE_LINK_OK		13
5104 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5105 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5106 
5107 	u32 flags;
5108 #define MR_AN_ENABLE		0x00000001
5109 #define MR_RESTART_AN		0x00000002
5110 #define MR_AN_COMPLETE		0x00000004
5111 #define MR_PAGE_RX		0x00000008
5112 #define MR_NP_LOADED		0x00000010
5113 #define MR_TOGGLE_TX		0x00000020
5114 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5115 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5116 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5117 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5118 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5119 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5120 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5121 #define MR_TOGGLE_RX		0x00002000
5122 #define MR_NP_RX		0x00004000
5123 
5124 #define MR_LINK_OK		0x80000000
5125 
5126 	unsigned long link_time, cur_time;
5127 
5128 	u32 ability_match_cfg;
5129 	int ability_match_count;
5130 
5131 	char ability_match, idle_match, ack_match;
5132 
5133 	u32 txconfig, rxconfig;
5134 #define ANEG_CFG_NP		0x00000080
5135 #define ANEG_CFG_ACK		0x00000040
5136 #define ANEG_CFG_RF2		0x00000020
5137 #define ANEG_CFG_RF1		0x00000010
5138 #define ANEG_CFG_PS2		0x00000001
5139 #define ANEG_CFG_PS1		0x00008000
5140 #define ANEG_CFG_HD		0x00004000
5141 #define ANEG_CFG_FD		0x00002000
5142 #define ANEG_CFG_INVAL		0x00001f06
5143 
5144 };
5145 #define ANEG_OK		0
5146 #define ANEG_DONE	1
5147 #define ANEG_TIMER_ENAB	2
5148 #define ANEG_FAILED	-1
5149 
5150 #define ANEG_STATE_SETTLE_TIME	10000
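
/* The state machine below is clocked by fiber_autoneg(), which calls it
 * in a tight loop with a 1 us delay per tick, so ANEG_STATE_SETTLE_TIME
 * of 10000 ticks corresponds to roughly 10 ms (plus loop overhead).
 */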
5151 
5152 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5153 				   struct tg3_fiber_aneginfo *ap)
5154 {
5155 	u16 flowctrl;
5156 	unsigned long delta;
5157 	u32 rx_cfg_reg;
5158 	int ret;
5159 
5160 	if (ap->state == ANEG_STATE_UNKNOWN) {
5161 		ap->rxconfig = 0;
5162 		ap->link_time = 0;
5163 		ap->cur_time = 0;
5164 		ap->ability_match_cfg = 0;
5165 		ap->ability_match_count = 0;
5166 		ap->ability_match = 0;
5167 		ap->idle_match = 0;
5168 		ap->ack_match = 0;
5169 	}
5170 	ap->cur_time++;
5171 
5172 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5173 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5174 
5175 		if (rx_cfg_reg != ap->ability_match_cfg) {
5176 			ap->ability_match_cfg = rx_cfg_reg;
5177 			ap->ability_match = 0;
5178 			ap->ability_match_count = 0;
5179 		} else {
5180 			if (++ap->ability_match_count > 1) {
5181 				ap->ability_match = 1;
5182 				ap->ability_match_cfg = rx_cfg_reg;
5183 			}
5184 		}
5185 		if (rx_cfg_reg & ANEG_CFG_ACK)
5186 			ap->ack_match = 1;
5187 		else
5188 			ap->ack_match = 0;
5189 
5190 		ap->idle_match = 0;
5191 	} else {
5192 		ap->idle_match = 1;
5193 		ap->ability_match_cfg = 0;
5194 		ap->ability_match_count = 0;
5195 		ap->ability_match = 0;
5196 		ap->ack_match = 0;
5197 
5198 		rx_cfg_reg = 0;
5199 	}
5200 
5201 	ap->rxconfig = rx_cfg_reg;
5202 	ret = ANEG_OK;
5203 
5204 	switch (ap->state) {
5205 	case ANEG_STATE_UNKNOWN:
5206 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5207 			ap->state = ANEG_STATE_AN_ENABLE;
5208 
5209 		fallthrough;
5210 	case ANEG_STATE_AN_ENABLE:
5211 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5212 		if (ap->flags & MR_AN_ENABLE) {
5213 			ap->link_time = 0;
5214 			ap->cur_time = 0;
5215 			ap->ability_match_cfg = 0;
5216 			ap->ability_match_count = 0;
5217 			ap->ability_match = 0;
5218 			ap->idle_match = 0;
5219 			ap->ack_match = 0;
5220 
5221 			ap->state = ANEG_STATE_RESTART_INIT;
5222 		} else {
5223 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5224 		}
5225 		break;
5226 
5227 	case ANEG_STATE_RESTART_INIT:
5228 		ap->link_time = ap->cur_time;
5229 		ap->flags &= ~(MR_NP_LOADED);
5230 		ap->txconfig = 0;
5231 		tw32(MAC_TX_AUTO_NEG, 0);
5232 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5233 		tw32_f(MAC_MODE, tp->mac_mode);
5234 		udelay(40);
5235 
5236 		ret = ANEG_TIMER_ENAB;
5237 		ap->state = ANEG_STATE_RESTART;
5238 
5239 		fallthrough;
5240 	case ANEG_STATE_RESTART:
5241 		delta = ap->cur_time - ap->link_time;
5242 		if (delta > ANEG_STATE_SETTLE_TIME)
5243 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5244 		else
5245 			ret = ANEG_TIMER_ENAB;
5246 		break;
5247 
5248 	case ANEG_STATE_DISABLE_LINK_OK:
5249 		ret = ANEG_DONE;
5250 		break;
5251 
5252 	case ANEG_STATE_ABILITY_DETECT_INIT:
5253 		ap->flags &= ~(MR_TOGGLE_TX);
5254 		ap->txconfig = ANEG_CFG_FD;
5255 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5256 		if (flowctrl & ADVERTISE_1000XPAUSE)
5257 			ap->txconfig |= ANEG_CFG_PS1;
5258 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5259 			ap->txconfig |= ANEG_CFG_PS2;
5260 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5261 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5262 		tw32_f(MAC_MODE, tp->mac_mode);
5263 		udelay(40);
5264 
5265 		ap->state = ANEG_STATE_ABILITY_DETECT;
5266 		break;
5267 
5268 	case ANEG_STATE_ABILITY_DETECT:
5269 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5270 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5271 		break;
5272 
5273 	case ANEG_STATE_ACK_DETECT_INIT:
5274 		ap->txconfig |= ANEG_CFG_ACK;
5275 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5276 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5277 		tw32_f(MAC_MODE, tp->mac_mode);
5278 		udelay(40);
5279 
5280 		ap->state = ANEG_STATE_ACK_DETECT;
5281 
5282 		fallthrough;
5283 	case ANEG_STATE_ACK_DETECT:
5284 		if (ap->ack_match != 0) {
5285 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5286 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5287 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5288 			} else {
5289 				ap->state = ANEG_STATE_AN_ENABLE;
5290 			}
5291 		} else if (ap->ability_match != 0 &&
5292 			   ap->rxconfig == 0) {
5293 			ap->state = ANEG_STATE_AN_ENABLE;
5294 		}
5295 		break;
5296 
5297 	case ANEG_STATE_COMPLETE_ACK_INIT:
5298 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5299 			ret = ANEG_FAILED;
5300 			break;
5301 		}
5302 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5303 			       MR_LP_ADV_HALF_DUPLEX |
5304 			       MR_LP_ADV_SYM_PAUSE |
5305 			       MR_LP_ADV_ASYM_PAUSE |
5306 			       MR_LP_ADV_REMOTE_FAULT1 |
5307 			       MR_LP_ADV_REMOTE_FAULT2 |
5308 			       MR_LP_ADV_NEXT_PAGE |
5309 			       MR_TOGGLE_RX |
5310 			       MR_NP_RX);
5311 		if (ap->rxconfig & ANEG_CFG_FD)
5312 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5313 		if (ap->rxconfig & ANEG_CFG_HD)
5314 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5315 		if (ap->rxconfig & ANEG_CFG_PS1)
5316 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5317 		if (ap->rxconfig & ANEG_CFG_PS2)
5318 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5319 		if (ap->rxconfig & ANEG_CFG_RF1)
5320 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5321 		if (ap->rxconfig & ANEG_CFG_RF2)
5322 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5323 		if (ap->rxconfig & ANEG_CFG_NP)
5324 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5325 
5326 		ap->link_time = ap->cur_time;
5327 
5328 		ap->flags ^= (MR_TOGGLE_TX);
5329 		if (ap->rxconfig & 0x0008)
5330 			ap->flags |= MR_TOGGLE_RX;
5331 		if (ap->rxconfig & ANEG_CFG_NP)
5332 			ap->flags |= MR_NP_RX;
5333 		ap->flags |= MR_PAGE_RX;
5334 
5335 		ap->state = ANEG_STATE_COMPLETE_ACK;
5336 		ret = ANEG_TIMER_ENAB;
5337 		break;
5338 
5339 	case ANEG_STATE_COMPLETE_ACK:
5340 		if (ap->ability_match != 0 &&
5341 		    ap->rxconfig == 0) {
5342 			ap->state = ANEG_STATE_AN_ENABLE;
5343 			break;
5344 		}
5345 		delta = ap->cur_time - ap->link_time;
5346 		if (delta > ANEG_STATE_SETTLE_TIME) {
5347 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5348 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5349 			} else {
5350 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5351 				    !(ap->flags & MR_NP_RX)) {
5352 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5353 				} else {
5354 					ret = ANEG_FAILED;
5355 				}
5356 			}
5357 		}
5358 		break;
5359 
5360 	case ANEG_STATE_IDLE_DETECT_INIT:
5361 		ap->link_time = ap->cur_time;
5362 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5363 		tw32_f(MAC_MODE, tp->mac_mode);
5364 		udelay(40);
5365 
5366 		ap->state = ANEG_STATE_IDLE_DETECT;
5367 		ret = ANEG_TIMER_ENAB;
5368 		break;
5369 
5370 	case ANEG_STATE_IDLE_DETECT:
5371 		if (ap->ability_match != 0 &&
5372 		    ap->rxconfig == 0) {
5373 			ap->state = ANEG_STATE_AN_ENABLE;
5374 			break;
5375 		}
5376 		delta = ap->cur_time - ap->link_time;
5377 		if (delta > ANEG_STATE_SETTLE_TIME) {
5378 			/* XXX another gem from the Broadcom driver :( */
5379 			ap->state = ANEG_STATE_LINK_OK;
5380 		}
5381 		break;
5382 
5383 	case ANEG_STATE_LINK_OK:
5384 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5385 		ret = ANEG_DONE;
5386 		break;
5387 
5388 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5389 		/* ??? unimplemented */
5390 		break;
5391 
5392 	case ANEG_STATE_NEXT_PAGE_WAIT:
5393 		/* ??? unimplemented */
5394 		break;
5395 
5396 	default:
5397 		ret = ANEG_FAILED;
5398 		break;
5399 	}
5400 
5401 	return ret;
5402 }
5403 
5404 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5405 {
5406 	int res = 0;
5407 	struct tg3_fiber_aneginfo aninfo;
5408 	int status = ANEG_FAILED;
5409 	unsigned int tick;
5410 	u32 tmp;
5411 
5412 	tw32_f(MAC_TX_AUTO_NEG, 0);
5413 
5414 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5415 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5416 	udelay(40);
5417 
5418 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5419 	udelay(40);
5420 
5421 	memset(&aninfo, 0, sizeof(aninfo));
5422 	aninfo.flags |= MR_AN_ENABLE;
5423 	aninfo.state = ANEG_STATE_UNKNOWN;
5424 	aninfo.cur_time = 0;
5425 	tick = 0;
5426 	while (++tick < 195000) {
5427 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5428 		if (status == ANEG_DONE || status == ANEG_FAILED)
5429 			break;
5430 
5431 		udelay(1);
5432 	}
5433 
5434 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5435 	tw32_f(MAC_MODE, tp->mac_mode);
5436 	udelay(40);
5437 
5438 	*txflags = aninfo.txconfig;
5439 	*rxflags = aninfo.flags;
5440 
5441 	if (status == ANEG_DONE &&
5442 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5443 			     MR_LP_ADV_FULL_DUPLEX)))
5444 		res = 1;
5445 
5446 	return res;
5447 }
5448 
5449 static void tg3_init_bcm8002(struct tg3 *tp)
5450 {
5451 	u32 mac_status = tr32(MAC_STATUS);
5452 	int i;
5453 
	/* Reset when initializing for the first time or when we have a link. */
5455 	if (tg3_flag(tp, INIT_COMPLETE) &&
5456 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5457 		return;
5458 
5459 	/* Set PLL lock range. */
5460 	tg3_writephy(tp, 0x16, 0x8007);
5461 
5462 	/* SW reset */
5463 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5464 
5465 	/* Wait for reset to complete. */
5466 	/* XXX schedule_timeout() ... */
5467 	for (i = 0; i < 500; i++)
5468 		udelay(10);
5469 
5470 	/* Config mode; select PMA/Ch 1 regs. */
5471 	tg3_writephy(tp, 0x10, 0x8411);
5472 
5473 	/* Enable auto-lock and comdet, select txclk for tx. */
5474 	tg3_writephy(tp, 0x11, 0x0a10);
5475 
5476 	tg3_writephy(tp, 0x18, 0x00a0);
5477 	tg3_writephy(tp, 0x16, 0x41ff);
5478 
5479 	/* Assert and deassert POR. */
5480 	tg3_writephy(tp, 0x13, 0x0400);
5481 	udelay(40);
5482 	tg3_writephy(tp, 0x13, 0x0000);
5483 
5484 	tg3_writephy(tp, 0x11, 0x0a50);
5485 	udelay(40);
5486 	tg3_writephy(tp, 0x11, 0x0a10);
5487 
5488 	/* Wait for signal to stabilize */
5489 	/* XXX schedule_timeout() ... */
5490 	for (i = 0; i < 15000; i++)
5491 		udelay(10);
5492 
5493 	/* Deselect the channel register so we can read the PHYID
5494 	 * later.
5495 	 */
5496 	tg3_writephy(tp, 0x10, 0x8011);
5497 }
5498 
5499 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5500 {
5501 	u16 flowctrl;
5502 	bool current_link_up;
5503 	u32 sg_dig_ctrl, sg_dig_status;
5504 	u32 serdes_cfg, expected_sg_dig_ctrl;
5505 	int workaround, port_a;
5506 
5507 	serdes_cfg = 0;
5508 	workaround = 0;
5509 	port_a = 1;
5510 	current_link_up = false;
5511 
5512 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5513 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5514 		workaround = 1;
5515 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5516 			port_a = 0;
5517 
5518 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5519 		/* preserve bits 20-23 for voltage regulator */
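		/* i.e. 0x00f06fff = 0x0fff (bits 0-11) | 0x6000 (bits 13-14)
		 *                 | 0x00f00000 (bits 20-23)
		 */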
5520 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5521 	}
5522 
5523 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5524 
5525 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5526 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5527 			if (workaround) {
5528 				u32 val = serdes_cfg;
5529 
5530 				if (port_a)
5531 					val |= 0xc010000;
5532 				else
5533 					val |= 0x4010000;
5534 				tw32_f(MAC_SERDES_CFG, val);
5535 			}
5536 
5537 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5538 		}
5539 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5540 			tg3_setup_flow_control(tp, 0, 0);
5541 			current_link_up = true;
5542 		}
5543 		goto out;
5544 	}
5545 
	/* Want auto-negotiation. */
5547 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5548 
5549 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5550 	if (flowctrl & ADVERTISE_1000XPAUSE)
5551 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5552 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5553 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5554 
5555 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5556 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5557 		    tp->serdes_counter &&
5558 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5559 				    MAC_STATUS_RCVD_CFG)) ==
5560 		     MAC_STATUS_PCS_SYNCED)) {
5561 			tp->serdes_counter--;
5562 			current_link_up = true;
5563 			goto out;
5564 		}
5565 restart_autoneg:
5566 		if (workaround)
5567 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5568 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5569 		udelay(5);
5570 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5571 
5572 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5574 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5575 				 MAC_STATUS_SIGNAL_DET)) {
5576 		sg_dig_status = tr32(SG_DIG_STATUS);
5577 		mac_status = tr32(MAC_STATUS);
5578 
5579 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5580 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5581 			u32 local_adv = 0, remote_adv = 0;
5582 
5583 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5584 				local_adv |= ADVERTISE_1000XPAUSE;
5585 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5586 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5587 
5588 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5589 				remote_adv |= LPA_1000XPAUSE;
5590 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5591 				remote_adv |= LPA_1000XPAUSE_ASYM;
5592 
5593 			tp->link_config.rmt_adv =
5594 					   mii_adv_to_ethtool_adv_x(remote_adv);
5595 
5596 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5597 			current_link_up = true;
5598 			tp->serdes_counter = 0;
5599 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5600 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5601 			if (tp->serdes_counter)
5602 				tp->serdes_counter--;
5603 			else {
5604 				if (workaround) {
5605 					u32 val = serdes_cfg;
5606 
5607 					if (port_a)
5608 						val |= 0xc010000;
5609 					else
5610 						val |= 0x4010000;
5611 
5612 					tw32_f(MAC_SERDES_CFG, val);
5613 				}
5614 
5615 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5616 				udelay(40);
5617 
				/* Link parallel detection: the link is up
				 * only if we have PCS_SYNC and are not
				 * receiving config code words.
				 */
5621 				mac_status = tr32(MAC_STATUS);
5622 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5623 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5624 					tg3_setup_flow_control(tp, 0, 0);
5625 					current_link_up = true;
5626 					tp->phy_flags |=
5627 						TG3_PHYFLG_PARALLEL_DETECT;
5628 					tp->serdes_counter =
5629 						SERDES_PARALLEL_DET_TIMEOUT;
5630 				} else
5631 					goto restart_autoneg;
5632 			}
5633 		}
5634 	} else {
5635 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5636 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5637 	}
5638 
5639 out:
5640 	return current_link_up;
5641 }
5642 
5643 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5644 {
5645 	bool current_link_up = false;
5646 
5647 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5648 		goto out;
5649 
5650 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5651 		u32 txflags, rxflags;
5652 		int i;
5653 
5654 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5655 			u32 local_adv = 0, remote_adv = 0;
5656 
5657 			if (txflags & ANEG_CFG_PS1)
5658 				local_adv |= ADVERTISE_1000XPAUSE;
5659 			if (txflags & ANEG_CFG_PS2)
5660 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5661 
5662 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5663 				remote_adv |= LPA_1000XPAUSE;
5664 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5665 				remote_adv |= LPA_1000XPAUSE_ASYM;
5666 
5667 			tp->link_config.rmt_adv =
5668 					   mii_adv_to_ethtool_adv_x(remote_adv);
5669 
5670 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5671 
5672 			current_link_up = true;
5673 		}
5674 		for (i = 0; i < 30; i++) {
5675 			udelay(20);
5676 			tw32_f(MAC_STATUS,
5677 			       (MAC_STATUS_SYNC_CHANGED |
5678 				MAC_STATUS_CFG_CHANGED));
5679 			udelay(40);
5680 			if ((tr32(MAC_STATUS) &
5681 			     (MAC_STATUS_SYNC_CHANGED |
5682 			      MAC_STATUS_CFG_CHANGED)) == 0)
5683 				break;
5684 		}
5685 
5686 		mac_status = tr32(MAC_STATUS);
5687 		if (!current_link_up &&
5688 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5689 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5690 			current_link_up = true;
5691 	} else {
5692 		tg3_setup_flow_control(tp, 0, 0);
5693 
5694 		/* Forcing 1000FD link up. */
5695 		current_link_up = true;
5696 
5697 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5698 		udelay(40);
5699 
5700 		tw32_f(MAC_MODE, tp->mac_mode);
5701 		udelay(40);
5702 	}
5703 
5704 out:
5705 	return current_link_up;
5706 }
5707 
5708 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5709 {
5710 	u32 orig_pause_cfg;
5711 	u32 orig_active_speed;
5712 	u8 orig_active_duplex;
5713 	u32 mac_status;
5714 	bool current_link_up;
5715 	int i;
5716 
5717 	orig_pause_cfg = tp->link_config.active_flowctrl;
5718 	orig_active_speed = tp->link_config.active_speed;
5719 	orig_active_duplex = tp->link_config.active_duplex;
5720 
5721 	if (!tg3_flag(tp, HW_AUTONEG) &&
5722 	    tp->link_up &&
5723 	    tg3_flag(tp, INIT_COMPLETE)) {
5724 		mac_status = tr32(MAC_STATUS);
5725 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5726 			       MAC_STATUS_SIGNAL_DET |
5727 			       MAC_STATUS_CFG_CHANGED |
5728 			       MAC_STATUS_RCVD_CFG);
5729 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5730 				   MAC_STATUS_SIGNAL_DET)) {
5731 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5732 					    MAC_STATUS_CFG_CHANGED));
5733 			return 0;
5734 		}
5735 	}
5736 
5737 	tw32_f(MAC_TX_AUTO_NEG, 0);
5738 
5739 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5740 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5741 	tw32_f(MAC_MODE, tp->mac_mode);
5742 	udelay(40);
5743 
5744 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5745 		tg3_init_bcm8002(tp);
5746 
5747 	/* Enable link change events even while polling the serdes.  */
5748 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5749 	udelay(40);
5750 
5751 	tp->link_config.rmt_adv = 0;
5752 	mac_status = tr32(MAC_STATUS);
5753 
5754 	if (tg3_flag(tp, HW_AUTONEG))
5755 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5756 	else
5757 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5758 
5759 	tp->napi[0].hw_status->status =
5760 		(SD_STATUS_UPDATED |
5761 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5762 
5763 	for (i = 0; i < 100; i++) {
5764 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5765 				    MAC_STATUS_CFG_CHANGED));
5766 		udelay(5);
5767 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5768 					 MAC_STATUS_CFG_CHANGED |
5769 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5770 			break;
5771 	}
5772 
5773 	mac_status = tr32(MAC_STATUS);
5774 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5775 		current_link_up = false;
5776 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5777 		    tp->serdes_counter == 0) {
5778 			tw32_f(MAC_MODE, (tp->mac_mode |
5779 					  MAC_MODE_SEND_CONFIGS));
5780 			udelay(1);
5781 			tw32_f(MAC_MODE, tp->mac_mode);
5782 		}
5783 	}
5784 
5785 	if (current_link_up) {
5786 		tp->link_config.active_speed = SPEED_1000;
5787 		tp->link_config.active_duplex = DUPLEX_FULL;
5788 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5789 				    LED_CTRL_LNKLED_OVERRIDE |
5790 				    LED_CTRL_1000MBPS_ON));
5791 	} else {
5792 		tp->link_config.active_speed = SPEED_UNKNOWN;
5793 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5794 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5795 				    LED_CTRL_LNKLED_OVERRIDE |
5796 				    LED_CTRL_TRAFFIC_OVERRIDE));
5797 	}
5798 
5799 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5800 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5801 		if (orig_pause_cfg != now_pause_cfg ||
5802 		    orig_active_speed != tp->link_config.active_speed ||
5803 		    orig_active_duplex != tp->link_config.active_duplex)
5804 			tg3_link_report(tp);
5805 	}
5806 
5807 	return 0;
5808 }
5809 
5810 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5811 {
5812 	int err = 0;
5813 	u32 bmsr, bmcr;
5814 	u32 current_speed = SPEED_UNKNOWN;
5815 	u8 current_duplex = DUPLEX_UNKNOWN;
5816 	bool current_link_up = false;
5817 	u32 local_adv = 0, remote_adv = 0, sgsr;
5818 
5819 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5820 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5821 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5822 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5823 
5824 		if (force_reset)
5825 			tg3_phy_reset(tp);
5826 
5827 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5828 
5829 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5830 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831 		} else {
5832 			current_link_up = true;
5833 			if (sgsr & SERDES_TG3_SPEED_1000) {
5834 				current_speed = SPEED_1000;
5835 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5837 				current_speed = SPEED_100;
5838 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5839 			} else {
5840 				current_speed = SPEED_10;
5841 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5842 			}
5843 
5844 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5845 				current_duplex = DUPLEX_FULL;
5846 			else
5847 				current_duplex = DUPLEX_HALF;
5848 		}
5849 
5850 		tw32_f(MAC_MODE, tp->mac_mode);
5851 		udelay(40);
5852 
5853 		tg3_clear_mac_status(tp);
5854 
5855 		goto fiber_setup_done;
5856 	}
5857 
5858 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5859 	tw32_f(MAC_MODE, tp->mac_mode);
5860 	udelay(40);
5861 
5862 	tg3_clear_mac_status(tp);
5863 
5864 	if (force_reset)
5865 		tg3_phy_reset(tp);
5866 
5867 	tp->link_config.rmt_adv = 0;
5868 
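	/* BMSR latches link-down events; read it twice so that the
	 * second read reflects the current link state.
	 */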
5869 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5871 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5872 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5873 			bmsr |= BMSR_LSTATUS;
5874 		else
5875 			bmsr &= ~BMSR_LSTATUS;
5876 	}
5877 
5878 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5879 
5880 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5881 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5882 		/* do nothing, just check for link up at the end */
5883 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5884 		u32 adv, newadv;
5885 
5886 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5887 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5888 				 ADVERTISE_1000XPAUSE |
5889 				 ADVERTISE_1000XPSE_ASYM |
5890 				 ADVERTISE_SLCT);
5891 
5892 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5893 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5894 
5895 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5896 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5897 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5898 			tg3_writephy(tp, MII_BMCR, bmcr);
5899 
5900 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5901 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5902 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5903 
5904 			return err;
5905 		}
5906 	} else {
5907 		u32 new_bmcr;
5908 
5909 		bmcr &= ~BMCR_SPEED1000;
5910 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5911 
5912 		if (tp->link_config.duplex == DUPLEX_FULL)
5913 			new_bmcr |= BMCR_FULLDPLX;
5914 
5915 		if (new_bmcr != bmcr) {
5916 			/* BMCR_SPEED1000 is a reserved bit that needs
5917 			 * to be set on write.
5918 			 */
5919 			new_bmcr |= BMCR_SPEED1000;
5920 
5921 			/* Force a linkdown */
5922 			if (tp->link_up) {
5923 				u32 adv;
5924 
5925 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5926 				adv &= ~(ADVERTISE_1000XFULL |
5927 					 ADVERTISE_1000XHALF |
5928 					 ADVERTISE_SLCT);
5929 				tg3_writephy(tp, MII_ADVERTISE, adv);
5930 				tg3_writephy(tp, MII_BMCR, bmcr |
5931 							   BMCR_ANRESTART |
5932 							   BMCR_ANENABLE);
5933 				udelay(10);
5934 				tg3_carrier_off(tp);
5935 			}
5936 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5937 			bmcr = new_bmcr;
5938 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5940 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5941 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5942 					bmsr |= BMSR_LSTATUS;
5943 				else
5944 					bmsr &= ~BMSR_LSTATUS;
5945 			}
5946 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5947 		}
5948 	}
5949 
5950 	if (bmsr & BMSR_LSTATUS) {
5951 		current_speed = SPEED_1000;
5952 		current_link_up = true;
5953 		if (bmcr & BMCR_FULLDPLX)
5954 			current_duplex = DUPLEX_FULL;
5955 		else
5956 			current_duplex = DUPLEX_HALF;
5957 
5958 		local_adv = 0;
5959 		remote_adv = 0;
5960 
5961 		if (bmcr & BMCR_ANENABLE) {
5962 			u32 common;
5963 
5964 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5965 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5966 			common = local_adv & remote_adv;
5967 			if (common & (ADVERTISE_1000XHALF |
5968 				      ADVERTISE_1000XFULL)) {
5969 				if (common & ADVERTISE_1000XFULL)
5970 					current_duplex = DUPLEX_FULL;
5971 				else
5972 					current_duplex = DUPLEX_HALF;
5973 
5974 				tp->link_config.rmt_adv =
5975 					   mii_adv_to_ethtool_adv_x(remote_adv);
5976 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5977 				/* Link is up via parallel detect */
5978 			} else {
5979 				current_link_up = false;
5980 			}
5981 		}
5982 	}
5983 
5984 fiber_setup_done:
5985 	if (current_link_up && current_duplex == DUPLEX_FULL)
5986 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5987 
5988 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5989 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5990 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5991 
5992 	tw32_f(MAC_MODE, tp->mac_mode);
5993 	udelay(40);
5994 
5995 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5996 
5997 	tp->link_config.active_speed = current_speed;
5998 	tp->link_config.active_duplex = current_duplex;
5999 
6000 	tg3_test_and_report_link_chg(tp, current_link_up);
6001 	return err;
6002 }
6003 
6004 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6005 {
6006 	if (tp->serdes_counter) {
6007 		/* Give autoneg time to complete. */
6008 		tp->serdes_counter--;
6009 		return;
6010 	}
6011 
6012 	if (!tp->link_up &&
6013 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6014 		u32 bmcr;
6015 
6016 		tg3_readphy(tp, MII_BMCR, &bmcr);
6017 		if (bmcr & BMCR_ANENABLE) {
6018 			u32 phy1, phy2;
6019 
6020 			/* Select shadow register 0x1f */
6021 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6022 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6023 
6024 			/* Select expansion interrupt status register */
6025 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6026 					 MII_TG3_DSP_EXP1_INT_STAT);
6027 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6029 
6030 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6031 				/* We have signal detect and not receiving
6032 				 * config code words, link is up by parallel
6033 				 * detection.
6034 				 */
6035 
6036 				bmcr &= ~BMCR_ANENABLE;
6037 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6038 				tg3_writephy(tp, MII_BMCR, bmcr);
6039 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6040 			}
6041 		}
6042 	} else if (tp->link_up &&
6043 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6044 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6045 		u32 phy2;
6046 
6047 		/* Select expansion interrupt status register */
6048 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6049 				 MII_TG3_DSP_EXP1_INT_STAT);
6050 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6051 		if (phy2 & 0x20) {
6052 			u32 bmcr;
6053 
6054 			/* Config code words received, turn on autoneg. */
6055 			tg3_readphy(tp, MII_BMCR, &bmcr);
6056 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6057 
6058 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6059 
6060 		}
6061 	}
6062 }
6063 
6064 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6065 {
6066 	u32 val;
6067 	int err;
6068 
6069 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6070 		err = tg3_setup_fiber_phy(tp, force_reset);
6071 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6072 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6073 	else
6074 		err = tg3_setup_copper_phy(tp, force_reset);
6075 
6076 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6077 		u32 scale;
6078 
6079 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6080 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6081 			scale = 65;
6082 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6083 			scale = 6;
6084 		else
6085 			scale = 12;
6086 
6087 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6088 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6089 		tw32(GRC_MISC_CFG, val);
6090 	}
6091 
6092 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6093 	      (6 << TX_LENGTHS_IPG_SHIFT);
6094 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6095 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6096 		val |= tr32(MAC_TX_LENGTHS) &
6097 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6098 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6099 
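	/* Half-duplex gigabit uses a much larger slot time, presumably
	 * for IEEE 802.3 carrier extension; all other link modes use
	 * the standard value.
	 */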
6100 	if (tp->link_config.active_speed == SPEED_1000 &&
6101 	    tp->link_config.active_duplex == DUPLEX_HALF)
6102 		tw32(MAC_TX_LENGTHS, val |
6103 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6104 	else
6105 		tw32(MAC_TX_LENGTHS, val |
6106 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6107 
6108 	if (!tg3_flag(tp, 5705_PLUS)) {
6109 		if (tp->link_up) {
6110 			tw32(HOSTCC_STAT_COAL_TICKS,
6111 			     tp->coal.stats_block_coalesce_usecs);
6112 		} else {
6113 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6114 		}
6115 	}
6116 
6117 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6118 		val = tr32(PCIE_PWR_MGMT_THRESH);
6119 		if (!tp->link_up)
6120 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6121 			      tp->pwrmgmt_thresh;
6122 		else
6123 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6124 		tw32(PCIE_PWR_MGMT_THRESH, val);
6125 	}
6126 
6127 	return err;
6128 }
6129 
6130 /* tp->lock must be held */
6131 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6132 {
6133 	u64 stamp;
6134 
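	/* Bracket only the LSB read with the system timestamps so that
	 * sts tightly bounds the device register access.
	 */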
6135 	ptp_read_system_prets(sts);
6136 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6137 	ptp_read_system_postts(sts);
6138 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6139 
6140 	return stamp;
6141 }
6142 
6143 /* tp->lock must be held */
6144 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6145 {
6146 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6147 
6148 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6149 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6150 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6151 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6152 }
6153 
6154 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6155 static inline void tg3_full_unlock(struct tg3 *tp);
6156 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6157 {
6158 	struct tg3 *tp = netdev_priv(dev);
6159 
6160 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6161 				SOF_TIMESTAMPING_RX_SOFTWARE |
6162 				SOF_TIMESTAMPING_SOFTWARE;
6163 
6164 	if (tg3_flag(tp, PTP_CAPABLE)) {
6165 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6166 					SOF_TIMESTAMPING_RX_HARDWARE |
6167 					SOF_TIMESTAMPING_RAW_HARDWARE;
6168 	}
6169 
6170 	if (tp->ptp_clock)
6171 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6172 	else
6173 		info->phc_index = -1;
6174 
6175 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6176 
6177 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6178 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6179 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6180 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6181 	return 0;
6182 }
6183 
6184 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6185 {
6186 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6187 	u64 correction;
6188 	bool neg_adj;
6189 
6190 	/* Frequency adjustment is performed using hardware with a 24 bit
6191 	 * accumulator and a programmable correction value.  On each clock
6192 	 * cycle, the correction value gets added to the accumulator; when it
6193 	 * overflows, the time counter is incremented/decremented.
6194 	 */
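	/* Worked example, assuming diff_by_scaled_ppm() computes
	 * correction = base * |scaled_ppm| / (10^6 * 2^16): a request of
	 * +1 ppm (scaled_ppm = 65536) with base = 1 << 24 yields a
	 * correction of 2^24 / 10^6, i.e. 16.
	 */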
6195 	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6196 
6197 	tg3_full_lock(tp, 0);
6198 
6199 	if (correction)
6200 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6201 		     TG3_EAV_REF_CLK_CORRECT_EN |
6202 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6203 		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6204 	else
6205 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206 
6207 	tg3_full_unlock(tp);
6208 
6209 	return 0;
6210 }
6211 
6212 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 {
6214 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 
6216 	tg3_full_lock(tp, 0);
6217 	tp->ptp_adjust += delta;
6218 	tg3_full_unlock(tp);
6219 
6220 	return 0;
6221 }
6222 
6223 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6224 			    struct ptp_system_timestamp *sts)
6225 {
6226 	u64 ns;
6227 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 
6229 	tg3_full_lock(tp, 0);
6230 	ns = tg3_refclk_read(tp, sts);
6231 	ns += tp->ptp_adjust;
6232 	tg3_full_unlock(tp);
6233 
6234 	*ts = ns_to_timespec64(ns);
6235 
6236 	return 0;
6237 }
6238 
6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240 			   const struct timespec64 *ts)
6241 {
6242 	u64 ns;
6243 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 
6245 	ns = timespec64_to_ns(ts);
6246 
6247 	tg3_full_lock(tp, 0);
6248 	tg3_refclk_write(tp, ns);
6249 	tp->ptp_adjust = 0;
6250 	tg3_full_unlock(tp);
6251 
6252 	return 0;
6253 }
6254 
6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256 			  struct ptp_clock_request *rq, int on)
6257 {
6258 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6259 	u32 clock_ctl;
6260 	int rval = 0;
6261 
6262 	switch (rq->type) {
6263 	case PTP_CLK_REQ_PEROUT:
6264 		/* Reject requests with unsupported flags */
6265 		if (rq->perout.flags)
6266 			return -EOPNOTSUPP;
6267 
6268 		if (rq->perout.index != 0)
6269 			return -EINVAL;
6270 
6271 		tg3_full_lock(tp, 0);
6272 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6273 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6274 
6275 		if (on) {
6276 			u64 nsec;
6277 
6278 			nsec = rq->perout.start.sec * 1000000000ULL +
6279 			       rq->perout.start.nsec;
6280 
6281 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6282 				netdev_warn(tp->dev,
6283 					    "Device supports only a one-shot timesync output, period must be 0\n");
6284 				rval = -EINVAL;
6285 				goto err_out;
6286 			}
6287 
6288 			if (nsec & (1ULL << 63)) {
6289 				netdev_warn(tp->dev,
6290 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6291 				rval = -EINVAL;
6292 				goto err_out;
6293 			}
6294 
6295 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6296 			tw32(TG3_EAV_WATCHDOG0_MSB,
6297 			     TG3_EAV_WATCHDOG0_EN |
6298 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6299 
6300 			tw32(TG3_EAV_REF_CLCK_CTL,
6301 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6302 		} else {
6303 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6304 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6305 		}
6306 
6307 err_out:
6308 		tg3_full_unlock(tp);
6309 		return rval;
6310 
6311 	default:
6312 		break;
6313 	}
6314 
6315 	return -EOPNOTSUPP;
6316 }
6317 
6318 static const struct ptp_clock_info tg3_ptp_caps = {
6319 	.owner		= THIS_MODULE,
6320 	.name		= "tg3 clock",
6321 	.max_adj	= 250000000,
6322 	.n_alarm	= 0,
6323 	.n_ext_ts	= 0,
6324 	.n_per_out	= 1,
6325 	.n_pins		= 0,
6326 	.pps		= 0,
6327 	.adjfine	= tg3_ptp_adjfine,
6328 	.adjtime	= tg3_ptp_adjtime,
6329 	.gettimex64	= tg3_ptp_gettimex,
6330 	.settime64	= tg3_ptp_settime,
6331 	.enable		= tg3_ptp_enable,
6332 };
6333 
6334 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6335 				     struct skb_shared_hwtstamps *timestamp)
6336 {
6337 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6338 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6339 					   tp->ptp_adjust);
6340 }
6341 
6342 /* tp->lock must be held */
6343 static void tg3_ptp_init(struct tg3 *tp)
6344 {
6345 	if (!tg3_flag(tp, PTP_CAPABLE))
6346 		return;
6347 
6348 	/* Initialize the hardware clock to the system time. */
6349 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6350 	tp->ptp_adjust = 0;
6351 	tp->ptp_info = tg3_ptp_caps;
6352 }
6353 
6354 /* tp->lock must be held */
6355 static void tg3_ptp_resume(struct tg3 *tp)
6356 {
6357 	if (!tg3_flag(tp, PTP_CAPABLE))
6358 		return;
6359 
6360 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6361 	tp->ptp_adjust = 0;
6362 }
6363 
6364 static void tg3_ptp_fini(struct tg3 *tp)
6365 {
6366 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6367 		return;
6368 
6369 	ptp_clock_unregister(tp->ptp_clock);
6370 	tp->ptp_clock = NULL;
6371 	tp->ptp_adjust = 0;
6372 }
6373 
6374 static inline int tg3_irq_sync(struct tg3 *tp)
6375 {
6376 	return tp->irq_sync;
6377 }
6378 
6379 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6380 {
6381 	int i;
6382 
6383 	dst = (u32 *)((u8 *)dst + off);
6384 	for (i = 0; i < len; i += sizeof(u32))
6385 		*dst++ = tr32(off + i);
6386 }
6387 
6388 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6389 {
6390 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6391 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6392 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6393 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6394 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6395 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6396 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6397 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6398 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6399 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6400 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6401 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6402 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6403 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6404 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6405 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6406 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6407 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6408 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6409 
6410 	if (tg3_flag(tp, SUPPORT_MSIX))
6411 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6412 
6413 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6414 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6415 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6416 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6417 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6418 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6419 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6420 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6421 
6422 	if (!tg3_flag(tp, 5705_PLUS)) {
6423 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6424 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6425 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6426 	}
6427 
6428 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6429 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6430 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6431 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6432 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6433 
6434 	if (tg3_flag(tp, NVRAM))
6435 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6436 }
6437 
6438 static void tg3_dump_state(struct tg3 *tp)
6439 {
6440 	int i;
6441 	u32 *regs;
6442 
6443 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6444 	if (!regs)
6445 		return;
6446 
6447 	if (tg3_flag(tp, PCI_EXPRESS)) {
6448 		/* Read up to but not including private PCI registers */
6449 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6450 			regs[i / sizeof(u32)] = tr32(i);
6451 	} else
6452 		tg3_dump_legacy_regs(tp, regs);
6453 
6454 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6455 		if (!regs[i + 0] && !regs[i + 1] &&
6456 		    !regs[i + 2] && !regs[i + 3])
6457 			continue;
6458 
6459 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6460 			   i * 4,
6461 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6462 	}
6463 
6464 	kfree(regs);
6465 
6466 	for (i = 0; i < tp->irq_cnt; i++) {
6467 		struct tg3_napi *tnapi = &tp->napi[i];
6468 
6469 		/* SW status block */
6470 		netdev_err(tp->dev,
6471 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6472 			   i,
6473 			   tnapi->hw_status->status,
6474 			   tnapi->hw_status->status_tag,
6475 			   tnapi->hw_status->rx_jumbo_consumer,
6476 			   tnapi->hw_status->rx_consumer,
6477 			   tnapi->hw_status->rx_mini_consumer,
6478 			   tnapi->hw_status->idx[0].rx_producer,
6479 			   tnapi->hw_status->idx[0].tx_consumer);
6480 
6481 		netdev_err(tp->dev,
6482 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6483 			   i,
6484 			   tnapi->last_tag, tnapi->last_irq_tag,
6485 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6486 			   tnapi->rx_rcb_ptr,
6487 			   tnapi->prodring.rx_std_prod_idx,
6488 			   tnapi->prodring.rx_std_cons_idx,
6489 			   tnapi->prodring.rx_jmb_prod_idx,
6490 			   tnapi->prodring.rx_jmb_cons_idx);
6491 	}
6492 }
6493 
6494 /* This is called whenever we suspect that the system chipset is re-
6495  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6496  * is bogus tx completions. We try to recover by setting the
6497  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6498  * in the workqueue.
6499  */
6500 static void tg3_tx_recover(struct tg3 *tp)
6501 {
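	/* Recovery only makes sense if the reorder workaround is not
	 * already active and direct mailbox writes are in use; otherwise
	 * the bogus completions point at a different bug entirely.
	 */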
6502 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6503 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6504 
6505 	netdev_warn(tp->dev,
6506 		    "The system may be re-ordering memory-mapped I/O "
6507 		    "cycles to the network device, attempting to recover. "
6508 		    "Please report the problem to the driver maintainer "
6509 		    "and include system chipset information.\n");
6510 
6511 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6512 }
6513 
6514 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6515 {
6516 	/* Tell compiler to fetch tx indices from memory. */
6517 	barrier();
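	/* TG3_TX_RING_SIZE is a power of two, so the masked difference
	 * below counts the descriptors currently in flight.
	 */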
6518 	return tnapi->tx_pending -
6519 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6520 }
6521 
6522 /* Tigon3 never reports partial packet sends.  So we do not
6523  * need special logic to handle SKBs that have not had all
6524  * of their frags sent yet, like SunGEM does.
6525  */
6526 static void tg3_tx(struct tg3_napi *tnapi)
6527 {
6528 	struct tg3 *tp = tnapi->tp;
6529 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6530 	u32 sw_idx = tnapi->tx_cons;
6531 	struct netdev_queue *txq;
6532 	int index = tnapi - tp->napi;
6533 	unsigned int pkts_compl = 0, bytes_compl = 0;
6534 
6535 	if (tg3_flag(tp, ENABLE_TSS))
6536 		index--;
6537 
6538 	txq = netdev_get_tx_queue(tp->dev, index);
6539 
6540 	while (sw_idx != hw_idx) {
6541 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6542 		struct sk_buff *skb = ri->skb;
6543 		int i, tx_bug = 0;
6544 
6545 		if (unlikely(skb == NULL)) {
6546 			tg3_tx_recover(tp);
6547 			return;
6548 		}
6549 
6550 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6551 			struct skb_shared_hwtstamps timestamp;
6552 			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6553 			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6554 
6555 			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6556 
6557 			skb_tstamp_tx(skb, &timestamp);
6558 		}
6559 
6560 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6561 				 skb_headlen(skb), DMA_TO_DEVICE);
6562 
6563 		ri->skb = NULL;
6564 
6565 		while (ri->fragmented) {
6566 			ri->fragmented = false;
6567 			sw_idx = NEXT_TX(sw_idx);
6568 			ri = &tnapi->tx_buffers[sw_idx];
6569 		}
6570 
6571 		sw_idx = NEXT_TX(sw_idx);
6572 
6573 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6574 			ri = &tnapi->tx_buffers[sw_idx];
6575 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6576 				tx_bug = 1;
6577 
6578 			dma_unmap_page(&tp->pdev->dev,
6579 				       dma_unmap_addr(ri, mapping),
6580 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6581 				       DMA_TO_DEVICE);
6582 
6583 			while (ri->fragmented) {
6584 				ri->fragmented = false;
6585 				sw_idx = NEXT_TX(sw_idx);
6586 				ri = &tnapi->tx_buffers[sw_idx];
6587 			}
6588 
6589 			sw_idx = NEXT_TX(sw_idx);
6590 		}
6591 
6592 		pkts_compl++;
6593 		bytes_compl += skb->len;
6594 
6595 		dev_consume_skb_any(skb);
6596 
6597 		if (unlikely(tx_bug)) {
6598 			tg3_tx_recover(tp);
6599 			return;
6600 		}
6601 	}
6602 
6603 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6604 
6605 	tnapi->tx_cons = sw_idx;
6606 
6607 	/* Need to make the tx_cons update visible to tg3_start_xmit()
6608 	 * before checking for netif_queue_stopped().  Without the
6609 	 * memory barrier, there is a small possibility that tg3_start_xmit()
6610 	 * will miss it and cause the queue to be stopped forever.
6611 	 */
6612 	smp_mb();
6613 
6614 	if (unlikely(netif_tx_queue_stopped(txq) &&
6615 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6616 		__netif_tx_lock(txq, smp_processor_id());
6617 		if (netif_tx_queue_stopped(txq) &&
6618 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6619 			netif_tx_wake_queue(txq);
6620 		__netif_tx_unlock(txq);
6621 	}
6622 }
6623 
6624 static void tg3_frag_free(bool is_frag, void *data)
6625 {
6626 	if (is_frag)
6627 		skb_free_frag(data);
6628 	else
6629 		kfree(data);
6630 }
6631 
6632 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6633 {
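	/* Recompute the allocation size exactly as tg3_alloc_rx_data()
	 * does, so that the frag-vs-kmalloc choice passed to
	 * tg3_frag_free() matches how the buffer was allocated.
	 */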
6634 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6635 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6636 
6637 	if (!ri->data)
6638 		return;
6639 
6640 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6641 			 DMA_FROM_DEVICE);
6642 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6643 	ri->data = NULL;
6644 }
6645 
6646 
6647 /* Returns the size of the data buffer mapped for DMA, or < 0 on error.
6648  *
6649  * We only need to fill in the address because the other members
6650  * of the RX descriptor are invariant, see tg3_init_rings.
6651  *
6652  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6653  * posting buffers we only dirty the first cache line of the RX
6654  * descriptor (containing the address).  Whereas for the RX status
6655  * buffers the cpu only reads the last cacheline of the RX descriptor
6656  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6657  */
6658 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6659 			     u32 opaque_key, u32 dest_idx_unmasked,
6660 			     unsigned int *frag_size)
6661 {
6662 	struct tg3_rx_buffer_desc *desc;
6663 	struct ring_info *map;
6664 	u8 *data;
6665 	dma_addr_t mapping;
6666 	int skb_size, data_size, dest_idx;
6667 
6668 	switch (opaque_key) {
6669 	case RXD_OPAQUE_RING_STD:
6670 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6671 		desc = &tpr->rx_std[dest_idx];
6672 		map = &tpr->rx_std_buffers[dest_idx];
6673 		data_size = tp->rx_pkt_map_sz;
6674 		break;
6675 
6676 	case RXD_OPAQUE_RING_JUMBO:
6677 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6678 		desc = &tpr->rx_jmb[dest_idx].std;
6679 		map = &tpr->rx_jmb_buffers[dest_idx];
6680 		data_size = TG3_RX_JMB_MAP_SZ;
6681 		break;
6682 
6683 	default:
6684 		return -EINVAL;
6685 	}
6686 
6687 	/* Do not overwrite any of the map or rp information
6688 	 * until we are sure we can commit to a new buffer.
6689 	 *
6690 	 * Callers depend upon this behavior and assume that
6691 	 * we leave everything unchanged if we fail.
6692 	 */
6693 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6694 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6695 	if (skb_size <= PAGE_SIZE) {
6696 		data = napi_alloc_frag(skb_size);
6697 		*frag_size = skb_size;
6698 	} else {
6699 		data = kmalloc(skb_size, GFP_ATOMIC);
6700 		*frag_size = 0;
6701 	}
6702 	if (!data)
6703 		return -ENOMEM;
6704 
6705 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6706 				 data_size, DMA_FROM_DEVICE);
6707 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6708 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6709 		return -EIO;
6710 	}
6711 
6712 	map->data = data;
6713 	dma_unmap_addr_set(map, mapping, mapping);
6714 
6715 	desc->addr_hi = ((u64)mapping >> 32);
6716 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6717 
6718 	return data_size;
6719 }
6720 
6721 /* We only need to move over in the address because the other
6722  * members of the RX descriptor are invariant.  See notes above
6723  * tg3_alloc_rx_data for full details.
6724  */
6725 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6726 			   struct tg3_rx_prodring_set *dpr,
6727 			   u32 opaque_key, int src_idx,
6728 			   u32 dest_idx_unmasked)
6729 {
6730 	struct tg3 *tp = tnapi->tp;
6731 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6732 	struct ring_info *src_map, *dest_map;
6733 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6734 	int dest_idx;
6735 
6736 	switch (opaque_key) {
6737 	case RXD_OPAQUE_RING_STD:
6738 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6739 		dest_desc = &dpr->rx_std[dest_idx];
6740 		dest_map = &dpr->rx_std_buffers[dest_idx];
6741 		src_desc = &spr->rx_std[src_idx];
6742 		src_map = &spr->rx_std_buffers[src_idx];
6743 		break;
6744 
6745 	case RXD_OPAQUE_RING_JUMBO:
6746 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6747 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6748 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6749 		src_desc = &spr->rx_jmb[src_idx].std;
6750 		src_map = &spr->rx_jmb_buffers[src_idx];
6751 		break;
6752 
6753 	default:
6754 		return;
6755 	}
6756 
6757 	dest_map->data = src_map->data;
6758 	dma_unmap_addr_set(dest_map, mapping,
6759 			   dma_unmap_addr(src_map, mapping));
6760 	dest_desc->addr_hi = src_desc->addr_hi;
6761 	dest_desc->addr_lo = src_desc->addr_lo;
6762 
6763 	/* Ensure that the update to the skb happens after the physical
6764 	 * addresses have been transferred to the new BD location.
6765 	 */
6766 	smp_wmb();
6767 
6768 	src_map->data = NULL;
6769 }
6770 
6771 /* The RX ring scheme is composed of multiple rings which post fresh
6772  * buffers to the chip, and one special ring the chip uses to report
6773  * status back to the host.
6774  *
6775  * The special ring reports the status of received packets to the
6776  * host.  The chip does not write into the original descriptor the
6777  * RX buffer was obtained from.  The chip simply takes the original
6778  * descriptor as provided by the host, updates the status and length
6779  * field, then writes this into the next status ring entry.
6780  *
6781  * Each ring the host uses to post buffers to the chip is described
6782  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6783  * it is first placed into the on-chip RAM.  When the packet's length
6784  * is known, the chip walks down the TG3_BDINFO entries to select the
6785  * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
6786  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6787  *
6788  * The "separate ring for rx status" scheme may sound odd, but it makes
6789  * sense from a cache coherency perspective.  If only the host writes
6790  * to the buffer post rings, and only the chip writes to the rx status
6791  * rings, then cache lines never move beyond shared-modified state.
6792  * If both the host and chip were to write into the same ring, cache line
6793  * eviction could occur since both entities want it in an exclusive state.
6794  */
6795 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6796 {
6797 	struct tg3 *tp = tnapi->tp;
6798 	u32 work_mask, rx_std_posted = 0;
6799 	u32 std_prod_idx, jmb_prod_idx;
6800 	u32 sw_idx = tnapi->rx_rcb_ptr;
6801 	u16 hw_idx;
6802 	int received;
6803 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6804 
6805 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6806 	/*
6807 	 * We need to order the read of hw_idx and the read of
6808 	 * the opaque cookie.
6809 	 */
6810 	rmb();
6811 	work_mask = 0;
6812 	received = 0;
6813 	std_prod_idx = tpr->rx_std_prod_idx;
6814 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6815 	while (sw_idx != hw_idx && budget > 0) {
6816 		struct ring_info *ri;
6817 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6818 		unsigned int len;
6819 		struct sk_buff *skb;
6820 		dma_addr_t dma_addr;
6821 		u32 opaque_key, desc_idx, *post_ptr;
6822 		u8 *data;
6823 		u64 tstamp = 0;
6824 
6825 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6826 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6827 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6828 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6829 			dma_addr = dma_unmap_addr(ri, mapping);
6830 			data = ri->data;
6831 			post_ptr = &std_prod_idx;
6832 			rx_std_posted++;
6833 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6834 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6835 			dma_addr = dma_unmap_addr(ri, mapping);
6836 			data = ri->data;
6837 			post_ptr = &jmb_prod_idx;
6838 		} else
6839 			goto next_pkt_nopost;
6840 
6841 		work_mask |= opaque_key;
6842 
6843 		if (desc->err_vlan & RXD_ERR_MASK) {
6844 		drop_it:
6845 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6846 				       desc_idx, *post_ptr);
6847 		drop_it_no_recycle:
6848 			/* Other statistics are kept track of by the card. */
6849 			tp->rx_dropped++;
6850 			goto next_pkt;
6851 		}
6852 
6853 		prefetch(data + TG3_RX_OFFSET(tp));
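		/* The hardware-reported length includes the 4-byte FCS;
		 * strip it here.
		 */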
6854 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6855 		      ETH_FCS_LEN;
6856 
6857 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6859 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6860 		     RXD_FLAG_PTPSTAT_PTPV2) {
6861 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6862 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6863 		}
6864 
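		/* Copy-break: large packets keep their DMA buffer and hand
		 * it up via build_skb(), posting a fresh buffer in its
		 * place; small packets are copied into a new skb so the
		 * original buffer can be recycled.
		 */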
6865 		if (len > TG3_RX_COPY_THRESH(tp)) {
6866 			int skb_size;
6867 			unsigned int frag_size;
6868 
6869 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6870 						    *post_ptr, &frag_size);
6871 			if (skb_size < 0)
6872 				goto drop_it;
6873 
6874 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6875 					 DMA_FROM_DEVICE);
6876 
6877 			/* Ensure that the update to the data happens
6878 			 * after the usage of the old DMA mapping.
6879 			 */
6880 			smp_wmb();
6881 
6882 			ri->data = NULL;
6883 
6884 			if (frag_size)
6885 				skb = build_skb(data, frag_size);
6886 			else
6887 				skb = slab_build_skb(data);
6888 			if (!skb) {
6889 				tg3_frag_free(frag_size != 0, data);
6890 				goto drop_it_no_recycle;
6891 			}
6892 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6893 		} else {
6894 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6895 				       desc_idx, *post_ptr);
6896 
6897 			skb = netdev_alloc_skb(tp->dev,
6898 					       len + TG3_RAW_IP_ALIGN);
6899 			if (skb == NULL)
6900 				goto drop_it_no_recycle;
6901 
6902 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6903 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6904 						DMA_FROM_DEVICE);
6905 			memcpy(skb->data,
6906 			       data + TG3_RX_OFFSET(tp),
6907 			       len);
6908 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6909 						   len, DMA_FROM_DEVICE);
6910 		}
6911 
6912 		skb_put(skb, len);
6913 		if (tstamp)
6914 			tg3_hwclock_to_timestamp(tp, tstamp,
6915 						 skb_hwtstamps(skb));
6916 
6917 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6918 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6919 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6920 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6921 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6922 		else
6923 			skb_checksum_none_assert(skb);
6924 
6925 		skb->protocol = eth_type_trans(skb, tp->dev);
6926 
6927 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6928 		    skb->protocol != htons(ETH_P_8021Q) &&
6929 		    skb->protocol != htons(ETH_P_8021AD)) {
6930 			dev_kfree_skb_any(skb);
6931 			goto drop_it_no_recycle;
6932 		}
6933 
6934 		if (desc->type_flags & RXD_FLAG_VLAN &&
6935 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6936 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6937 					       desc->err_vlan & RXD_VLAN_MASK);
6938 
6939 		napi_gro_receive(&tnapi->napi, skb);
6940 
6941 		received++;
6942 		budget--;
6943 
6944 next_pkt:
6945 		(*post_ptr)++;
6946 
6947 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6948 			tpr->rx_std_prod_idx = std_prod_idx &
6949 					       tp->rx_std_ring_mask;
6950 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6951 				     tpr->rx_std_prod_idx);
6952 			work_mask &= ~RXD_OPAQUE_RING_STD;
6953 			rx_std_posted = 0;
6954 		}
6955 next_pkt_nopost:
6956 		sw_idx++;
6957 		sw_idx &= tp->rx_ret_ring_mask;
6958 
6959 		/* Refresh hw_idx to see if there is new work */
6960 		if (sw_idx == hw_idx) {
6961 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6962 			rmb();
6963 		}
6964 	}
6965 
6966 	/* ACK the status ring. */
6967 	tnapi->rx_rcb_ptr = sw_idx;
6968 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6969 
6970 	/* Refill RX ring(s). */
6971 	if (!tg3_flag(tp, ENABLE_RSS)) {
6972 		/* Sync BD data before updating mailbox */
6973 		wmb();
6974 
6975 		if (work_mask & RXD_OPAQUE_RING_STD) {
6976 			tpr->rx_std_prod_idx = std_prod_idx &
6977 					       tp->rx_std_ring_mask;
6978 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6979 				     tpr->rx_std_prod_idx);
6980 		}
6981 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6982 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6983 					       tp->rx_jmb_ring_mask;
6984 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6985 				     tpr->rx_jmb_prod_idx);
6986 		}
6987 	} else if (work_mask) {
6988 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6989 		 * updated before the producer indices can be updated.
6990 		 */
6991 		smp_wmb();
6992 
6993 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6994 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6995 
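		/* In RSS mode, vector 1 performs the actual refill of the
		 * hardware producer rings (see tg3_poll_work()), so kick
		 * it if we are any other vector.
		 */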
6996 		if (tnapi != &tp->napi[1]) {
6997 			tp->rx_refill = true;
6998 			napi_schedule(&tp->napi[1].napi);
6999 		}
7000 	}
7001 
7002 	return received;
7003 }
7004 
7005 static void tg3_poll_link(struct tg3 *tp)
7006 {
7007 	/* handle link change and other phy events */
7008 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7009 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7010 
7011 		if (sblk->status & SD_STATUS_LINK_CHG) {
7012 			sblk->status = SD_STATUS_UPDATED |
7013 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7014 			spin_lock(&tp->lock);
7015 			if (tg3_flag(tp, USE_PHYLIB)) {
7016 				tw32_f(MAC_STATUS,
7017 				     (MAC_STATUS_SYNC_CHANGED |
7018 				      MAC_STATUS_CFG_CHANGED |
7019 				      MAC_STATUS_MI_COMPLETION |
7020 				      MAC_STATUS_LNKSTATE_CHANGED));
7021 				udelay(40);
7022 			} else
7023 				tg3_setup_phy(tp, false);
7024 			spin_unlock(&tp->lock);
7025 		}
7026 	}
7027 }
7028 
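
/* In RSS mode, each vector recycles and posts fresh buffers onto its
 * own software producer ring; this helper moves them from such a ring
 * (spr) to the hardware-visible producer ring owned by vector 0 (dpr).
 */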
7029 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7030 				struct tg3_rx_prodring_set *dpr,
7031 				struct tg3_rx_prodring_set *spr)
7032 {
7033 	u32 si, di, cpycnt, src_prod_idx;
7034 	int i, err = 0;
7035 
7036 	while (1) {
7037 		src_prod_idx = spr->rx_std_prod_idx;
7038 
7039 		/* Make sure updates to the rx_std_buffers[] entries and the
7040 		 * standard producer index are seen in the correct order.
7041 		 */
7042 		smp_rmb();
7043 
7044 		if (spr->rx_std_cons_idx == src_prod_idx)
7045 			break;
7046 
7047 		if (spr->rx_std_cons_idx < src_prod_idx)
7048 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7049 		else
7050 			cpycnt = tp->rx_std_ring_mask + 1 -
7051 				 spr->rx_std_cons_idx;
7052 
7053 		cpycnt = min(cpycnt,
7054 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7055 
7056 		si = spr->rx_std_cons_idx;
7057 		di = dpr->rx_std_prod_idx;
7058 
7059 		for (i = di; i < di + cpycnt; i++) {
7060 			if (dpr->rx_std_buffers[i].data) {
7061 				cpycnt = i - di;
7062 				err = -ENOSPC;
7063 				break;
7064 			}
7065 		}
7066 
7067 		if (!cpycnt)
7068 			break;
7069 
7070 		/* Ensure that updates to the rx_std_buffers ring and the
7071 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7072 		 * ordered correctly WRT the skb check above.
7073 		 */
7074 		smp_rmb();
7075 
7076 		memcpy(&dpr->rx_std_buffers[di],
7077 		       &spr->rx_std_buffers[si],
7078 		       cpycnt * sizeof(struct ring_info));
7079 
7080 		for (i = 0; i < cpycnt; i++, di++, si++) {
7081 			struct tg3_rx_buffer_desc *sbd, *dbd;
7082 			sbd = &spr->rx_std[si];
7083 			dbd = &dpr->rx_std[di];
7084 			dbd->addr_hi = sbd->addr_hi;
7085 			dbd->addr_lo = sbd->addr_lo;
7086 		}
7087 
7088 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7089 				       tp->rx_std_ring_mask;
7090 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7091 				       tp->rx_std_ring_mask;
7092 	}
7093 
7094 	while (1) {
7095 		src_prod_idx = spr->rx_jmb_prod_idx;
7096 
7097 		/* Make sure updates to the rx_jmb_buffers[] entries and
7098 		 * the jumbo producer index are seen in the correct order.
7099 		 */
7100 		smp_rmb();
7101 
7102 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7103 			break;
7104 
7105 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7106 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7107 		else
7108 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7109 				 spr->rx_jmb_cons_idx;
7110 
7111 		cpycnt = min(cpycnt,
7112 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7113 
7114 		si = spr->rx_jmb_cons_idx;
7115 		di = dpr->rx_jmb_prod_idx;
7116 
7117 		for (i = di; i < di + cpycnt; i++) {
7118 			if (dpr->rx_jmb_buffers[i].data) {
7119 				cpycnt = i - di;
7120 				err = -ENOSPC;
7121 				break;
7122 			}
7123 		}
7124 
7125 		if (!cpycnt)
7126 			break;
7127 
7128 		/* Ensure that updates to the rx_jmb_buffers ring and the
7129 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7130 		 * ordered correctly WRT the skb check above.
7131 		 */
7132 		smp_rmb();
7133 
7134 		memcpy(&dpr->rx_jmb_buffers[di],
7135 		       &spr->rx_jmb_buffers[si],
7136 		       cpycnt * sizeof(struct ring_info));
7137 
7138 		for (i = 0; i < cpycnt; i++, di++, si++) {
7139 			struct tg3_rx_buffer_desc *sbd, *dbd;
7140 			sbd = &spr->rx_jmb[si].std;
7141 			dbd = &dpr->rx_jmb[di].std;
7142 			dbd->addr_hi = sbd->addr_hi;
7143 			dbd->addr_lo = sbd->addr_lo;
7144 		}
7145 
7146 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7147 				       tp->rx_jmb_ring_mask;
7148 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7149 				       tp->rx_jmb_ring_mask;
7150 	}
7151 
7152 	return err;
7153 }
7154 
7155 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7156 {
7157 	struct tg3 *tp = tnapi->tp;
7158 
7159 	/* run TX completion thread */
7160 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7161 		tg3_tx(tnapi);
7162 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7163 			return work_done;
7164 	}
7165 
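	/* Vectors with no RX return ring (e.g. a TX-only MSI-X vector)
	 * have nothing further to do here.
	 */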
7166 	if (!tnapi->rx_rcb_prod_idx)
7167 		return work_done;
7168 
7169 	/* run RX thread, within the bounds set by NAPI.
7170 	 * All RX "locking" is done by ensuring outside
7171 	 * code synchronizes with tg3->napi.poll()
7172 	 */
7173 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7174 		work_done += tg3_rx(tnapi, budget - work_done);
7175 
7176 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7177 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7178 		int i, err = 0;
7179 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7180 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7181 
7182 		tp->rx_refill = false;
7183 		for (i = 1; i <= tp->rxq_cnt; i++)
7184 			err |= tg3_rx_prodring_xfer(tp, dpr,
7185 						    &tp->napi[i].prodring);
7186 
7187 		wmb();
7188 
7189 		if (std_prod_idx != dpr->rx_std_prod_idx)
7190 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7191 				     dpr->rx_std_prod_idx);
7192 
7193 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7194 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7195 				     dpr->rx_jmb_prod_idx);
7196 
7197 		if (err)
7198 			tw32_f(HOSTCC_MODE, tp->coal_now);
7199 	}
7200 
7201 	return work_done;
7202 }
7203 
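/* RESET_TASK_PENDING acts as a guard bit so that the reset task is
 * scheduled at most once until it completes or is cancelled.
 */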
7204 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7205 {
7206 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7207 		schedule_work(&tp->reset_task);
7208 }
7209 
7210 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7211 {
7212 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7213 		cancel_work_sync(&tp->reset_task);
7214 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7215 }
7216 
7217 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7218 {
7219 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7220 	struct tg3 *tp = tnapi->tp;
7221 	int work_done = 0;
7222 	struct tg3_hw_status *sblk = tnapi->hw_status;
7223 
7224 	while (1) {
7225 		work_done = tg3_poll_work(tnapi, work_done, budget);
7226 
7227 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7228 			goto tx_recovery;
7229 
7230 		if (unlikely(work_done >= budget))
7231 			break;
7232 
7233 		/* tnapi->last_tag is used in the interrupt mailbox write
7234 		 * below to tell the hw how much work has been processed,
7235 		 * so we must read it before checking for more work.
7236 		 */
7237 		tnapi->last_tag = sblk->status_tag;
7238 		tnapi->last_irq_tag = tnapi->last_tag;
7239 		rmb();
7240 
7241 		/* check for RX/TX work to do */
7242 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7243 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7244 
7245 			/* This test is not race-free, but it reduces
7246 			 * the number of interrupts by looping again.
7247 			 */
7248 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7249 				continue;
7250 
7251 			napi_complete_done(napi, work_done);
7252 			/* Reenable interrupts. */
7253 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7254 
7255 			/* This test is synchronized by napi_schedule()
7256 			 * and napi_complete() to close the race condition.
7257 			 */
7258 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7259 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7260 						  HOSTCC_MODE_ENABLE |
7261 						  tnapi->coal_now);
7262 			}
7263 			break;
7264 		}
7265 	}
7266 
7267 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7268 	return work_done;
7269 
7270 tx_recovery:
7271 	/* work_done is guaranteed to be less than budget. */
7272 	napi_complete(napi);
7273 	tg3_reset_task_schedule(tp);
7274 	return work_done;
7275 }
7276 
7277 static void tg3_process_error(struct tg3 *tp)
7278 {
7279 	u32 val;
7280 	bool real_error = false;
7281 
7282 	if (tg3_flag(tp, ERROR_PROCESSED))
7283 		return;
7284 
7285 	/* Check Flow Attention register */
7286 	val = tr32(HOSTCC_FLOW_ATTN);
7287 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7288 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7289 		real_error = true;
7290 	}
7291 
7292 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7293 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7294 		real_error = true;
7295 	}
7296 
7297 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7298 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7299 		real_error = true;
7300 	}
7301 
7302 	if (!real_error)
7303 		return;
7304 
7305 	tg3_dump_state(tp);
7306 
7307 	tg3_flag_set(tp, ERROR_PROCESSED);
7308 	tg3_reset_task_schedule(tp);
7309 }
7310 
7311 static int tg3_poll(struct napi_struct *napi, int budget)
7312 {
7313 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7314 	struct tg3 *tp = tnapi->tp;
7315 	int work_done = 0;
7316 	struct tg3_hw_status *sblk = tnapi->hw_status;
7317 
7318 	while (1) {
7319 		if (sblk->status & SD_STATUS_ERROR)
7320 			tg3_process_error(tp);
7321 
7322 		tg3_poll_link(tp);
7323 
7324 		work_done = tg3_poll_work(tnapi, work_done, budget);
7325 
7326 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7327 			goto tx_recovery;
7328 
7329 		if (unlikely(work_done >= budget))
7330 			break;
7331 
7332 		if (tg3_flag(tp, TAGGED_STATUS)) {
7333 			/* tnapi->last_tag is used in tg3_int_reenable() below
7334 			 * to tell the hw how much work has been processed,
7335 			 * so we must read it before checking for more work.
7336 			 */
7337 			tnapi->last_tag = sblk->status_tag;
7338 			tnapi->last_irq_tag = tnapi->last_tag;
7339 			rmb();
7340 		} else
7341 			sblk->status &= ~SD_STATUS_UPDATED;
7342 
7343 		if (likely(!tg3_has_work(tnapi))) {
7344 			napi_complete_done(napi, work_done);
7345 			tg3_int_reenable(tnapi);
7346 			break;
7347 		}
7348 	}
7349 
7350 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7351 	return work_done;
7352 
7353 tx_recovery:
7354 	/* work_done is guaranteed to be less than budget. */
7355 	napi_complete(napi);
7356 	tg3_reset_task_schedule(tp);
7357 	return work_done;
7358 }
7359 
7360 static void tg3_napi_disable(struct tg3 *tp)
7361 {
7362 	int i;
7363 
7364 	for (i = tp->irq_cnt - 1; i >= 0; i--)
7365 		napi_disable(&tp->napi[i].napi);
7366 }
7367 
7368 static void tg3_napi_enable(struct tg3 *tp)
7369 {
7370 	int i;
7371 
7372 	for (i = 0; i < tp->irq_cnt; i++)
7373 		napi_enable(&tp->napi[i].napi);
7374 }
7375 
7376 static void tg3_napi_init(struct tg3 *tp)
7377 {
7378 	int i;
7379 
7380 	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7381 	for (i = 1; i < tp->irq_cnt; i++)
7382 		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7383 }
7384 
7385 static void tg3_napi_fini(struct tg3 *tp)
7386 {
7387 	int i;
7388 
7389 	for (i = 0; i < tp->irq_cnt; i++)
7390 		netif_napi_del(&tp->napi[i].napi);
7391 }
7392 
7393 static inline void tg3_netif_stop(struct tg3 *tp)
7394 {
7395 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7396 	tg3_napi_disable(tp);
7397 	netif_carrier_off(tp->dev);
7398 	netif_tx_disable(tp->dev);
7399 }
7400 
7401 /* tp->lock must be held */
7402 static inline void tg3_netif_start(struct tg3 *tp)
7403 {
7404 	tg3_ptp_resume(tp);
7405 
7406 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7407 	 * appropriate so long as all callers are assured to
7408 	 * have free tx slots (such as after tg3_init_hw)
7409 	 */
7410 	netif_tx_wake_all_queues(tp->dev);
7411 
7412 	if (tp->link_up)
7413 		netif_carrier_on(tp->dev);
7414 
7415 	tg3_napi_enable(tp);
7416 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7417 	tg3_enable_ints(tp);
7418 }
7419 
7420 static void tg3_irq_quiesce(struct tg3 *tp)
7421 	__releases(tp->lock)
7422 	__acquires(tp->lock)
7423 {
7424 	int i;
7425 
7426 	BUG_ON(tp->irq_sync);
7427 
7428 	tp->irq_sync = 1;
7429 	smp_mb();
7430 
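	/* synchronize_irq() can sleep, so tp->lock must be dropped
	 * around the loop below.
	 */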
7431 	spin_unlock_bh(&tp->lock);
7432 
7433 	for (i = 0; i < tp->irq_cnt; i++)
7434 		synchronize_irq(tp->napi[i].irq_vec);
7435 
7436 	spin_lock_bh(&tp->lock);
7437 }
7438 
7439 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7440  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7441  * with as well.  Most of the time, this is not necessary except when
7442  * shutting down the device.
7443  */
7444 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7445 {
7446 	spin_lock_bh(&tp->lock);
7447 	if (irq_sync)
7448 		tg3_irq_quiesce(tp);
7449 }
7450 
7451 static inline void tg3_full_unlock(struct tg3 *tp)
7452 {
7453 	spin_unlock_bh(&tp->lock);
7454 }
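
/* Typical usage (sketch):
 *
 *	tg3_full_lock(tp, 1);	(irq_sync != 0: also quiesce irq handlers)
 *	... reconfigure the hardware ...
 *	tg3_full_unlock(tp);
 */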
7455 
7456 /* One-shot MSI handler - the chip automatically disables the
7457  * interrupt after sending the MSI, so the driver doesn't have to.
7458  */
7459 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7460 {
7461 	struct tg3_napi *tnapi = dev_id;
7462 	struct tg3 *tp = tnapi->tp;
7463 
7464 	prefetch(tnapi->hw_status);
7465 	if (tnapi->rx_rcb)
7466 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7467 
7468 	if (likely(!tg3_irq_sync(tp)))
7469 		napi_schedule(&tnapi->napi);
7470 
7471 	return IRQ_HANDLED;
7472 }
7473 
7474 /* MSI ISR - No need to check for interrupt sharing and no need to
7475  * flush status block and interrupt mailbox. PCI ordering rules
7476  * guarantee that MSI will arrive after the status block.
7477  */
7478 static irqreturn_t tg3_msi(int irq, void *dev_id)
7479 {
7480 	struct tg3_napi *tnapi = dev_id;
7481 	struct tg3 *tp = tnapi->tp;
7482 
7483 	prefetch(tnapi->hw_status);
7484 	if (tnapi->rx_rcb)
7485 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7486 	/*
7487 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7488 	 * chip-internal interrupt pending events.
7489 	 * Writing non-zero to intr-mbox-0 additional tells the
7490 	 * Writing non-zero to intr-mbox-0 additionally tells the
7491 	 * event coalescing.
7492 	 */
7493 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7494 	if (likely(!tg3_irq_sync(tp)))
7495 		napi_schedule(&tnapi->napi);
7496 
7497 	return IRQ_RETVAL(1);
7498 }
7499 
7500 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7501 {
7502 	struct tg3_napi *tnapi = dev_id;
7503 	struct tg3 *tp = tnapi->tp;
7504 	struct tg3_hw_status *sblk = tnapi->hw_status;
7505 	unsigned int handled = 1;
7506 
7507 	/* In INTx mode, it is possible for the interrupt to arrive at
7508 	 * the CPU before the status block, posted prior to the interrupt,
7509 	 * has landed.  Reading the PCI State register will confirm whether
7510 	 * the interrupt is ours and will flush the status block.
7511 	 */
7512 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7513 		if (tg3_flag(tp, CHIP_RESETTING) ||
7514 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7515 			handled = 0;
7516 			goto out;
7517 		}
7518 	}
7519 
7520 	/*
7521 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7522 	 * chip-internal interrupt pending events.
7523 	 * Writing non-zero to intr-mbox-0 additionally tells the
7524 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7525 	 * event coalescing.
7526 	 *
7527 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7528 	 * spurious interrupts.  The flush impacts performance but
7529 	 * excessive spurious interrupts can be worse in some cases.
7530 	 */
7531 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7532 	if (tg3_irq_sync(tp))
7533 		goto out;
7534 	sblk->status &= ~SD_STATUS_UPDATED;
7535 	if (likely(tg3_has_work(tnapi))) {
7536 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7537 		napi_schedule(&tnapi->napi);
7538 	} else {
7539 		/* No work; shared interrupt perhaps?  Re-enable
7540 		 * interrupts and flush that PCI write.
7541 		 */
7542 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7543 			       0x00000000);
7544 	}
7545 out:
7546 	return IRQ_RETVAL(handled);
7547 }
7548 
7549 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7550 {
7551 	struct tg3_napi *tnapi = dev_id;
7552 	struct tg3 *tp = tnapi->tp;
7553 	struct tg3_hw_status *sblk = tnapi->hw_status;
7554 	unsigned int handled = 1;
7555 
7556 	/* In INTx mode, it is possible for the interrupt to arrive at
7557 	 * the CPU before the status block, posted prior to the interrupt,
7558 	 * has landed.  Reading the PCI State register will confirm whether
7559 	 * the interrupt is ours and will flush the status block.
7560 	 */
7561 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7562 		if (tg3_flag(tp, CHIP_RESETTING) ||
7563 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7564 			handled = 0;
7565 			goto out;
7566 		}
7567 	}
7568 
7569 	/*
7570 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7571 	 * chip-internal interrupt pending events.
7572 	 * Writing non-zero to intr-mbox-0 additionally tells the
7573 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7574 	 * event coalescing.
7575 	 *
7576 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7577 	 * spurious interrupts.  The flush impacts performance but
7578 	 * excessive spurious interrupts can be worse in some cases.
7579 	 */
7580 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7581 
7582 	/*
7583 	 * In a shared interrupt configuration, sometimes other devices'
7584 	 * interrupts will scream.  We record the current status tag here
7585 	 * so that the above check can report that the screaming interrupts
7586 	 * are unhandled.  Eventually they will be silenced.
7587 	 */
7588 	tnapi->last_irq_tag = sblk->status_tag;
7589 
7590 	if (tg3_irq_sync(tp))
7591 		goto out;
7592 
7593 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7594 
7595 	napi_schedule(&tnapi->napi);
7596 
7597 out:
7598 	return IRQ_RETVAL(handled);
7599 }
7600 
7601 /* ISR for interrupt test */
7602 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7603 {
7604 	struct tg3_napi *tnapi = dev_id;
7605 	struct tg3 *tp = tnapi->tp;
7606 	struct tg3_hw_status *sblk = tnapi->hw_status;
7607 
7608 	if ((sblk->status & SD_STATUS_UPDATED) ||
7609 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7610 		tg3_disable_ints(tp);
7611 		return IRQ_RETVAL(1);
7612 	}
7613 	return IRQ_RETVAL(0);
7614 }
7615 
7616 #ifdef CONFIG_NET_POLL_CONTROLLER
7617 static void tg3_poll_controller(struct net_device *dev)
7618 {
7619 	int i;
7620 	struct tg3 *tp = netdev_priv(dev);
7621 
7622 	if (tg3_irq_sync(tp))
7623 		return;
7624 
7625 	for (i = 0; i < tp->irq_cnt; i++)
7626 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7627 }
7628 #endif
7629 
7630 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7631 {
7632 	struct tg3 *tp = netdev_priv(dev);
7633 
7634 	if (netif_msg_tx_err(tp)) {
7635 		netdev_err(dev, "transmit timed out, resetting\n");
7636 		tg3_dump_state(tp);
7637 	}
7638 
7639 	tg3_reset_task_schedule(tp);
7640 }
7641 
7642 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7643 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7644 {
7645 	u32 base = (u32) mapping & 0xffffffff;
7646 
7647 	return base + len + 8 < base;
7648 }
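
/* Worked example: mapping = 0xfffff000, len = 0x2000.  In 32-bit
 * arithmetic, base + len + 8 = 0x1008, which is below base, so the
 * buffer crosses the 4GB boundary and the test fires.  The 8 bytes
 * of slack mirror the short-DMA guard band (assumption: buffers
 * ending within 8 bytes of a boundary are also unsafe).
 */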
7649 
7650 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7651  * of any 4GB boundaries: 4G, 8G, etc.
7652  */
7653 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7654 					   u32 len, u32 mss)
7655 {
7656 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7657 		u32 base = (u32) mapping & 0xffffffff;
7658 
7659 		return ((base + len + (mss & 0x3fff)) < base);
7660 	}
7661 	return 0;
7662 }
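
/* e.g. with mss = 1448 on a 5762, any buffer ending within 1448 bytes
 * of a 4GB boundary is rejected, since a TSO segment starting near the
 * end of the buffer could straddle the boundary.
 */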
7663 
7664 /* Test for DMA addresses > 40-bit */
7665 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7666 					  int len)
7667 {
7668 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7669 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7670 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7671 	return 0;
7672 #else
7673 	return 0;
7674 #endif
7675 }
7676 
7677 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7678 				 dma_addr_t mapping, u32 len, u32 flags,
7679 				 u32 mss, u32 vlan)
7680 {
7681 	txbd->addr_hi = ((u64) mapping >> 32);
7682 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7683 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7684 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7685 }
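
/* Packing above, per the TXD_* definitions in tg3.h: the length lives
 * in the upper half of len_flags with the flags in the lower half,
 * while vlan_tag carries both the MSS and the VLAN tag.
 */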
7686 
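/* Queue one DMA buffer, applying the hardware-bug tests above.  When
 * tp->dma_limit is set, the buffer is split into several BDs of at
 * most dma_limit bytes each; if that would leave a tail of <= 8 bytes
 * (the short-DMA bug), half of the current chunk is deferred so the
 * tail stays larger.  Returns true if the caller must fall back to
 * the bounce-buffer workaround.
 */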
7687 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7688 			    dma_addr_t map, u32 len, u32 flags,
7689 			    u32 mss, u32 vlan)
7690 {
7691 	struct tg3 *tp = tnapi->tp;
7692 	bool hwbug = false;
7693 
7694 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7695 		hwbug = true;
7696 
7697 	if (tg3_4g_overflow_test(map, len))
7698 		hwbug = true;
7699 
7700 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7701 		hwbug = true;
7702 
7703 	if (tg3_40bit_overflow_test(tp, map, len))
7704 		hwbug = true;
7705 
7706 	if (tp->dma_limit) {
7707 		u32 prvidx = *entry;
7708 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7709 		while (len > tp->dma_limit && *budget) {
7710 			u32 frag_len = tp->dma_limit;
7711 			len -= tp->dma_limit;
7712 
7713 			/* Avoid the 8-byte DMA problem */
7714 			if (len <= 8) {
7715 				len += tp->dma_limit / 2;
7716 				frag_len = tp->dma_limit / 2;
7717 			}
7718 
7719 			tnapi->tx_buffers[*entry].fragmented = true;
7720 
7721 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7722 				      frag_len, tmp_flag, mss, vlan);
7723 			*budget -= 1;
7724 			prvidx = *entry;
7725 			*entry = NEXT_TX(*entry);
7726 
7727 			map += frag_len;
7728 		}
7729 
7730 		if (len) {
7731 			if (*budget) {
7732 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7733 					      len, flags, mss, vlan);
7734 				*budget -= 1;
7735 				*entry = NEXT_TX(*entry);
7736 			} else {
7737 				hwbug = true;
7738 				tnapi->tx_buffers[prvidx].fragmented = false;
7739 			}
7740 		}
7741 	} else {
7742 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7743 			      len, flags, mss, vlan);
7744 		*entry = NEXT_TX(*entry);
7745 	}
7746 
7747 	return hwbug;
7748 }
7749 
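/* Unmap one previously queued skb: the linear head first, then each
 * page fragment, skipping the extra descriptors that were created
 * whenever a buffer was split in tg3_tx_frag_set().
 */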
7750 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7751 {
7752 	int i;
7753 	struct sk_buff *skb;
7754 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7755 
7756 	skb = txb->skb;
7757 	txb->skb = NULL;
7758 
7759 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7760 			 skb_headlen(skb), DMA_TO_DEVICE);
7761 
7762 	while (txb->fragmented) {
7763 		txb->fragmented = false;
7764 		entry = NEXT_TX(entry);
7765 		txb = &tnapi->tx_buffers[entry];
7766 	}
7767 
7768 	for (i = 0; i <= last; i++) {
7769 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7770 
7771 		entry = NEXT_TX(entry);
7772 		txb = &tnapi->tx_buffers[entry];
7773 
7774 		dma_unmap_page(&tnapi->tp->pdev->dev,
7775 			       dma_unmap_addr(txb, mapping),
7776 			       skb_frag_size(frag), DMA_TO_DEVICE);
7777 
7778 		while (txb->fragmented) {
7779 			txb->fragmented = false;
7780 			entry = NEXT_TX(entry);
7781 			txb = &tnapi->tx_buffers[entry];
7782 		}
7783 	}
7784 }
7785 
7786 /* Workaround 4GB and 40-bit hardware DMA bugs. */
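/* The offending skb is copied into a freshly allocated linear skb (on
 * the 5701, with enough extra headroom to 4-byte-align skb->data) and
 * queued in its place; the original skb is released either way.
 */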
7787 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7788 				       struct sk_buff **pskb,
7789 				       u32 *entry, u32 *budget,
7790 				       u32 base_flags, u32 mss, u32 vlan)
7791 {
7792 	struct tg3 *tp = tnapi->tp;
7793 	struct sk_buff *new_skb, *skb = *pskb;
7794 	dma_addr_t new_addr = 0;
7795 	int ret = 0;
7796 
7797 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7798 		new_skb = skb_copy(skb, GFP_ATOMIC);
7799 	else {
7800 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7801 
7802 		new_skb = skb_copy_expand(skb,
7803 					  skb_headroom(skb) + more_headroom,
7804 					  skb_tailroom(skb), GFP_ATOMIC);
7805 	}
7806 
7807 	if (!new_skb) {
7808 		ret = -1;
7809 	} else {
7810 		/* New SKB is guaranteed to be linear. */
7811 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7812 					  new_skb->len, DMA_TO_DEVICE);
7813 		/* Make sure the mapping succeeded */
7814 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7815 			dev_kfree_skb_any(new_skb);
7816 			ret = -1;
7817 		} else {
7818 			u32 save_entry = *entry;
7819 
7820 			base_flags |= TXD_FLAG_END;
7821 
7822 			tnapi->tx_buffers[*entry].skb = new_skb;
7823 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7824 					   mapping, new_addr);
7825 
7826 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7827 					    new_skb->len, base_flags,
7828 					    mss, vlan)) {
7829 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7830 				dev_kfree_skb_any(new_skb);
7831 				ret = -1;
7832 			}
7833 		}
7834 	}
7835 
7836 	dev_consume_skb_any(skb);
7837 	*pskb = new_skb;
7838 	return ret;
7839 }
7840 
7841 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7842 {
7843 	/* Check if we will never have enough descriptors,
7844 	 * as gso_segs can be more than the current ring size
7845 	 */
7846 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7847 }
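
/* The "/ 3" above pairs with the frag_cnt_est = gso_segs * 3
 * worst-case estimate in tg3_tso_bug() below: segmentation is only
 * attempted when three descriptors per resulting segment would fit
 * in the tx ring.
 */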
7848 
7849 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7850 
7851 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7852  * indicated in tg3_tx_frag_set()
7853  */
7854 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7855 		       struct netdev_queue *txq, struct sk_buff *skb)
7856 {
7857 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7858 	struct sk_buff *segs, *seg, *next;
7859 
7860 	/* Estimate the number of fragments in the worst case */
7861 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7862 		netif_tx_stop_queue(txq);
7863 
7864 		/* netif_tx_stop_queue() must be done before checking
7865 		 * the tx index in tg3_tx_avail() below, because in
7866 		 * tg3_tx(), we update tx index before checking for
7867 		 * netif_tx_queue_stopped().
7868 		 */
7869 		smp_mb();
7870 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7871 			return NETDEV_TX_BUSY;
7872 
7873 		netif_tx_wake_queue(txq);
7874 	}
7875 
7876 	segs = skb_gso_segment(skb, tp->dev->features &
7877 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7878 	if (IS_ERR(segs) || !segs)
7879 		goto tg3_tso_bug_end;
7880 
7881 	skb_list_walk_safe(segs, seg, next) {
7882 		skb_mark_not_on_list(seg);
7883 		tg3_start_xmit(seg, tp->dev);
7884 	}
7885 
7886 tg3_tso_bug_end:
7887 	dev_consume_skb_any(skb);
7888 
7889 	return NETDEV_TX_OK;
7890 }
7891 
7892 /* hard_start_xmit for all devices */
7893 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7894 {
7895 	struct tg3 *tp = netdev_priv(dev);
7896 	u32 len, entry, base_flags, mss, vlan = 0;
7897 	u32 budget;
7898 	int i = -1, would_hit_hwbug;
7899 	dma_addr_t mapping;
7900 	struct tg3_napi *tnapi;
7901 	struct netdev_queue *txq;
7902 	unsigned int last;
7903 	struct iphdr *iph = NULL;
7904 	struct tcphdr *tcph = NULL;
7905 	__sum16 tcp_csum = 0, ip_csum = 0;
7906 	__be16 ip_tot_len = 0;
7907 
7908 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7909 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7910 	if (tg3_flag(tp, ENABLE_TSS))
7911 		tnapi++;
7912 
7913 	budget = tg3_tx_avail(tnapi);
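	/* From here on, "budget" counts free tx descriptors rather than
	 * NAPI work.
	 */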
7914 
7915 	/* We are running in BH disabled context with netif_tx_lock
7916 	 * and TX reclaim runs via tp->napi.poll inside of a software
7917 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7918 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7919 	 */
7920 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7921 		if (!netif_tx_queue_stopped(txq)) {
7922 			netif_tx_stop_queue(txq);
7923 
7924 			/* This is a hard error, log it. */
7925 			netdev_err(dev,
7926 				   "BUG! Tx Ring full when queue awake!\n");
7927 		}
7928 		return NETDEV_TX_BUSY;
7929 	}
7930 
7931 	entry = tnapi->tx_prod;
7932 	base_flags = 0;
7933 
7934 	mss = skb_shinfo(skb)->gso_size;
7935 	if (mss) {
7936 		u32 tcp_opt_len, hdr_len;
7937 
7938 		if (skb_cow_head(skb, 0))
7939 			goto drop;
7940 
7941 		iph = ip_hdr(skb);
7942 		tcp_opt_len = tcp_optlen(skb);
7943 
7944 		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7945 
7946 		/* HW/FW can not correctly segment packets that have been
7947 		 * vlan encapsulated.
7948 		 */
7949 		if (skb->protocol == htons(ETH_P_8021Q) ||
7950 		    skb->protocol == htons(ETH_P_8021AD)) {
7951 			if (tg3_tso_bug_gso_check(tnapi, skb))
7952 				return tg3_tso_bug(tp, tnapi, txq, skb);
7953 			goto drop;
7954 		}
7955 
7956 		if (!skb_is_gso_v6(skb)) {
7957 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7958 			    tg3_flag(tp, TSO_BUG)) {
7959 				if (tg3_tso_bug_gso_check(tnapi, skb))
7960 					return tg3_tso_bug(tp, tnapi, txq, skb);
7961 				goto drop;
7962 			}
7963 			ip_csum = iph->check;
7964 			ip_tot_len = iph->tot_len;
7965 			iph->check = 0;
7966 			iph->tot_len = htons(mss + hdr_len);
7967 		}
7968 
7969 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7970 			       TXD_FLAG_CPU_POST_DMA);
7971 
7972 		tcph = tcp_hdr(skb);
7973 		tcp_csum = tcph->check;
7974 
7975 		if (tg3_flag(tp, HW_TSO_1) ||
7976 		    tg3_flag(tp, HW_TSO_2) ||
7977 		    tg3_flag(tp, HW_TSO_3)) {
7978 			tcph->check = 0;
7979 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7980 		} else {
7981 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7982 							 0, IPPROTO_TCP, 0);
7983 		}
7984 
7985 		if (tg3_flag(tp, HW_TSO_3)) {
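		/* Hand the hardware the TCP/IP header length.  Which bits
		 * of the mss word and of base_flags receive it differs
		 * between the TSO generations handled below.
		 */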
7986 			mss |= (hdr_len & 0xc) << 12;
7987 			if (hdr_len & 0x10)
7988 				base_flags |= 0x00000010;
7989 			base_flags |= (hdr_len & 0x3e0) << 5;
7990 		} else if (tg3_flag(tp, HW_TSO_2))
7991 			mss |= hdr_len << 9;
7992 		else if (tg3_flag(tp, HW_TSO_1) ||
7993 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7994 			if (tcp_opt_len || iph->ihl > 5) {
7995 				int tsflags;
7996 
7997 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7998 				mss |= (tsflags << 11);
7999 			}
8000 		} else {
8001 			if (tcp_opt_len || iph->ihl > 5) {
8002 				int tsflags;
8003 
8004 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8005 				base_flags |= tsflags << 12;
8006 			}
8007 		}
8008 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8009 		/* HW/FW can not correctly checksum packets that have been
8010 		 * vlan encapsulated.
8011 		 */
8012 		if (skb->protocol == htons(ETH_P_8021Q) ||
8013 		    skb->protocol == htons(ETH_P_8021AD)) {
8014 			if (skb_checksum_help(skb))
8015 				goto drop;
8016 		} else  {
8017 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8018 		}
8019 	}
8020 
8021 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8022 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8023 		base_flags |= TXD_FLAG_JMB_PKT;
8024 
8025 	if (skb_vlan_tag_present(skb)) {
8026 		base_flags |= TXD_FLAG_VLAN;
8027 		vlan = skb_vlan_tag_get(skb);
8028 	}
8029 
8030 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8031 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8032 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8033 		base_flags |= TXD_FLAG_HWTSTAMP;
8034 	}
8035 
8036 	len = skb_headlen(skb);
8037 
8038 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8039 				 DMA_TO_DEVICE);
8040 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8041 		goto drop;
8042 
8043 
8044 	tnapi->tx_buffers[entry].skb = skb;
8045 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8046 
8047 	would_hit_hwbug = 0;
8048 
8049 	if (tg3_flag(tp, 5701_DMA_BUG))
8050 		would_hit_hwbug = 1;
8051 
8052 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8053 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8054 			    mss, vlan)) {
8055 		would_hit_hwbug = 1;
8056 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8057 		u32 tmp_mss = mss;
8058 
8059 		if (!tg3_flag(tp, HW_TSO_1) &&
8060 		    !tg3_flag(tp, HW_TSO_2) &&
8061 		    !tg3_flag(tp, HW_TSO_3))
8062 			tmp_mss = 0;
8063 
8064 		/* Now loop through additional data
8065 		 * fragments, and queue them.
8066 		 */
8067 		last = skb_shinfo(skb)->nr_frags - 1;
8068 		for (i = 0; i <= last; i++) {
8069 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8070 
8071 			len = skb_frag_size(frag);
8072 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8073 						   len, DMA_TO_DEVICE);
8074 
8075 			tnapi->tx_buffers[entry].skb = NULL;
8076 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8077 					   mapping);
8078 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8079 				goto dma_error;
8080 
8081 			if (!budget ||
8082 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8083 					    len, base_flags |
8084 					    ((i == last) ? TXD_FLAG_END : 0),
8085 					    tmp_mss, vlan)) {
8086 				would_hit_hwbug = 1;
8087 				break;
8088 			}
8089 		}
8090 	}
8091 
8092 	if (would_hit_hwbug) {
8093 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8094 
8095 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8096 			/* If it's a TSO packet, do GSO instead of
8097 			 * allocating and copying to a large linear SKB
8098 			 */
8099 			if (ip_tot_len) {
8100 				iph->check = ip_csum;
8101 				iph->tot_len = ip_tot_len;
8102 			}
8103 			tcph->check = tcp_csum;
8104 			return tg3_tso_bug(tp, tnapi, txq, skb);
8105 		}
8106 
8107 		/* If the workaround fails due to memory/mapping
8108 		 * failure, silently drop this packet.
8109 		 */
8110 		entry = tnapi->tx_prod;
8111 		budget = tg3_tx_avail(tnapi);
8112 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8113 						base_flags, mss, vlan))
8114 			goto drop_nofree;
8115 	}
8116 
8117 	skb_tx_timestamp(skb);
8118 	netdev_tx_sent_queue(txq, skb->len);
8119 
8120 	/* Sync BD data before updating mailbox */
8121 	wmb();
8122 
8123 	tnapi->tx_prod = entry;
8124 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8125 		netif_tx_stop_queue(txq);
8126 
8127 		/* netif_tx_stop_queue() must be done before checking
8128 		 * the tx index in tg3_tx_avail() below, because in
8129 		 * tg3_tx(), we update tx index before checking for
8130 		 * netif_tx_queue_stopped().
8131 		 */
8132 		smp_mb();
8133 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8134 			netif_tx_wake_queue(txq);
8135 	}
8136 
8137 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8138 		/* Packets are ready, update Tx producer idx on card. */
8139 		tw32_tx_mbox(tnapi->prodmbox, entry);
8140 	}
8141 
8142 	return NETDEV_TX_OK;
8143 
8144 dma_error:
8145 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8146 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8147 drop:
8148 	dev_kfree_skb_any(skb);
8149 drop_nofree:
8150 	tp->tx_dropped++;
8151 	return NETDEV_TX_OK;
8152 }
8153 
8154 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8155 {
8156 	if (enable) {
8157 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8158 				  MAC_MODE_PORT_MODE_MASK);
8159 
8160 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8161 
8162 		if (!tg3_flag(tp, 5705_PLUS))
8163 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8164 
8165 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8166 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8167 		else
8168 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8169 	} else {
8170 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8171 
8172 		if (tg3_flag(tp, 5705_PLUS) ||
8173 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8174 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8175 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8176 	}
8177 
8178 	tw32(MAC_MODE, tp->mac_mode);
8179 	udelay(40);
8180 }
8181 
8182 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8183 {
8184 	u32 val, bmcr, mac_mode, ptest = 0;
8185 
8186 	tg3_phy_toggle_apd(tp, false);
8187 	tg3_phy_toggle_automdix(tp, false);
8188 
8189 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8190 		return -EIO;
8191 
8192 	bmcr = BMCR_FULLDPLX;
8193 	switch (speed) {
8194 	case SPEED_10:
8195 		break;
8196 	case SPEED_100:
8197 		bmcr |= BMCR_SPEED100;
8198 		break;
8199 	case SPEED_1000:
8200 	default:
8201 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8202 			speed = SPEED_100;
8203 			bmcr |= BMCR_SPEED100;
8204 		} else {
8205 			speed = SPEED_1000;
8206 			bmcr |= BMCR_SPEED1000;
8207 		}
8208 	}
8209 
8210 	if (extlpbk) {
8211 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8212 			tg3_readphy(tp, MII_CTRL1000, &val);
8213 			val |= CTL1000_AS_MASTER |
8214 			       CTL1000_ENABLE_MASTER;
8215 			tg3_writephy(tp, MII_CTRL1000, val);
8216 		} else {
8217 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8218 				MII_TG3_FET_PTEST_TRIM_2;
8219 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8220 		}
8221 	} else
8222 		bmcr |= BMCR_LOOPBACK;
8223 
8224 	tg3_writephy(tp, MII_BMCR, bmcr);
8225 
8226 	/* The write needs to be flushed for the FETs */
8227 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8228 		tg3_readphy(tp, MII_BMCR, &bmcr);
8229 
8230 	udelay(40);
8231 
8232 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8233 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8234 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8235 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8236 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8237 
8238 		/* The write needs to be flushed for the AC131 */
8239 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8240 	}
8241 
8242 	/* Reset to prevent intermittently losing the 1st rx packet */
8243 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8244 	    tg3_flag(tp, 5780_CLASS)) {
8245 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8246 		udelay(10);
8247 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8248 	}
8249 
8250 	mac_mode = tp->mac_mode &
8251 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8252 	if (speed == SPEED_1000)
8253 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8254 	else
8255 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8256 
8257 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8258 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8259 
8260 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8261 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8262 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8263 			mac_mode |= MAC_MODE_LINK_POLARITY;
8264 
8265 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8266 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8267 	}
8268 
8269 	tw32(MAC_MODE, mac_mode);
8270 	udelay(40);
8271 
8272 	return 0;
8273 }
8274 
8275 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8276 {
8277 	struct tg3 *tp = netdev_priv(dev);
8278 
8279 	if (features & NETIF_F_LOOPBACK) {
8280 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8281 			return;
8282 
8283 		spin_lock_bh(&tp->lock);
8284 		tg3_mac_loopback(tp, true);
8285 		netif_carrier_on(tp->dev);
8286 		spin_unlock_bh(&tp->lock);
8287 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8288 	} else {
8289 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8290 			return;
8291 
8292 		spin_lock_bh(&tp->lock);
8293 		tg3_mac_loopback(tp, false);
8294 		/* Force link status check */
8295 		tg3_setup_phy(tp, true);
8296 		spin_unlock_bh(&tp->lock);
8297 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8298 	}
8299 }
8300 
8301 static netdev_features_t tg3_fix_features(struct net_device *dev,
8302 	netdev_features_t features)
8303 {
8304 	struct tg3 *tp = netdev_priv(dev);
8305 
8306 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8307 		features &= ~NETIF_F_ALL_TSO;
8308 
8309 	return features;
8310 }
8311 
8312 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8313 {
8314 	netdev_features_t changed = dev->features ^ features;
8315 
8316 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8317 		tg3_set_loopback(dev, features);
8318 
8319 	return 0;
8320 }
8321 
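/* Free the rx buffers of one producer ring set.  For the per-vector
 * (non-default) rings only the occupied span [cons_idx, prod_idx) is
 * walked, using the ring mask for wraparound; the default ring frees
 * every slot unconditionally.
 */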
8322 static void tg3_rx_prodring_free(struct tg3 *tp,
8323 				 struct tg3_rx_prodring_set *tpr)
8324 {
8325 	int i;
8326 
8327 	if (tpr != &tp->napi[0].prodring) {
8328 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8329 		     i = (i + 1) & tp->rx_std_ring_mask)
8330 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8331 					tp->rx_pkt_map_sz);
8332 
8333 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8334 			for (i = tpr->rx_jmb_cons_idx;
8335 			     i != tpr->rx_jmb_prod_idx;
8336 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8337 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8338 						TG3_RX_JMB_MAP_SZ);
8339 			}
8340 		}
8341 
8342 		return;
8343 	}
8344 
8345 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8346 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8347 				tp->rx_pkt_map_sz);
8348 
8349 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8350 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8351 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8352 					TG3_RX_JMB_MAP_SZ);
8353 	}
8354 }
8355 
8356 /* Initialize rx rings for packet processing.
8357  *
8358  * The chip has been shut down and the driver detached from
8359  * the network stack, so no interrupts or new tx packets will
8360  * end up in the driver.  tp->{tx,}lock are held and thus
8361  * we may not sleep.
8362  */
8363 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8364 				 struct tg3_rx_prodring_set *tpr)
8365 {
8366 	u32 i, rx_pkt_dma_sz;
8367 
8368 	tpr->rx_std_cons_idx = 0;
8369 	tpr->rx_std_prod_idx = 0;
8370 	tpr->rx_jmb_cons_idx = 0;
8371 	tpr->rx_jmb_prod_idx = 0;
8372 
8373 	if (tpr != &tp->napi[0].prodring) {
8374 		memset(&tpr->rx_std_buffers[0], 0,
8375 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8376 		if (tpr->rx_jmb_buffers)
8377 			memset(&tpr->rx_jmb_buffers[0], 0,
8378 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8379 		goto done;
8380 	}
8381 
8382 	/* Zero out all descriptors. */
8383 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8384 
8385 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8386 	if (tg3_flag(tp, 5780_CLASS) &&
8387 	    tp->dev->mtu > ETH_DATA_LEN)
8388 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8389 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8390 
8391 	/* Initialize invariants of the rings; we only set this
8392 	 * stuff once.  This works because the card does not
8393 	 * write into the rx buffer posting rings.
8394 	 */
8395 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8396 		struct tg3_rx_buffer_desc *rxd;
8397 
8398 		rxd = &tpr->rx_std[i];
8399 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8400 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8401 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8402 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8403 	}
8404 
8405 	/* Now allocate fresh SKBs for each rx ring. */
8406 	for (i = 0; i < tp->rx_pending; i++) {
8407 		unsigned int frag_size;
8408 
8409 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8410 				      &frag_size) < 0) {
8411 			netdev_warn(tp->dev,
8412 				    "Using a smaller RX standard ring. Only "
8413 				    "%d out of %d buffers were allocated "
8414 				    "successfully\n", i, tp->rx_pending);
8415 			if (i == 0)
8416 				goto initfail;
8417 			tp->rx_pending = i;
8418 			break;
8419 		}
8420 	}
8421 
8422 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8423 		goto done;
8424 
8425 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8426 
8427 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8428 		goto done;
8429 
8430 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8431 		struct tg3_rx_buffer_desc *rxd;
8432 
8433 		rxd = &tpr->rx_jmb[i].std;
8434 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8435 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8436 				  RXD_FLAG_JUMBO;
8437 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8438 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8439 	}
8440 
8441 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8442 		unsigned int frag_size;
8443 
8444 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8445 				      &frag_size) < 0) {
8446 			netdev_warn(tp->dev,
8447 				    "Using a smaller RX jumbo ring. Only %d "
8448 				    "out of %d buffers were allocated "
8449 				    "successfully\n", i, tp->rx_jumbo_pending);
8450 			if (i == 0)
8451 				goto initfail;
8452 			tp->rx_jumbo_pending = i;
8453 			break;
8454 		}
8455 	}
8456 
8457 done:
8458 	return 0;
8459 
8460 initfail:
8461 	tg3_rx_prodring_free(tp, tpr);
8462 	return -ENOMEM;
8463 }
8464 
8465 static void tg3_rx_prodring_fini(struct tg3 *tp,
8466 				 struct tg3_rx_prodring_set *tpr)
8467 {
8468 	kfree(tpr->rx_std_buffers);
8469 	tpr->rx_std_buffers = NULL;
8470 	kfree(tpr->rx_jmb_buffers);
8471 	tpr->rx_jmb_buffers = NULL;
8472 	if (tpr->rx_std) {
8473 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8474 				  tpr->rx_std, tpr->rx_std_mapping);
8475 		tpr->rx_std = NULL;
8476 	}
8477 	if (tpr->rx_jmb) {
8478 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8479 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8480 		tpr->rx_jmb = NULL;
8481 	}
8482 }
8483 
8484 static int tg3_rx_prodring_init(struct tg3 *tp,
8485 				struct tg3_rx_prodring_set *tpr)
8486 {
8487 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8488 				      GFP_KERNEL);
8489 	if (!tpr->rx_std_buffers)
8490 		return -ENOMEM;
8491 
8492 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8493 					 TG3_RX_STD_RING_BYTES(tp),
8494 					 &tpr->rx_std_mapping,
8495 					 GFP_KERNEL);
8496 	if (!tpr->rx_std)
8497 		goto err_out;
8498 
8499 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8500 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8501 					      GFP_KERNEL);
8502 		if (!tpr->rx_jmb_buffers)
8503 			goto err_out;
8504 
8505 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8506 						 TG3_RX_JMB_RING_BYTES(tp),
8507 						 &tpr->rx_jmb_mapping,
8508 						 GFP_KERNEL);
8509 		if (!tpr->rx_jmb)
8510 			goto err_out;
8511 	}
8512 
8513 	return 0;
8514 
8515 err_out:
8516 	tg3_rx_prodring_fini(tp, tpr);
8517 	return -ENOMEM;
8518 }
8519 
8520 /* Free up pending packets in all rx/tx rings.
8521  *
8522  * The chip has been shut down and the driver detached from
8523  * the network stack, so no interrupts or new tx packets will
8524  * end up in the driver.  tp->{tx,}lock is not held and we are not
8525  * in an interrupt context and thus may sleep.
8526  */
8527 static void tg3_free_rings(struct tg3 *tp)
8528 {
8529 	int i, j;
8530 
8531 	for (j = 0; j < tp->irq_cnt; j++) {
8532 		struct tg3_napi *tnapi = &tp->napi[j];
8533 
8534 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8535 
8536 		if (!tnapi->tx_buffers)
8537 			continue;
8538 
8539 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8540 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8541 
8542 			if (!skb)
8543 				continue;
8544 
8545 			tg3_tx_skb_unmap(tnapi, i,
8546 					 skb_shinfo(skb)->nr_frags - 1);
8547 
8548 			dev_consume_skb_any(skb);
8549 		}
8550 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8551 	}
8552 }
8553 
8554 /* Initialize tx/rx rings for packet processing.
8555  *
8556  * The chip has been shut down and the driver detached from
8557  * the network stack, so no interrupts or new tx packets will
8558  * end up in the driver.  tp->{tx,}lock are held and thus
8559  * we may not sleep.
8560  */
8561 static int tg3_init_rings(struct tg3 *tp)
8562 {
8563 	int i;
8564 
8565 	/* Free up all the SKBs. */
8566 	tg3_free_rings(tp);
8567 
8568 	for (i = 0; i < tp->irq_cnt; i++) {
8569 		struct tg3_napi *tnapi = &tp->napi[i];
8570 
8571 		tnapi->last_tag = 0;
8572 		tnapi->last_irq_tag = 0;
8573 		tnapi->hw_status->status = 0;
8574 		tnapi->hw_status->status_tag = 0;
8575 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8576 
8577 		tnapi->tx_prod = 0;
8578 		tnapi->tx_cons = 0;
8579 		if (tnapi->tx_ring)
8580 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8581 
8582 		tnapi->rx_rcb_ptr = 0;
8583 		if (tnapi->rx_rcb)
8584 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8585 
8586 		if (tnapi->prodring.rx_std &&
8587 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8588 			tg3_free_rings(tp);
8589 			return -ENOMEM;
8590 		}
8591 	}
8592 
8593 	return 0;
8594 }
8595 
8596 static void tg3_mem_tx_release(struct tg3 *tp)
8597 {
8598 	int i;
8599 
8600 	for (i = 0; i < tp->irq_max; i++) {
8601 		struct tg3_napi *tnapi = &tp->napi[i];
8602 
8603 		if (tnapi->tx_ring) {
8604 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8605 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8606 			tnapi->tx_ring = NULL;
8607 		}
8608 
8609 		kfree(tnapi->tx_buffers);
8610 		tnapi->tx_buffers = NULL;
8611 	}
8612 }
8613 
8614 static int tg3_mem_tx_acquire(struct tg3 *tp)
8615 {
8616 	int i;
8617 	struct tg3_napi *tnapi = &tp->napi[0];
8618 
8619 	/* If multivector TSS is enabled, vector 0 does not handle
8620 	 * tx interrupts.  Don't allocate any resources for it.
8621 	 */
8622 	if (tg3_flag(tp, ENABLE_TSS))
8623 		tnapi++;
8624 
8625 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8626 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8627 					    sizeof(struct tg3_tx_ring_info),
8628 					    GFP_KERNEL);
8629 		if (!tnapi->tx_buffers)
8630 			goto err_out;
8631 
8632 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8633 						    TG3_TX_RING_BYTES,
8634 						    &tnapi->tx_desc_mapping,
8635 						    GFP_KERNEL);
8636 		if (!tnapi->tx_ring)
8637 			goto err_out;
8638 	}
8639 
8640 	return 0;
8641 
8642 err_out:
8643 	tg3_mem_tx_release(tp);
8644 	return -ENOMEM;
8645 }
8646 
8647 static void tg3_mem_rx_release(struct tg3 *tp)
8648 {
8649 	int i;
8650 
8651 	for (i = 0; i < tp->irq_max; i++) {
8652 		struct tg3_napi *tnapi = &tp->napi[i];
8653 
8654 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8655 
8656 		if (!tnapi->rx_rcb)
8657 			continue;
8658 
8659 		dma_free_coherent(&tp->pdev->dev,
8660 				  TG3_RX_RCB_RING_BYTES(tp),
8661 				  tnapi->rx_rcb,
8662 				  tnapi->rx_rcb_mapping);
8663 		tnapi->rx_rcb = NULL;
8664 	}
8665 }
8666 
8667 static int tg3_mem_rx_acquire(struct tg3 *tp)
8668 {
8669 	unsigned int i, limit;
8670 
8671 	limit = tp->rxq_cnt;
8672 
8673 	/* If RSS is enabled, we need a (dummy) producer ring
8674 	 * set on vector zero.  This is the true hw prodring.
8675 	 */
8676 	if (tg3_flag(tp, ENABLE_RSS))
8677 		limit++;
8678 
8679 	for (i = 0; i < limit; i++) {
8680 		struct tg3_napi *tnapi = &tp->napi[i];
8681 
8682 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8683 			goto err_out;
8684 
8685 		/* If multivector RSS is enabled, vector 0
8686 		 * does not handle rx or tx interrupts.
8687 		 * Don't allocate any resources for it.
8688 		 */
8689 		if (!i && tg3_flag(tp, ENABLE_RSS))
8690 			continue;
8691 
8692 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8693 						   TG3_RX_RCB_RING_BYTES(tp),
8694 						   &tnapi->rx_rcb_mapping,
8695 						   GFP_KERNEL);
8696 		if (!tnapi->rx_rcb)
8697 			goto err_out;
8698 	}
8699 
8700 	return 0;
8701 
8702 err_out:
8703 	tg3_mem_rx_release(tp);
8704 	return -ENOMEM;
8705 }
8706 
8707 /*
8708  * Must not be invoked with interrupt sources disabled and
8709  * the hardware shut down.
8710  */
8711 static void tg3_free_consistent(struct tg3 *tp)
8712 {
8713 	int i;
8714 
8715 	for (i = 0; i < tp->irq_cnt; i++) {
8716 		struct tg3_napi *tnapi = &tp->napi[i];
8717 
8718 		if (tnapi->hw_status) {
8719 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8720 					  tnapi->hw_status,
8721 					  tnapi->status_mapping);
8722 			tnapi->hw_status = NULL;
8723 		}
8724 	}
8725 
8726 	tg3_mem_rx_release(tp);
8727 	tg3_mem_tx_release(tp);
8728 
8729 	/* tp->hw_stats can be referenced safely:
8730 	 *     1. under rtnl_lock
8731 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8732 	 */
8733 	if (tp->hw_stats) {
8734 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8735 				  tp->hw_stats, tp->stats_mapping);
8736 		tp->hw_stats = NULL;
8737 	}
8738 }
8739 
8740 /*
8741  * Must not be invoked with interrupt sources disabled and
8742  * the hardware shut down.  Can sleep.
8743  */
8744 static int tg3_alloc_consistent(struct tg3 *tp)
8745 {
8746 	int i;
8747 
8748 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8749 					  sizeof(struct tg3_hw_stats),
8750 					  &tp->stats_mapping, GFP_KERNEL);
8751 	if (!tp->hw_stats)
8752 		goto err_out;
8753 
8754 	for (i = 0; i < tp->irq_cnt; i++) {
8755 		struct tg3_napi *tnapi = &tp->napi[i];
8756 		struct tg3_hw_status *sblk;
8757 
8758 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8759 						      TG3_HW_STATUS_SIZE,
8760 						      &tnapi->status_mapping,
8761 						      GFP_KERNEL);
8762 		if (!tnapi->hw_status)
8763 			goto err_out;
8764 
8765 		sblk = tnapi->hw_status;
8766 
8767 		if (tg3_flag(tp, ENABLE_RSS)) {
8768 			u16 *prodptr = NULL;
8769 
8770 			/*
8771 			 * When RSS is enabled, the status block format changes
8772 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8773 			 * and "rx_mini_consumer" members get mapped to the
8774 			 * other three rx return ring producer indexes.
8775 			 */
8776 			switch (i) {
8777 			case 1:
8778 				prodptr = &sblk->idx[0].rx_producer;
8779 				break;
8780 			case 2:
8781 				prodptr = &sblk->rx_jumbo_consumer;
8782 				break;
8783 			case 3:
8784 				prodptr = &sblk->reserved;
8785 				break;
8786 			case 4:
8787 				prodptr = &sblk->rx_mini_consumer;
8788 				break;
8789 			}
8790 			tnapi->rx_rcb_prod_idx = prodptr;
8791 		} else {
8792 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8793 		}
8794 	}
8795 
8796 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8797 		goto err_out;
8798 
8799 	return 0;
8800 
8801 err_out:
8802 	tg3_free_consistent(tp);
8803 	return -ENOMEM;
8804 }
8805 
8806 #define MAX_WAIT_CNT 1000
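/* (1000 polls of 100 us each below: a 100 ms upper bound per block) */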
8807 
8808 /* To stop a block, clear the enable bit and poll till it
8809  * clears.  tp->lock is held.
8810  */
8811 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8812 {
8813 	unsigned int i;
8814 	u32 val;
8815 
8816 	if (tg3_flag(tp, 5705_PLUS)) {
8817 		switch (ofs) {
8818 		case RCVLSC_MODE:
8819 		case DMAC_MODE:
8820 		case MBFREE_MODE:
8821 		case BUFMGR_MODE:
8822 		case MEMARB_MODE:
8823 			/* We can't enable/disable these bits on the
8824 			 * 5705/5750, so just report success.
8825 			 */
8826 			return 0;
8827 
8828 		default:
8829 			break;
8830 		}
8831 	}
8832 
8833 	val = tr32(ofs);
8834 	val &= ~enable_bit;
8835 	tw32_f(ofs, val);
8836 
8837 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8838 		if (pci_channel_offline(tp->pdev)) {
8839 			dev_err(&tp->pdev->dev,
8840 				"tg3_stop_block device offline, "
8841 				"ofs=%lx enable_bit=%x\n",
8842 				ofs, enable_bit);
8843 			return -ENODEV;
8844 		}
8845 
8846 		udelay(100);
8847 		val = tr32(ofs);
8848 		if ((val & enable_bit) == 0)
8849 			break;
8850 	}
8851 
8852 	if (i == MAX_WAIT_CNT && !silent) {
8853 		dev_err(&tp->pdev->dev,
8854 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8855 			ofs, enable_bit);
8856 		return -ENODEV;
8857 	}
8858 
8859 	return 0;
8860 }
8861 
8862 /* tp->lock is held. */
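/* Stop the rx and tx engine blocks roughly in dependency order,
 * clearing each MODE_ENABLE bit and polling for it to latch (see
 * tg3_stop_block()), then reset the FTQs and zero every vector's
 * status block.
 */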
8863 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8864 {
8865 	int i, err;
8866 
8867 	tg3_disable_ints(tp);
8868 
8869 	if (pci_channel_offline(tp->pdev)) {
8870 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8871 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8872 		err = -ENODEV;
8873 		goto err_no_dev;
8874 	}
8875 
8876 	tp->rx_mode &= ~RX_MODE_ENABLE;
8877 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8878 	udelay(10);
8879 
8880 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8881 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8882 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8883 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8884 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8885 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8886 
8887 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8888 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8889 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8890 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8891 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8892 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8893 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8894 
8895 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8896 	tw32_f(MAC_MODE, tp->mac_mode);
8897 	udelay(40);
8898 
8899 	tp->tx_mode &= ~TX_MODE_ENABLE;
8900 	tw32_f(MAC_TX_MODE, tp->tx_mode);
8901 
8902 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8903 		udelay(100);
8904 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8905 			break;
8906 	}
8907 	if (i >= MAX_WAIT_CNT) {
8908 		dev_err(&tp->pdev->dev,
8909 			"%s timed out, TX_MODE_ENABLE will not clear "
8910 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8911 		err |= -ENODEV;
8912 	}
8913 
8914 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8915 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8916 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8917 
8918 	tw32(FTQ_RESET, 0xffffffff);
8919 	tw32(FTQ_RESET, 0x00000000);
8920 
8921 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8922 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8923 
8924 err_no_dev:
8925 	for (i = 0; i < tp->irq_cnt; i++) {
8926 		struct tg3_napi *tnapi = &tp->napi[i];
8927 		if (tnapi->hw_status)
8928 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8929 	}
8930 
8931 	return err;
8932 }
8933 
8934 /* Save PCI command register before chip reset */
8935 static void tg3_save_pci_state(struct tg3 *tp)
8936 {
8937 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8938 }
8939 
8940 /* Restore PCI state after chip reset */
8941 static void tg3_restore_pci_state(struct tg3 *tp)
8942 {
8943 	u32 val;
8944 
8945 	/* Re-enable indirect register accesses. */
8946 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8947 			       tp->misc_host_ctrl);
8948 
8949 	/* Set MAX PCI retry to zero. */
8950 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8951 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8952 	    tg3_flag(tp, PCIX_MODE))
8953 		val |= PCISTATE_RETRY_SAME_DMA;
8954 	/* Allow reads and writes to the APE register and memory space. */
8955 	if (tg3_flag(tp, ENABLE_APE))
8956 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8957 		       PCISTATE_ALLOW_APE_SHMEM_WR |
8958 		       PCISTATE_ALLOW_APE_PSPACE_WR;
8959 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8960 
8961 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8962 
8963 	if (!tg3_flag(tp, PCI_EXPRESS)) {
8964 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8965 				      tp->pci_cacheline_sz);
8966 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8967 				      tp->pci_lat_timer);
8968 	}
8969 
8970 	/* Make sure PCI-X relaxed ordering bit is clear. */
8971 	if (tg3_flag(tp, PCIX_MODE)) {
8972 		u16 pcix_cmd;
8973 
8974 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8975 				     &pcix_cmd);
8976 		pcix_cmd &= ~PCI_X_CMD_ERO;
8977 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8978 				      pcix_cmd);
8979 	}
8980 
8981 	if (tg3_flag(tp, 5780_CLASS)) {
8982 
8983 		/* Chip reset on 5780 will reset MSI enable bit,
8984 		 * so need to restore it.
8985 		 */
8986 		if (tg3_flag(tp, USING_MSI)) {
8987 			u16 ctrl;
8988 
8989 			pci_read_config_word(tp->pdev,
8990 					     tp->msi_cap + PCI_MSI_FLAGS,
8991 					     &ctrl);
8992 			pci_write_config_word(tp->pdev,
8993 					      tp->msi_cap + PCI_MSI_FLAGS,
8994 					      ctrl | PCI_MSI_FLAGS_ENABLE);
8995 			val = tr32(MSGINT_MODE);
8996 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8997 		}
8998 	}
8999 }
9000 
9001 static void tg3_override_clk(struct tg3 *tp)
9002 {
9003 	u32 val;
9004 
9005 	switch (tg3_asic_rev(tp)) {
9006 	case ASIC_REV_5717:
9007 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9008 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9009 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9010 		break;
9011 
9012 	case ASIC_REV_5719:
9013 	case ASIC_REV_5720:
9014 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9015 		break;
9016 
9017 	default:
9018 		return;
9019 	}
9020 }
9021 
9022 static void tg3_restore_clk(struct tg3 *tp)
9023 {
9024 	u32 val;
9025 
9026 	switch (tg3_asic_rev(tp)) {
9027 	case ASIC_REV_5717:
9028 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9029 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9030 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9031 		break;
9032 
9033 	case ASIC_REV_5719:
9034 	case ASIC_REV_5720:
9035 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9036 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9037 		break;
9038 
9039 	default:
9040 		return;
9041 	}
9042 }
9043 
9044 /* tp->lock is held. */
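/* Rough sequence (sketch): save the PCI state, fence off the irq
 * handlers via CHIP_RESETTING, issue GRC_MISC_CFG_CORECLK_RESET, wait
 * for the chip and bootcode to come back, restore the PCI state, then
 * re-probe the ASF/clock configuration.
 */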
9045 static int tg3_chip_reset(struct tg3 *tp)
9046 	__releases(tp->lock)
9047 	__acquires(tp->lock)
9048 {
9049 	u32 val;
9050 	void (*write_op)(struct tg3 *, u32, u32);
9051 	int i, err;
9052 
9053 	if (!pci_device_is_present(tp->pdev))
9054 		return -ENODEV;
9055 
9056 	tg3_nvram_lock(tp);
9057 
9058 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9059 
9060 	/* No matching tg3_nvram_unlock() after this because
9061 	 * chip reset below will undo the nvram lock.
9062 	 */
9063 	tp->nvram_lock_cnt = 0;
9064 
9065 	/* GRC_MISC_CFG core clock reset will clear the memory
9066 	 * enable bit in PCI register 4 and the MSI enable bit
9067 	 * on some chips, so we save relevant registers here.
9068 	 */
9069 	tg3_save_pci_state(tp);
9070 
9071 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9072 	    tg3_flag(tp, 5755_PLUS))
9073 		tw32(GRC_FASTBOOT_PC, 0);
9074 
9075 	/*
9076 	 * We must avoid the readl() that normally takes place.
9077 	 * It locks machines, causes machine checks, and other
9078 	 * fun things.  So, temporarily disable the 5701
9079 	 * hardware workaround, while we do the reset.
9080 	 */
9081 	write_op = tp->write32;
9082 	if (write_op == tg3_write_flush_reg32)
9083 		tp->write32 = tg3_write32;
9084 
9085 	/* Prevent the irq handler from reading or writing PCI registers
9086 	 * during chip reset when the memory enable bit in the PCI command
9087 	 * register may be cleared.  The chip does not generate interrupt
9088 	 * at this time, but the irq handler may still be called due to irq
9089 	 * sharing or irqpoll.
9090 	 */
9091 	tg3_flag_set(tp, CHIP_RESETTING);
9092 	for (i = 0; i < tp->irq_cnt; i++) {
9093 		struct tg3_napi *tnapi = &tp->napi[i];
9094 		if (tnapi->hw_status) {
9095 			tnapi->hw_status->status = 0;
9096 			tnapi->hw_status->status_tag = 0;
9097 		}
9098 		tnapi->last_tag = 0;
9099 		tnapi->last_irq_tag = 0;
9100 	}
9101 	smp_mb();
9102 
9103 	tg3_full_unlock(tp);
9104 
9105 	for (i = 0; i < tp->irq_cnt; i++)
9106 		synchronize_irq(tp->napi[i].irq_vec);
9107 
9108 	tg3_full_lock(tp, 0);
9109 
9110 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9111 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9112 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9113 	}
9114 
9115 	/* do the reset */
9116 	val = GRC_MISC_CFG_CORECLK_RESET;
9117 
9118 	if (tg3_flag(tp, PCI_EXPRESS)) {
9119 		/* Force PCIe 1.0a mode */
9120 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9121 		    !tg3_flag(tp, 57765_PLUS) &&
9122 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9123 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9124 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9125 
9126 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9127 			tw32(GRC_MISC_CFG, (1 << 29));
9128 			val |= (1 << 29);
9129 		}
9130 	}
9131 
9132 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9133 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9134 		tw32(GRC_VCPU_EXT_CTRL,
9135 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9136 	}
9137 
9138 	/* Set the clock to the highest frequency to avoid timeouts. With link
9139 	 * aware mode, the clock speed could be slow and bootcode does not
9140 	 * complete within the expected time. Override the clock to allow the
9141 	 * bootcode to finish sooner and then restore it.
9142 	 */
9143 	tg3_override_clk(tp);
9144 
9145 	/* Manage gphy power for all CPMU absent PCIe devices. */
9146 	/* Manage gphy power for all CPMU-absent PCIe devices. */
9147 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9148 
9149 	tw32(GRC_MISC_CFG, val);
9150 
9151 	/* restore 5701 hardware bug workaround write method */
9152 	tp->write32 = write_op;
9153 
9154 	/* Unfortunately, we have to delay before the PCI read back.
9155 	 * Some 575X chips even will not respond to a PCI cfg access
9156 	 * Some 575X chips will not even respond to a PCI cfg access
9157 	 *
9158 	 * How do these hardware designers expect things to work
9159 	 * properly if the PCI write is posted for a long period
9160 	 * of time?  It is always necessary to have some method by
9161 	 * which a register read back can occur to push out the
9162 	 * write that performs the reset.
9163 	 *
9164 	 * For most tg3 variants the trick below was working.
9165 	 * Ho hum...
9166 	 */
9167 	udelay(120);
9168 
9169 	/* Flush PCI posted writes.  The normal MMIO registers
9170 	 * are inaccessible at this time so this is the only
9171 	 * way to do this reliably (actually, this is no longer
9172 	 * the case, see above).  I tried to use indirect
9173 	 * register read/write but this upset some 5701 variants.
9174 	 */
9175 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9176 
9177 	udelay(120);
9178 
9179 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9180 		u16 val16;
9181 
9182 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9183 			int j;
9184 			u32 cfg_val;
9185 
9186 			/* Wait for link training to complete.  */
9187 			for (j = 0; j < 5000; j++)
9188 				udelay(100);
9189 
9190 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9191 			pci_write_config_dword(tp->pdev, 0xc4,
9192 					       cfg_val | (1 << 15));
9193 		}
9194 
9195 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9196 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9197 		/*
9198 		 * Older PCIe devices only support the 128-byte
9199 		 * MPS setting.  Enforce the restriction.
9200 		 */
9201 		if (!tg3_flag(tp, CPMU_PRESENT))
9202 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9203 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9204 
9205 		/* Clear error status */
9206 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9207 				      PCI_EXP_DEVSTA_CED |
9208 				      PCI_EXP_DEVSTA_NFED |
9209 				      PCI_EXP_DEVSTA_FED |
9210 				      PCI_EXP_DEVSTA_URD);
9211 	}
9212 
9213 	tg3_restore_pci_state(tp);
9214 
9215 	tg3_flag_clear(tp, CHIP_RESETTING);
9216 	tg3_flag_clear(tp, ERROR_PROCESSED);
9217 
9218 	val = 0;
9219 	if (tg3_flag(tp, 5780_CLASS))
9220 		val = tr32(MEMARB_MODE);
9221 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9222 
9223 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9224 		tg3_stop_fw(tp);
9225 		tw32(0x5000, 0x400);
9226 	}
9227 
9228 	if (tg3_flag(tp, IS_SSB_CORE)) {
9229 		/*
9230 		 * BCM4785: In order to avoid repercussions from using
9231 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9232 		 * which is not required for normal operation.
9233 		 */
9234 		tg3_stop_fw(tp);
9235 		tg3_halt_cpu(tp, RX_CPU_BASE);
9236 	}
9237 
9238 	err = tg3_poll_fw(tp);
9239 	if (err)
9240 		return err;
9241 
9242 	tw32(GRC_MODE, tp->grc_mode);
9243 
9244 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9245 		val = tr32(0xc4);
9246 
9247 		tw32(0xc4, val | (1 << 15));
9248 	}
9249 
9250 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9251 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9252 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9253 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9254 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9255 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9256 	}
9257 
9258 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9259 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9260 		val = tp->mac_mode;
9261 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9262 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9263 		val = tp->mac_mode;
9264 	} else
9265 		val = 0;
9266 
9267 	tw32_f(MAC_MODE, val);
9268 	udelay(40);
9269 
9270 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9271 
9272 	tg3_mdio_start(tp);
9273 
9274 	if (tg3_flag(tp, PCI_EXPRESS) &&
9275 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9276 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9277 	    !tg3_flag(tp, 57765_PLUS)) {
9278 		val = tr32(0x7c00);
9279 
9280 		tw32(0x7c00, val | (1 << 25));
9281 	}
9282 
9283 	tg3_restore_clk(tp);
9284 
9285 	/* Increase the core clock speed to fix tx timeout issue for 5762
9286 	 * with 100Mbps link speed.
9287 	 */
9288 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9289 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9290 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9291 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9292 	}
9293 
9294 	/* Reprobe ASF enable state.  */
9295 	tg3_flag_clear(tp, ENABLE_ASF);
9296 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9297 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9298 
9299 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9300 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9301 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9302 		u32 nic_cfg;
9303 
9304 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9305 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9306 			tg3_flag_set(tp, ENABLE_ASF);
9307 			tp->last_event_jiffies = jiffies;
9308 			if (tg3_flag(tp, 5750_PLUS))
9309 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9310 
9311 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9312 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9313 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9314 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9315 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9316 		}
9317 	}
9318 
9319 	return 0;
9320 }
9321 
9322 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9323 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9324 static void __tg3_set_rx_mode(struct net_device *);
9325 
9326 /* tp->lock is held. */
9327 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9328 {
9329 	int err;
9330 
9331 	tg3_stop_fw(tp);
9332 
9333 	tg3_write_sig_pre_reset(tp, kind);
9334 
9335 	tg3_abort_hw(tp, silent);
9336 	err = tg3_chip_reset(tp);
9337 
9338 	__tg3_set_mac_addr(tp, false);
9339 
9340 	tg3_write_sig_legacy(tp, kind);
9341 	tg3_write_sig_post_reset(tp, kind);
9342 
9343 	if (tp->hw_stats) {
9344 		/* Save the stats across chip resets... */
9345 		tg3_get_nstats(tp, &tp->net_stats_prev);
9346 		tg3_get_estats(tp, &tp->estats_prev);
9347 
9348 		/* And make sure the next sample is new data */
9349 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9350 	}
9351 
9352 	return err;
9353 }
9354 
9355 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9356 {
9357 	struct tg3 *tp = netdev_priv(dev);
9358 	struct sockaddr *addr = p;
9359 	int err = 0;
9360 	bool skip_mac_1 = false;
9361 
9362 	if (!is_valid_ether_addr(addr->sa_data))
9363 		return -EADDRNOTAVAIL;
9364 
9365 	eth_hw_addr_set(dev, addr->sa_data);
9366 
9367 	if (!netif_running(dev))
9368 		return 0;
9369 
9370 	if (tg3_flag(tp, ENABLE_ASF)) {
9371 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9372 
9373 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9374 		addr0_low = tr32(MAC_ADDR_0_LOW);
9375 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9376 		addr1_low = tr32(MAC_ADDR_1_LOW);
9377 
9378 		/* Skip MAC addr 1 if ASF is using it (nonzero and != MAC addr 0). */
9379 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9380 		    !(addr1_high == 0 && addr1_low == 0))
9381 			skip_mac_1 = true;
9382 	}
9383 	spin_lock_bh(&tp->lock);
9384 	__tg3_set_mac_addr(tp, skip_mac_1);
9385 	__tg3_set_rx_mode(dev);
9386 	spin_unlock_bh(&tp->lock);
9387 
9388 	return err;
9389 }
9390 
9391 /* tp->lock is held. */
9392 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9393 			   dma_addr_t mapping, u32 maxlen_flags,
9394 			   u32 nic_addr)
9395 {
9396 	tg3_write_mem(tp,
9397 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9398 		      ((u64) mapping >> 32));
9399 	tg3_write_mem(tp,
9400 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9401 		      ((u64) mapping & 0xffffffff));
9402 	tg3_write_mem(tp,
9403 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9404 		       maxlen_flags);
9405 
9406 	if (!tg3_flag(tp, 5705_PLUS))
9407 		tg3_write_mem(tp,
9408 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9409 			      nic_addr);
9410 }
9411 
9412 
9413 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9414 {
9415 	int i = 0;
9416 
9417 	if (!tg3_flag(tp, ENABLE_TSS)) {
9418 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9419 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9420 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9421 	} else {
9422 		tw32(HOSTCC_TXCOL_TICKS, 0);
9423 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9424 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9425 
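		/* Per-vector coalescing registers are laid out 0x18 bytes
		 * apart, starting at the *_VEC1 offsets.
		 */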
9426 		for (; i < tp->txq_cnt; i++) {
9427 			u32 reg;
9428 
9429 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9430 			tw32(reg, ec->tx_coalesce_usecs);
9431 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9432 			tw32(reg, ec->tx_max_coalesced_frames);
9433 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9434 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9435 		}
9436 	}
9437 
9438 	for (; i < tp->irq_max - 1; i++) {
9439 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9440 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9441 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9442 	}
9443 }
9444 
9445 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9446 {
9447 	int i = 0;
9448 	u32 limit = tp->rxq_cnt;
9449 
9450 	if (!tg3_flag(tp, ENABLE_RSS)) {
9451 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9452 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9453 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9454 		limit--;
9455 	} else {
9456 		tw32(HOSTCC_RXCOL_TICKS, 0);
9457 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9458 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9459 	}
9460 
9461 	for (; i < limit; i++) {
9462 		u32 reg;
9463 
9464 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9465 		tw32(reg, ec->rx_coalesce_usecs);
9466 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9467 		tw32(reg, ec->rx_max_coalesced_frames);
9468 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9469 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9470 	}
9471 
9472 	for (; i < tp->irq_max - 1; i++) {
9473 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9474 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9475 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9476 	}
9477 }
9478 
9479 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9480 {
9481 	tg3_coal_tx_init(tp, ec);
9482 	tg3_coal_rx_init(tp, ec);
9483 
9484 	if (!tg3_flag(tp, 5705_PLUS)) {
9485 		u32 val = ec->stats_block_coalesce_usecs;
9486 
9487 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9488 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9489 
9490 		if (!tp->link_up)
9491 			val = 0;
9492 
9493 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9494 	}
9495 }
9496 
9497 /* tp->lock is held. */
9498 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9499 {
9500 	u32 txrcb, limit;
9501 
9502 	/* Disable all transmit rings but the first. */
9503 	if (!tg3_flag(tp, 5705_PLUS))
9504 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9505 	else if (tg3_flag(tp, 5717_PLUS))
9506 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9507 	else if (tg3_flag(tp, 57765_CLASS) ||
9508 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9509 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9510 	else
9511 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9512 
9513 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9514 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9515 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9516 			      BDINFO_FLAGS_DISABLED);
9517 }
9518 
9519 /* tp->lock is held. */
9520 static void tg3_tx_rcbs_init(struct tg3 *tp)
9521 {
9522 	int i = 0;
9523 	u32 txrcb = NIC_SRAM_SEND_RCB;
9524 
9525 	if (tg3_flag(tp, ENABLE_TSS))
9526 		i++;
9527 
9528 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9529 		struct tg3_napi *tnapi = &tp->napi[i];
9530 
9531 		if (!tnapi->tx_ring)
9532 			continue;
9533 
9534 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9535 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9536 			       NIC_SRAM_TX_BUFFER_DESC);
9537 	}
9538 }
9539 
9540 /* tp->lock is held. */
9541 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9542 {
9543 	u32 rxrcb, limit;
9544 
9545 	/* Disable all receive return rings but the first. */
9546 	if (tg3_flag(tp, 5717_PLUS))
9547 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9548 	else if (!tg3_flag(tp, 5705_PLUS))
9549 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9550 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9551 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9552 		 tg3_flag(tp, 57765_CLASS))
9553 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9554 	else
9555 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9556 
9557 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9558 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9559 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9560 			      BDINFO_FLAGS_DISABLED);
9561 }
9562 
9563 /* tp->lock is held. */
9564 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9565 {
9566 	int i = 0;
9567 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9568 
9569 	if (tg3_flag(tp, ENABLE_RSS))
9570 		i++;
9571 
9572 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9573 		struct tg3_napi *tnapi = &tp->napi[i];
9574 
9575 		if (!tnapi->rx_rcb)
9576 			continue;
9577 
9578 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9579 			       (tp->rx_ret_ring_mask + 1) <<
9580 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9581 	}
9582 }
9583 
9584 /* tp->lock is held. */
9585 static void tg3_rings_reset(struct tg3 *tp)
9586 {
9587 	int i;
9588 	u32 stblk;
9589 	struct tg3_napi *tnapi = &tp->napi[0];
9590 
9591 	tg3_tx_rcbs_disable(tp);
9592 
9593 	tg3_rx_ret_rcbs_disable(tp);
9594 
9595 	/* Disable interrupts */
9596 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9597 	tp->napi[0].chk_msi_cnt = 0;
9598 	tp->napi[0].last_rx_cons = 0;
9599 	tp->napi[0].last_tx_cons = 0;
9600 
9601 	/* Zero mailbox registers. */
9602 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9603 		for (i = 1; i < tp->irq_max; i++) {
9604 			tp->napi[i].tx_prod = 0;
9605 			tp->napi[i].tx_cons = 0;
9606 			if (tg3_flag(tp, ENABLE_TSS))
9607 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9608 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9609 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9610 			tp->napi[i].chk_msi_cnt = 0;
9611 			tp->napi[i].last_rx_cons = 0;
9612 			tp->napi[i].last_tx_cons = 0;
9613 		}
9614 		if (!tg3_flag(tp, ENABLE_TSS))
9615 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9616 	} else {
9617 		tp->napi[0].tx_prod = 0;
9618 		tp->napi[0].tx_cons = 0;
9619 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9620 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9621 	}
9622 
9623 	/* Make sure the NIC-based send BD rings are disabled. */
9624 	if (!tg3_flag(tp, 5705_PLUS)) {
9625 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9626 		for (i = 0; i < 16; i++)
9627 			tw32_tx_mbox(mbox + i * 8, 0);
9628 	}
9629 
9630 	/* Clear status block in ram. */
9631 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9632 
9633 	/* Set status block DMA address */
9634 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9635 	     ((u64) tnapi->status_mapping >> 32));
9636 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9637 	     ((u64) tnapi->status_mapping & 0xffffffff));
9638 
9639 	stblk = HOSTCC_STATBLCK_RING1;
9640 
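	/* Per-vector status block address registers are consecutive
	 * high/low pairs, 8 bytes apart.
	 */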
9641 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9642 		u64 mapping = (u64)tnapi->status_mapping;
9643 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9644 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9645 		stblk += 8;
9646 
9647 		/* Clear status block in ram. */
9648 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9649 	}
9650 
9651 	tg3_tx_rcbs_init(tp);
9652 	tg3_rx_ret_rcbs_init(tp);
9653 }
9654 
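/* Program RX buffer-descriptor replenish watermarks.  The per-ASIC
 * bdcache_maxcnt is how many standard BDs the chip can cache; the
 * thresholds are derived from it and from the configured ring sizes.
 */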
9655 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9656 {
9657 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9658 
9659 	if (!tg3_flag(tp, 5750_PLUS) ||
9660 	    tg3_flag(tp, 5780_CLASS) ||
9661 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9662 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9663 	    tg3_flag(tp, 57765_PLUS))
9664 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9665 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9666 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9667 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9668 	else
9669 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9670 
9671 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9672 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9673 
9674 	val = min(nic_rep_thresh, host_rep_thresh);
9675 	tw32(RCVBDI_STD_THRESH, val);
9676 
9677 	if (tg3_flag(tp, 57765_PLUS))
9678 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9679 
9680 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9681 		return;
9682 
9683 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9684 
9685 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9686 
9687 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9688 	tw32(RCVBDI_JUMBO_THRESH, val);
9689 
9690 	if (tg3_flag(tp, 57765_PLUS))
9691 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9692 }
9693 
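/* Bitwise little-endian CRC-32, used to hash multicast addresses
 * into the MAC hash filter registers (see __tg3_set_rx_mode()).
 */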
9694 static inline u32 calc_crc(unsigned char *buf, int len)
9695 {
9696 	u32 reg;
9697 	u32 tmp;
9698 	int j, k;
9699 
9700 	reg = 0xffffffff;
9701 
9702 	for (j = 0; j < len; j++) {
9703 		reg ^= buf[j];
9704 
9705 		for (k = 0; k < 8; k++) {
9706 			tmp = reg & 0x01;
9707 
9708 			reg >>= 1;
9709 
9710 			if (tmp)
9711 				reg ^= CRC32_POLY_LE;
9712 		}
9713 	}
9714 
9715 	return ~reg;
9716 }
9717 
9718 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9719 {
9720 	/* accept or reject all multicast frames */
9721 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9722 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9723 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9724 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9725 }
9726 
9727 static void __tg3_set_rx_mode(struct net_device *dev)
9728 {
9729 	struct tg3 *tp = netdev_priv(dev);
9730 	u32 rx_mode;
9731 
9732 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9733 				  RX_MODE_KEEP_VLAN_TAG);
9734 
9735 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9736 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9737 	 * flag clear.
9738 	 */
9739 	if (!tg3_flag(tp, ENABLE_ASF))
9740 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9741 #endif
9742 
9743 	if (dev->flags & IFF_PROMISC) {
9744 		/* Promiscuous mode. */
9745 		rx_mode |= RX_MODE_PROMISC;
9746 	} else if (dev->flags & IFF_ALLMULTI) {
9747 		/* Accept all multicast. */
9748 		tg3_set_multi(tp, 1);
9749 	} else if (netdev_mc_empty(dev)) {
9750 		/* Reject all multicast. */
9751 		tg3_set_multi(tp, 0);
9752 	} else {
9753 		/* Accept one or more multicast(s). */
9754 		struct netdev_hw_addr *ha;
9755 		u32 mc_filter[4] = { 0, };
9756 		u32 regidx;
9757 		u32 bit;
9758 		u32 crc;
9759 
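		/* The low 7 bits of the inverted CRC pick one of 128 hash
		 * filter bits: bits 6:5 select the register, bits 4:0 the
		 * bit within it.
		 */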
9760 		netdev_for_each_mc_addr(ha, dev) {
9761 			crc = calc_crc(ha->addr, ETH_ALEN);
9762 			bit = ~crc & 0x7f;
9763 			regidx = (bit & 0x60) >> 5;
9764 			bit &= 0x1f;
9765 			mc_filter[regidx] |= (1 << bit);
9766 		}
9767 
9768 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9769 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9770 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9771 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9772 	}
9773 
9774 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9775 		rx_mode |= RX_MODE_PROMISC;
9776 	} else if (!(dev->flags & IFF_PROMISC)) {
9777 		/* Add all entries into to the mac addr filter list */
9778 		int i = 0;
9779 		struct netdev_hw_addr *ha;
9780 
9781 		netdev_for_each_uc_addr(ha, dev) {
9782 			__tg3_set_one_mac_addr(tp, ha->addr,
9783 					       i + TG3_UCAST_ADDR_IDX(tp));
9784 			i++;
9785 		}
9786 	}
9787 
9788 	if (rx_mode != tp->rx_mode) {
9789 		tp->rx_mode = rx_mode;
9790 		tw32_f(MAC_RX_MODE, rx_mode);
9791 		udelay(10);
9792 	}
9793 }
9794 
9795 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9796 {
9797 	int i;
9798 
9799 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9800 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9801 }
9802 
9803 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9804 {
9805 	int i;
9806 
9807 	if (!tg3_flag(tp, SUPPORT_MSIX))
9808 		return;
9809 
9810 	if (tp->rxq_cnt == 1) {
9811 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9812 		return;
9813 	}
9814 
9815 	/* Validate table against current IRQ count */
9816 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9817 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9818 			break;
9819 	}
9820 
9821 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9822 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9823 }
9824 
9825 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9826 {
9827 	int i = 0;
9828 	u32 reg = MAC_RSS_INDIR_TBL_0;
9829 
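	/* Each 32-bit register packs eight 4-bit indirection-table
	 * entries, with the first entry in the most significant nibble.
	 */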
9830 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9831 		u32 val = tp->rss_ind_tbl[i];
9832 		i++;
9833 		for (; i % 8; i++) {
9834 			val <<= 4;
9835 			val |= tp->rss_ind_tbl[i];
9836 		}
9837 		tw32(reg, val);
9838 		reg += 4;
9839 	}
9840 }
9841 
9842 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9843 {
9844 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9845 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9846 	else
9847 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9848 }
9849 
9850 /* tp->lock is held. */
9851 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9852 {
9853 	u32 val, rdmac_mode;
9854 	int i, err, limit;
9855 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9856 
9857 	tg3_disable_ints(tp);
9858 
9859 	tg3_stop_fw(tp);
9860 
9861 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9862 
9863 	if (tg3_flag(tp, INIT_COMPLETE))
9864 		tg3_abort_hw(tp, 1);
9865 
9866 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9867 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9868 		tg3_phy_pull_config(tp);
9869 		tg3_eee_pull_config(tp, NULL);
9870 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9871 	}
9872 
9873 	/* Enable MAC control of LPI */
9874 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9875 		tg3_setup_eee(tp);
9876 
9877 	if (reset_phy)
9878 		tg3_phy_reset(tp);
9879 
9880 	err = tg3_chip_reset(tp);
9881 	if (err)
9882 		return err;
9883 
9884 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9885 
9886 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9887 		val = tr32(TG3_CPMU_CTRL);
9888 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9889 		tw32(TG3_CPMU_CTRL, val);
9890 
9891 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9892 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9893 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9894 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9895 
9896 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9897 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9898 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9899 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9900 
9901 		val = tr32(TG3_CPMU_HST_ACC);
9902 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9903 		val |= CPMU_HST_ACC_MACCLK_6_25;
9904 		tw32(TG3_CPMU_HST_ACC, val);
9905 	}
9906 
9907 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9908 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9909 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9910 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9911 		tw32(PCIE_PWR_MGMT_THRESH, val);
9912 
9913 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9914 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9915 
9916 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9917 
9918 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9919 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9920 	}
9921 
9922 	if (tg3_flag(tp, L1PLLPD_EN)) {
9923 		u32 grc_mode = tr32(GRC_MODE);
9924 
9925 		/* Access the lower 1K of PL PCIE block registers. */
9926 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9927 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9928 
9929 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9930 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9931 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9932 
9933 		tw32(GRC_MODE, grc_mode);
9934 	}
9935 
9936 	if (tg3_flag(tp, 57765_CLASS)) {
9937 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9938 			u32 grc_mode = tr32(GRC_MODE);
9939 
9940 			/* Access the lower 1K of PL PCIE block registers. */
9941 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9942 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9943 
9944 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9945 				   TG3_PCIE_PL_LO_PHYCTL5);
9946 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9947 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9948 
9949 			tw32(GRC_MODE, grc_mode);
9950 		}
9951 
9952 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9953 			u32 grc_mode;
9954 
9955 			/* Fix transmit hangs */
9956 			val = tr32(TG3_CPMU_PADRNG_CTL);
9957 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9958 			tw32(TG3_CPMU_PADRNG_CTL, val);
9959 
9960 			grc_mode = tr32(GRC_MODE);
9961 
9962 			/* Access the lower 1K of DL PCIE block registers. */
9963 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9964 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9965 
9966 			val = tr32(TG3_PCIE_TLDLPL_PORT +
9967 				   TG3_PCIE_DL_LO_FTSMAX);
9968 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9969 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9970 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9971 
9972 			tw32(GRC_MODE, grc_mode);
9973 		}
9974 
9975 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9976 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9977 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9978 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9979 	}
9980 
9981 	/* This works around an issue with Athlon chipsets on
9982 	 * B3 tigon3 silicon.  This bit has no effect on any
9983 	 * other revision.  But do not set this on PCI Express
9984 	 * chips and don't even touch the clocks if the CPMU is present.
9985 	 */
9986 	if (!tg3_flag(tp, CPMU_PRESENT)) {
9987 		if (!tg3_flag(tp, PCI_EXPRESS))
9988 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9989 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9990 	}
9991 
9992 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9993 	    tg3_flag(tp, PCIX_MODE)) {
9994 		val = tr32(TG3PCI_PCISTATE);
9995 		val |= PCISTATE_RETRY_SAME_DMA;
9996 		tw32(TG3PCI_PCISTATE, val);
9997 	}
9998 
9999 	if (tg3_flag(tp, ENABLE_APE)) {
10000 		/* Allow reads and writes to the
10001 		 * APE register and memory space.
10002 		 */
10003 		val = tr32(TG3PCI_PCISTATE);
10004 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10005 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10006 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10007 		tw32(TG3PCI_PCISTATE, val);
10008 	}
10009 
10010 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10011 		/* Enable some hw fixes.  */
10012 		val = tr32(TG3PCI_MSI_DATA);
10013 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10014 		tw32(TG3PCI_MSI_DATA, val);
10015 	}
10016 
10017 	/* Descriptor ring init may make accesses to the
10018 	 * NIC SRAM area to set up the TX descriptors, so we
10019 	 * can only do this after the hardware has been
10020 	 * successfully reset.
10021 	 */
10022 	err = tg3_init_rings(tp);
10023 	if (err)
10024 		return err;
10025 
10026 	if (tg3_flag(tp, 57765_PLUS)) {
10027 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10028 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10029 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10030 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10031 		if (!tg3_flag(tp, 57765_CLASS) &&
10032 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10033 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10034 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10035 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10036 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10037 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10038 		/* This value is determined during the probe time DMA
10039 		 * engine test, tg3_test_dma.
10040 		 */
10041 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10042 	}
10043 
10044 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10045 			  GRC_MODE_4X_NIC_SEND_RINGS |
10046 			  GRC_MODE_NO_TX_PHDR_CSUM |
10047 			  GRC_MODE_NO_RX_PHDR_CSUM);
10048 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10049 
10050 	/* Pseudo-header checksum is done by hardware logic and not
10051 	 * the offload processors, so make the chip do the pseudo-
10052 	 * header checksums on receive.  For transmit it is more
10053 	 * convenient to do the pseudo-header checksum in software
10054 	 * as Linux does that on transmit for us in all cases.
10055 	 */
10056 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10057 
10058 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10059 	if (tp->rxptpctl)
10060 		tw32(TG3_RX_PTP_CTL,
10061 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10062 
10063 	if (tg3_flag(tp, PTP_CAPABLE))
10064 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10065 
10066 	tw32(GRC_MODE, tp->grc_mode | val);
10067 
10068 	/* On one AMD platform, MRRS is restricted to 4000 because of a
10069 	 * south bridge limitation.  As a workaround, the driver sets MRRS
10070 	 * to 2048 instead of the default 4096.
10071 	 */
10072 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10073 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10074 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10075 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10076 	}
10077 
10078 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
10079 	val = tr32(GRC_MISC_CFG);
10080 	val &= ~0xff;
10081 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10082 	tw32(GRC_MISC_CFG, val);
10083 
10084 	/* Initialize MBUF/DESC pool. */
10085 	if (tg3_flag(tp, 5750_PLUS)) {
10086 		/* Do nothing.  */
10087 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10088 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10089 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10090 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10091 		else
10092 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10093 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10094 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10095 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10096 		int fw_len;
10097 
10098 		fw_len = tp->fw_len;
10099 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10100 		tw32(BUFMGR_MB_POOL_ADDR,
10101 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10102 		tw32(BUFMGR_MB_POOL_SIZE,
10103 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10104 	}
10105 
10106 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10107 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10108 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10109 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10110 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10111 		tw32(BUFMGR_MB_HIGH_WATER,
10112 		     tp->bufmgr_config.mbuf_high_water);
10113 	} else {
10114 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10115 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10116 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10117 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10118 		tw32(BUFMGR_MB_HIGH_WATER,
10119 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10120 	}
10121 	tw32(BUFMGR_DMA_LOW_WATER,
10122 	     tp->bufmgr_config.dma_low_water);
10123 	tw32(BUFMGR_DMA_HIGH_WATER,
10124 	     tp->bufmgr_config.dma_high_water);
10125 
10126 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10127 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10128 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10129 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10130 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10131 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10132 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10133 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10134 	tw32(BUFMGR_MODE, val);
10135 	for (i = 0; i < 2000; i++) {
10136 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10137 			break;
10138 		udelay(10);
10139 	}
10140 	if (i >= 2000) {
10141 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10142 		return -ENODEV;
10143 	}
10144 
10145 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10146 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10147 
10148 	tg3_setup_rxbd_thresholds(tp);
10149 
10150 	/* Initialize TG3_BDINFO's at:
10151 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10152 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10153 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10154 	 *
10155 	 * like so:
10156 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10157 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10158 	 *                              ring attribute flags
10159 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10160 	 *
10161 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10162 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10163 	 *
10164 	 * The size of each ring is fixed in the firmware, but the location is
10165 	 * configurable.
10166 	 */
10167 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10168 	     ((u64) tpr->rx_std_mapping >> 32));
10169 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10170 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10171 	if (!tg3_flag(tp, 5717_PLUS))
10172 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10173 		     NIC_SRAM_RX_BUFFER_DESC);
10174 
10175 	/* Disable the mini ring */
10176 	if (!tg3_flag(tp, 5705_PLUS))
10177 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10178 		     BDINFO_FLAGS_DISABLED);
10179 
10180 	/* Program the jumbo buffer descriptor ring control
10181 	 * blocks on those devices that have them.
10182 	 */
10183 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10184 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10185 
10186 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10187 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10188 			     ((u64) tpr->rx_jmb_mapping >> 32));
10189 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10190 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10191 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10192 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10193 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10194 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10195 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10196 			    tg3_flag(tp, 57765_CLASS) ||
10197 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10198 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10199 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10200 		} else {
10201 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10202 			     BDINFO_FLAGS_DISABLED);
10203 		}
10204 
10205 		if (tg3_flag(tp, 57765_PLUS)) {
10206 			val = TG3_RX_STD_RING_SIZE(tp);
10207 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10208 			val |= (TG3_RX_STD_DMA_SZ << 2);
10209 		} else
10210 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10211 	} else
10212 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10213 
10214 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10215 
10216 	tpr->rx_std_prod_idx = tp->rx_pending;
10217 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10218 
10219 	tpr->rx_jmb_prod_idx =
10220 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10221 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10222 
10223 	tg3_rings_reset(tp);
10224 
10225 	/* Initialize MAC address and backoff seed. */
10226 	__tg3_set_mac_addr(tp, false);
10227 
10228 	/* MTU + ethernet header + FCS + optional VLAN tag */
10229 	tw32(MAC_RX_MTU_SIZE,
10230 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10231 
10232 	/* The slot time is changed by tg3_setup_phy if we
10233 	 * run at gigabit with half duplex.
10234 	 */
10235 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10236 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10237 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10238 
10239 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10240 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10241 		val |= tr32(MAC_TX_LENGTHS) &
10242 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10243 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10244 
10245 	tw32(MAC_TX_LENGTHS, val);
10246 
10247 	/* Receive rules. */
10248 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10249 	tw32(RCVLPC_CONFIG, 0x0181);
10250 
10251 	/* Calculate RDMAC_MODE setting early, we need it to determine
10252 	 * the RCVLPC_STATE_ENABLE mask.
10253 	 */
10254 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10255 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10256 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10257 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10258 		      RDMAC_MODE_LNGREAD_ENAB);
10259 
10260 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10261 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10262 
10263 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10264 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10265 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10266 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10267 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10268 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10269 
10270 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10271 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10272 		if (tg3_flag(tp, TSO_CAPABLE)) {
10273 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10274 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10275 			   !tg3_flag(tp, IS_5788)) {
10276 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10277 		}
10278 	}
10279 
10280 	if (tg3_flag(tp, PCI_EXPRESS))
10281 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10282 
10283 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10284 		tp->dma_limit = 0;
10285 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10286 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10287 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10288 		}
10289 	}
10290 
10291 	if (tg3_flag(tp, HW_TSO_1) ||
10292 	    tg3_flag(tp, HW_TSO_2) ||
10293 	    tg3_flag(tp, HW_TSO_3))
10294 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10295 
10296 	if (tg3_flag(tp, 57765_PLUS) ||
10297 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10298 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10299 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10300 
10301 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10302 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10303 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10304 
10305 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10306 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10307 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10308 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10309 	    tg3_flag(tp, 57765_PLUS)) {
10310 		u32 tgtreg;
10311 
10312 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10313 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10314 		else
10315 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10316 
10317 		val = tr32(tgtreg);
10318 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10319 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10320 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10321 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10322 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10323 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10324 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10325 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10326 		}
10327 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10328 	}
10329 
10330 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10331 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10332 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10333 		u32 tgtreg;
10334 
10335 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10336 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10337 		else
10338 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10339 
10340 		val = tr32(tgtreg);
10341 		tw32(tgtreg, val |
10342 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10343 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10344 	}
10345 
10346 	/* Receive/send statistics. */
10347 	if (tg3_flag(tp, 5750_PLUS)) {
10348 		val = tr32(RCVLPC_STATS_ENABLE);
10349 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10350 		tw32(RCVLPC_STATS_ENABLE, val);
10351 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10352 		   tg3_flag(tp, TSO_CAPABLE)) {
10353 		val = tr32(RCVLPC_STATS_ENABLE);
10354 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10355 		tw32(RCVLPC_STATS_ENABLE, val);
10356 	} else {
10357 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10358 	}
10359 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10360 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10361 	tw32(SNDDATAI_STATSCTRL,
10362 	     (SNDDATAI_SCTRL_ENABLE |
10363 	      SNDDATAI_SCTRL_FASTUPD));
10364 
10365 	/* Setup host coalescing engine. */
10366 	tw32(HOSTCC_MODE, 0);
10367 	for (i = 0; i < 2000; i++) {
10368 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10369 			break;
10370 		udelay(10);
10371 	}
10372 
10373 	__tg3_set_coalesce(tp, &tp->coal);
10374 
10375 	if (!tg3_flag(tp, 5705_PLUS)) {
10376 		/* Status/statistics block address.  See tg3_timer,
10377 		 * the tg3_periodic_fetch_stats call there, and
10378 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10379 		 */
10380 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10381 		     ((u64) tp->stats_mapping >> 32));
10382 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10383 		     ((u64) tp->stats_mapping & 0xffffffff));
10384 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10385 
10386 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10387 
10388 		/* Clear statistics and status block memory areas */
10389 		for (i = NIC_SRAM_STATS_BLK;
10390 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10391 		     i += sizeof(u32)) {
10392 			tg3_write_mem(tp, i, 0);
10393 			udelay(40);
10394 		}
10395 	}
10396 
10397 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10398 
10399 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10400 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10401 	if (!tg3_flag(tp, 5705_PLUS))
10402 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10403 
10404 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10405 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10406 		/* Reset to prevent intermittently losing the first rx packet. */
10407 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10408 		udelay(10);
10409 	}
10410 
10411 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10412 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10413 			MAC_MODE_FHDE_ENABLE;
10414 	if (tg3_flag(tp, ENABLE_APE))
10415 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10416 	if (!tg3_flag(tp, 5705_PLUS) &&
10417 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10418 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10419 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10420 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10421 	udelay(40);
10422 
10423 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10424 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10425 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10426 	 * whether used as inputs or outputs, are set by boot code after
10427 	 * reset.
10428 	 */
10429 	if (!tg3_flag(tp, IS_NIC)) {
10430 		u32 gpio_mask;
10431 
10432 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10433 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10434 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10435 
10436 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10437 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10438 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10439 
10440 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10441 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10442 
10443 		tp->grc_local_ctrl &= ~gpio_mask;
10444 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10445 
10446 		/* GPIO1 must be driven high for eeprom write protect */
10447 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10448 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10449 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10450 	}
10451 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10452 	udelay(100);
10453 
10454 	if (tg3_flag(tp, USING_MSIX)) {
10455 		val = tr32(MSGINT_MODE);
10456 		val |= MSGINT_MODE_ENABLE;
10457 		if (tp->irq_cnt > 1)
10458 			val |= MSGINT_MODE_MULTIVEC_EN;
10459 		if (!tg3_flag(tp, 1SHOT_MSI))
10460 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10461 		tw32(MSGINT_MODE, val);
10462 	}
10463 
10464 	if (!tg3_flag(tp, 5705_PLUS)) {
10465 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10466 		udelay(40);
10467 	}
10468 
10469 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10470 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10471 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10472 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10473 	       WDMAC_MODE_LNGREAD_ENAB);
10474 
10475 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10476 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10477 		if (tg3_flag(tp, TSO_CAPABLE) &&
10478 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10479 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10480 			/* nothing */
10481 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10482 			   !tg3_flag(tp, IS_5788)) {
10483 			val |= WDMAC_MODE_RX_ACCEL;
10484 		}
10485 	}
10486 
10487 	/* Enable host coalescing bug fix */
10488 	if (tg3_flag(tp, 5755_PLUS))
10489 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10490 
10491 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10492 		val |= WDMAC_MODE_BURST_ALL_DATA;
10493 
10494 	tw32_f(WDMAC_MODE, val);
10495 	udelay(40);
10496 
10497 	if (tg3_flag(tp, PCIX_MODE)) {
10498 		u16 pcix_cmd;
10499 
10500 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10501 				     &pcix_cmd);
10502 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10503 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10504 			pcix_cmd |= PCI_X_CMD_READ_2K;
10505 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10506 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10507 			pcix_cmd |= PCI_X_CMD_READ_2K;
10508 		}
10509 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10510 				      pcix_cmd);
10511 	}
10512 
10513 	tw32_f(RDMAC_MODE, rdmac_mode);
10514 	udelay(40);
10515 
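	/* If any read-DMA length register exceeds the MTU, enable the
	 * 5719/5720 LSO read-DMA workaround bit (cleared again in
	 * tg3_periodic_fetch_stats() once enough packets have gone out).
	 */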
10516 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10517 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10518 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10519 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10520 				break;
10521 		}
10522 		if (i < TG3_NUM_RDMA_CHANNELS) {
10523 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10524 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10525 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10526 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10527 		}
10528 	}
10529 
10530 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10531 	if (!tg3_flag(tp, 5705_PLUS))
10532 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10533 
10534 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10535 		tw32(SNDDATAC_MODE,
10536 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10537 	else
10538 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10539 
10540 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10541 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10542 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10543 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10544 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10545 	tw32(RCVDBDI_MODE, val);
10546 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10547 	if (tg3_flag(tp, HW_TSO_1) ||
10548 	    tg3_flag(tp, HW_TSO_2) ||
10549 	    tg3_flag(tp, HW_TSO_3))
10550 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10551 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10552 	if (tg3_flag(tp, ENABLE_TSS))
10553 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10554 	tw32(SNDBDI_MODE, val);
10555 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10556 
10557 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10558 		err = tg3_load_5701_a0_firmware_fix(tp);
10559 		if (err)
10560 			return err;
10561 	}
10562 
10563 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10564 		/* Ignore any errors for the firmware download. If download
10565 		 * fails, the device will operate with EEE disabled.
10566 		 */
10567 		tg3_load_57766_firmware(tp);
10568 	}
10569 
10570 	if (tg3_flag(tp, TSO_CAPABLE)) {
10571 		err = tg3_load_tso_firmware(tp);
10572 		if (err)
10573 			return err;
10574 	}
10575 
10576 	tp->tx_mode = TX_MODE_ENABLE;
10577 
10578 	if (tg3_flag(tp, 5755_PLUS) ||
10579 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10580 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10581 
10582 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10583 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10584 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10585 		tp->tx_mode &= ~val;
10586 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10587 	}
10588 
10589 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10590 	udelay(100);
10591 
10592 	if (tg3_flag(tp, ENABLE_RSS)) {
10593 		u32 rss_key[10];
10594 
10595 		tg3_rss_write_indir_tbl(tp);
10596 
10597 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10598 
10599 		for (i = 0; i < 10 ; i++)
10600 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10601 	}
10602 
10603 	tp->rx_mode = RX_MODE_ENABLE;
10604 	if (tg3_flag(tp, 5755_PLUS))
10605 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10606 
10607 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10608 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10609 
10610 	if (tg3_flag(tp, ENABLE_RSS))
10611 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10612 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10613 			       RX_MODE_RSS_IPV6_HASH_EN |
10614 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10615 			       RX_MODE_RSS_IPV4_HASH_EN |
10616 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10617 
10618 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10619 	udelay(10);
10620 
10621 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10622 
10623 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10624 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10625 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10626 		udelay(10);
10627 	}
10628 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10629 	udelay(10);
10630 
10631 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10632 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10633 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10634 			/* Set drive transmission level to 1.2V, but only if
10635 			 * the signal pre-emphasis bit is not set. */
10636 			val = tr32(MAC_SERDES_CFG);
10637 			val &= 0xfffff000;
10638 			val |= 0x880;
10639 			tw32(MAC_SERDES_CFG, val);
10640 		}
10641 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10642 			tw32(MAC_SERDES_CFG, 0x616000);
10643 	}
10644 
10645 	/* Prevent chip from dropping frames when flow control
10646 	 * is enabled.
10647 	 */
10648 	if (tg3_flag(tp, 57765_CLASS))
10649 		val = 1;
10650 	else
10651 		val = 2;
10652 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10653 
10654 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10655 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10656 		/* Use hardware link auto-negotiation */
10657 		tg3_flag_set(tp, HW_AUTONEG);
10658 	}
10659 
10660 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10661 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10662 		u32 tmp;
10663 
10664 		tmp = tr32(SERDES_RX_CTRL);
10665 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10666 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10667 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10668 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10669 	}
10670 
10671 	if (!tg3_flag(tp, USE_PHYLIB)) {
10672 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10673 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10674 
10675 		err = tg3_setup_phy(tp, false);
10676 		if (err)
10677 			return err;
10678 
10679 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10680 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10681 			u32 tmp;
10682 
10683 			/* Clear CRC stats. */
10684 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10685 				tg3_writephy(tp, MII_TG3_TEST1,
10686 					     tmp | MII_TG3_TEST1_CRC_EN);
10687 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10688 			}
10689 		}
10690 	}
10691 
10692 	__tg3_set_rx_mode(tp->dev);
10693 
10694 	/* Initialize receive rules. */
10695 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10696 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10697 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10698 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10699 
10700 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10701 		limit = 8;
10702 	else
10703 		limit = 16;
10704 	if (tg3_flag(tp, ENABLE_ASF))
10705 		limit -= 4;
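	/* Zero the unused receive rules above the limit for this chip.
	 * Rules 0 and 1 were programmed above; the low rules in the
	 * commented-out cases are intentionally left untouched.
	 */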
10706 	switch (limit) {
10707 	case 16:
10708 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10709 		fallthrough;
10710 	case 15:
10711 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10712 		fallthrough;
10713 	case 14:
10714 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10715 		fallthrough;
10716 	case 13:
10717 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10718 		fallthrough;
10719 	case 12:
10720 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10721 		fallthrough;
10722 	case 11:
10723 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10724 		fallthrough;
10725 	case 10:
10726 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10727 		fallthrough;
10728 	case 9:
10729 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10730 		fallthrough;
10731 	case 8:
10732 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10733 		fallthrough;
10734 	case 7:
10735 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10736 		fallthrough;
10737 	case 6:
10738 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10739 		fallthrough;
10740 	case 5:
10741 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10742 		fallthrough;
10743 	case 4:
10744 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10745 	case 3:
10746 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10747 	case 2:
10748 	case 1:
10749 
10750 	default:
10751 		break;
10752 	}
10753 
10754 	if (tg3_flag(tp, ENABLE_APE))
10755 		/* Write our heartbeat update interval to APE. */
10756 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10757 				APE_HOST_HEARTBEAT_INT_5SEC);
10758 
10759 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10760 
10761 	return 0;
10762 }
10763 
10764 /* Called at device open time to get the chip ready for
10765  * packet processing.  Invoked with tp->lock held.
10766  */
10767 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10768 {
10769 	/* Chip may have been just powered on. If so, the boot code may still
10770 	 * be running initialization. Wait for it to finish to avoid races in
10771 	 * accessing the hardware.
10772 	 */
10773 	tg3_enable_register_access(tp);
10774 	tg3_poll_fw(tp);
10775 
10776 	tg3_switch_clocks(tp);
10777 
10778 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10779 
10780 	return tg3_reset_hw(tp, reset_phy);
10781 }
10782 
10783 #ifdef CONFIG_TIGON3_HWMON
10784 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10785 {
10786 	u32 off, len = TG3_OCIR_LEN;
10787 	int i;
10788 
10789 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10790 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10791 
10792 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10793 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10794 			memset(ocir, 0, len);
10795 	}
10796 }
10797 
10798 /* sysfs attributes for hwmon */
10799 static ssize_t tg3_show_temp(struct device *dev,
10800 			     struct device_attribute *devattr, char *buf)
10801 {
10802 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10803 	struct tg3 *tp = dev_get_drvdata(dev);
10804 	u32 temperature;
10805 
10806 	spin_lock_bh(&tp->lock);
10807 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10808 				sizeof(temperature));
10809 	spin_unlock_bh(&tp->lock);
10810 	return sprintf(buf, "%u\n", temperature * 1000);
10811 }
10812 
10813 
10814 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10815 			  TG3_TEMP_SENSOR_OFFSET);
10816 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10817 			  TG3_TEMP_CAUTION_OFFSET);
10818 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10819 			  TG3_TEMP_MAX_OFFSET);
10820 
10821 static struct attribute *tg3_attrs[] = {
10822 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10823 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10824 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10825 	NULL
10826 };
10827 ATTRIBUTE_GROUPS(tg3);
10828 
10829 static void tg3_hwmon_close(struct tg3 *tp)
10830 {
10831 	if (tp->hwmon_dev) {
10832 		hwmon_device_unregister(tp->hwmon_dev);
10833 		tp->hwmon_dev = NULL;
10834 	}
10835 }
10836 
10837 static void tg3_hwmon_open(struct tg3 *tp)
10838 {
10839 	int i;
10840 	u32 size = 0;
10841 	struct pci_dev *pdev = tp->pdev;
10842 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10843 
10844 	tg3_sd_scan_scratchpad(tp, ocirs);
10845 
10846 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10847 		if (!ocirs[i].src_data_length)
10848 			continue;
10849 
10850 		size += ocirs[i].src_hdr_length;
10851 		size += ocirs[i].src_data_length;
10852 	}
10853 
10854 	if (!size)
10855 		return;
10856 
10857 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10858 							  tp, tg3_groups);
10859 	if (IS_ERR(tp->hwmon_dev)) {
10860 		tp->hwmon_dev = NULL;
10861 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10862 	}
10863 }
10864 #else
10865 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10866 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10867 #endif /* CONFIG_TIGON3_HWMON */
10868 
10869 
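/* Accumulate a 32-bit hardware counter into a 64-bit high/low stat,
 * propagating the carry out of the low-word addition.
 */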
10870 #define TG3_STAT_ADD32(PSTAT, REG) \
10871 do {	u32 __val = tr32(REG); \
10872 	(PSTAT)->low += __val; \
10873 	if ((PSTAT)->low < __val) \
10874 		(PSTAT)->high += 1; \
10875 } while (0)
10876 
10877 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10878 {
10879 	struct tg3_hw_stats *sp = tp->hw_stats;
10880 
10881 	if (!tp->link_up)
10882 		return;
10883 
10884 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10885 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10886 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10887 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10888 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10889 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10890 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10891 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10892 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10893 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10894 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10895 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10896 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
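	/* Once enough packets have been transmitted, the 5719/5720
	 * read-DMA workaround bit set in tg3_reset_hw() can be cleared.
	 */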
10897 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10898 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10899 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10900 		u32 val;
10901 
10902 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10903 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10904 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10905 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10906 	}
10907 
10908 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10909 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10910 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10911 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10912 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10913 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10914 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10915 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10916 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10917 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10918 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10919 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10920 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10921 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10922 
10923 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10924 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10925 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10926 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10927 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10928 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10929 	} else {
10930 		u32 val = tr32(HOSTCC_FLOW_ATTN);
10931 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10932 		if (val) {
10933 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10934 			sp->rx_discards.low += val;
10935 			if (sp->rx_discards.low < val)
10936 				sp->rx_discards.high += 1;
10937 		}
10938 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10939 	}
10940 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10941 }
10942 
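/* Work around occasionally missed MSIs: if a vector has pending work
 * but its consumer indices have not moved for two timer ticks, invoke
 * the MSI handler by hand.
 */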
10943 static void tg3_chk_missed_msi(struct tg3 *tp)
10944 {
10945 	u32 i;
10946 
10947 	for (i = 0; i < tp->irq_cnt; i++) {
10948 		struct tg3_napi *tnapi = &tp->napi[i];
10949 
10950 		if (tg3_has_work(tnapi)) {
10951 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10952 			    tnapi->last_tx_cons == tnapi->tx_cons) {
10953 				if (tnapi->chk_msi_cnt < 1) {
10954 					tnapi->chk_msi_cnt++;
10955 					return;
10956 				}
10957 				tg3_msi(0, tnapi);
10958 			}
10959 		}
10960 		tnapi->chk_msi_cnt = 0;
10961 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10962 		tnapi->last_tx_cons = tnapi->tx_cons;
10963 	}
10964 }
10965 
10966 static void tg3_timer(struct timer_list *t)
10967 {
10968 	struct tg3 *tp = from_timer(tp, t, timer);
10969 
10970 	spin_lock(&tp->lock);
10971 
10972 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10973 		spin_unlock(&tp->lock);
10974 		goto restart_timer;
10975 	}
10976 
10977 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10978 	    tg3_flag(tp, 57765_CLASS))
10979 		tg3_chk_missed_msi(tp);
10980 
10981 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10982 		/* BCM4785: Flush posted writes from GbE to host memory. */
10983 		tr32(HOSTCC_MODE);
10984 	}
10985 
10986 	if (!tg3_flag(tp, TAGGED_STATUS)) {
10987 		/* All of this garbage is because, when using non-tagged
10988 		 * IRQ status, the mailbox/status_block protocol the chip
10989 		 * uses with the CPU is race prone.
10990 		 */
10991 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10992 			tw32(GRC_LOCAL_CTRL,
10993 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10994 		} else {
10995 			tw32(HOSTCC_MODE, tp->coalesce_mode |
10996 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10997 		}
10998 
10999 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11000 			spin_unlock(&tp->lock);
11001 			tg3_reset_task_schedule(tp);
11002 			goto restart_timer;
11003 		}
11004 	}
11005 
11006 	/* This part only runs once per second. */
11007 	if (!--tp->timer_counter) {
11008 		if (tg3_flag(tp, 5705_PLUS))
11009 			tg3_periodic_fetch_stats(tp);
11010 
11011 		if (tp->setlpicnt && !--tp->setlpicnt)
11012 			tg3_phy_eee_enable(tp);
11013 
11014 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11015 			u32 mac_stat;
11016 			int phy_event;
11017 
11018 			mac_stat = tr32(MAC_STATUS);
11019 
11020 			phy_event = 0;
11021 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11022 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11023 					phy_event = 1;
11024 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11025 				phy_event = 1;
11026 
11027 			if (phy_event)
11028 				tg3_setup_phy(tp, false);
11029 		} else if (tg3_flag(tp, POLL_SERDES)) {
11030 			u32 mac_stat = tr32(MAC_STATUS);
11031 			int need_setup = 0;
11032 
11033 			if (tp->link_up &&
11034 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11035 				need_setup = 1;
11036 			}
11037 			if (!tp->link_up &&
11038 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11039 					 MAC_STATUS_SIGNAL_DET))) {
11040 				need_setup = 1;
11041 			}
11042 			if (need_setup) {
11043 				if (!tp->serdes_counter) {
11044 					tw32_f(MAC_MODE,
11045 					     (tp->mac_mode &
11046 					      ~MAC_MODE_PORT_MODE_MASK));
11047 					udelay(40);
11048 					tw32_f(MAC_MODE, tp->mac_mode);
11049 					udelay(40);
11050 				}
11051 				tg3_setup_phy(tp, false);
11052 			}
11053 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11054 			   tg3_flag(tp, 5780_CLASS)) {
11055 			tg3_serdes_parallel_detect(tp);
11056 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11057 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11058 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11059 					 TG3_CPMU_STATUS_LINK_MASK);
11060 
11061 			if (link_up != tp->link_up)
11062 				tg3_setup_phy(tp, false);
11063 		}
11064 
11065 		tp->timer_counter = tp->timer_multiplier;
11066 	}
11067 
11068 	/* Heartbeat is only sent once every 2 seconds.
11069 	 *
11070 	 * The heartbeat is to tell the ASF firmware that the host
11071 	 * driver is still alive.  In the event that the OS crashes,
11072 	 * ASF needs to reset the hardware to free up the FIFO space
11073 	 * that may be filled with rx packets destined for the host.
11074 	 * If the FIFO is full, ASF will no longer function properly.
11075 	 *
11076 	 * Unintended resets have been reported on real-time kernels,
11077 	 * where the timer doesn't run on time.  Netpoll will also have
11078 	 * the same problem.
11079 	 *
11080 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11081 	 * to check the ring condition when the heartbeat is expiring
11082 	 * before doing the reset.  This will prevent most unintended
11083 	 * resets.
11084 	 */
11085 	if (!--tp->asf_counter) {
11086 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11087 			tg3_wait_for_event_ack(tp);
11088 
11089 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11090 				      FWCMD_NICDRV_ALIVE3);
11091 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11092 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11093 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11094 
11095 			tg3_generate_fw_event(tp);
11096 		}
11097 		tp->asf_counter = tp->asf_multiplier;
11098 	}
11099 
11100 	/* Update the APE heartbeat every 5 seconds. */
11101 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11102 
11103 	spin_unlock(&tp->lock);
11104 
11105 restart_timer:
11106 	tp->timer.expires = jiffies + tp->timer_offset;
11107 	add_timer(&tp->timer);
11108 }
11109 
11110 static void tg3_timer_init(struct tg3 *tp)
11111 {
11112 	if (tg3_flag(tp, TAGGED_STATUS) &&
11113 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11114 	    !tg3_flag(tp, 57765_CLASS))
11115 		tp->timer_offset = HZ;
11116 	else
11117 		tp->timer_offset = HZ / 10;
11118 
11119 	BUG_ON(tp->timer_offset > HZ);
11120 
11121 	tp->timer_multiplier = (HZ / tp->timer_offset);
11122 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11123 			     TG3_FW_UPDATE_FREQ_SEC;
11124 
11125 	timer_setup(&tp->timer, tg3_timer, 0);
11126 }
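/* Worked example (illustrative, assuming HZ=1000 and that
 * TG3_FW_UPDATE_FREQ_SEC is the 2-second heartbeat cadence described in
 * tg3_timer()): on a chip with tagged status and no missed-MSI polling,
 * timer_offset = HZ, the timer fires once per second, and
 * timer_multiplier = 1, so the once-per-second block in tg3_timer()
 * runs on every tick.  Otherwise timer_offset = HZ / 10 = 100 jiffies
 * (100 ms) and timer_multiplier = 10, so the timer fires ten times per
 * second but the once-per-second work still runs only when
 * timer_counter counts down from 10.  In that case asf_multiplier is
 * 10 * 2 = 20 ticks, i.e. one heartbeat every 2 seconds.
 */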
11127 
11128 static void tg3_timer_start(struct tg3 *tp)
11129 {
11130 	tp->asf_counter   = tp->asf_multiplier;
11131 	tp->timer_counter = tp->timer_multiplier;
11132 
11133 	tp->timer.expires = jiffies + tp->timer_offset;
11134 	add_timer(&tp->timer);
11135 }
11136 
11137 static void tg3_timer_stop(struct tg3 *tp)
11138 {
11139 	del_timer_sync(&tp->timer);
11140 }
11141 
11142 /* Restart hardware after configuration changes, self-test, etc.
11143  * Invoked with tp->lock held.
11144  */
11145 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11146 	__releases(tp->lock)
11147 	__acquires(tp->lock)
11148 {
11149 	int err;
11150 
11151 	err = tg3_init_hw(tp, reset_phy);
11152 	if (err) {
11153 		netdev_err(tp->dev,
11154 			   "Failed to re-initialize device, aborting\n");
11155 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11156 		tg3_full_unlock(tp);
11157 		tg3_timer_stop(tp);
11158 		tp->irq_sync = 0;
11159 		tg3_napi_enable(tp);
11160 		dev_close(tp->dev);
11161 		tg3_full_lock(tp, 0);
11162 	}
11163 	return err;
11164 }
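/* Note (illustrative, not from the original source): the __releases and
 * __acquires annotations exist because the error path must drop
 * tp->lock around dev_close(), which sleeps and re-enters the driver,
 * and must then retake the lock so the caller still unlocks a lock it
 * holds.  A typical caller, as in tg3_set_ringparam() below, looks
 * like:
 *
 *	tg3_full_lock(tp, irq_sync);
 *	err = tg3_restart_hw(tp, reset_phy);
 *	if (!err)
 *		tg3_netif_start(tp);
 *	tg3_full_unlock(tp);
 */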
11165 
11166 static void tg3_reset_task(struct work_struct *work)
11167 {
11168 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11169 	int err;
11170 
11171 	rtnl_lock();
11172 	tg3_full_lock(tp, 0);
11173 
11174 	if (tp->pcierr_recovery || !netif_running(tp->dev)) {
11175 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11176 		tg3_full_unlock(tp);
11177 		rtnl_unlock();
11178 		return;
11179 	}
11180 
11181 	tg3_full_unlock(tp);
11182 
11183 	tg3_phy_stop(tp);
11184 
11185 	tg3_netif_stop(tp);
11186 
11187 	tg3_full_lock(tp, 1);
11188 
11189 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11190 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11191 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11192 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11193 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11194 	}
11195 
11196 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11197 	err = tg3_init_hw(tp, true);
11198 	if (err) {
11199 		tg3_full_unlock(tp);
11200 		tp->irq_sync = 0;
11201 		tg3_napi_enable(tp);
11202 		/* Clear this flag so that tg3_reset_task_cancel() will not
11203 		 * call cancel_work_sync() and wait forever.
11204 		 */
11205 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11206 		dev_close(tp->dev);
11207 		goto out;
11208 	}
11209 
11210 	tg3_netif_start(tp);
11211 	tg3_full_unlock(tp);
11212 	tg3_phy_start(tp);
11213 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11214 out:
11215 	rtnl_unlock();
11216 }
11217 
11218 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11219 {
11220 	irq_handler_t fn;
11221 	unsigned long flags;
11222 	char *name;
11223 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11224 
11225 	if (tp->irq_cnt == 1)
11226 		name = tp->dev->name;
11227 	else {
11228 		name = &tnapi->irq_lbl[0];
11229 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11230 			snprintf(name, IFNAMSIZ,
11231 				 "%s-txrx-%d", tp->dev->name, irq_num);
11232 		else if (tnapi->tx_buffers)
11233 			snprintf(name, IFNAMSIZ,
11234 				 "%s-tx-%d", tp->dev->name, irq_num);
11235 		else if (tnapi->rx_rcb)
11236 			snprintf(name, IFNAMSIZ,
11237 				 "%s-rx-%d", tp->dev->name, irq_num);
11238 		else
11239 			snprintf(name, IFNAMSIZ,
11240 				 "%s-%d", tp->dev->name, irq_num);
11241 		name[IFNAMSIZ-1] = 0;
11242 	}
11243 
11244 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11245 		fn = tg3_msi;
11246 		if (tg3_flag(tp, 1SHOT_MSI))
11247 			fn = tg3_msi_1shot;
11248 		flags = 0;
11249 	} else {
11250 		fn = tg3_interrupt;
11251 		if (tg3_flag(tp, TAGGED_STATUS))
11252 			fn = tg3_interrupt_tagged;
11253 		flags = IRQF_SHARED;
11254 	}
11255 
11256 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11257 }
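/* Usage note (illustrative): the names built above are what appear in
 * /proc/interrupts.  For a hypothetical multiqueue interface eth0, the
 * per-vector entries take one of the four forms
 *
 *	eth0-txrx-1	eth0-tx-1	eth0-rx-2	eth0-3
 *
 * depending on which rings a vector services, while a single-vector
 * configuration uses the bare device name "eth0".
 */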
11258 
11259 static int tg3_test_interrupt(struct tg3 *tp)
11260 {
11261 	struct tg3_napi *tnapi = &tp->napi[0];
11262 	struct net_device *dev = tp->dev;
11263 	int err, i, intr_ok = 0;
11264 	u32 val;
11265 
11266 	if (!netif_running(dev))
11267 		return -ENODEV;
11268 
11269 	tg3_disable_ints(tp);
11270 
11271 	free_irq(tnapi->irq_vec, tnapi);
11272 
11273 	/*
11274 	 * Turn off MSI one-shot mode.  Otherwise this test has no way
11275 	 * to observe whether the interrupt was delivered.
11276 	 */
11277 	if (tg3_flag(tp, 57765_PLUS)) {
11278 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11279 		tw32(MSGINT_MODE, val);
11280 	}
11281 
11282 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11283 			  IRQF_SHARED, dev->name, tnapi);
11284 	if (err)
11285 		return err;
11286 
11287 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11288 	tg3_enable_ints(tp);
11289 
11290 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11291 	       tnapi->coal_now);
11292 
11293 	for (i = 0; i < 5; i++) {
11294 		u32 int_mbox, misc_host_ctrl;
11295 
11296 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11297 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11298 
11299 		if ((int_mbox != 0) ||
11300 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11301 			intr_ok = 1;
11302 			break;
11303 		}
11304 
11305 		if (tg3_flag(tp, 57765_PLUS) &&
11306 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11307 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11308 
11309 		msleep(10);
11310 	}
11311 
11312 	tg3_disable_ints(tp);
11313 
11314 	free_irq(tnapi->irq_vec, tnapi);
11315 
11316 	err = tg3_request_irq(tp, 0);
11317 
11318 	if (err)
11319 		return err;
11320 
11321 	if (intr_ok) {
11322 		/* Reenable MSI one shot mode. */
11323 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11324 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11325 			tw32(MSGINT_MODE, val);
11326 		}
11327 		return 0;
11328 	}
11329 
11330 	return -EIO;
11331 }
11332 
11333 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
11334  * mode is successfully restored.
11335  */
11336 static int tg3_test_msi(struct tg3 *tp)
11337 {
11338 	int err;
11339 	u16 pci_cmd;
11340 
11341 	if (!tg3_flag(tp, USING_MSI))
11342 		return 0;
11343 
11344 	/* Turn off SERR reporting in case MSI terminates with Master
11345 	 * Abort.
11346 	 */
11347 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11348 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11349 			      pci_cmd & ~PCI_COMMAND_SERR);
11350 
11351 	err = tg3_test_interrupt(tp);
11352 
11353 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11354 
11355 	if (!err)
11356 		return 0;
11357 
11358 	/* other failures */
11359 	if (err != -EIO)
11360 		return err;
11361 
11362 	/* MSI test failed, go back to INTx mode */
11363 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11364 		    "to INTx mode. Please report this failure to the PCI "
11365 		    "maintainer and include system chipset information\n");
11366 
11367 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11368 
11369 	pci_disable_msi(tp->pdev);
11370 
11371 	tg3_flag_clear(tp, USING_MSI);
11372 	tp->napi[0].irq_vec = tp->pdev->irq;
11373 
11374 	err = tg3_request_irq(tp, 0);
11375 	if (err)
11376 		return err;
11377 
11378 	/* Need to reset the chip because the MSI cycle may have terminated
11379 	 * with Master Abort.
11380 	 */
11381 	tg3_full_lock(tp, 1);
11382 
11383 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11384 	err = tg3_init_hw(tp, true);
11385 
11386 	tg3_full_unlock(tp);
11387 
11388 	if (err)
11389 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11390 
11391 	return err;
11392 }
11393 
11394 static int tg3_request_firmware(struct tg3 *tp)
11395 {
11396 	const struct tg3_firmware_hdr *fw_hdr;
11397 
11398 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11399 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11400 			   tp->fw_needed);
11401 		return -ENOENT;
11402 	}
11403 
11404 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11405 
11406 	/* The firmware blob starts with version numbers, followed by the
11407 	 * start address and the _full_ length including BSS sections
11408 	 * (which must be at least as long as the actual data, of course).
11409 	 */
11410 
11411 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11412 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11413 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11414 			   tp->fw_len, tp->fw_needed);
11415 		release_firmware(tp->fw);
11416 		tp->fw = NULL;
11417 		return -EINVAL;
11418 	}
11419 
11420 	/* We no longer need firmware; we have it. */
11421 	tp->fw_needed = NULL;
11422 	return 0;
11423 }
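/* Illustrative sketch of the layout validated above, assuming the
 * header definition from tg3.h (three big-endian words ahead of the
 * payload):
 *
 *	struct tg3_firmware_hdr {
 *		__be32 version;
 *		__be32 base_addr;
 *		__be32 len;	(full length, BSS included)
 *	};
 *
 * The sanity check accepts the blob only when the advertised length
 * covers at least the data actually present in the file, i.e. when
 * fw_len >= tp->fw->size - TG3_FW_HDR_LEN.
 */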
11424 
11425 static u32 tg3_irq_count(struct tg3 *tp)
11426 {
11427 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11428 
11429 	if (irq_cnt > 1) {
11430 		/* We want as many rx rings enabled as there are CPUs.
11431 		 * In multiqueue MSI-X mode, the first MSI-X vector
11432 		 * only deals with link interrupts, etc., so we add
11433 		 * one to the number of vectors we are requesting.
11434 		 */
11435 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11436 	}
11437 
11438 	return irq_cnt;
11439 }
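/* Worked example (illustrative): with rxq_cnt = 4 and txq_cnt = 1, the
 * base count is max(4, 1) = 4; since that is greater than 1, one extra
 * vector is added for the link-only vector 0, so the function requests
 * min(5, tp->irq_max) vectors.  With a single queue it simply returns 1
 * and no dedicated link vector is used.
 */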
11440 
11441 static bool tg3_enable_msix(struct tg3 *tp)
11442 {
11443 	int i, rc;
11444 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11445 
11446 	tp->txq_cnt = tp->txq_req;
11447 	tp->rxq_cnt = tp->rxq_req;
11448 	if (!tp->rxq_cnt)
11449 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11450 	if (tp->rxq_cnt > tp->rxq_max)
11451 		tp->rxq_cnt = tp->rxq_max;
11452 
11453 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11454 	 * scheduling of the TX rings can cause starvation of rings with
11455 	 * small packets when other rings have TSO or jumbo packets.
11456 	 */
11457 	if (!tp->txq_req)
11458 		tp->txq_cnt = 1;
11459 
11460 	tp->irq_cnt = tg3_irq_count(tp);
11461 
11462 	for (i = 0; i < tp->irq_max; i++) {
11463 		msix_ent[i].entry  = i;
11464 		msix_ent[i].vector = 0;
11465 	}
11466 
11467 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11468 	if (rc < 0) {
11469 		return false;
11470 	} else if (rc < tp->irq_cnt) {
11471 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11472 			      tp->irq_cnt, rc);
11473 		tp->irq_cnt = rc;
11474 		tp->rxq_cnt = max(rc - 1, 1);
11475 		if (tp->txq_cnt)
11476 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11477 	}
11478 
11479 	for (i = 0; i < tp->irq_max; i++)
11480 		tp->napi[i].irq_vec = msix_ent[i].vector;
11481 
11482 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11483 		pci_disable_msix(tp->pdev);
11484 		return false;
11485 	}
11486 
11487 	if (tp->irq_cnt == 1)
11488 		return true;
11489 
11490 	tg3_flag_set(tp, ENABLE_RSS);
11491 
11492 	if (tp->txq_cnt > 1)
11493 		tg3_flag_set(tp, ENABLE_TSS);
11494 
11495 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11496 
11497 	return true;
11498 }
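/* Worked example (illustrative): if 5 vectors are requested but
 * pci_enable_msix_range() grants only 3, the code above degrades to
 * irq_cnt = 3 and rxq_cnt = max(3 - 1, 1) = 2 rx queues (vector 0
 * remains reserved for link events), with txq_cnt clamped to
 * min(rxq_cnt, txq_max).  Only a grant below the minimum of 1 makes the
 * call fail, causing tg3_ints_init() to fall back to MSI or INTx.
 */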
11499 
11500 static void tg3_ints_init(struct tg3 *tp)
11501 {
11502 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11503 	    !tg3_flag(tp, TAGGED_STATUS)) {
11504 		/* All MSI-supporting chips should support tagged
11505 		 * status.  Warn and fall back to INTx if that is not the case.
11506 		 */
11507 		netdev_warn(tp->dev,
11508 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11509 		goto defcfg;
11510 	}
11511 
11512 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11513 		tg3_flag_set(tp, USING_MSIX);
11514 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11515 		tg3_flag_set(tp, USING_MSI);
11516 
11517 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11518 		u32 msi_mode = tr32(MSGINT_MODE);
11519 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11520 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11521 		if (!tg3_flag(tp, 1SHOT_MSI))
11522 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11523 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11524 	}
11525 defcfg:
11526 	if (!tg3_flag(tp, USING_MSIX)) {
11527 		tp->irq_cnt = 1;
11528 		tp->napi[0].irq_vec = tp->pdev->irq;
11529 	}
11530 
11531 	if (tp->irq_cnt == 1) {
11532 		tp->txq_cnt = 1;
11533 		tp->rxq_cnt = 1;
11534 		netif_set_real_num_tx_queues(tp->dev, 1);
11535 		netif_set_real_num_rx_queues(tp->dev, 1);
11536 	}
11537 }
11538 
11539 static void tg3_ints_fini(struct tg3 *tp)
11540 {
11541 	if (tg3_flag(tp, USING_MSIX))
11542 		pci_disable_msix(tp->pdev);
11543 	else if (tg3_flag(tp, USING_MSI))
11544 		pci_disable_msi(tp->pdev);
11545 	tg3_flag_clear(tp, USING_MSI);
11546 	tg3_flag_clear(tp, USING_MSIX);
11547 	tg3_flag_clear(tp, ENABLE_RSS);
11548 	tg3_flag_clear(tp, ENABLE_TSS);
11549 }
11550 
11551 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11552 		     bool init)
11553 {
11554 	struct net_device *dev = tp->dev;
11555 	int i, err;
11556 
11557 	/*
11558 	 * Set up interrupts first so we know how
11559 	 * many NAPI resources to allocate.
11560 	 */
11561 	tg3_ints_init(tp);
11562 
11563 	tg3_rss_check_indir_tbl(tp);
11564 
11565 	/* The placement of this call is tied
11566 	 * to the setup and use of Host TX descriptors.
11567 	 */
11568 	err = tg3_alloc_consistent(tp);
11569 	if (err)
11570 		goto out_ints_fini;
11571 
11572 	tg3_napi_init(tp);
11573 
11574 	tg3_napi_enable(tp);
11575 
11576 	for (i = 0; i < tp->irq_cnt; i++) {
11577 		err = tg3_request_irq(tp, i);
11578 		if (err) {
11579 			for (i--; i >= 0; i--) {
11580 				struct tg3_napi *tnapi = &tp->napi[i];
11581 
11582 				free_irq(tnapi->irq_vec, tnapi);
11583 			}
11584 			goto out_napi_fini;
11585 		}
11586 	}
11587 
11588 	tg3_full_lock(tp, 0);
11589 
11590 	if (init)
11591 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11592 
11593 	err = tg3_init_hw(tp, reset_phy);
11594 	if (err) {
11595 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11596 		tg3_free_rings(tp);
11597 	}
11598 
11599 	tg3_full_unlock(tp);
11600 
11601 	if (err)
11602 		goto out_free_irq;
11603 
11604 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11605 		err = tg3_test_msi(tp);
11606 
11607 		if (err) {
11608 			tg3_full_lock(tp, 0);
11609 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11610 			tg3_free_rings(tp);
11611 			tg3_full_unlock(tp);
11612 
11613 			goto out_napi_fini;
11614 		}
11615 
11616 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11617 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11618 
11619 			tw32(PCIE_TRANSACTION_CFG,
11620 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11621 		}
11622 	}
11623 
11624 	tg3_phy_start(tp);
11625 
11626 	tg3_hwmon_open(tp);
11627 
11628 	tg3_full_lock(tp, 0);
11629 
11630 	tg3_timer_start(tp);
11631 	tg3_flag_set(tp, INIT_COMPLETE);
11632 	tg3_enable_ints(tp);
11633 
11634 	tg3_ptp_resume(tp);
11635 
11636 	tg3_full_unlock(tp);
11637 
11638 	netif_tx_start_all_queues(dev);
11639 
11640 	/*
11641 	 * Reset the loopback feature if it was turned on while the device was
11642 	 * down; make sure that it is installed properly now.
11643 	 */
11644 	if (dev->features & NETIF_F_LOOPBACK)
11645 		tg3_set_loopback(dev, dev->features);
11646 
11647 	return 0;
11648 
11649 out_free_irq:
11650 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11651 		struct tg3_napi *tnapi = &tp->napi[i];
11652 		free_irq(tnapi->irq_vec, tnapi);
11653 	}
11654 
11655 out_napi_fini:
11656 	tg3_napi_disable(tp);
11657 	tg3_napi_fini(tp);
11658 	tg3_free_consistent(tp);
11659 
11660 out_ints_fini:
11661 	tg3_ints_fini(tp);
11662 
11663 	return err;
11664 }
11665 
11666 static void tg3_stop(struct tg3 *tp)
11667 {
11668 	int i;
11669 
11670 	tg3_reset_task_cancel(tp);
11671 	tg3_netif_stop(tp);
11672 
11673 	tg3_timer_stop(tp);
11674 
11675 	tg3_hwmon_close(tp);
11676 
11677 	tg3_phy_stop(tp);
11678 
11679 	tg3_full_lock(tp, 1);
11680 
11681 	tg3_disable_ints(tp);
11682 
11683 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11684 	tg3_free_rings(tp);
11685 	tg3_flag_clear(tp, INIT_COMPLETE);
11686 
11687 	tg3_full_unlock(tp);
11688 
11689 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11690 		struct tg3_napi *tnapi = &tp->napi[i];
11691 		free_irq(tnapi->irq_vec, tnapi);
11692 	}
11693 
11694 	tg3_ints_fini(tp);
11695 
11696 	tg3_napi_fini(tp);
11697 
11698 	tg3_free_consistent(tp);
11699 }
11700 
11701 static int tg3_open(struct net_device *dev)
11702 {
11703 	struct tg3 *tp = netdev_priv(dev);
11704 	int err;
11705 
11706 	if (tp->pcierr_recovery) {
11707 		netdev_err(dev, "Failed to open device. PCI error recovery "
11708 			   "in progress\n");
11709 		return -EAGAIN;
11710 	}
11711 
11712 	if (tp->fw_needed) {
11713 		err = tg3_request_firmware(tp);
11714 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11715 			if (err) {
11716 				netdev_warn(tp->dev, "EEE capability disabled\n");
11717 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11718 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11719 				netdev_warn(tp->dev, "EEE capability restored\n");
11720 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11721 			}
11722 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11723 			if (err)
11724 				return err;
11725 		} else if (err) {
11726 			netdev_warn(tp->dev, "TSO capability disabled\n");
11727 			tg3_flag_clear(tp, TSO_CAPABLE);
11728 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11729 			netdev_notice(tp->dev, "TSO capability restored\n");
11730 			tg3_flag_set(tp, TSO_CAPABLE);
11731 		}
11732 	}
11733 
11734 	tg3_carrier_off(tp);
11735 
11736 	err = tg3_power_up(tp);
11737 	if (err)
11738 		return err;
11739 
11740 	tg3_full_lock(tp, 0);
11741 
11742 	tg3_disable_ints(tp);
11743 	tg3_flag_clear(tp, INIT_COMPLETE);
11744 
11745 	tg3_full_unlock(tp);
11746 
11747 	err = tg3_start(tp,
11748 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11749 			true, true);
11750 	if (err) {
11751 		tg3_frob_aux_power(tp, false);
11752 		pci_set_power_state(tp->pdev, PCI_D3hot);
11753 	}
11754 
11755 	return err;
11756 }
11757 
11758 static int tg3_close(struct net_device *dev)
11759 {
11760 	struct tg3 *tp = netdev_priv(dev);
11761 
11762 	if (tp->pcierr_recovery) {
11763 		netdev_err(dev, "Failed to close device. PCI error recovery "
11764 			   "in progress\n");
11765 		return -EAGAIN;
11766 	}
11767 
11768 	tg3_stop(tp);
11769 
11770 	if (pci_device_is_present(tp->pdev)) {
11771 		tg3_power_down_prepare(tp);
11772 
11773 		tg3_carrier_off(tp);
11774 	}
11775 	return 0;
11776 }
11777 
11778 static inline u64 get_stat64(tg3_stat64_t *val)
11779 {
11780 	return ((u64)val->high << 32) | ((u64)val->low);
11781 }
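/* Worked example (illustrative): for a hypothetical counter with
 * high = 0x1 and low = 0x00000005, get_stat64() returns
 * 0x0000000100000005, i.e. 4294967301.
 */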
11782 
11783 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11784 {
11785 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11786 
11787 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11788 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11789 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11790 		u32 val;
11791 
11792 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11793 			tg3_writephy(tp, MII_TG3_TEST1,
11794 				     val | MII_TG3_TEST1_CRC_EN);
11795 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11796 		} else
11797 			val = 0;
11798 
11799 		tp->phy_crc_errors += val;
11800 
11801 		return tp->phy_crc_errors;
11802 	}
11803 
11804 	return get_stat64(&hw_stats->rx_fcs_errors);
11805 }
11806 
11807 #define ESTAT_ADD(member) \
11808 	estats->member =	old_estats->member + \
11809 				get_stat64(&hw_stats->member)
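/* Illustrative expansion (not part of the driver): ESTAT_ADD(rx_octets)
 * in tg3_get_estats() below becomes
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * so each reported value is the snapshot saved across the last reset
 * plus whatever the live hardware block has accumulated since.
 */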
11810 
11811 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11812 {
11813 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11814 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11815 
11816 	ESTAT_ADD(rx_octets);
11817 	ESTAT_ADD(rx_fragments);
11818 	ESTAT_ADD(rx_ucast_packets);
11819 	ESTAT_ADD(rx_mcast_packets);
11820 	ESTAT_ADD(rx_bcast_packets);
11821 	ESTAT_ADD(rx_fcs_errors);
11822 	ESTAT_ADD(rx_align_errors);
11823 	ESTAT_ADD(rx_xon_pause_rcvd);
11824 	ESTAT_ADD(rx_xoff_pause_rcvd);
11825 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11826 	ESTAT_ADD(rx_xoff_entered);
11827 	ESTAT_ADD(rx_frame_too_long_errors);
11828 	ESTAT_ADD(rx_jabbers);
11829 	ESTAT_ADD(rx_undersize_packets);
11830 	ESTAT_ADD(rx_in_length_errors);
11831 	ESTAT_ADD(rx_out_length_errors);
11832 	ESTAT_ADD(rx_64_or_less_octet_packets);
11833 	ESTAT_ADD(rx_65_to_127_octet_packets);
11834 	ESTAT_ADD(rx_128_to_255_octet_packets);
11835 	ESTAT_ADD(rx_256_to_511_octet_packets);
11836 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11837 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11838 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11839 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11840 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11841 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11842 
11843 	ESTAT_ADD(tx_octets);
11844 	ESTAT_ADD(tx_collisions);
11845 	ESTAT_ADD(tx_xon_sent);
11846 	ESTAT_ADD(tx_xoff_sent);
11847 	ESTAT_ADD(tx_flow_control);
11848 	ESTAT_ADD(tx_mac_errors);
11849 	ESTAT_ADD(tx_single_collisions);
11850 	ESTAT_ADD(tx_mult_collisions);
11851 	ESTAT_ADD(tx_deferred);
11852 	ESTAT_ADD(tx_excessive_collisions);
11853 	ESTAT_ADD(tx_late_collisions);
11854 	ESTAT_ADD(tx_collide_2times);
11855 	ESTAT_ADD(tx_collide_3times);
11856 	ESTAT_ADD(tx_collide_4times);
11857 	ESTAT_ADD(tx_collide_5times);
11858 	ESTAT_ADD(tx_collide_6times);
11859 	ESTAT_ADD(tx_collide_7times);
11860 	ESTAT_ADD(tx_collide_8times);
11861 	ESTAT_ADD(tx_collide_9times);
11862 	ESTAT_ADD(tx_collide_10times);
11863 	ESTAT_ADD(tx_collide_11times);
11864 	ESTAT_ADD(tx_collide_12times);
11865 	ESTAT_ADD(tx_collide_13times);
11866 	ESTAT_ADD(tx_collide_14times);
11867 	ESTAT_ADD(tx_collide_15times);
11868 	ESTAT_ADD(tx_ucast_packets);
11869 	ESTAT_ADD(tx_mcast_packets);
11870 	ESTAT_ADD(tx_bcast_packets);
11871 	ESTAT_ADD(tx_carrier_sense_errors);
11872 	ESTAT_ADD(tx_discards);
11873 	ESTAT_ADD(tx_errors);
11874 
11875 	ESTAT_ADD(dma_writeq_full);
11876 	ESTAT_ADD(dma_write_prioq_full);
11877 	ESTAT_ADD(rxbds_empty);
11878 	ESTAT_ADD(rx_discards);
11879 	ESTAT_ADD(rx_errors);
11880 	ESTAT_ADD(rx_threshold_hit);
11881 
11882 	ESTAT_ADD(dma_readq_full);
11883 	ESTAT_ADD(dma_read_prioq_full);
11884 	ESTAT_ADD(tx_comp_queue_full);
11885 
11886 	ESTAT_ADD(ring_set_send_prod_index);
11887 	ESTAT_ADD(ring_status_update);
11888 	ESTAT_ADD(nic_irqs);
11889 	ESTAT_ADD(nic_avoided_irqs);
11890 	ESTAT_ADD(nic_tx_threshold_hit);
11891 
11892 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11893 }
11894 
11895 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11896 {
11897 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11898 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11899 
11900 	stats->rx_packets = old_stats->rx_packets +
11901 		get_stat64(&hw_stats->rx_ucast_packets) +
11902 		get_stat64(&hw_stats->rx_mcast_packets) +
11903 		get_stat64(&hw_stats->rx_bcast_packets);
11904 
11905 	stats->tx_packets = old_stats->tx_packets +
11906 		get_stat64(&hw_stats->tx_ucast_packets) +
11907 		get_stat64(&hw_stats->tx_mcast_packets) +
11908 		get_stat64(&hw_stats->tx_bcast_packets);
11909 
11910 	stats->rx_bytes = old_stats->rx_bytes +
11911 		get_stat64(&hw_stats->rx_octets);
11912 	stats->tx_bytes = old_stats->tx_bytes +
11913 		get_stat64(&hw_stats->tx_octets);
11914 
11915 	stats->rx_errors = old_stats->rx_errors +
11916 		get_stat64(&hw_stats->rx_errors);
11917 	stats->tx_errors = old_stats->tx_errors +
11918 		get_stat64(&hw_stats->tx_errors) +
11919 		get_stat64(&hw_stats->tx_mac_errors) +
11920 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11921 		get_stat64(&hw_stats->tx_discards);
11922 
11923 	stats->multicast = old_stats->multicast +
11924 		get_stat64(&hw_stats->rx_mcast_packets);
11925 	stats->collisions = old_stats->collisions +
11926 		get_stat64(&hw_stats->tx_collisions);
11927 
11928 	stats->rx_length_errors = old_stats->rx_length_errors +
11929 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11930 		get_stat64(&hw_stats->rx_undersize_packets);
11931 
11932 	stats->rx_frame_errors = old_stats->rx_frame_errors +
11933 		get_stat64(&hw_stats->rx_align_errors);
11934 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11935 		get_stat64(&hw_stats->tx_discards);
11936 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11937 		get_stat64(&hw_stats->tx_carrier_sense_errors);
11938 
11939 	stats->rx_crc_errors = old_stats->rx_crc_errors +
11940 		tg3_calc_crc_errors(tp);
11941 
11942 	stats->rx_missed_errors = old_stats->rx_missed_errors +
11943 		get_stat64(&hw_stats->rx_discards);
11944 
11945 	stats->rx_dropped = tp->rx_dropped;
11946 	stats->tx_dropped = tp->tx_dropped;
11947 }
11948 
11949 static int tg3_get_regs_len(struct net_device *dev)
11950 {
11951 	return TG3_REG_BLK_SIZE;
11952 }
11953 
11954 static void tg3_get_regs(struct net_device *dev,
11955 		struct ethtool_regs *regs, void *_p)
11956 {
11957 	struct tg3 *tp = netdev_priv(dev);
11958 
11959 	regs->version = 0;
11960 
11961 	memset(_p, 0, TG3_REG_BLK_SIZE);
11962 
11963 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11964 		return;
11965 
11966 	tg3_full_lock(tp, 0);
11967 
11968 	tg3_dump_legacy_regs(tp, (u32 *)_p);
11969 
11970 	tg3_full_unlock(tp);
11971 }
11972 
11973 static int tg3_get_eeprom_len(struct net_device *dev)
11974 {
11975 	struct tg3 *tp = netdev_priv(dev);
11976 
11977 	return tp->nvram_size;
11978 }
11979 
11980 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11981 {
11982 	struct tg3 *tp = netdev_priv(dev);
11983 	int ret, cpmu_restore = 0;
11984 	u8  *pd;
11985 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11986 	__be32 val;
11987 
11988 	if (tg3_flag(tp, NO_NVRAM))
11989 		return -EINVAL;
11990 
11991 	offset = eeprom->offset;
11992 	len = eeprom->len;
11993 	eeprom->len = 0;
11994 
11995 	eeprom->magic = TG3_EEPROM_MAGIC;
11996 
11997 	/* Override clock, link aware and link idle modes */
11998 	if (tg3_flag(tp, CPMU_PRESENT)) {
11999 		cpmu_val = tr32(TG3_CPMU_CTRL);
12000 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12001 				CPMU_CTRL_LINK_IDLE_MODE)) {
12002 			tw32(TG3_CPMU_CTRL, cpmu_val &
12003 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12004 					     CPMU_CTRL_LINK_IDLE_MODE));
12005 			cpmu_restore = 1;
12006 		}
12007 	}
12008 	tg3_override_clk(tp);
12009 
12010 	if (offset & 3) {
12011 		/* adjustments to start on required 4 byte boundary */
12012 		b_offset = offset & 3;
12013 		b_count = 4 - b_offset;
12014 		if (b_count > len) {
12015 			/* i.e. offset=1 len=2 */
12016 			b_count = len;
12017 		}
12018 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12019 		if (ret)
12020 			goto eeprom_done;
12021 		memcpy(data, ((char *)&val) + b_offset, b_count);
12022 		len -= b_count;
12023 		offset += b_count;
12024 		eeprom->len += b_count;
12025 	}
12026 
12027 	/* read bytes up to the last 4 byte boundary */
12028 	pd = &data[eeprom->len];
12029 	for (i = 0; i < (len - (len & 3)); i += 4) {
12030 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12031 		if (ret) {
12032 			if (i)
12033 				i -= 4;
12034 			eeprom->len += i;
12035 			goto eeprom_done;
12036 		}
12037 		memcpy(pd + i, &val, 4);
12038 		if (need_resched()) {
12039 			if (signal_pending(current)) {
12040 				eeprom->len += i;
12041 				ret = -EINTR;
12042 				goto eeprom_done;
12043 			}
12044 			cond_resched();
12045 		}
12046 	}
12047 	eeprom->len += i;
12048 
12049 	if (len & 3) {
12050 		/* read last bytes not ending on 4 byte boundary */
12051 		pd = &data[eeprom->len];
12052 		b_count = len & 3;
12053 		b_offset = offset + len - b_count;
12054 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12055 		if (ret)
12056 			goto eeprom_done;
12057 		memcpy(pd, &val, b_count);
12058 		eeprom->len += b_count;
12059 	}
12060 	ret = 0;
12061 
12062 eeprom_done:
12063 	/* Restore clock, link aware and link idle modes */
12064 	tg3_restore_clk(tp);
12065 	if (cpmu_restore)
12066 		tw32(TG3_CPMU_CTRL, cpmu_val);
12067 
12068 	return ret;
12069 }
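/* Worked example (illustrative): a read with offset = 1 and len = 10
 * exercises all three phases above.  The head phase reads the word at
 * offset 0 and copies bytes 1..3 (b_offset = 1, b_count = 3); the
 * aligned loop then reads one full word at offset 4; and the tail phase
 * reads the word at offset 8 and copies its first 3 bytes, for a total
 * of 3 + 4 + 3 = 10 bytes returned.
 */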
12070 
12071 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12072 {
12073 	struct tg3 *tp = netdev_priv(dev);
12074 	int ret;
12075 	u32 offset, len, b_offset, odd_len;
12076 	u8 *buf;
12077 	__be32 start = 0, end;
12078 
12079 	if (tg3_flag(tp, NO_NVRAM) ||
12080 	    eeprom->magic != TG3_EEPROM_MAGIC)
12081 		return -EINVAL;
12082 
12083 	offset = eeprom->offset;
12084 	len = eeprom->len;
12085 
12086 	if ((b_offset = (offset & 3))) {
12087 		/* adjustments to start on required 4 byte boundary */
12088 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12089 		if (ret)
12090 			return ret;
12091 		len += b_offset;
12092 		offset &= ~3;
12093 		if (len < 4)
12094 			len = 4;
12095 	}
12096 
12097 	odd_len = 0;
12098 	if (len & 3) {
12099 		/* adjustments to end on required 4 byte boundary */
12100 		odd_len = 1;
12101 		len = (len + 3) & ~3;
12102 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12103 		if (ret)
12104 			return ret;
12105 	}
12106 
12107 	buf = data;
12108 	if (b_offset || odd_len) {
12109 		buf = kmalloc(len, GFP_KERNEL);
12110 		if (!buf)
12111 			return -ENOMEM;
12112 		if (b_offset)
12113 			memcpy(buf, &start, 4);
12114 		if (odd_len)
12115 			memcpy(buf+len-4, &end, 4);
12116 		memcpy(buf + b_offset, data, eeprom->len);
12117 	}
12118 
12119 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12120 
12121 	if (buf != data)
12122 		kfree(buf);
12123 
12124 	return ret;
12125 }
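/* Worked example (illustrative): a write with offset = 2 and len = 3
 * cannot be issued directly because NVRAM is written in 4-byte words.
 * The code above reads the word at offset 0 to recover bytes 0..1
 * (start), widens the range to offset = 0, len = 8, reads the word at
 * offset 4 to recover its trailing bytes (end), and merges the
 * caller's 3 bytes into a bounce buffer before writing both whole
 * words back.
 */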
12126 
12127 static int tg3_get_link_ksettings(struct net_device *dev,
12128 				  struct ethtool_link_ksettings *cmd)
12129 {
12130 	struct tg3 *tp = netdev_priv(dev);
12131 	u32 supported, advertising;
12132 
12133 	if (tg3_flag(tp, USE_PHYLIB)) {
12134 		struct phy_device *phydev;
12135 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12136 			return -EAGAIN;
12137 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12138 		phy_ethtool_ksettings_get(phydev, cmd);
12139 
12140 		return 0;
12141 	}
12142 
12143 	supported = (SUPPORTED_Autoneg);
12144 
12145 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12146 		supported |= (SUPPORTED_1000baseT_Half |
12147 			      SUPPORTED_1000baseT_Full);
12148 
12149 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12150 		supported |= (SUPPORTED_100baseT_Half |
12151 			      SUPPORTED_100baseT_Full |
12152 			      SUPPORTED_10baseT_Half |
12153 			      SUPPORTED_10baseT_Full |
12154 			      SUPPORTED_TP);
12155 		cmd->base.port = PORT_TP;
12156 	} else {
12157 		supported |= SUPPORTED_FIBRE;
12158 		cmd->base.port = PORT_FIBRE;
12159 	}
12160 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12161 						supported);
12162 
12163 	advertising = tp->link_config.advertising;
12164 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12165 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12166 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12167 				advertising |= ADVERTISED_Pause;
12168 			} else {
12169 				advertising |= ADVERTISED_Pause |
12170 					ADVERTISED_Asym_Pause;
12171 			}
12172 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12173 			advertising |= ADVERTISED_Asym_Pause;
12174 		}
12175 	}
12176 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12177 						advertising);
12178 
12179 	if (netif_running(dev) && tp->link_up) {
12180 		cmd->base.speed = tp->link_config.active_speed;
12181 		cmd->base.duplex = tp->link_config.active_duplex;
12182 		ethtool_convert_legacy_u32_to_link_mode(
12183 			cmd->link_modes.lp_advertising,
12184 			tp->link_config.rmt_adv);
12185 
12186 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12187 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12188 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12189 			else
12190 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12191 		}
12192 	} else {
12193 		cmd->base.speed = SPEED_UNKNOWN;
12194 		cmd->base.duplex = DUPLEX_UNKNOWN;
12195 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12196 	}
12197 	cmd->base.phy_address = tp->phy_addr;
12198 	cmd->base.autoneg = tp->link_config.autoneg;
12199 	return 0;
12200 }
12201 
12202 static int tg3_set_link_ksettings(struct net_device *dev,
12203 				  const struct ethtool_link_ksettings *cmd)
12204 {
12205 	struct tg3 *tp = netdev_priv(dev);
12206 	u32 speed = cmd->base.speed;
12207 	u32 advertising;
12208 
12209 	if (tg3_flag(tp, USE_PHYLIB)) {
12210 		struct phy_device *phydev;
12211 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12212 			return -EAGAIN;
12213 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12214 		return phy_ethtool_ksettings_set(phydev, cmd);
12215 	}
12216 
12217 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12218 	    cmd->base.autoneg != AUTONEG_DISABLE)
12219 		return -EINVAL;
12220 
12221 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12222 	    cmd->base.duplex != DUPLEX_FULL &&
12223 	    cmd->base.duplex != DUPLEX_HALF)
12224 		return -EINVAL;
12225 
12226 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12227 						cmd->link_modes.advertising);
12228 
12229 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12230 		u32 mask = ADVERTISED_Autoneg |
12231 			   ADVERTISED_Pause |
12232 			   ADVERTISED_Asym_Pause;
12233 
12234 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12235 			mask |= ADVERTISED_1000baseT_Half |
12236 				ADVERTISED_1000baseT_Full;
12237 
12238 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12239 			mask |= ADVERTISED_100baseT_Half |
12240 				ADVERTISED_100baseT_Full |
12241 				ADVERTISED_10baseT_Half |
12242 				ADVERTISED_10baseT_Full |
12243 				ADVERTISED_TP;
12244 		else
12245 			mask |= ADVERTISED_FIBRE;
12246 
12247 		if (advertising & ~mask)
12248 			return -EINVAL;
12249 
12250 		mask &= (ADVERTISED_1000baseT_Half |
12251 			 ADVERTISED_1000baseT_Full |
12252 			 ADVERTISED_100baseT_Half |
12253 			 ADVERTISED_100baseT_Full |
12254 			 ADVERTISED_10baseT_Half |
12255 			 ADVERTISED_10baseT_Full);
12256 
12257 		advertising &= mask;
12258 	} else {
12259 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12260 			if (speed != SPEED_1000)
12261 				return -EINVAL;
12262 
12263 			if (cmd->base.duplex != DUPLEX_FULL)
12264 				return -EINVAL;
12265 		} else {
12266 			if (speed != SPEED_100 &&
12267 			    speed != SPEED_10)
12268 				return -EINVAL;
12269 		}
12270 	}
12271 
12272 	tg3_full_lock(tp, 0);
12273 
12274 	tp->link_config.autoneg = cmd->base.autoneg;
12275 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12276 		tp->link_config.advertising = (advertising |
12277 					      ADVERTISED_Autoneg);
12278 		tp->link_config.speed = SPEED_UNKNOWN;
12279 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12280 	} else {
12281 		tp->link_config.advertising = 0;
12282 		tp->link_config.speed = speed;
12283 		tp->link_config.duplex = cmd->base.duplex;
12284 	}
12285 
12286 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12287 
12288 	tg3_warn_mgmt_link_flap(tp);
12289 
12290 	if (netif_running(dev))
12291 		tg3_setup_phy(tp, true);
12292 
12293 	tg3_full_unlock(tp);
12294 
12295 	return 0;
12296 }
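/* Usage note (illustrative): this handler services commands such as
 *
 *	ethtool -s eth0 autoneg on
 *	ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * where eth0 is a hypothetical tg3 interface.  Note the forced-speed
 * rules above: serdes parts accept only 1000/full, while copper parts
 * accept only 10 or 100 when autonegotiation is disabled.
 */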
12297 
12298 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12299 {
12300 	struct tg3 *tp = netdev_priv(dev);
12301 
12302 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12303 	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12304 	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12305 }
12306 
12307 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12308 {
12309 	struct tg3 *tp = netdev_priv(dev);
12310 
12311 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12312 		wol->supported = WAKE_MAGIC;
12313 	else
12314 		wol->supported = 0;
12315 	wol->wolopts = 0;
12316 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12317 		wol->wolopts = WAKE_MAGIC;
12318 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12319 }
12320 
12321 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12322 {
12323 	struct tg3 *tp = netdev_priv(dev);
12324 	struct device *dp = &tp->pdev->dev;
12325 
12326 	if (wol->wolopts & ~WAKE_MAGIC)
12327 		return -EINVAL;
12328 	if ((wol->wolopts & WAKE_MAGIC) &&
12329 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12330 		return -EINVAL;
12331 
12332 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12333 
12334 	if (device_may_wakeup(dp))
12335 		tg3_flag_set(tp, WOL_ENABLE);
12336 	else
12337 		tg3_flag_clear(tp, WOL_ENABLE);
12338 
12339 	return 0;
12340 }
12341 
12342 static u32 tg3_get_msglevel(struct net_device *dev)
12343 {
12344 	struct tg3 *tp = netdev_priv(dev);
12345 	return tp->msg_enable;
12346 }
12347 
12348 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12349 {
12350 	struct tg3 *tp = netdev_priv(dev);
12351 	tp->msg_enable = value;
12352 }
12353 
12354 static int tg3_nway_reset(struct net_device *dev)
12355 {
12356 	struct tg3 *tp = netdev_priv(dev);
12357 	int r;
12358 
12359 	if (!netif_running(dev))
12360 		return -EAGAIN;
12361 
12362 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12363 		return -EINVAL;
12364 
12365 	tg3_warn_mgmt_link_flap(tp);
12366 
12367 	if (tg3_flag(tp, USE_PHYLIB)) {
12368 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12369 			return -EAGAIN;
12370 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12371 	} else {
12372 		u32 bmcr;
12373 
12374 		spin_lock_bh(&tp->lock);
12375 		r = -EINVAL;
12376 		tg3_readphy(tp, MII_BMCR, &bmcr);
12377 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12378 		    ((bmcr & BMCR_ANENABLE) ||
12379 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12380 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12381 						   BMCR_ANENABLE);
12382 			r = 0;
12383 		}
12384 		spin_unlock_bh(&tp->lock);
12385 	}
12386 
12387 	return r;
12388 }
12389 
12390 static void tg3_get_ringparam(struct net_device *dev,
12391 			      struct ethtool_ringparam *ering,
12392 			      struct kernel_ethtool_ringparam *kernel_ering,
12393 			      struct netlink_ext_ack *extack)
12394 {
12395 	struct tg3 *tp = netdev_priv(dev);
12396 
12397 	ering->rx_max_pending = tp->rx_std_ring_mask;
12398 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12399 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12400 	else
12401 		ering->rx_jumbo_max_pending = 0;
12402 
12403 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12404 
12405 	ering->rx_pending = tp->rx_pending;
12406 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12407 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12408 	else
12409 		ering->rx_jumbo_pending = 0;
12410 
12411 	ering->tx_pending = tp->napi[0].tx_pending;
12412 }
12413 
12414 static int tg3_set_ringparam(struct net_device *dev,
12415 			     struct ethtool_ringparam *ering,
12416 			     struct kernel_ethtool_ringparam *kernel_ering,
12417 			     struct netlink_ext_ack *extack)
12418 {
12419 	struct tg3 *tp = netdev_priv(dev);
12420 	int i, irq_sync = 0, err = 0;
12421 	bool reset_phy = false;
12422 
12423 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12424 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12425 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12426 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12427 	    (tg3_flag(tp, TSO_BUG) &&
12428 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12429 		return -EINVAL;
12430 
12431 	if (netif_running(dev)) {
12432 		tg3_phy_stop(tp);
12433 		tg3_netif_stop(tp);
12434 		irq_sync = 1;
12435 	}
12436 
12437 	tg3_full_lock(tp, irq_sync);
12438 
12439 	tp->rx_pending = ering->rx_pending;
12440 
12441 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12442 	    tp->rx_pending > 63)
12443 		tp->rx_pending = 63;
12444 
12445 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12446 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12447 
12448 	for (i = 0; i < tp->irq_max; i++)
12449 		tp->napi[i].tx_pending = ering->tx_pending;
12450 
12451 	if (netif_running(dev)) {
12452 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12453 		/* Reset PHY to avoid PHY lock up */
12454 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12455 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12456 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12457 			reset_phy = true;
12458 
12459 		err = tg3_restart_hw(tp, reset_phy);
12460 		if (!err)
12461 			tg3_netif_start(tp);
12462 	}
12463 
12464 	tg3_full_unlock(tp);
12465 
12466 	if (irq_sync && !err)
12467 		tg3_phy_start(tp);
12468 
12469 	return err;
12470 }
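/* Usage note (illustrative): ring sizes are changed with, e.g.,
 *
 *	ethtool -G eth0 rx 511 tx 511
 *
 * where eth0 is a hypothetical tg3 interface.  The validation above
 * rejects tx rings too small to hold even one maximally fragmented skb
 * (tx_pending <= MAX_SKB_FRAGS) and triples that floor on TSO_BUG
 * chips, where a single TSO packet may be expanded in the ring.
 */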
12471 
12472 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12473 {
12474 	struct tg3 *tp = netdev_priv(dev);
12475 
12476 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12477 
12478 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12479 		epause->rx_pause = 1;
12480 	else
12481 		epause->rx_pause = 0;
12482 
12483 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12484 		epause->tx_pause = 1;
12485 	else
12486 		epause->tx_pause = 0;
12487 }
12488 
12489 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12490 {
12491 	struct tg3 *tp = netdev_priv(dev);
12492 	int err = 0;
12493 	bool reset_phy = false;
12494 
12495 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12496 		tg3_warn_mgmt_link_flap(tp);
12497 
12498 	if (tg3_flag(tp, USE_PHYLIB)) {
12499 		struct phy_device *phydev;
12500 
12501 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12502 
12503 		if (!phy_validate_pause(phydev, epause))
12504 			return -EINVAL;
12505 
12506 		tp->link_config.flowctrl = 0;
12507 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12508 		if (epause->rx_pause) {
12509 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12510 
12511 			if (epause->tx_pause) {
12512 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12513 			}
12514 		} else if (epause->tx_pause) {
12515 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12516 		}
12517 
12518 		if (epause->autoneg)
12519 			tg3_flag_set(tp, PAUSE_AUTONEG);
12520 		else
12521 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12522 
12523 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12524 			if (phydev->autoneg) {
12525 				/* phy_set_asym_pause() will
12526 				 * renegotiate the link to inform our
12527 				 * link partner of our flow control
12528 				 * settings, even if the flow control
12529 				 * is forced.  Let tg3_adjust_link()
12530 				 * do the final flow control setup.
12531 				 */
12532 				return 0;
12533 			}
12534 
12535 			if (!epause->autoneg)
12536 				tg3_setup_flow_control(tp, 0, 0);
12537 		}
12538 	} else {
12539 		int irq_sync = 0;
12540 
12541 		if (netif_running(dev)) {
12542 			tg3_netif_stop(tp);
12543 			irq_sync = 1;
12544 		}
12545 
12546 		tg3_full_lock(tp, irq_sync);
12547 
12548 		if (epause->autoneg)
12549 			tg3_flag_set(tp, PAUSE_AUTONEG);
12550 		else
12551 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12552 		if (epause->rx_pause)
12553 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12554 		else
12555 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12556 		if (epause->tx_pause)
12557 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12558 		else
12559 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12560 
12561 		if (netif_running(dev)) {
12562 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12563 			/* Reset PHY to avoid PHY lock up */
12564 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12565 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12566 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12567 				reset_phy = true;
12568 
12569 			err = tg3_restart_hw(tp, reset_phy);
12570 			if (!err)
12571 				tg3_netif_start(tp);
12572 		}
12573 
12574 		tg3_full_unlock(tp);
12575 	}
12576 
12577 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12578 
12579 	return err;
12580 }
12581 
12582 static int tg3_get_sset_count(struct net_device *dev, int sset)
12583 {
12584 	switch (sset) {
12585 	case ETH_SS_TEST:
12586 		return TG3_NUM_TEST;
12587 	case ETH_SS_STATS:
12588 		return TG3_NUM_STATS;
12589 	default:
12590 		return -EOPNOTSUPP;
12591 	}
12592 }
12593 
12594 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12595 			 u32 *rules __always_unused)
12596 {
12597 	struct tg3 *tp = netdev_priv(dev);
12598 
12599 	if (!tg3_flag(tp, SUPPORT_MSIX))
12600 		return -EOPNOTSUPP;
12601 
12602 	switch (info->cmd) {
12603 	case ETHTOOL_GRXRINGS:
12604 		if (netif_running(tp->dev))
12605 			info->data = tp->rxq_cnt;
12606 		else {
12607 			info->data = num_online_cpus();
12608 			if (info->data > TG3_RSS_MAX_NUM_QS)
12609 				info->data = TG3_RSS_MAX_NUM_QS;
12610 		}
12611 
12612 		return 0;
12613 
12614 	default:
12615 		return -EOPNOTSUPP;
12616 	}
12617 }
12618 
12619 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12620 {
12621 	u32 size = 0;
12622 	struct tg3 *tp = netdev_priv(dev);
12623 
12624 	if (tg3_flag(tp, SUPPORT_MSIX))
12625 		size = TG3_RSS_INDIR_TBL_SIZE;
12626 
12627 	return size;
12628 }
12629 
12630 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12631 {
12632 	struct tg3 *tp = netdev_priv(dev);
12633 	int i;
12634 
12635 	if (hfunc)
12636 		*hfunc = ETH_RSS_HASH_TOP;
12637 	if (!indir)
12638 		return 0;
12639 
12640 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12641 		indir[i] = tp->rss_ind_tbl[i];
12642 
12643 	return 0;
12644 }
12645 
12646 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12647 			const u8 hfunc)
12648 {
12649 	struct tg3 *tp = netdev_priv(dev);
12650 	size_t i;
12651 
12652 	/* We require at least one supported parameter to be changed and no
12653 	 * change in any of the unsupported parameters.
12654 	 */
12655 	if (key ||
12656 	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12657 		return -EOPNOTSUPP;
12658 
12659 	if (!indir)
12660 		return 0;
12661 
12662 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12663 		tp->rss_ind_tbl[i] = indir[i];
12664 
12665 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12666 		return 0;
12667 
12668 	/* It is legal to write the indirection
12669 	 * table while the device is running.
12670 	 */
12671 	tg3_full_lock(tp, 0);
12672 	tg3_rss_write_indir_tbl(tp);
12673 	tg3_full_unlock(tp);
12674 
12675 	return 0;
12676 }
12677 
12678 static void tg3_get_channels(struct net_device *dev,
12679 			     struct ethtool_channels *channel)
12680 {
12681 	struct tg3 *tp = netdev_priv(dev);
12682 	u32 deflt_qs = netif_get_num_default_rss_queues();
12683 
12684 	channel->max_rx = tp->rxq_max;
12685 	channel->max_tx = tp->txq_max;
12686 
12687 	if (netif_running(dev)) {
12688 		channel->rx_count = tp->rxq_cnt;
12689 		channel->tx_count = tp->txq_cnt;
12690 	} else {
12691 		if (tp->rxq_req)
12692 			channel->rx_count = tp->rxq_req;
12693 		else
12694 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12695 
12696 		if (tp->txq_req)
12697 			channel->tx_count = tp->txq_req;
12698 		else
12699 			channel->tx_count = min(deflt_qs, tp->txq_max);
12700 	}
12701 }
12702 
12703 static int tg3_set_channels(struct net_device *dev,
12704 			    struct ethtool_channels *channel)
12705 {
12706 	struct tg3 *tp = netdev_priv(dev);
12707 
12708 	if (!tg3_flag(tp, SUPPORT_MSIX))
12709 		return -EOPNOTSUPP;
12710 
12711 	if (channel->rx_count > tp->rxq_max ||
12712 	    channel->tx_count > tp->txq_max)
12713 		return -EINVAL;
12714 
12715 	tp->rxq_req = channel->rx_count;
12716 	tp->txq_req = channel->tx_count;
12717 
12718 	if (!netif_running(dev))
12719 		return 0;
12720 
12721 	tg3_stop(tp);
12722 
12723 	tg3_carrier_off(tp);
12724 
12725 	tg3_start(tp, true, false, false);
12726 
12727 	return 0;
12728 }
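/* Usage note (illustrative): queue counts are changed with, e.g.,
 *
 *	ethtool -L eth0 rx 4 tx 1
 *
 * where eth0 is a hypothetical tg3 interface.  Because new counts
 * change how many MSI-X vectors and rings are required, a running
 * device is fully torn down with tg3_stop() and brought back up with
 * tg3_start() rather than being reconfigured in place.
 */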
12729 
12730 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12731 {
12732 	switch (stringset) {
12733 	case ETH_SS_STATS:
12734 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12735 		break;
12736 	case ETH_SS_TEST:
12737 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12738 		break;
12739 	default:
12740 		WARN_ON(1);	/* we need a WARN() */
12741 		break;
12742 	}
12743 }
12744 
12745 static int tg3_set_phys_id(struct net_device *dev,
12746 			    enum ethtool_phys_id_state state)
12747 {
12748 	struct tg3 *tp = netdev_priv(dev);
12749 
12750 	switch (state) {
12751 	case ETHTOOL_ID_ACTIVE:
12752 		return 1;	/* cycle on/off once per second */
12753 
12754 	case ETHTOOL_ID_ON:
12755 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12756 		     LED_CTRL_1000MBPS_ON |
12757 		     LED_CTRL_100MBPS_ON |
12758 		     LED_CTRL_10MBPS_ON |
12759 		     LED_CTRL_TRAFFIC_OVERRIDE |
12760 		     LED_CTRL_TRAFFIC_BLINK |
12761 		     LED_CTRL_TRAFFIC_LED);
12762 		break;
12763 
12764 	case ETHTOOL_ID_OFF:
12765 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12766 		     LED_CTRL_TRAFFIC_OVERRIDE);
12767 		break;
12768 
12769 	case ETHTOOL_ID_INACTIVE:
12770 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12771 		break;
12772 	}
12773 
12774 	return 0;
12775 }
12776 
12777 static void tg3_get_ethtool_stats(struct net_device *dev,
12778 				   struct ethtool_stats *estats, u64 *tmp_stats)
12779 {
12780 	struct tg3 *tp = netdev_priv(dev);
12781 
12782 	if (tp->hw_stats)
12783 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12784 	else
12785 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12786 }
12787 
12788 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12789 {
12790 	int i;
12791 	__be32 *buf;
12792 	u32 offset = 0, len = 0;
12793 	u32 magic, val;
12794 
12795 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12796 		return NULL;
12797 
12798 	if (magic == TG3_EEPROM_MAGIC) {
12799 		for (offset = TG3_NVM_DIR_START;
12800 		     offset < TG3_NVM_DIR_END;
12801 		     offset += TG3_NVM_DIRENT_SIZE) {
12802 			if (tg3_nvram_read(tp, offset, &val))
12803 				return NULL;
12804 
12805 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12806 			    TG3_NVM_DIRTYPE_EXTVPD)
12807 				break;
12808 		}
12809 
12810 		if (offset != TG3_NVM_DIR_END) {
12811 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12812 			if (tg3_nvram_read(tp, offset + 4, &offset))
12813 				return NULL;
12814 
12815 			offset = tg3_nvram_logical_addr(tp, offset);
12816 		}
12817 
12818 		if (!offset || !len) {
12819 			offset = TG3_NVM_VPD_OFF;
12820 			len = TG3_NVM_VPD_LEN;
12821 		}
12822 
12823 		buf = kmalloc(len, GFP_KERNEL);
12824 		if (!buf)
12825 			return NULL;
12826 
12827 		for (i = 0; i < len; i += 4) {
12828 			/* The data is in little-endian format in NVRAM.
12829 			 * Use the big-endian read routines to preserve
12830 			 * the byte order as it exists in NVRAM.
12831 			 */
12832 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12833 				goto error;
12834 		}
12835 		*vpdlen = len;
12836 	} else {
12837 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12838 		if (IS_ERR(buf))
12839 			return NULL;
12840 	}
12841 
12842 	return buf;
12843 
12844 error:
12845 	kfree(buf);
12846 	return NULL;
12847 }
12848 
12849 #define NVRAM_TEST_SIZE 0x100
12850 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12851 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12852 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12853 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12854 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12855 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12856 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12857 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12858 
12859 static int tg3_test_nvram(struct tg3 *tp)
12860 {
12861 	u32 csum, magic;
12862 	__be32 *buf;
12863 	int i, j, k, err = 0, size;
12864 	unsigned int len;
12865 
12866 	if (tg3_flag(tp, NO_NVRAM))
12867 		return 0;
12868 
12869 	if (tg3_nvram_read(tp, 0, &magic) != 0)
12870 		return -EIO;
12871 
12872 	if (magic == TG3_EEPROM_MAGIC)
12873 		size = NVRAM_TEST_SIZE;
12874 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12875 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12876 		    TG3_EEPROM_SB_FORMAT_1) {
12877 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12878 			case TG3_EEPROM_SB_REVISION_0:
12879 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12880 				break;
12881 			case TG3_EEPROM_SB_REVISION_2:
12882 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12883 				break;
12884 			case TG3_EEPROM_SB_REVISION_3:
12885 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12886 				break;
12887 			case TG3_EEPROM_SB_REVISION_4:
12888 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12889 				break;
12890 			case TG3_EEPROM_SB_REVISION_5:
12891 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12892 				break;
12893 			case TG3_EEPROM_SB_REVISION_6:
12894 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12895 				break;
12896 			default:
12897 				return -EIO;
12898 			}
12899 		} else
12900 			return 0;
12901 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12902 		size = NVRAM_SELFBOOT_HW_SIZE;
12903 	else
12904 		return -EIO;
12905 
12906 	buf = kmalloc(size, GFP_KERNEL);
12907 	if (buf == NULL)
12908 		return -ENOMEM;
12909 
12910 	err = -EIO;
12911 	for (i = 0, j = 0; i < size; i += 4, j++) {
12912 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12913 		if (err)
12914 			break;
12915 	}
12916 	if (i < size)
12917 		goto out;
12918 
12919 	/* Selfboot format */
12920 	magic = be32_to_cpu(buf[0]);
12921 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12922 	    TG3_EEPROM_MAGIC_FW) {
12923 		u8 *buf8 = (u8 *) buf, csum8 = 0;
12924 
12925 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12926 		    TG3_EEPROM_SB_REVISION_2) {
12927 			/* For rev 2, the csum doesn't include the MBA. */
12928 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12929 				csum8 += buf8[i];
12930 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12931 				csum8 += buf8[i];
12932 		} else {
12933 			for (i = 0; i < size; i++)
12934 				csum8 += buf8[i];
12935 		}
12936 
12937 		if (csum8 == 0) {
12938 			err = 0;
12939 			goto out;
12940 		}
12941 
12942 		err = -EIO;
12943 		goto out;
12944 	}
12945 
12946 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12947 	    TG3_EEPROM_MAGIC_HW) {
12948 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12949 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12950 		u8 *buf8 = (u8 *) buf;
12951 
12952 		/* Separate the parity bits and the data bytes.  */
12953 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12954 			if ((i == 0) || (i == 8)) {
12955 				int l;
12956 				u8 msk;
12957 
12958 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12959 					parity[k++] = buf8[i] & msk;
12960 				i++;
12961 			} else if (i == 16) {
12962 				int l;
12963 				u8 msk;
12964 
12965 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12966 					parity[k++] = buf8[i] & msk;
12967 				i++;
12968 
12969 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12970 					parity[k++] = buf8[i] & msk;
12971 				i++;
12972 			}
12973 			data[j++] = buf8[i];
12974 		}
12975 
12976 		err = -EIO;
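		/* Each data byte combined with its stored parity bit must
		 * have odd parity: odd-weight data needs the bit clear,
		 * even-weight data needs it set.
		 */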
12977 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12978 			u8 hw8 = hweight8(data[i]);
12979 
12980 			if ((hw8 & 0x1) && parity[i])
12981 				goto out;
12982 			else if (!(hw8 & 0x1) && !parity[i])
12983 				goto out;
12984 		}
12985 		err = 0;
12986 		goto out;
12987 	}
12988 
12989 	err = -EIO;
12990 
12991 	/* Bootstrap checksum at offset 0x10 */
12992 	csum = calc_crc((unsigned char *) buf, 0x10);
12993 	if (csum != le32_to_cpu(buf[0x10/4]))
12994 		goto out;
12995 
12996 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12997 	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12998 	if (csum != le32_to_cpu(buf[0xfc/4]))
12999 		goto out;
13000 
13001 	kfree(buf);
13002 
13003 	buf = tg3_vpd_readblock(tp, &len);
13004 	if (!buf)
13005 		return -ENOMEM;
13006 
13007 	err = pci_vpd_check_csum(buf, len);
13008 	/* go on if no checksum found */
13009 	if (err == 1)
13010 		err = 0;
13011 out:
13012 	kfree(buf);
13013 	return err;
13014 }
13015 
13016 #define TG3_SERDES_TIMEOUT_SEC	2
13017 #define TG3_COPPER_TIMEOUT_SEC	6
13018 
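/* Poll for link-up once a second, allowing copper PHYs more time to
 * autonegotiate than SerDes links need.
 */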
13019 static int tg3_test_link(struct tg3 *tp)
13020 {
13021 	int i, max;
13022 
13023 	if (!netif_running(tp->dev))
13024 		return -ENODEV;
13025 
13026 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13027 		max = TG3_SERDES_TIMEOUT_SEC;
13028 	else
13029 		max = TG3_COPPER_TIMEOUT_SEC;
13030 
13031 	for (i = 0; i < max; i++) {
13032 		if (tp->link_up)
13033 			return 0;
13034 
13035 		if (msleep_interruptible(1000))
13036 			break;
13037 	}
13038 
13039 	return -EIO;
13040 }
13041 
13042 /* Only test the commonly used registers */
13043 static int tg3_test_registers(struct tg3 *tp)
13044 {
13045 	int i, is_5705, is_5750;
13046 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13047 	static struct {
13048 		u16 offset;
13049 		u16 flags;
13050 #define TG3_FL_5705	0x1
13051 #define TG3_FL_NOT_5705	0x2
13052 #define TG3_FL_NOT_5788	0x4
13053 #define TG3_FL_NOT_5750	0x8
13054 		u32 read_mask;
13055 		u32 write_mask;
13056 	} reg_tbl[] = {
13057 		/* MAC Control Registers */
13058 		{ MAC_MODE, TG3_FL_NOT_5705,
13059 			0x00000000, 0x00ef6f8c },
13060 		{ MAC_MODE, TG3_FL_5705,
13061 			0x00000000, 0x01ef6b8c },
13062 		{ MAC_STATUS, TG3_FL_NOT_5705,
13063 			0x03800107, 0x00000000 },
13064 		{ MAC_STATUS, TG3_FL_5705,
13065 			0x03800100, 0x00000000 },
13066 		{ MAC_ADDR_0_HIGH, 0x0000,
13067 			0x00000000, 0x0000ffff },
13068 		{ MAC_ADDR_0_LOW, 0x0000,
13069 			0x00000000, 0xffffffff },
13070 		{ MAC_RX_MTU_SIZE, 0x0000,
13071 			0x00000000, 0x0000ffff },
13072 		{ MAC_TX_MODE, 0x0000,
13073 			0x00000000, 0x00000070 },
13074 		{ MAC_TX_LENGTHS, 0x0000,
13075 			0x00000000, 0x00003fff },
13076 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13077 			0x00000000, 0x000007fc },
13078 		{ MAC_RX_MODE, TG3_FL_5705,
13079 			0x00000000, 0x000007dc },
13080 		{ MAC_HASH_REG_0, 0x0000,
13081 			0x00000000, 0xffffffff },
13082 		{ MAC_HASH_REG_1, 0x0000,
13083 			0x00000000, 0xffffffff },
13084 		{ MAC_HASH_REG_2, 0x0000,
13085 			0x00000000, 0xffffffff },
13086 		{ MAC_HASH_REG_3, 0x0000,
13087 			0x00000000, 0xffffffff },
13088 
13089 		/* Receive Data and Receive BD Initiator Control Registers. */
13090 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13091 			0x00000000, 0xffffffff },
13092 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13093 			0x00000000, 0xffffffff },
13094 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13095 			0x00000000, 0x00000003 },
13096 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13097 			0x00000000, 0xffffffff },
13098 		{ RCVDBDI_STD_BD+0, 0x0000,
13099 			0x00000000, 0xffffffff },
13100 		{ RCVDBDI_STD_BD+4, 0x0000,
13101 			0x00000000, 0xffffffff },
13102 		{ RCVDBDI_STD_BD+8, 0x0000,
13103 			0x00000000, 0xffff0002 },
13104 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13105 			0x00000000, 0xffffffff },
13106 
13107 		/* Receive BD Initiator Control Registers. */
13108 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13109 			0x00000000, 0xffffffff },
13110 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13111 			0x00000000, 0x000003ff },
13112 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13113 			0x00000000, 0xffffffff },
13114 
13115 		/* Host Coalescing Control Registers. */
13116 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13117 			0x00000000, 0x00000004 },
13118 		{ HOSTCC_MODE, TG3_FL_5705,
13119 			0x00000000, 0x000000f6 },
13120 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13121 			0x00000000, 0xffffffff },
13122 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13123 			0x00000000, 0x000003ff },
13124 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13125 			0x00000000, 0xffffffff },
13126 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13127 			0x00000000, 0x000003ff },
13128 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13129 			0x00000000, 0xffffffff },
13130 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13131 			0x00000000, 0x000000ff },
13132 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13133 			0x00000000, 0xffffffff },
13134 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13135 			0x00000000, 0x000000ff },
13136 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13137 			0x00000000, 0xffffffff },
13138 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13139 			0x00000000, 0xffffffff },
13140 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13141 			0x00000000, 0xffffffff },
13142 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13143 			0x00000000, 0x000000ff },
13144 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13145 			0x00000000, 0xffffffff },
13146 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13147 			0x00000000, 0x000000ff },
13148 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13149 			0x00000000, 0xffffffff },
13150 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13151 			0x00000000, 0xffffffff },
13152 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13153 			0x00000000, 0xffffffff },
13154 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13155 			0x00000000, 0xffffffff },
13156 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13157 			0x00000000, 0xffffffff },
13158 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13159 			0xffffffff, 0x00000000 },
13160 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13161 			0xffffffff, 0x00000000 },
13162 
13163 		/* Buffer Manager Control Registers. */
13164 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13165 			0x00000000, 0x007fff80 },
13166 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13167 			0x00000000, 0x007fffff },
13168 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13169 			0x00000000, 0x0000003f },
13170 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13171 			0x00000000, 0x000001ff },
13172 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13173 			0x00000000, 0x000001ff },
13174 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13175 			0xffffffff, 0x00000000 },
13176 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13177 			0xffffffff, 0x00000000 },
13178 
13179 		/* Mailbox Registers */
13180 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13181 			0x00000000, 0x000001ff },
13182 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13183 			0x00000000, 0x000001ff },
13184 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13185 			0x00000000, 0x000007ff },
13186 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13187 			0x00000000, 0x000001ff },
13188 
13189 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13190 	};
13191 
13192 	is_5705 = is_5750 = 0;
13193 	if (tg3_flag(tp, 5705_PLUS)) {
13194 		is_5705 = 1;
13195 		if (tg3_flag(tp, 5750_PLUS))
13196 			is_5750 = 1;
13197 	}
13198 
13199 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13200 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13201 			continue;
13202 
13203 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13204 			continue;
13205 
13206 		if (tg3_flag(tp, IS_5788) &&
13207 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13208 			continue;
13209 
13210 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13211 			continue;
13212 
13213 		offset = (u32) reg_tbl[i].offset;
13214 		read_mask = reg_tbl[i].read_mask;
13215 		write_mask = reg_tbl[i].write_mask;
13216 
13217 		/* Save the original register content */
13218 		save_val = tr32(offset);
13219 
13220 		/* Determine the read-only value. */
13221 		read_val = save_val & read_mask;
13222 
13223 		/* Write zero to the register, then make sure the read-only bits
13224 		 * are not changed and the read/write bits are all zeros.
13225 		 */
13226 		tw32(offset, 0);
13227 
13228 		val = tr32(offset);
13229 
13230 		/* Test the read-only and read/write bits. */
13231 		if (((val & read_mask) != read_val) || (val & write_mask))
13232 			goto out;
13233 
13234 		/* Write ones to all the bits defined by read_mask and write_mask, then
13235 		 * make sure the read-only bits are not changed and the
13236 		 * read/write bits are all ones.
13237 		 */
13238 		tw32(offset, read_mask | write_mask);
13239 
13240 		val = tr32(offset);
13241 
13242 		/* Test the read-only bits. */
13243 		if ((val & read_mask) != read_val)
13244 			goto out;
13245 
13246 		/* Test the read/write bits. */
13247 		if ((val & write_mask) != write_mask)
13248 			goto out;
13249 
13250 		tw32(offset, save_val);
13251 	}
13252 
13253 	return 0;
13254 
13255 out:
13256 	if (netif_msg_hw(tp))
13257 		netdev_err(tp->dev,
13258 			   "Register test failed at offset %x\n", offset);
13259 	tw32(offset, save_val);
13260 	return -EIO;
13261 }
13262 
13263 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13264 {
13265 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13266 	int i;
13267 	u32 j;
13268 
13269 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13270 		for (j = 0; j < len; j += 4) {
13271 			u32 val;
13272 
13273 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13274 			tg3_read_mem(tp, offset + j, &val);
13275 			if (val != test_pattern[i])
13276 				return -EIO;
13277 		}
13278 	}
13279 	return 0;
13280 }
13281 
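/* Per-ASIC tables of internal memory regions to exercise; each table is
 * a list of { offset, len } pairs terminated by an offset of 0xffffffff.
 */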
13282 static int tg3_test_memory(struct tg3 *tp)
13283 {
13284 	static struct mem_entry {
13285 		u32 offset;
13286 		u32 len;
13287 	} mem_tbl_570x[] = {
13288 		{ 0x00000000, 0x00b50},
13289 		{ 0x00002000, 0x1c000},
13290 		{ 0xffffffff, 0x00000}
13291 	}, mem_tbl_5705[] = {
13292 		{ 0x00000100, 0x0000c},
13293 		{ 0x00000200, 0x00008},
13294 		{ 0x00004000, 0x00800},
13295 		{ 0x00006000, 0x01000},
13296 		{ 0x00008000, 0x02000},
13297 		{ 0x00010000, 0x0e000},
13298 		{ 0xffffffff, 0x00000}
13299 	}, mem_tbl_5755[] = {
13300 		{ 0x00000200, 0x00008},
13301 		{ 0x00004000, 0x00800},
13302 		{ 0x00006000, 0x00800},
13303 		{ 0x00008000, 0x02000},
13304 		{ 0x00010000, 0x0c000},
13305 		{ 0xffffffff, 0x00000}
13306 	}, mem_tbl_5906[] = {
13307 		{ 0x00000200, 0x00008},
13308 		{ 0x00004000, 0x00400},
13309 		{ 0x00006000, 0x00400},
13310 		{ 0x00008000, 0x01000},
13311 		{ 0x00010000, 0x01000},
13312 		{ 0xffffffff, 0x00000}
13313 	}, mem_tbl_5717[] = {
13314 		{ 0x00000200, 0x00008},
13315 		{ 0x00010000, 0x0a000},
13316 		{ 0x00020000, 0x13c00},
13317 		{ 0xffffffff, 0x00000}
13318 	}, mem_tbl_57765[] = {
13319 		{ 0x00000200, 0x00008},
13320 		{ 0x00004000, 0x00800},
13321 		{ 0x00006000, 0x09800},
13322 		{ 0x00010000, 0x0a000},
13323 		{ 0xffffffff, 0x00000}
13324 	};
13325 	struct mem_entry *mem_tbl;
13326 	int err = 0;
13327 	int i;
13328 
13329 	if (tg3_flag(tp, 5717_PLUS))
13330 		mem_tbl = mem_tbl_5717;
13331 	else if (tg3_flag(tp, 57765_CLASS) ||
13332 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13333 		mem_tbl = mem_tbl_57765;
13334 	else if (tg3_flag(tp, 5755_PLUS))
13335 		mem_tbl = mem_tbl_5755;
13336 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13337 		mem_tbl = mem_tbl_5906;
13338 	else if (tg3_flag(tp, 5705_PLUS))
13339 		mem_tbl = mem_tbl_5705;
13340 	else
13341 		mem_tbl = mem_tbl_570x;
13342 
13343 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13344 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13345 		if (err)
13346 			break;
13347 	}
13348 
13349 	return err;
13350 }
13351 
13352 #define TG3_TSO_MSS		500
13353 
13354 #define TG3_TSO_IP_HDR_LEN	20
13355 #define TG3_TSO_TCP_HDR_LEN	20
13356 #define TG3_TSO_TCP_OPT_LEN	12
13357 
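/* Canned frame used by the TSO loopback test: an 0x0800 (IPv4)
 * ethertype followed by a 20-byte IP header and a 20-byte TCP header
 * carrying 12 bytes of options.
 */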
13358 static const u8 tg3_tso_header[] = {
13359 0x08, 0x00,
13360 0x45, 0x00, 0x00, 0x00,
13361 0x00, 0x00, 0x40, 0x00,
13362 0x40, 0x06, 0x00, 0x00,
13363 0x0a, 0x00, 0x00, 0x01,
13364 0x0a, 0x00, 0x00, 0x02,
13365 0x0d, 0x00, 0xe0, 0x00,
13366 0x00, 0x00, 0x01, 0x00,
13367 0x00, 0x00, 0x02, 0x00,
13368 0x80, 0x10, 0x10, 0x00,
13369 0x14, 0x09, 0x00, 0x00,
13370 0x01, 0x01, 0x08, 0x0a,
13371 0x11, 0x11, 0x11, 0x11,
13372 0x11, 0x11, 0x11, 0x11,
13373 };
13374 
13375 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13376 {
13377 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13378 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13379 	u32 budget;
13380 	struct sk_buff *skb;
13381 	u8 *tx_data, *rx_data;
13382 	dma_addr_t map;
13383 	int num_pkts, tx_len, rx_len, i, err;
13384 	struct tg3_rx_buffer_desc *desc;
13385 	struct tg3_napi *tnapi, *rnapi;
13386 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13387 
13388 	tnapi = &tp->napi[0];
13389 	rnapi = &tp->napi[0];
13390 	if (tp->irq_cnt > 1) {
13391 		if (tg3_flag(tp, ENABLE_RSS))
13392 			rnapi = &tp->napi[1];
13393 		if (tg3_flag(tp, ENABLE_TSS))
13394 			tnapi = &tp->napi[1];
13395 	}
13396 	coal_now = tnapi->coal_now | rnapi->coal_now;
13397 
13398 	err = -EIO;
13399 
13400 	tx_len = pktsz;
13401 	skb = netdev_alloc_skb(tp->dev, tx_len);
13402 	if (!skb)
13403 		return -ENOMEM;
13404 
13405 	tx_data = skb_put(skb, tx_len);
13406 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13407 	memset(tx_data + ETH_ALEN, 0x0, 8);
13408 
13409 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13410 
13411 	if (tso_loopback) {
13412 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13413 
13414 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13415 			      TG3_TSO_TCP_OPT_LEN;
13416 
13417 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13418 		       sizeof(tg3_tso_header));
13419 		mss = TG3_TSO_MSS;
13420 
13421 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13422 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13423 
13424 		/* Set the total length field in the IP header */
13425 		iph->tot_len = htons((u16)(mss + hdr_len));
13426 
13427 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13428 			      TXD_FLAG_CPU_POST_DMA);
13429 
13430 		if (tg3_flag(tp, HW_TSO_1) ||
13431 		    tg3_flag(tp, HW_TSO_2) ||
13432 		    tg3_flag(tp, HW_TSO_3)) {
13433 			struct tcphdr *th;
13434 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13435 			th = (struct tcphdr *)&tx_data[val];
13436 			th->check = 0;
13437 		} else
13438 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13439 
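		/* Each hardware TSO generation wants the header length
		 * encoded differently: split across mss and base_flags on
		 * HW_TSO_3, folded into mss (or reduced to the TCP option
		 * length) on older generations.
		 */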
13440 		if (tg3_flag(tp, HW_TSO_3)) {
13441 			mss |= (hdr_len & 0xc) << 12;
13442 			if (hdr_len & 0x10)
13443 				base_flags |= 0x00000010;
13444 			base_flags |= (hdr_len & 0x3e0) << 5;
13445 		} else if (tg3_flag(tp, HW_TSO_2))
13446 			mss |= hdr_len << 9;
13447 		else if (tg3_flag(tp, HW_TSO_1) ||
13448 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13449 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13450 		} else {
13451 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13452 		}
13453 
13454 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13455 	} else {
13456 		num_pkts = 1;
13457 		data_off = ETH_HLEN;
13458 
13459 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13460 		    tx_len > VLAN_ETH_FRAME_LEN)
13461 			base_flags |= TXD_FLAG_JMB_PKT;
13462 	}
13463 
13464 	for (i = data_off; i < tx_len; i++)
13465 		tx_data[i] = (u8) (i & 0xff);
13466 
13467 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13468 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13469 		dev_kfree_skb(skb);
13470 		return -EIO;
13471 	}
13472 
13473 	val = tnapi->tx_prod;
13474 	tnapi->tx_buffers[val].skb = skb;
13475 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13476 
13477 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13478 	       rnapi->coal_now);
13479 
13480 	udelay(10);
13481 
13482 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13483 
13484 	budget = tg3_tx_avail(tnapi);
13485 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13486 			    base_flags | TXD_FLAG_END, mss, 0)) {
13487 		tnapi->tx_buffers[val].skb = NULL;
13488 		dev_kfree_skb(skb);
13489 		return -EIO;
13490 	}
13491 
13492 	tnapi->tx_prod++;
13493 
13494 	/* Sync BD data before updating mailbox */
13495 	wmb();
13496 
13497 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13498 	tr32_mailbox(tnapi->prodmbox);
13499 
13500 	udelay(10);
13501 
13502 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13503 	for (i = 0; i < 35; i++) {
13504 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13505 		       coal_now);
13506 
13507 		udelay(10);
13508 
13509 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13510 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13511 		if ((tx_idx == tnapi->tx_prod) &&
13512 		    (rx_idx == (rx_start_idx + num_pkts)))
13513 			break;
13514 	}
13515 
13516 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13517 	dev_kfree_skb(skb);
13518 
13519 	if (tx_idx != tnapi->tx_prod)
13520 		goto out;
13521 
13522 	if (rx_idx != rx_start_idx + num_pkts)
13523 		goto out;
13524 
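	/* Walk the RX return ring and verify each frame that came back:
	 * error bits, length, the producer ring it landed on, and the byte
	 * pattern written above.
	 */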
13525 	val = data_off;
13526 	while (rx_idx != rx_start_idx) {
13527 		desc = &rnapi->rx_rcb[rx_start_idx++];
13528 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13529 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13530 
13531 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13532 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13533 			goto out;
13534 
13535 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13536 			 - ETH_FCS_LEN;
13537 
13538 		if (!tso_loopback) {
13539 			if (rx_len != tx_len)
13540 				goto out;
13541 
13542 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13543 				if (opaque_key != RXD_OPAQUE_RING_STD)
13544 					goto out;
13545 			} else {
13546 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13547 					goto out;
13548 			}
13549 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13550 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13551 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13552 			goto out;
13553 		}
13554 
13555 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13556 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13557 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13558 					     mapping);
13559 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13560 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13561 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13562 					     mapping);
13563 		} else
13564 			goto out;
13565 
13566 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13567 					DMA_FROM_DEVICE);
13568 
13569 		rx_data += TG3_RX_OFFSET(tp);
13570 		for (i = data_off; i < rx_len; i++, val++) {
13571 			if (*(rx_data + i) != (u8) (val & 0xff))
13572 				goto out;
13573 		}
13574 	}
13575 
13576 	err = 0;
13577 
13578 	/* tg3_free_rings will unmap and free the rx_data */
13579 out:
13580 	return err;
13581 }
13582 
13583 #define TG3_STD_LOOPBACK_FAILED		1
13584 #define TG3_JMB_LOOPBACK_FAILED		2
13585 #define TG3_TSO_LOOPBACK_FAILED		4
13586 #define TG3_LOOPBACK_FAILED \
13587 	(TG3_STD_LOOPBACK_FAILED | \
13588 	 TG3_JMB_LOOPBACK_FAILED | \
13589 	 TG3_TSO_LOOPBACK_FAILED)
13590 
13591 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13592 {
13593 	int err = -EIO;
13594 	u32 eee_cap;
13595 	u32 jmb_pkt_sz = 9000;
13596 
13597 	if (tp->dma_limit)
13598 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13599 
13600 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13601 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13602 
13603 	if (!netif_running(tp->dev)) {
13604 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13605 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606 		if (do_extlpbk)
13607 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608 		goto done;
13609 	}
13610 
13611 	err = tg3_reset_hw(tp, true);
13612 	if (err) {
13613 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13614 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13615 		if (do_extlpbk)
13616 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617 		goto done;
13618 	}
13619 
13620 	if (tg3_flag(tp, ENABLE_RSS)) {
13621 		int i;
13622 
13623 		/* Reroute all rx packets to the 1st queue */
13624 		for (i = MAC_RSS_INDIR_TBL_0;
13625 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13626 			tw32(i, 0x0);
13627 	}
13628 
13629 	/* HW errata - MAC loopback fails in some cases on 5780.
13630 	 * Normal traffic and PHY loopback are not affected by the
13631 	 * erratum.  Also, the MAC loopback test is deprecated for
13632 	 * all newer ASIC revisions.
13633 	 */
13634 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13635 	    !tg3_flag(tp, CPMU_PRESENT)) {
13636 		tg3_mac_loopback(tp, true);
13637 
13638 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13639 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13640 
13641 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13642 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13643 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13644 
13645 		tg3_mac_loopback(tp, false);
13646 	}
13647 
13648 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13649 	    !tg3_flag(tp, USE_PHYLIB)) {
13650 		int i;
13651 
13652 		tg3_phy_lpbk_set(tp, 0, false);
13653 
13654 		/* Wait for link */
13655 		for (i = 0; i < 100; i++) {
13656 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13657 				break;
13658 			mdelay(1);
13659 		}
13660 
13661 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13662 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13663 		if (tg3_flag(tp, TSO_CAPABLE) &&
13664 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13665 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13666 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13667 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13668 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13669 
13670 		if (do_extlpbk) {
13671 			tg3_phy_lpbk_set(tp, 0, true);
13672 
13673 			/* All link indications report up, but the hardware
13674 			 * isn't really ready for about 20 msec.  Double it
13675 			 * to be sure.
13676 			 */
13677 			mdelay(40);
13678 
13679 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13680 				data[TG3_EXT_LOOPB_TEST] |=
13681 							TG3_STD_LOOPBACK_FAILED;
13682 			if (tg3_flag(tp, TSO_CAPABLE) &&
13683 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13684 				data[TG3_EXT_LOOPB_TEST] |=
13685 							TG3_TSO_LOOPBACK_FAILED;
13686 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13687 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13688 				data[TG3_EXT_LOOPB_TEST] |=
13689 							TG3_JMB_LOOPBACK_FAILED;
13690 		}
13691 
13692 		/* Re-enable gphy autopowerdown. */
13693 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13694 			tg3_phy_toggle_apd(tp, true);
13695 	}
13696 
13697 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13698 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13699 
13700 done:
13701 	tp->phy_flags |= eee_cap;
13702 
13703 	return err;
13704 }
13705 
13706 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13707 			  u64 *data)
13708 {
13709 	struct tg3 *tp = netdev_priv(dev);
13710 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13711 
13712 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13713 		if (tg3_power_up(tp)) {
13714 			etest->flags |= ETH_TEST_FL_FAILED;
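			/* memset() with 1 just needs to make every u64
			 * nonzero; any nonzero value marks the test as
			 * failed.
			 */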
13715 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13716 			return;
13717 		}
13718 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13719 	}
13720 
13721 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13722 
13723 	if (tg3_test_nvram(tp) != 0) {
13724 		etest->flags |= ETH_TEST_FL_FAILED;
13725 		data[TG3_NVRAM_TEST] = 1;
13726 	}
13727 	if (!doextlpbk && tg3_test_link(tp)) {
13728 		etest->flags |= ETH_TEST_FL_FAILED;
13729 		data[TG3_LINK_TEST] = 1;
13730 	}
13731 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13732 		int err, err2 = 0, irq_sync = 0;
13733 
13734 		if (netif_running(dev)) {
13735 			tg3_phy_stop(tp);
13736 			tg3_netif_stop(tp);
13737 			irq_sync = 1;
13738 		}
13739 
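		/* Quiesce the chip before the intrusive tests: take the
		 * NVRAM lock before halting the on-chip CPUs so firmware is
		 * not stopped while it holds the NVRAM arbitration.
		 */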
13740 		tg3_full_lock(tp, irq_sync);
13741 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13742 		err = tg3_nvram_lock(tp);
13743 		tg3_halt_cpu(tp, RX_CPU_BASE);
13744 		if (!tg3_flag(tp, 5705_PLUS))
13745 			tg3_halt_cpu(tp, TX_CPU_BASE);
13746 		if (!err)
13747 			tg3_nvram_unlock(tp);
13748 
13749 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13750 			tg3_phy_reset(tp);
13751 
13752 		if (tg3_test_registers(tp) != 0) {
13753 			etest->flags |= ETH_TEST_FL_FAILED;
13754 			data[TG3_REGISTER_TEST] = 1;
13755 		}
13756 
13757 		if (tg3_test_memory(tp) != 0) {
13758 			etest->flags |= ETH_TEST_FL_FAILED;
13759 			data[TG3_MEMORY_TEST] = 1;
13760 		}
13761 
13762 		if (doextlpbk)
13763 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13764 
13765 		if (tg3_test_loopback(tp, data, doextlpbk))
13766 			etest->flags |= ETH_TEST_FL_FAILED;
13767 
13768 		tg3_full_unlock(tp);
13769 
13770 		if (tg3_test_interrupt(tp) != 0) {
13771 			etest->flags |= ETH_TEST_FL_FAILED;
13772 			data[TG3_INTERRUPT_TEST] = 1;
13773 		}
13774 
13775 		tg3_full_lock(tp, 0);
13776 
13777 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13778 		if (netif_running(dev)) {
13779 			tg3_flag_set(tp, INIT_COMPLETE);
13780 			err2 = tg3_restart_hw(tp, true);
13781 			if (!err2)
13782 				tg3_netif_start(tp);
13783 		}
13784 
13785 		tg3_full_unlock(tp);
13786 
13787 		if (irq_sync && !err2)
13788 			tg3_phy_start(tp);
13789 	}
13790 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13791 		tg3_power_down_prepare(tp);
13793 }
13794 
13795 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13796 {
13797 	struct tg3 *tp = netdev_priv(dev);
13798 	struct hwtstamp_config stmpconf;
13799 
13800 	if (!tg3_flag(tp, PTP_CAPABLE))
13801 		return -EOPNOTSUPP;
13802 
13803 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13804 		return -EFAULT;
13805 
13806 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13807 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13808 		return -ERANGE;
13809 
13810 	switch (stmpconf.rx_filter) {
13811 	case HWTSTAMP_FILTER_NONE:
13812 		tp->rxptpctl = 0;
13813 		break;
13814 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13815 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13816 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13817 		break;
13818 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13819 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13820 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13821 		break;
13822 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13823 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13824 			       TG3_RX_PTP_CTL_DELAY_REQ;
13825 		break;
13826 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13827 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13828 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13829 		break;
13830 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13831 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13832 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13833 		break;
13834 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13835 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13836 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13837 		break;
13838 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13839 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13840 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13841 		break;
13842 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13843 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13844 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13845 		break;
13846 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13847 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13848 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13849 		break;
13850 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13851 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13852 			       TG3_RX_PTP_CTL_DELAY_REQ;
13853 		break;
13854 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13855 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13856 			       TG3_RX_PTP_CTL_DELAY_REQ;
13857 		break;
13858 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13859 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13860 			       TG3_RX_PTP_CTL_DELAY_REQ;
13861 		break;
13862 	default:
13863 		return -ERANGE;
13864 	}
13865 
13866 	if (netif_running(dev) && tp->rxptpctl)
13867 		tw32(TG3_RX_PTP_CTL,
13868 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13869 
13870 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13871 		tg3_flag_set(tp, TX_TSTAMP_EN);
13872 	else
13873 		tg3_flag_clear(tp, TX_TSTAMP_EN);
13874 
13875 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13876 		-EFAULT : 0;
13877 }
13878 
13879 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13880 {
13881 	struct tg3 *tp = netdev_priv(dev);
13882 	struct hwtstamp_config stmpconf;
13883 
13884 	if (!tg3_flag(tp, PTP_CAPABLE))
13885 		return -EOPNOTSUPP;
13886 
13887 	stmpconf.flags = 0;
13888 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13889 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13890 
13891 	switch (tp->rxptpctl) {
13892 	case 0:
13893 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13894 		break;
13895 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13896 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13897 		break;
13898 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13899 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13900 		break;
13901 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13902 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13903 		break;
13904 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13905 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13906 		break;
13907 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13908 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13909 		break;
13910 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13911 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13912 		break;
13913 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13914 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13915 		break;
13916 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13917 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13918 		break;
13919 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13920 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13921 		break;
13922 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13923 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13924 		break;
13925 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13926 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13927 		break;
13928 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13929 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13930 		break;
13931 	default:
13932 		WARN_ON_ONCE(1);
13933 		return -ERANGE;
13934 	}
13935 
13936 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13937 		-EFAULT : 0;
13938 }
13939 
13940 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13941 {
13942 	struct mii_ioctl_data *data = if_mii(ifr);
13943 	struct tg3 *tp = netdev_priv(dev);
13944 	int err;
13945 
13946 	if (tg3_flag(tp, USE_PHYLIB)) {
13947 		struct phy_device *phydev;
13948 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13949 			return -EAGAIN;
13950 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13951 		return phy_mii_ioctl(phydev, ifr, cmd);
13952 	}
13953 
13954 	switch (cmd) {
13955 	case SIOCGMIIPHY:
13956 		data->phy_id = tp->phy_addr;
13957 
13958 		fallthrough;
13959 	case SIOCGMIIREG: {
13960 		u32 mii_regval;
13961 
13962 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13963 			break;			/* We have no PHY */
13964 
13965 		if (!netif_running(dev))
13966 			return -EAGAIN;
13967 
13968 		spin_lock_bh(&tp->lock);
13969 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13970 				    data->reg_num & 0x1f, &mii_regval);
13971 		spin_unlock_bh(&tp->lock);
13972 
13973 		data->val_out = mii_regval;
13974 
13975 		return err;
13976 	}
13977 
13978 	case SIOCSMIIREG:
13979 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13980 			break;			/* We have no PHY */
13981 
13982 		if (!netif_running(dev))
13983 			return -EAGAIN;
13984 
13985 		spin_lock_bh(&tp->lock);
13986 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13987 				     data->reg_num & 0x1f, data->val_in);
13988 		spin_unlock_bh(&tp->lock);
13989 
13990 		return err;
13991 
13992 	case SIOCSHWTSTAMP:
13993 		return tg3_hwtstamp_set(dev, ifr);
13994 
13995 	case SIOCGHWTSTAMP:
13996 		return tg3_hwtstamp_get(dev, ifr);
13997 
13998 	default:
13999 		/* do nothing */
14000 		break;
14001 	}
14002 	return -EOPNOTSUPP;
14003 }
14004 
14005 static int tg3_get_coalesce(struct net_device *dev,
14006 			    struct ethtool_coalesce *ec,
14007 			    struct kernel_ethtool_coalesce *kernel_coal,
14008 			    struct netlink_ext_ack *extack)
14009 {
14010 	struct tg3 *tp = netdev_priv(dev);
14011 
14012 	memcpy(ec, &tp->coal, sizeof(*ec));
14013 	return 0;
14014 }
14015 
14016 static int tg3_set_coalesce(struct net_device *dev,
14017 			    struct ethtool_coalesce *ec,
14018 			    struct kernel_ethtool_coalesce *kernel_coal,
14019 			    struct netlink_ext_ack *extack)
14020 {
14021 	struct tg3 *tp = netdev_priv(dev);
14022 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14023 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14024 
14025 	if (!tg3_flag(tp, 5705_PLUS)) {
14026 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14027 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14028 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14029 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14030 	}
14031 
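	/* On 5705 and newer parts the IRQ-tick and statistics-block
	 * coalescing knobs do not exist, so the maxima above stay zero and
	 * any nonzero request for them is rejected below.
	 */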
14032 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14033 	    (!ec->rx_coalesce_usecs) ||
14034 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14035 	    (!ec->tx_coalesce_usecs) ||
14036 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14037 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14038 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14039 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14040 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14041 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14042 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14043 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14044 		return -EINVAL;
14045 
14046 	/* Only copy relevant parameters, ignore all others. */
14047 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14048 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14049 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14050 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14051 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14052 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14053 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14054 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14055 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14056 
14057 	if (netif_running(dev)) {
14058 		tg3_full_lock(tp, 0);
14059 		__tg3_set_coalesce(tp, &tp->coal);
14060 		tg3_full_unlock(tp);
14061 	}
14062 	return 0;
14063 }
14064 
14065 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14066 {
14067 	struct tg3 *tp = netdev_priv(dev);
14068 
14069 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14070 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14071 		return -EOPNOTSUPP;
14072 	}
14073 
14074 	if (edata->advertised != tp->eee.advertised) {
14075 		netdev_warn(tp->dev,
14076 			    "Direct manipulation of EEE advertisement is not supported\n");
14077 		return -EINVAL;
14078 	}
14079 
14080 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14081 		netdev_warn(tp->dev,
14082 			    "Maximum supported Tx LPI timer is %#x usec\n",
14083 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14084 		return -EINVAL;
14085 	}
14086 
14087 	tp->eee = *edata;
14088 
14089 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14090 	tg3_warn_mgmt_link_flap(tp);
14091 
14092 	if (netif_running(tp->dev)) {
14093 		tg3_full_lock(tp, 0);
14094 		tg3_setup_eee(tp);
14095 		tg3_phy_reset(tp);
14096 		tg3_full_unlock(tp);
14097 	}
14098 
14099 	return 0;
14100 }
14101 
14102 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14103 {
14104 	struct tg3 *tp = netdev_priv(dev);
14105 
14106 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14107 		netdev_warn(tp->dev,
14108 			    "Board does not support EEE!\n");
14109 		return -EOPNOTSUPP;
14110 	}
14111 
14112 	*edata = tp->eee;
14113 	return 0;
14114 }
14115 
14116 static const struct ethtool_ops tg3_ethtool_ops = {
14117 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14118 				     ETHTOOL_COALESCE_MAX_FRAMES |
14119 				     ETHTOOL_COALESCE_USECS_IRQ |
14120 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14121 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14122 	.get_drvinfo		= tg3_get_drvinfo,
14123 	.get_regs_len		= tg3_get_regs_len,
14124 	.get_regs		= tg3_get_regs,
14125 	.get_wol		= tg3_get_wol,
14126 	.set_wol		= tg3_set_wol,
14127 	.get_msglevel		= tg3_get_msglevel,
14128 	.set_msglevel		= tg3_set_msglevel,
14129 	.nway_reset		= tg3_nway_reset,
14130 	.get_link		= ethtool_op_get_link,
14131 	.get_eeprom_len		= tg3_get_eeprom_len,
14132 	.get_eeprom		= tg3_get_eeprom,
14133 	.set_eeprom		= tg3_set_eeprom,
14134 	.get_ringparam		= tg3_get_ringparam,
14135 	.set_ringparam		= tg3_set_ringparam,
14136 	.get_pauseparam		= tg3_get_pauseparam,
14137 	.set_pauseparam		= tg3_set_pauseparam,
14138 	.self_test		= tg3_self_test,
14139 	.get_strings		= tg3_get_strings,
14140 	.set_phys_id		= tg3_set_phys_id,
14141 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14142 	.get_coalesce		= tg3_get_coalesce,
14143 	.set_coalesce		= tg3_set_coalesce,
14144 	.get_sset_count		= tg3_get_sset_count,
14145 	.get_rxnfc		= tg3_get_rxnfc,
14146 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14147 	.get_rxfh		= tg3_get_rxfh,
14148 	.set_rxfh		= tg3_set_rxfh,
14149 	.get_channels		= tg3_get_channels,
14150 	.set_channels		= tg3_set_channels,
14151 	.get_ts_info		= tg3_get_ts_info,
14152 	.get_eee		= tg3_get_eee,
14153 	.set_eee		= tg3_set_eee,
14154 	.get_link_ksettings	= tg3_get_link_ksettings,
14155 	.set_link_ksettings	= tg3_set_link_ksettings,
14156 };
14157 
14158 static void tg3_get_stats64(struct net_device *dev,
14159 			    struct rtnl_link_stats64 *stats)
14160 {
14161 	struct tg3 *tp = netdev_priv(dev);
14162 
14163 	spin_lock_bh(&tp->lock);
14164 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14165 		*stats = tp->net_stats_prev;
14166 		spin_unlock_bh(&tp->lock);
14167 		return;
14168 	}
14169 
14170 	tg3_get_nstats(tp, stats);
14171 	spin_unlock_bh(&tp->lock);
14172 }
14173 
14174 static void tg3_set_rx_mode(struct net_device *dev)
14175 {
14176 	struct tg3 *tp = netdev_priv(dev);
14177 
14178 	if (!netif_running(dev))
14179 		return;
14180 
14181 	tg3_full_lock(tp, 0);
14182 	__tg3_set_rx_mode(dev);
14183 	tg3_full_unlock(tp);
14184 }
14185 
14186 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14187 			       int new_mtu)
14188 {
14189 	dev->mtu = new_mtu;
14190 
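	/* 5780-class chips cannot do TSO on jumbo frames, so TSO capability
	 * is toggled as the MTU crosses ETH_DATA_LEN.
	 */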
14191 	if (new_mtu > ETH_DATA_LEN) {
14192 		if (tg3_flag(tp, 5780_CLASS)) {
14193 			netdev_update_features(dev);
14194 			tg3_flag_clear(tp, TSO_CAPABLE);
14195 		} else {
14196 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14197 		}
14198 	} else {
14199 		if (tg3_flag(tp, 5780_CLASS)) {
14200 			tg3_flag_set(tp, TSO_CAPABLE);
14201 			netdev_update_features(dev);
14202 		}
14203 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14204 	}
14205 }
14206 
14207 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14208 {
14209 	struct tg3 *tp = netdev_priv(dev);
14210 	int err;
14211 	bool reset_phy = false;
14212 
14213 	if (!netif_running(dev)) {
14214 		/* We'll just catch it later when the
14215 		 * device is brought up.
14216 		 */
14217 		tg3_set_mtu(dev, tp, new_mtu);
14218 		return 0;
14219 	}
14220 
14221 	tg3_phy_stop(tp);
14222 
14223 	tg3_netif_stop(tp);
14224 
14225 	tg3_set_mtu(dev, tp, new_mtu);
14226 
14227 	tg3_full_lock(tp, 1);
14228 
14229 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14230 
14231 	/* Reset the PHY, otherwise the read DMA engine will be left in a
14232 	 * mode that limits every read request to 256 bytes.
14233 	 */
14234 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14235 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14236 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14237 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14238 		reset_phy = true;
14239 
14240 	err = tg3_restart_hw(tp, reset_phy);
14241 
14242 	if (!err)
14243 		tg3_netif_start(tp);
14244 
14245 	tg3_full_unlock(tp);
14246 
14247 	if (!err)
14248 		tg3_phy_start(tp);
14249 
14250 	return err;
14251 }
14252 
14253 static const struct net_device_ops tg3_netdev_ops = {
14254 	.ndo_open		= tg3_open,
14255 	.ndo_stop		= tg3_close,
14256 	.ndo_start_xmit		= tg3_start_xmit,
14257 	.ndo_get_stats64	= tg3_get_stats64,
14258 	.ndo_validate_addr	= eth_validate_addr,
14259 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14260 	.ndo_set_mac_address	= tg3_set_mac_addr,
14261 	.ndo_eth_ioctl		= tg3_ioctl,
14262 	.ndo_tx_timeout		= tg3_tx_timeout,
14263 	.ndo_change_mtu		= tg3_change_mtu,
14264 	.ndo_fix_features	= tg3_fix_features,
14265 	.ndo_set_features	= tg3_set_features,
14266 #ifdef CONFIG_NET_POLL_CONTROLLER
14267 	.ndo_poll_controller	= tg3_poll_controller,
14268 #endif
14269 };
14270 
14271 static void tg3_get_eeprom_size(struct tg3 *tp)
14272 {
14273 	u32 cursize, val, magic;
14274 
14275 	tp->nvram_size = EEPROM_CHIP_SIZE;
14276 
14277 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14278 		return;
14279 
14280 	if ((magic != TG3_EEPROM_MAGIC) &&
14281 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14282 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14283 		return;
14284 
14285 	/*
14286 	 * Size the chip by reading offsets at increasing powers of two.
14287 	 * When we encounter our validation signature, we know the addressing
14288 	 * has wrapped around, and thus have our chip size.
14289 	 */
14290 	cursize = 0x10;
14291 
14292 	while (cursize < tp->nvram_size) {
14293 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14294 			return;
14295 
14296 		if (val == magic)
14297 			break;
14298 
14299 		cursize <<= 1;
14300 	}
14301 
14302 	tp->nvram_size = cursize;
14303 }
14304 
14305 static void tg3_get_nvram_size(struct tg3 *tp)
14306 {
14307 	u32 val;
14308 
14309 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14310 		return;
14311 
14312 	/* Selfboot format */
14313 	if (val != TG3_EEPROM_MAGIC) {
14314 		tg3_get_eeprom_size(tp);
14315 		return;
14316 	}
14317 
14318 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14319 		if (val != 0) {
14320 			/* This is confusing.  We want to operate on the
14321 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14322 			 * call will read from NVRAM and byteswap the data
14323 			 * according to the byteswapping settings for all
14324 			 * other register accesses.  This ensures the data we
14325 			 * want will always reside in the lower 16-bits.
14326 			 * However, the data in NVRAM is in LE format, which
14327 			 * means the data from the NVRAM read will always be
14328 			 * opposite the endianness of the CPU.  The 16-bit
14329 			 * byteswap then brings the data to CPU endianness.
14330 			 */
14331 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14332 			return;
14333 		}
14334 	}
14335 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14336 }
14337 
14338 static void tg3_get_nvram_info(struct tg3 *tp)
14339 {
14340 	u32 nvcfg1;
14341 
14342 	nvcfg1 = tr32(NVRAM_CFG1);
14343 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14344 		tg3_flag_set(tp, FLASH);
14345 	} else {
14346 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14347 		tw32(NVRAM_CFG1, nvcfg1);
14348 	}
14349 
14350 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14351 	    tg3_flag(tp, 5780_CLASS)) {
14352 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14353 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14354 			tp->nvram_jedecnum = JEDEC_ATMEL;
14355 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14356 			tg3_flag_set(tp, NVRAM_BUFFERED);
14357 			break;
14358 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14359 			tp->nvram_jedecnum = JEDEC_ATMEL;
14360 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14361 			break;
14362 		case FLASH_VENDOR_ATMEL_EEPROM:
14363 			tp->nvram_jedecnum = JEDEC_ATMEL;
14364 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14365 			tg3_flag_set(tp, NVRAM_BUFFERED);
14366 			break;
14367 		case FLASH_VENDOR_ST:
14368 			tp->nvram_jedecnum = JEDEC_ST;
14369 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14370 			tg3_flag_set(tp, NVRAM_BUFFERED);
14371 			break;
14372 		case FLASH_VENDOR_SAIFUN:
14373 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14374 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14375 			break;
14376 		case FLASH_VENDOR_SST_SMALL:
14377 		case FLASH_VENDOR_SST_LARGE:
14378 			tp->nvram_jedecnum = JEDEC_SST;
14379 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14380 			break;
14381 		}
14382 	} else {
14383 		tp->nvram_jedecnum = JEDEC_ATMEL;
14384 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14385 		tg3_flag_set(tp, NVRAM_BUFFERED);
14386 	}
14387 }
14388 
14389 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14390 {
14391 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14392 	case FLASH_5752PAGE_SIZE_256:
14393 		tp->nvram_pagesize = 256;
14394 		break;
14395 	case FLASH_5752PAGE_SIZE_512:
14396 		tp->nvram_pagesize = 512;
14397 		break;
14398 	case FLASH_5752PAGE_SIZE_1K:
14399 		tp->nvram_pagesize = 1024;
14400 		break;
14401 	case FLASH_5752PAGE_SIZE_2K:
14402 		tp->nvram_pagesize = 2048;
14403 		break;
14404 	case FLASH_5752PAGE_SIZE_4K:
14405 		tp->nvram_pagesize = 4096;
14406 		break;
14407 	case FLASH_5752PAGE_SIZE_264:
14408 		tp->nvram_pagesize = 264;
14409 		break;
14410 	case FLASH_5752PAGE_SIZE_528:
14411 		tp->nvram_pagesize = 528;
14412 		break;
14413 	}
14414 }
14415 
14416 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14417 {
14418 	u32 nvcfg1;
14419 
14420 	nvcfg1 = tr32(NVRAM_CFG1);
14421 
14422 	/* NVRAM protection for TPM */
14423 	if (nvcfg1 & (1 << 27))
14424 		tg3_flag_set(tp, PROTECTED_NVRAM);
14425 
14426 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14427 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14428 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14429 		tp->nvram_jedecnum = JEDEC_ATMEL;
14430 		tg3_flag_set(tp, NVRAM_BUFFERED);
14431 		break;
14432 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14433 		tp->nvram_jedecnum = JEDEC_ATMEL;
14434 		tg3_flag_set(tp, NVRAM_BUFFERED);
14435 		tg3_flag_set(tp, FLASH);
14436 		break;
14437 	case FLASH_5752VENDOR_ST_M45PE10:
14438 	case FLASH_5752VENDOR_ST_M45PE20:
14439 	case FLASH_5752VENDOR_ST_M45PE40:
14440 		tp->nvram_jedecnum = JEDEC_ST;
14441 		tg3_flag_set(tp, NVRAM_BUFFERED);
14442 		tg3_flag_set(tp, FLASH);
14443 		break;
14444 	}
14445 
14446 	if (tg3_flag(tp, FLASH)) {
14447 		tg3_nvram_get_pagesize(tp, nvcfg1);
14448 	} else {
14449 		/* For EEPROM, set pagesize to the maximum EEPROM size. */
14450 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14451 
14452 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14453 		tw32(NVRAM_CFG1, nvcfg1);
14454 	}
14455 }
14456 
14457 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14458 {
14459 	u32 nvcfg1, protect = 0;
14460 
14461 	nvcfg1 = tr32(NVRAM_CFG1);
14462 
14463 	/* NVRAM protection for TPM */
14464 	if (nvcfg1 & (1 << 27)) {
14465 		tg3_flag_set(tp, PROTECTED_NVRAM);
14466 		protect = 1;
14467 	}
14468 
14469 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14470 	switch (nvcfg1) {
14471 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14472 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14473 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14474 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14475 		tp->nvram_jedecnum = JEDEC_ATMEL;
14476 		tg3_flag_set(tp, NVRAM_BUFFERED);
14477 		tg3_flag_set(tp, FLASH);
14478 		tp->nvram_pagesize = 264;
14479 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14480 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14481 			tp->nvram_size = (protect ? 0x3e200 :
14482 					  TG3_NVRAM_SIZE_512KB);
14483 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14484 			tp->nvram_size = (protect ? 0x1f200 :
14485 					  TG3_NVRAM_SIZE_256KB);
14486 		else
14487 			tp->nvram_size = (protect ? 0x1f200 :
14488 					  TG3_NVRAM_SIZE_128KB);
14489 		break;
14490 	case FLASH_5752VENDOR_ST_M45PE10:
14491 	case FLASH_5752VENDOR_ST_M45PE20:
14492 	case FLASH_5752VENDOR_ST_M45PE40:
14493 		tp->nvram_jedecnum = JEDEC_ST;
14494 		tg3_flag_set(tp, NVRAM_BUFFERED);
14495 		tg3_flag_set(tp, FLASH);
14496 		tp->nvram_pagesize = 256;
14497 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14498 			tp->nvram_size = (protect ?
14499 					  TG3_NVRAM_SIZE_64KB :
14500 					  TG3_NVRAM_SIZE_128KB);
14501 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14502 			tp->nvram_size = (protect ?
14503 					  TG3_NVRAM_SIZE_64KB :
14504 					  TG3_NVRAM_SIZE_256KB);
14505 		else
14506 			tp->nvram_size = (protect ?
14507 					  TG3_NVRAM_SIZE_128KB :
14508 					  TG3_NVRAM_SIZE_512KB);
14509 		break;
14510 	}
14511 }
14512 
14513 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14514 {
14515 	u32 nvcfg1;
14516 
14517 	nvcfg1 = tr32(NVRAM_CFG1);
14518 
14519 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14520 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14521 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14522 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14523 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14524 		tp->nvram_jedecnum = JEDEC_ATMEL;
14525 		tg3_flag_set(tp, NVRAM_BUFFERED);
14526 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14527 
14528 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14529 		tw32(NVRAM_CFG1, nvcfg1);
14530 		break;
14531 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14532 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14533 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14534 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14535 		tp->nvram_jedecnum = JEDEC_ATMEL;
14536 		tg3_flag_set(tp, NVRAM_BUFFERED);
14537 		tg3_flag_set(tp, FLASH);
14538 		tp->nvram_pagesize = 264;
14539 		break;
14540 	case FLASH_5752VENDOR_ST_M45PE10:
14541 	case FLASH_5752VENDOR_ST_M45PE20:
14542 	case FLASH_5752VENDOR_ST_M45PE40:
14543 		tp->nvram_jedecnum = JEDEC_ST;
14544 		tg3_flag_set(tp, NVRAM_BUFFERED);
14545 		tg3_flag_set(tp, FLASH);
14546 		tp->nvram_pagesize = 256;
14547 		break;
14548 	}
14549 }
14550 
14551 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14552 {
14553 	u32 nvcfg1, protect = 0;
14554 
14555 	nvcfg1 = tr32(NVRAM_CFG1);
14556 
14557 	/* NVRAM protection for TPM */
14558 	if (nvcfg1 & (1 << 27)) {
14559 		tg3_flag_set(tp, PROTECTED_NVRAM);
14560 		protect = 1;
14561 	}
14562 
14563 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14564 	switch (nvcfg1) {
14565 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14566 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14567 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14568 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14569 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14570 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14571 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14572 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14573 		tp->nvram_jedecnum = JEDEC_ATMEL;
14574 		tg3_flag_set(tp, NVRAM_BUFFERED);
14575 		tg3_flag_set(tp, FLASH);
14576 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14577 		tp->nvram_pagesize = 256;
14578 		break;
14579 	case FLASH_5761VENDOR_ST_A_M45PE20:
14580 	case FLASH_5761VENDOR_ST_A_M45PE40:
14581 	case FLASH_5761VENDOR_ST_A_M45PE80:
14582 	case FLASH_5761VENDOR_ST_A_M45PE16:
14583 	case FLASH_5761VENDOR_ST_M_M45PE20:
14584 	case FLASH_5761VENDOR_ST_M_M45PE40:
14585 	case FLASH_5761VENDOR_ST_M_M45PE80:
14586 	case FLASH_5761VENDOR_ST_M_M45PE16:
14587 		tp->nvram_jedecnum = JEDEC_ST;
14588 		tg3_flag_set(tp, NVRAM_BUFFERED);
14589 		tg3_flag_set(tp, FLASH);
14590 		tp->nvram_pagesize = 256;
14591 		break;
14592 	}
14593 
14594 	if (protect) {
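		/* With TPM protection enabled, the usable size is taken from
		 * the address-lockout register, which marks the start of the
		 * protected region.
		 */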
14595 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14596 	} else {
14597 		switch (nvcfg1) {
14598 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14599 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14600 		case FLASH_5761VENDOR_ST_A_M45PE16:
14601 		case FLASH_5761VENDOR_ST_M_M45PE16:
14602 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14603 			break;
14604 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14605 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14606 		case FLASH_5761VENDOR_ST_A_M45PE80:
14607 		case FLASH_5761VENDOR_ST_M_M45PE80:
14608 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14609 			break;
14610 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14611 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14612 		case FLASH_5761VENDOR_ST_A_M45PE40:
14613 		case FLASH_5761VENDOR_ST_M_M45PE40:
14614 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14615 			break;
14616 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14617 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14618 		case FLASH_5761VENDOR_ST_A_M45PE20:
14619 		case FLASH_5761VENDOR_ST_M_M45PE20:
14620 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14621 			break;
14622 		}
14623 	}
14624 }
14625 
14626 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14627 {
14628 	tp->nvram_jedecnum = JEDEC_ATMEL;
14629 	tg3_flag_set(tp, NVRAM_BUFFERED);
14630 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14631 }
14632 
14633 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14634 {
14635 	u32 nvcfg1;
14636 
14637 	nvcfg1 = tr32(NVRAM_CFG1);
14638 
14639 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14640 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14641 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14642 		tp->nvram_jedecnum = JEDEC_ATMEL;
14643 		tg3_flag_set(tp, NVRAM_BUFFERED);
14644 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14645 
14646 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14647 		tw32(NVRAM_CFG1, nvcfg1);
14648 		return;
14649 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14650 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14651 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14652 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14653 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14654 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14655 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14656 		tp->nvram_jedecnum = JEDEC_ATMEL;
14657 		tg3_flag_set(tp, NVRAM_BUFFERED);
14658 		tg3_flag_set(tp, FLASH);
14659 
14660 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14661 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14662 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14663 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14664 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14665 			break;
14666 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14667 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14668 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14669 			break;
14670 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14671 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14672 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14673 			break;
14674 		}
14675 		break;
14676 	case FLASH_5752VENDOR_ST_M45PE10:
14677 	case FLASH_5752VENDOR_ST_M45PE20:
14678 	case FLASH_5752VENDOR_ST_M45PE40:
14679 		tp->nvram_jedecnum = JEDEC_ST;
14680 		tg3_flag_set(tp, NVRAM_BUFFERED);
14681 		tg3_flag_set(tp, FLASH);
14682 
14683 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14684 		case FLASH_5752VENDOR_ST_M45PE10:
14685 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14686 			break;
14687 		case FLASH_5752VENDOR_ST_M45PE20:
14688 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14689 			break;
14690 		case FLASH_5752VENDOR_ST_M45PE40:
14691 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14692 			break;
14693 		}
14694 		break;
14695 	default:
14696 		tg3_flag_set(tp, NO_NVRAM);
14697 		return;
14698 	}
14699 
14700 	tg3_nvram_get_pagesize(tp, nvcfg1);
14701 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14702 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14703 }
14704 
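/* NVRAM detection for the 5717/5719 family.  EEPROM straps bypass the
 * flash path entirely; for flash parts the size comes from the strap
 * or, where a strap maps to several parts, from tg3_nvram_get_size().
 */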
14706 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14707 {
14708 	u32 nvcfg1;
14709 
14710 	nvcfg1 = tr32(NVRAM_CFG1);
14711 
14712 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14713 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14714 	case FLASH_5717VENDOR_MICRO_EEPROM:
14715 		tp->nvram_jedecnum = JEDEC_ATMEL;
14716 		tg3_flag_set(tp, NVRAM_BUFFERED);
14717 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14718 
14719 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14720 		tw32(NVRAM_CFG1, nvcfg1);
14721 		return;
14722 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14723 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14724 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14725 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14726 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14727 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14728 	case FLASH_5717VENDOR_ATMEL_45USPT:
14729 		tp->nvram_jedecnum = JEDEC_ATMEL;
14730 		tg3_flag_set(tp, NVRAM_BUFFERED);
14731 		tg3_flag_set(tp, FLASH);
14732 
14733 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14734 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14735 			/* Detect size with tg3_nvram_get_size() */
14736 			break;
14737 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14738 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14739 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14740 			break;
14741 		default:
14742 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14743 			break;
14744 		}
14745 		break;
14746 	case FLASH_5717VENDOR_ST_M_M25PE10:
14747 	case FLASH_5717VENDOR_ST_A_M25PE10:
14748 	case FLASH_5717VENDOR_ST_M_M45PE10:
14749 	case FLASH_5717VENDOR_ST_A_M45PE10:
14750 	case FLASH_5717VENDOR_ST_M_M25PE20:
14751 	case FLASH_5717VENDOR_ST_A_M25PE20:
14752 	case FLASH_5717VENDOR_ST_M_M45PE20:
14753 	case FLASH_5717VENDOR_ST_A_M45PE20:
14754 	case FLASH_5717VENDOR_ST_25USPT:
14755 	case FLASH_5717VENDOR_ST_45USPT:
14756 		tp->nvram_jedecnum = JEDEC_ST;
14757 		tg3_flag_set(tp, NVRAM_BUFFERED);
14758 		tg3_flag_set(tp, FLASH);
14759 
14760 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14761 		case FLASH_5717VENDOR_ST_M_M25PE20:
14762 		case FLASH_5717VENDOR_ST_M_M45PE20:
14763 			/* Detect size with tg3_nvram_get_size() */
14764 			break;
14765 		case FLASH_5717VENDOR_ST_A_M25PE20:
14766 		case FLASH_5717VENDOR_ST_A_M45PE20:
14767 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14768 			break;
14769 		default:
14770 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14771 			break;
14772 		}
14773 		break;
14774 	default:
14775 		tg3_flag_set(tp, NO_NVRAM);
14776 		return;
14777 	}
14778 
14779 	tg3_nvram_get_pagesize(tp, nvcfg1);
14780 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14781 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14782 }
14783 
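/* NVRAM detection for the 5720 and 5762.  On the 5762, Macronix MX25L
 * parts report their size via the autosense status register; the other
 * 5762 straps are first remapped to their 5720 equivalents.
 */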
14784 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14785 {
14786 	u32 nvcfg1, nvmpinstrp, nv_status;
14787 
14788 	nvcfg1 = tr32(NVRAM_CFG1);
14789 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14790 
14791 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14792 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14793 			tg3_flag_set(tp, NO_NVRAM);
14794 			return;
14795 		}
14796 
14797 		switch (nvmpinstrp) {
14798 		case FLASH_5762_MX25L_100:
14799 		case FLASH_5762_MX25L_200:
14800 		case FLASH_5762_MX25L_400:
14801 		case FLASH_5762_MX25L_800:
14802 		case FLASH_5762_MX25L_160_320:
14803 			tp->nvram_pagesize = 4096;
14804 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14805 			tg3_flag_set(tp, NVRAM_BUFFERED);
14806 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14807 			tg3_flag_set(tp, FLASH);
14808 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14809 			tp->nvram_size =
14810 				(1 << (nv_status >> AUTOSENSE_DEVID &
14811 						AUTOSENSE_DEVID_MASK)
14812 					<< AUTOSENSE_SIZE_IN_MB);
14813 			return;
14814 
14815 		case FLASH_5762_EEPROM_HD:
14816 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14817 			break;
14818 		case FLASH_5762_EEPROM_LD:
14819 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14820 			break;
14821 		case FLASH_5720VENDOR_M_ST_M45PE20:
14822 			/* This pinstrap supports multiple sizes, so force it
14823 			 * to read the actual size from location 0xf0.
14824 			 */
14825 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14826 			break;
14827 		}
14828 	}
14829 
14830 	switch (nvmpinstrp) {
14831 	case FLASH_5720_EEPROM_HD:
14832 	case FLASH_5720_EEPROM_LD:
14833 		tp->nvram_jedecnum = JEDEC_ATMEL;
14834 		tg3_flag_set(tp, NVRAM_BUFFERED);
14835 
14836 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14837 		tw32(NVRAM_CFG1, nvcfg1);
14838 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14839 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14840 		else
14841 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14842 		return;
14843 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14844 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14845 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14846 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14847 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14848 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14849 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14850 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14851 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14852 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14853 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14854 	case FLASH_5720VENDOR_ATMEL_45USPT:
14855 		tp->nvram_jedecnum = JEDEC_ATMEL;
14856 		tg3_flag_set(tp, NVRAM_BUFFERED);
14857 		tg3_flag_set(tp, FLASH);
14858 
14859 		switch (nvmpinstrp) {
14860 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14861 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14862 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14863 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14864 			break;
14865 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14866 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14867 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14868 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14869 			break;
14870 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14871 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14872 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14873 			break;
14874 		default:
14875 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14876 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14877 			break;
14878 		}
14879 		break;
14880 	case FLASH_5720VENDOR_M_ST_M25PE10:
14881 	case FLASH_5720VENDOR_M_ST_M45PE10:
14882 	case FLASH_5720VENDOR_A_ST_M25PE10:
14883 	case FLASH_5720VENDOR_A_ST_M45PE10:
14884 	case FLASH_5720VENDOR_M_ST_M25PE20:
14885 	case FLASH_5720VENDOR_M_ST_M45PE20:
14886 	case FLASH_5720VENDOR_A_ST_M25PE20:
14887 	case FLASH_5720VENDOR_A_ST_M45PE20:
14888 	case FLASH_5720VENDOR_M_ST_M25PE40:
14889 	case FLASH_5720VENDOR_M_ST_M45PE40:
14890 	case FLASH_5720VENDOR_A_ST_M25PE40:
14891 	case FLASH_5720VENDOR_A_ST_M45PE40:
14892 	case FLASH_5720VENDOR_M_ST_M25PE80:
14893 	case FLASH_5720VENDOR_M_ST_M45PE80:
14894 	case FLASH_5720VENDOR_A_ST_M25PE80:
14895 	case FLASH_5720VENDOR_A_ST_M45PE80:
14896 	case FLASH_5720VENDOR_ST_25USPT:
14897 	case FLASH_5720VENDOR_ST_45USPT:
14898 		tp->nvram_jedecnum = JEDEC_ST;
14899 		tg3_flag_set(tp, NVRAM_BUFFERED);
14900 		tg3_flag_set(tp, FLASH);
14901 
14902 		switch (nvmpinstrp) {
14903 		case FLASH_5720VENDOR_M_ST_M25PE20:
14904 		case FLASH_5720VENDOR_M_ST_M45PE20:
14905 		case FLASH_5720VENDOR_A_ST_M25PE20:
14906 		case FLASH_5720VENDOR_A_ST_M45PE20:
14907 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14908 			break;
14909 		case FLASH_5720VENDOR_M_ST_M25PE40:
14910 		case FLASH_5720VENDOR_M_ST_M45PE40:
14911 		case FLASH_5720VENDOR_A_ST_M25PE40:
14912 		case FLASH_5720VENDOR_A_ST_M45PE40:
14913 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14914 			break;
14915 		case FLASH_5720VENDOR_M_ST_M25PE80:
14916 		case FLASH_5720VENDOR_M_ST_M45PE80:
14917 		case FLASH_5720VENDOR_A_ST_M25PE80:
14918 		case FLASH_5720VENDOR_A_ST_M45PE80:
14919 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14920 			break;
14921 		default:
14922 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14923 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14924 			break;
14925 		}
14926 		break;
14927 	default:
14928 		tg3_flag_set(tp, NO_NVRAM);
14929 		return;
14930 	}
14931 
14932 	tg3_nvram_get_pagesize(tp, nvcfg1);
14933 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14934 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14935 
14936 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14937 		u32 val;
14938 
14939 		if (tg3_nvram_read(tp, 0, &val))
14940 			return;
14941 
14942 		if (val != TG3_EEPROM_MAGIC &&
14943 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14944 			tg3_flag_set(tp, NO_NVRAM);
14945 	}
14946 }
14947 
14948 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14949 static void tg3_nvram_init(struct tg3 *tp)
14950 {
14951 	if (tg3_flag(tp, IS_SSB_CORE)) {
14952 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14953 		tg3_flag_clear(tp, NVRAM);
14954 		tg3_flag_clear(tp, NVRAM_BUFFERED);
14955 		tg3_flag_set(tp, NO_NVRAM);
14956 		return;
14957 	}
14958 
14959 	tw32_f(GRC_EEPROM_ADDR,
14960 	     (EEPROM_ADDR_FSM_RESET |
14961 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14962 	       EEPROM_ADDR_CLKPERD_SHIFT)));
14963 
14964 	msleep(1);
14965 
14966 	/* Enable seeprom accesses. */
14967 	tw32_f(GRC_LOCAL_CTRL,
14968 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14969 	udelay(100);
14970 
14971 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14972 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14973 		tg3_flag_set(tp, NVRAM);
14974 
14975 		if (tg3_nvram_lock(tp)) {
14976 			netdev_warn(tp->dev,
14977 				    "Cannot get nvram lock, %s failed\n",
14978 				    __func__);
14979 			return;
14980 		}
14981 		tg3_enable_nvram_access(tp);
14982 
14983 		tp->nvram_size = 0;
14984 
14985 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14986 			tg3_get_5752_nvram_info(tp);
14987 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14988 			tg3_get_5755_nvram_info(tp);
14989 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14990 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14991 			 tg3_asic_rev(tp) == ASIC_REV_5785)
14992 			tg3_get_5787_nvram_info(tp);
14993 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14994 			tg3_get_5761_nvram_info(tp);
14995 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14996 			tg3_get_5906_nvram_info(tp);
14997 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14998 			 tg3_flag(tp, 57765_CLASS))
14999 			tg3_get_57780_nvram_info(tp);
15000 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15001 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15002 			tg3_get_5717_nvram_info(tp);
15003 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15004 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15005 			tg3_get_5720_nvram_info(tp);
15006 		else
15007 			tg3_get_nvram_info(tp);
15008 
15009 		if (tp->nvram_size == 0)
15010 			tg3_get_nvram_size(tp);
15011 
15012 		tg3_disable_nvram_access(tp);
15013 		tg3_nvram_unlock(tp);
15014 
15015 	} else {
15016 		tg3_flag_clear(tp, NVRAM);
15017 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15018 
15019 		tg3_get_eeprom_size(tp);
15020 	}
15021 }
15022 
15023 struct subsys_tbl_ent {
15024 	u16 subsys_vendor, subsys_devid;
15025 	u32 phy_id;
15026 };
15027 
15028 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15029 	/* Broadcom boards. */
15030 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15031 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15032 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15033 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15034 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15035 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15036 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15037 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15038 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15039 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15040 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15041 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15042 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15043 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15044 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15045 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15046 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15047 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15048 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15049 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15050 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15051 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15052 
15053 	/* 3com boards. */
15054 	{ TG3PCI_SUBVENDOR_ID_3COM,
15055 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15056 	{ TG3PCI_SUBVENDOR_ID_3COM,
15057 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15058 	{ TG3PCI_SUBVENDOR_ID_3COM,
15059 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15060 	{ TG3PCI_SUBVENDOR_ID_3COM,
15061 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15062 	{ TG3PCI_SUBVENDOR_ID_3COM,
15063 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15064 
15065 	/* DELL boards. */
15066 	{ TG3PCI_SUBVENDOR_ID_DELL,
15067 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15068 	{ TG3PCI_SUBVENDOR_ID_DELL,
15069 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15070 	{ TG3PCI_SUBVENDOR_ID_DELL,
15071 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15072 	{ TG3PCI_SUBVENDOR_ID_DELL,
15073 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15074 
15075 	/* Compaq boards. */
15076 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15077 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15078 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15079 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15080 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15081 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15082 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15083 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15084 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15085 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15086 
15087 	/* IBM boards. */
15088 	{ TG3PCI_SUBVENDOR_ID_IBM,
15089 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15090 };
15091 
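/* Match the PCI subsystem vendor/device IDs against the board table
 * above to recover a PHY ID when the NVRAM does not provide one.
 */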
15092 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15093 {
15094 	int i;
15095 
15096 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15097 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15098 		     tp->pdev->subsystem_vendor) &&
15099 		    (subsys_id_to_phy_id[i].subsys_devid ==
15100 		     tp->pdev->subsystem_device))
15101 			return &subsys_id_to_phy_id[i];
15102 	}
15103 	return NULL;
15104 }
15105 
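/* Pull the hardware configuration (PHY ID, LED mode, WOL, ASF/APE
 * enables, etc.) that the bootcode left behind in NIC SRAM.
 */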
15106 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15107 {
15108 	u32 val;
15109 
15110 	tp->phy_id = TG3_PHY_ID_INVALID;
15111 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15112 
15113 	/* Assume an onboard, WOL-capable device by default. */
15114 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15115 	tg3_flag_set(tp, WOL_CAP);
15116 
15117 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15118 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15119 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15120 			tg3_flag_set(tp, IS_NIC);
15121 		}
15122 		val = tr32(VCPU_CFGSHDW);
15123 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15124 			tg3_flag_set(tp, ASPM_WORKAROUND);
15125 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15126 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15127 			tg3_flag_set(tp, WOL_ENABLE);
15128 			device_set_wakeup_enable(&tp->pdev->dev, true);
15129 		}
15130 		goto done;
15131 	}
15132 
15133 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15134 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15135 		u32 nic_cfg, led_cfg;
15136 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15137 		u32 nic_phy_id, ver, eeprom_phy_id;
15138 		int eeprom_phy_serdes = 0;
15139 
15140 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15141 		tp->nic_sram_data_cfg = nic_cfg;
15142 
15143 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15144 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15145 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15146 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15147 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15148 		    (ver > 0) && (ver < 0x100))
15149 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15150 
15151 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15152 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15153 
15154 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15155 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15156 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15157 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15158 
15159 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15160 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15161 			eeprom_phy_serdes = 1;
15162 
15163 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15164 		if (nic_phy_id != 0) {
15165 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15166 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15167 
15168 			eeprom_phy_id  = (id1 >> 16) << 10;
15169 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15170 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15171 		} else
15172 			eeprom_phy_id = 0;
15173 
15174 		tp->phy_id = eeprom_phy_id;
15175 		if (eeprom_phy_serdes) {
15176 			if (!tg3_flag(tp, 5705_PLUS))
15177 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15178 			else
15179 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15180 		}
15181 
15182 		if (tg3_flag(tp, 5750_PLUS))
15183 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15184 				    SHASTA_EXT_LED_MODE_MASK);
15185 		else
15186 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15187 
15188 		switch (led_cfg) {
15189 		default:
15190 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15191 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15192 			break;
15193 
15194 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15195 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15196 			break;
15197 
15198 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15199 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15200 
15201 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15202 			 * read from some older 5700/5701 bootcode.
15203 			 */
15204 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15205 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15206 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15207 
15208 			break;
15209 
15210 		case SHASTA_EXT_LED_SHARED:
15211 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15212 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15213 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15214 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15215 						 LED_CTRL_MODE_PHY_2);
15216 
15217 			if (tg3_flag(tp, 5717_PLUS) ||
15218 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15219 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15220 						LED_CTRL_BLINK_RATE_MASK;
15221 
15222 			break;
15223 
15224 		case SHASTA_EXT_LED_MAC:
15225 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15226 			break;
15227 
15228 		case SHASTA_EXT_LED_COMBO:
15229 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15230 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15231 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15232 						 LED_CTRL_MODE_PHY_2);
15233 			break;
15234 
15235 		}
15236 
15237 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15238 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15239 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15240 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15241 
15242 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15243 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15244 
15245 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15246 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15247 			if ((tp->pdev->subsystem_vendor ==
15248 			     PCI_VENDOR_ID_ARIMA) &&
15249 			    (tp->pdev->subsystem_device == 0x205a ||
15250 			     tp->pdev->subsystem_device == 0x2063))
15251 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15252 		} else {
15253 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15254 			tg3_flag_set(tp, IS_NIC);
15255 		}
15256 
15257 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15258 			tg3_flag_set(tp, ENABLE_ASF);
15259 			if (tg3_flag(tp, 5750_PLUS))
15260 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15261 		}
15262 
15263 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15264 		    tg3_flag(tp, 5750_PLUS))
15265 			tg3_flag_set(tp, ENABLE_APE);
15266 
15267 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15268 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15269 			tg3_flag_clear(tp, WOL_CAP);
15270 
15271 		if (tg3_flag(tp, WOL_CAP) &&
15272 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15273 			tg3_flag_set(tp, WOL_ENABLE);
15274 			device_set_wakeup_enable(&tp->pdev->dev, true);
15275 		}
15276 
15277 		if (cfg2 & (1 << 17))
15278 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15279 
15280 		/* Serdes signal pre-emphasis in register 0x590 is set
15281 		 * by the bootcode if bit 18 is set. */
15282 		if (cfg2 & (1 << 18))
15283 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15284 
15285 		if ((tg3_flag(tp, 57765_PLUS) ||
15286 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15287 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15288 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15289 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15290 
15291 		if (tg3_flag(tp, PCI_EXPRESS)) {
15292 			u32 cfg3;
15293 
15294 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15295 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15296 			    !tg3_flag(tp, 57765_PLUS) &&
15297 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15298 				tg3_flag_set(tp, ASPM_WORKAROUND);
15299 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15300 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15301 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15302 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15303 		}
15304 
15305 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15306 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15307 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15308 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15309 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15310 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15311 
15312 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15313 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15314 	}
15315 done:
15316 	if (tg3_flag(tp, WOL_CAP))
15317 		device_set_wakeup_enable(&tp->pdev->dev,
15318 					 tg3_flag(tp, WOL_ENABLE));
15319 	else
15320 		device_set_wakeup_capable(&tp->pdev->dev, false);
15321 }
15322 
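/* Read one 32-bit word from the APE OTP region, polling up to 1 ms for
 * command completion.  Returns 0 on success or -EBUSY on timeout.
 */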
15323 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15324 {
15325 	int i, err;
15326 	u32 val2, off = offset * 8;
15327 
15328 	err = tg3_nvram_lock(tp);
15329 	if (err)
15330 		return err;
15331 
15332 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15333 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15334 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15335 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15336 	udelay(10);
15337 
15338 	for (i = 0; i < 100; i++) {
15339 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15340 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15341 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15342 			break;
15343 		}
15344 		udelay(10);
15345 	}
15346 
15347 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15348 
15349 	tg3_nvram_unlock(tp);
15350 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15351 		return 0;
15352 
15353 	return -EBUSY;
15354 }
15355 
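/* Issue an OTP command and wait for it to complete. */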
15356 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15357 {
15358 	int i;
15359 	u32 val;
15360 
15361 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15362 	tw32(OTP_CTRL, cmd);
15363 
15364 	/* Wait for up to 1 ms for command to execute. */
15365 	for (i = 0; i < 100; i++) {
15366 		val = tr32(OTP_STATUS);
15367 		if (val & OTP_STATUS_CMD_DONE)
15368 			break;
15369 		udelay(10);
15370 	}
15371 
15372 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15373 }
15374 
15375 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15376  * configuration is a 32-bit value that straddles the alignment boundary.
15377  * We do two 32-bit reads and then shift and merge the results.
15378  */
15379 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15380 {
15381 	u32 bhalf_otp, thalf_otp;
15382 
15383 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15384 
15385 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15386 		return 0;
15387 
15388 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15389 
15390 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15391 		return 0;
15392 
15393 	thalf_otp = tr32(OTP_READ_DATA);
15394 
15395 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15396 
15397 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15398 		return 0;
15399 
15400 	bhalf_otp = tr32(OTP_READ_DATA);
15401 
15402 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15403 }
15404 
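/* Install the default link configuration: autoneg enabled with every
 * mode advertised, minus the 10/100-only, serdes and no-1G-half-duplex
 * restrictions that apply to this PHY.
 */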
15405 static void tg3_phy_init_link_config(struct tg3 *tp)
15406 {
15407 	u32 adv = ADVERTISED_Autoneg;
15408 
15409 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15410 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15411 			adv |= ADVERTISED_1000baseT_Half;
15412 		adv |= ADVERTISED_1000baseT_Full;
15413 	}
15414 
15415 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15416 		adv |= ADVERTISED_100baseT_Half |
15417 		       ADVERTISED_100baseT_Full |
15418 		       ADVERTISED_10baseT_Half |
15419 		       ADVERTISED_10baseT_Full |
15420 		       ADVERTISED_TP;
15421 	else
15422 		adv |= ADVERTISED_FIBRE;
15423 
15424 	tp->link_config.advertising = adv;
15425 	tp->link_config.speed = SPEED_UNKNOWN;
15426 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15427 	tp->link_config.autoneg = AUTONEG_ENABLE;
15428 	tp->link_config.active_speed = SPEED_UNKNOWN;
15429 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15430 
15431 	tp->old_link = -1;
15432 }
15433 
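/* Identify the PHY, falling back to the ID recorded in the EEPROM or,
 * failing that, to the subsystem ID table, then set up EEE capabilities
 * and the initial link configuration.
 */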
15434 static int tg3_phy_probe(struct tg3 *tp)
15435 {
15436 	u32 hw_phy_id_1, hw_phy_id_2;
15437 	u32 hw_phy_id, hw_phy_id_masked;
15438 	int err;
15439 
15440 	/* flow control autonegotiation is default behavior */
15441 	tg3_flag_set(tp, PAUSE_AUTONEG);
15442 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15443 
15444 	if (tg3_flag(tp, ENABLE_APE)) {
15445 		switch (tp->pci_fn) {
15446 		case 0:
15447 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15448 			break;
15449 		case 1:
15450 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15451 			break;
15452 		case 2:
15453 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15454 			break;
15455 		case 3:
15456 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15457 			break;
15458 		}
15459 	}
15460 
15461 	if (!tg3_flag(tp, ENABLE_ASF) &&
15462 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15463 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15464 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15465 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15466 
15467 	if (tg3_flag(tp, USE_PHYLIB))
15468 		return tg3_phy_init(tp);
15469 
15470 	/* Reading the PHY ID register can conflict with ASF
15471 	 * firmware access to the PHY hardware.
15472 	 */
15473 	err = 0;
15474 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15475 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15476 	} else {
15477 		/* Now read the physical PHY_ID from the chip and verify
15478 		 * that it is sane.  If it doesn't look good, we fall back
15479 		 * to the PHY ID found in the eeprom area and, failing
15480 		 * that, to the hard-coded subsystem ID table.
15481 		 */
15482 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15483 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15484 
15485 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15486 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15487 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15488 
15489 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15490 	}
15491 
15492 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15493 		tp->phy_id = hw_phy_id;
15494 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15495 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15496 		else
15497 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15498 	} else {
15499 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15500 			/* Do nothing, phy ID already set up in
15501 			 * tg3_get_eeprom_hw_cfg().
15502 			 */
15503 		} else {
15504 			struct subsys_tbl_ent *p;
15505 
15506 			/* No eeprom signature?  Try the hardcoded
15507 			 * subsys device table.
15508 			 */
15509 			p = tg3_lookup_by_subsys(tp);
15510 			if (p) {
15511 				tp->phy_id = p->phy_id;
15512 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15513 				/* So far we have seen the IDs 0xbc050cd0,
15514 				 * 0xbc050f80 and 0xbc050c30 on devices
15515 				 * connected to a BCM4785, and there are
15516 				 * probably more.  For now, just assume
15517 				 * that the phy is supported when it is
15518 				 * connected to an SSB core.
15519 				 */
15520 				return -ENODEV;
15521 			}
15522 
15523 			if (!tp->phy_id ||
15524 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15525 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15526 		}
15527 	}
15528 
15529 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15530 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15531 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15532 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15533 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15534 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15535 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15536 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15537 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15538 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15539 
15540 		tp->eee.supported = SUPPORTED_100baseT_Full |
15541 				    SUPPORTED_1000baseT_Full;
15542 		tp->eee.advertised = ADVERTISED_100baseT_Full |
15543 				     ADVERTISED_1000baseT_Full;
15544 		tp->eee.eee_enabled = 1;
15545 		tp->eee.tx_lpi_enabled = 1;
15546 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15547 	}
15548 
15549 	tg3_phy_init_link_config(tp);
15550 
15551 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15552 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15553 	    !tg3_flag(tp, ENABLE_APE) &&
15554 	    !tg3_flag(tp, ENABLE_ASF)) {
15555 		u32 bmsr, dummy;
15556 
15557 		tg3_readphy(tp, MII_BMSR, &bmsr);
15558 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15559 		    (bmsr & BMSR_LSTATUS))
15560 			goto skip_phy_reset;
15561 
15562 		err = tg3_phy_reset(tp);
15563 		if (err)
15564 			return err;
15565 
15566 		tg3_phy_set_wirespeed(tp);
15567 
15568 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15569 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15570 					    tp->link_config.flowctrl);
15571 
15572 			tg3_writephy(tp, MII_BMCR,
15573 				     BMCR_ANENABLE | BMCR_ANRESTART);
15574 		}
15575 	}
15576 
15577 skip_phy_reset:
15578 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15579 		err = tg3_init_5401phy_dsp(tp);
15580 		if (err)
15581 			return err;
15582 
15583 		err = tg3_init_5401phy_dsp(tp);
15584 	}
15585 
15586 	return err;
15587 }
15588 
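/* Extract the board part number (and, on Dell boards, a firmware
 * version prefix) from the PCI VPD, falling back to names hardcoded by
 * PCI device ID when no VPD is available.
 */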
15589 static void tg3_read_vpd(struct tg3 *tp)
15590 {
15591 	u8 *vpd_data;
15592 	unsigned int len, vpdlen;
15593 	int i;
15594 
15595 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15596 	if (!vpd_data)
15597 		goto out_no_vpd;
15598 
15599 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15600 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15601 	if (i < 0)
15602 		goto partno;
15603 
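	/* "1028" is the Dell PCI vendor ID (0x1028) rendered in ASCII. */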
15604 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15605 		goto partno;
15606 
15607 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15608 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15609 	if (i < 0)
15610 		goto partno;
15611 
15612 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15613 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15614 
15615 partno:
15616 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15617 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15618 	if (i < 0)
15619 		goto out_not_found;
15620 
15621 	if (len > TG3_BPN_SIZE)
15622 		goto out_not_found;
15623 
15624 	memcpy(tp->board_part_number, &vpd_data[i], len);
15625 
15626 out_not_found:
15627 	kfree(vpd_data);
15628 	if (tp->board_part_number[0])
15629 		return;
15630 
15631 out_no_vpd:
15632 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15633 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15634 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15635 			strcpy(tp->board_part_number, "BCM5717");
15636 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15637 			strcpy(tp->board_part_number, "BCM5718");
15638 		else
15639 			goto nomatch;
15640 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15641 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15642 			strcpy(tp->board_part_number, "BCM57780");
15643 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15644 			strcpy(tp->board_part_number, "BCM57760");
15645 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15646 			strcpy(tp->board_part_number, "BCM57790");
15647 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15648 			strcpy(tp->board_part_number, "BCM57788");
15649 		else
15650 			goto nomatch;
15651 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15652 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15653 			strcpy(tp->board_part_number, "BCM57761");
15654 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15655 			strcpy(tp->board_part_number, "BCM57765");
15656 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15657 			strcpy(tp->board_part_number, "BCM57781");
15658 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15659 			strcpy(tp->board_part_number, "BCM57785");
15660 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15661 			strcpy(tp->board_part_number, "BCM57791");
15662 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15663 			strcpy(tp->board_part_number, "BCM57795");
15664 		else
15665 			goto nomatch;
15666 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15667 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15668 			strcpy(tp->board_part_number, "BCM57762");
15669 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15670 			strcpy(tp->board_part_number, "BCM57766");
15671 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15672 			strcpy(tp->board_part_number, "BCM57782");
15673 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15674 			strcpy(tp->board_part_number, "BCM57786");
15675 		else
15676 			goto nomatch;
15677 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15678 		strcpy(tp->board_part_number, "BCM95906");
15679 	} else {
15680 nomatch:
15681 		strcpy(tp->board_part_number, "none");
15682 	}
15683 }
15684 
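/* Sanity check a firmware image header: the first word must carry the
 * 0x0c000000 signature bits and the second word must be zero.
 */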
15685 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15686 {
15687 	u32 val;
15688 
15689 	if (tg3_nvram_read(tp, offset, &val) ||
15690 	    (val & 0xfc000000) != 0x0c000000 ||
15691 	    tg3_nvram_read(tp, offset + 4, &val) ||
15692 	    val != 0)
15693 		return 0;
15694 
15695 	return 1;
15696 }
15697 
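/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte version string; older ones encode major/minor numbers in a
 * directory entry.
 */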
15698 static void tg3_read_bc_ver(struct tg3 *tp)
15699 {
15700 	u32 val, offset, start, ver_offset;
15701 	int i, dst_off;
15702 	bool newver = false;
15703 
15704 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15705 	    tg3_nvram_read(tp, 0x4, &start))
15706 		return;
15707 
15708 	offset = tg3_nvram_logical_addr(tp, offset);
15709 
15710 	if (tg3_nvram_read(tp, offset, &val))
15711 		return;
15712 
15713 	if ((val & 0xfc000000) == 0x0c000000) {
15714 		if (tg3_nvram_read(tp, offset + 4, &val))
15715 			return;
15716 
15717 		if (val == 0)
15718 			newver = true;
15719 	}
15720 
15721 	dst_off = strlen(tp->fw_ver);
15722 
15723 	if (newver) {
15724 		if (TG3_VER_SIZE - dst_off < 16 ||
15725 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15726 			return;
15727 
15728 		offset = offset + ver_offset - start;
15729 		for (i = 0; i < 16; i += 4) {
15730 			__be32 v;
15731 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15732 				return;
15733 
15734 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15735 		}
15736 	} else {
15737 		u32 major, minor;
15738 
15739 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15740 			return;
15741 
15742 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15743 			TG3_NVM_BCVER_MAJSFT;
15744 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15745 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15746 			 "v%d.%02d", major, minor);
15747 	}
15748 }
15749 
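/* Append the hardware selfboot version from NVRAM config word 1. */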
15750 static void tg3_read_hwsb_ver(struct tg3 *tp)
15751 {
15752 	u32 val, major, minor;
15753 
15754 	/* Use native endian representation */
15755 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15756 		return;
15757 
15758 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15759 		TG3_NVM_HWSB_CFG1_MAJSFT;
15760 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15761 		TG3_NVM_HWSB_CFG1_MINSFT;
15762 
15763 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15764 }
15765 
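/* Append the selfboot format 1 major/minor/build version to tp->fw_ver. */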
15766 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15767 {
15768 	u32 offset, major, minor, build;
15769 
15770 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15771 
15772 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15773 		return;
15774 
15775 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15776 	case TG3_EEPROM_SB_REVISION_0:
15777 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15778 		break;
15779 	case TG3_EEPROM_SB_REVISION_2:
15780 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15781 		break;
15782 	case TG3_EEPROM_SB_REVISION_3:
15783 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15784 		break;
15785 	case TG3_EEPROM_SB_REVISION_4:
15786 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15787 		break;
15788 	case TG3_EEPROM_SB_REVISION_5:
15789 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15790 		break;
15791 	case TG3_EEPROM_SB_REVISION_6:
15792 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15793 		break;
15794 	default:
15795 		return;
15796 	}
15797 
15798 	if (tg3_nvram_read(tp, offset, &val))
15799 		return;
15800 
15801 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15802 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15803 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15804 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15805 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15806 
15807 	if (minor > 99 || build > 26)
15808 		return;
15809 
15810 	offset = strlen(tp->fw_ver);
15811 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15812 		 " v%d.%02d", major, minor);
15813 
15814 	if (build > 0) {
15815 		offset = strlen(tp->fw_ver);
15816 		if (offset < TG3_VER_SIZE - 1)
15817 			tp->fw_ver[offset] = 'a' + build - 1;
15818 	}
15819 }
15820 
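/* Locate the ASF management firmware image through the NVRAM directory
 * and append its version string to tp->fw_ver.
 */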
15821 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15822 {
15823 	u32 val, offset, start;
15824 	int i, vlen;
15825 
15826 	for (offset = TG3_NVM_DIR_START;
15827 	     offset < TG3_NVM_DIR_END;
15828 	     offset += TG3_NVM_DIRENT_SIZE) {
15829 		if (tg3_nvram_read(tp, offset, &val))
15830 			return;
15831 
15832 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15833 			break;
15834 	}
15835 
15836 	if (offset == TG3_NVM_DIR_END)
15837 		return;
15838 
15839 	if (!tg3_flag(tp, 5705_PLUS))
15840 		start = 0x08000000;
15841 	else if (tg3_nvram_read(tp, offset - 4, &start))
15842 		return;
15843 
15844 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15845 	    !tg3_fw_img_is_valid(tp, offset) ||
15846 	    tg3_nvram_read(tp, offset + 8, &val))
15847 		return;
15848 
15849 	offset += val - start;
15850 
15851 	vlen = strlen(tp->fw_ver);
15852 
15853 	tp->fw_ver[vlen++] = ',';
15854 	tp->fw_ver[vlen++] = ' ';
15855 
15856 	for (i = 0; i < 4; i++) {
15857 		__be32 v;
15858 		if (tg3_nvram_read_be32(tp, offset, &v))
15859 			return;
15860 
15861 		offset += sizeof(v);
15862 
15863 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15864 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15865 			break;
15866 		}
15867 
15868 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15869 		vlen += sizeof(v);
15870 	}
15871 }
15872 
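/* Set APE_HAS_NCSI if the running APE firmware is ready and advertises
 * NCSI support.
 */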
15873 static void tg3_probe_ncsi(struct tg3 *tp)
15874 {
15875 	u32 apedata;
15876 
15877 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15878 	if (apedata != APE_SEG_SIG_MAGIC)
15879 		return;
15880 
15881 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15882 	if (!(apedata & APE_FW_STATUS_READY))
15883 		return;
15884 
15885 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15886 		tg3_flag_set(tp, APE_HAS_NCSI);
15887 }
15888 
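/* Append the DASH / SMASH / NCSI firmware version reported by the APE. */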
15889 static void tg3_read_dash_ver(struct tg3 *tp)
15890 {
15891 	int vlen;
15892 	u32 apedata;
15893 	char *fwtype;
15894 
15895 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15896 
15897 	if (tg3_flag(tp, APE_HAS_NCSI))
15898 		fwtype = "NCSI";
15899 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15900 		fwtype = "SMASH";
15901 	else
15902 		fwtype = "DASH";
15903 
15904 	vlen = strlen(tp->fw_ver);
15905 
15906 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15907 		 fwtype,
15908 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15909 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15910 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15911 		 (apedata & APE_FW_VERSION_BLDMSK));
15912 }
15913 
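/* On the 5762, append the firmware version stored in OTP. */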
15914 static void tg3_read_otp_ver(struct tg3 *tp)
15915 {
15916 	u32 val, val2;
15917 
15918 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15919 		return;
15920 
15921 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15922 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15923 	    TG3_OTP_MAGIC0_VALID(val)) {
15924 		u64 val64 = (u64) val << 32 | val2;
15925 		u32 ver = 0;
15926 		int i, vlen;
15927 
15928 		for (i = 0; i < 7; i++) {
15929 			if ((val64 & 0xff) == 0)
15930 				break;
15931 			ver = val64 & 0xff;
15932 			val64 >>= 8;
15933 		}
15934 		vlen = strlen(tp->fw_ver);
15935 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15936 	}
15937 }
15938 
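/* Assemble tp->fw_ver from whatever firmware images are present:
 * bootcode, selfboot or hardware selfboot, plus the DASH/NCSI or
 * management firmware version when ASF is enabled.
 */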
15939 static void tg3_read_fw_ver(struct tg3 *tp)
15940 {
15941 	u32 val;
15942 	bool vpd_vers = false;
15943 
15944 	if (tp->fw_ver[0] != 0)
15945 		vpd_vers = true;
15946 
15947 	if (tg3_flag(tp, NO_NVRAM)) {
15948 		strcat(tp->fw_ver, "sb");
15949 		tg3_read_otp_ver(tp);
15950 		return;
15951 	}
15952 
15953 	if (tg3_nvram_read(tp, 0, &val))
15954 		return;
15955 
15956 	if (val == TG3_EEPROM_MAGIC)
15957 		tg3_read_bc_ver(tp);
15958 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15959 		tg3_read_sb_ver(tp, val);
15960 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15961 		tg3_read_hwsb_ver(tp);
15962 
15963 	if (tg3_flag(tp, ENABLE_ASF)) {
15964 		if (tg3_flag(tp, ENABLE_APE)) {
15965 			tg3_probe_ncsi(tp);
15966 			if (!vpd_vers)
15967 				tg3_read_dash_ver(tp);
15968 		} else if (!vpd_vers) {
15969 			tg3_read_mgmtfw_ver(tp);
15970 		}
15971 	}
15972 
15973 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15974 }
15975 
15976 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15977 {
15978 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15979 		return TG3_RX_RET_MAX_SIZE_5717;
15980 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15981 		return TG3_RX_RET_MAX_SIZE_5700;
15982 	else
15983 		return TG3_RX_RET_MAX_SIZE_5705;
15984 }
15985 
15986 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15987 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15988 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15989 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15990 	{ },
15991 };
15992 
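/* Find the other PCI function of a dual-port 5704/5714 device. */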
15993 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15994 {
15995 	struct pci_dev *peer;
15996 	unsigned int func, devnr = tp->pdev->devfn & ~7;
15997 
15998 	for (func = 0; func < 8; func++) {
15999 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16000 		if (peer && peer != tp->pdev)
16001 			break;
16002 		pci_dev_put(peer);
16003 	}
16004 	/* The 5704 can be configured in single-port mode; set peer to
16005 	 * tp->pdev in that case.
16006 	 */
16007 	if (!peer) {
16008 		peer = tp->pdev;
16009 		return peer;
16010 	}
16011 
16012 	/*
16013 	 * We don't need to keep the refcount elevated; there's no way
16014 	 * to remove one half of this device without removing the other.
16015 	 */
16016 	pci_dev_put(peer);
16017 
16018 	return peer;
16019 }
16020 
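/* Derive the chip revision ID, consulting the alternate product ID
 * registers on newer devices, and set the ASIC generation flags
 * (5705_PLUS, 5750_PLUS, 57765_PLUS, ...) used throughout the driver.
 */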
16021 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16022 {
16023 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16024 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16025 		u32 reg;
16026 
16027 		/* All devices that use the alternate
16028 		 * ASIC REV location have a CPMU.
16029 		 */
16030 		tg3_flag_set(tp, CPMU_PRESENT);
16031 
16032 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16033 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16034 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16035 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16036 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16037 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16038 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16039 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16040 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16041 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16042 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16043 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16044 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16045 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16046 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16047 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16048 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16049 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16050 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16051 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16052 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16053 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16054 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16055 		else
16056 			reg = TG3PCI_PRODID_ASICREV;
16057 
16058 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16059 	}
16060 
16061 	/* Wrong chip ID in 5752 A0. This code can be removed later
16062 	 * as A0 is not in production.
16063 	 */
16064 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16065 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16066 
16067 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16068 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16069 
16070 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16071 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16072 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16073 		tg3_flag_set(tp, 5717_PLUS);
16074 
16075 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16076 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16077 		tg3_flag_set(tp, 57765_CLASS);
16078 
16079 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16080 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16081 		tg3_flag_set(tp, 57765_PLUS);
16082 
16083 	/* Intentionally exclude ASIC_REV_5906 */
16084 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16085 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16086 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16087 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16088 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16089 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16090 	    tg3_flag(tp, 57765_PLUS))
16091 		tg3_flag_set(tp, 5755_PLUS);
16092 
16093 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16094 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16095 		tg3_flag_set(tp, 5780_CLASS);
16096 
16097 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16098 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16099 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16100 	    tg3_flag(tp, 5755_PLUS) ||
16101 	    tg3_flag(tp, 5780_CLASS))
16102 		tg3_flag_set(tp, 5750_PLUS);
16103 
16104 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16105 	    tg3_flag(tp, 5750_PLUS))
16106 		tg3_flag_set(tp, 5705_PLUS);
16107 }
16108 
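/* A device is 10/100-only if it is a 5703 with one of two specific
 * board IDs, has a FET PHY, or is flagged 10_100_ONLY in the PCI ID
 * table (5705 devices additionally need the 5705_10_100 flag).
 */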
16109 static bool tg3_10_100_only_device(struct tg3 *tp,
16110 				   const struct pci_device_id *ent)
16111 {
16112 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16113 
16114 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16115 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16116 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16117 		return true;
16118 
16119 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16120 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16121 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16122 				return true;
16123 		} else {
16124 			return true;
16125 		}
16126 	}
16127 
16128 	return false;
16129 }
16130 
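/* One-time probe of chip capabilities, bugs and workarounds; runs
 * before the device is brought up.
 */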
16131 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16132 {
16133 	u32 misc_ctrl_reg;
16134 	u32 pci_state_reg, grc_misc_cfg;
16135 	u32 val;
16136 	u16 pci_cmd;
16137 	int err;
16138 
16139 	/* Force memory write invalidate off.  If we leave it on,
16140 	 * then on 5700_BX chips we have to enable a workaround.
16141 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16142 	 * to match the cacheline size.  The Broadcom driver has this
16143 	 * workaround but turns MWI off all the time, so it never uses
16144 	 * it.  This seems to suggest that the workaround is insufficient.
16145 	 */
16146 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16147 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16148 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16149 
16150 	/* Important! -- Make sure register accesses are byteswapped
16151 	 * correctly.  Also, for those chips that require it, make
16152 	 * sure that indirect register accesses are enabled before
16153 	 * the first operation.
16154 	 */
16155 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16156 			      &misc_ctrl_reg);
16157 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16158 			       MISC_HOST_CTRL_CHIPREV);
16159 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16160 			       tp->misc_host_ctrl);
16161 
16162 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16163 
16164 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16165 	 * we need to disable memory and use config. cycles
16166 	 * only to access all registers. The 5702/03 chips
16167 	 * can mistakenly decode the special cycles from the
16168 	 * ICH chipsets as memory write cycles, causing corruption
16169 	 * of register and memory space. Only certain ICH bridges
16170 	 * will drive special cycles with non-zero data during the
16171 	 * address phase which can fall within the 5703's address
16172 	 * range. This is not an ICH bug as the PCI spec allows
16173 	 * non-zero address during special cycles. However, only
16174 	 * these ICH bridges are known to drive non-zero addresses
16175 	 * during special cycles.
16176 	 *
16177 	 * Since special cycles do not cross PCI bridges, we only
16178 	 * enable this workaround if the 5703 is on the secondary
16179 	 * bus of these ICH bridges.
16180 	 */
16181 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16182 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16183 		static struct tg3_dev_id {
16184 			u32	vendor;
16185 			u32	device;
16186 			u32	rev;
16187 		} ich_chipsets[] = {
16188 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16189 			  PCI_ANY_ID },
16190 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16191 			  PCI_ANY_ID },
16192 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16193 			  0xa },
16194 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16195 			  PCI_ANY_ID },
16196 			{ },
16197 		};
16198 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16199 		struct pci_dev *bridge = NULL;
16200 
16201 		while (pci_id->vendor != 0) {
16202 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16203 						bridge);
16204 			if (!bridge) {
16205 				pci_id++;
16206 				continue;
16207 			}
16208 			if (pci_id->rev != PCI_ANY_ID) {
16209 				if (bridge->revision > pci_id->rev)
16210 					continue;
16211 			}
16212 			if (bridge->subordinate &&
16213 			    (bridge->subordinate->number ==
16214 			     tp->pdev->bus->number)) {
16215 				tg3_flag_set(tp, ICH_WORKAROUND);
16216 				pci_dev_put(bridge);
16217 				break;
16218 			}
16219 		}
16220 	}
16221 
16222 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16223 		static struct tg3_dev_id {
16224 			u32	vendor;
16225 			u32	device;
16226 		} bridge_chipsets[] = {
16227 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16228 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16229 			{ },
16230 		};
16231 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16232 		struct pci_dev *bridge = NULL;
16233 
16234 		while (pci_id->vendor != 0) {
16235 			bridge = pci_get_device(pci_id->vendor,
16236 						pci_id->device,
16237 						bridge);
16238 			if (!bridge) {
16239 				pci_id++;
16240 				continue;
16241 			}
16242 			if (bridge->subordinate &&
16243 			    (bridge->subordinate->number <=
16244 			     tp->pdev->bus->number) &&
16245 			    (bridge->subordinate->busn_res.end >=
16246 			     tp->pdev->bus->number)) {
16247 				tg3_flag_set(tp, 5701_DMA_BUG);
16248 				pci_dev_put(bridge);
16249 				break;
16250 			}
16251 		}
16252 	}
16253 
16254 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16255 	 * DMA addresses wider than 40 bits.  This bridge may have other
16256 	 * 57xx devices behind it in some 4-port NIC designs, for example.
16257 	 * Any tg3 device found behind the bridge will also need the 40-bit
16258 	 * DMA workaround.
16259 	 */
16260 	if (tg3_flag(tp, 5780_CLASS)) {
16261 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16262 		tp->msi_cap = tp->pdev->msi_cap;
16263 	} else {
16264 		struct pci_dev *bridge = NULL;
16265 
16266 		do {
16267 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16268 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16269 						bridge);
16270 			if (bridge && bridge->subordinate &&
16271 			    (bridge->subordinate->number <=
16272 			     tp->pdev->bus->number) &&
16273 			    (bridge->subordinate->busn_res.end >=
16274 			     tp->pdev->bus->number)) {
16275 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16276 				pci_dev_put(bridge);
16277 				break;
16278 			}
16279 		} while (bridge);
16280 	}
16281 
16282 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16283 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16284 		tp->pdev_peer = tg3_find_peer(tp);
16285 
16286 	/* Determine TSO capabilities */
16287 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16288 		; /* Do nothing. HW bug. */
16289 	else if (tg3_flag(tp, 57765_PLUS))
16290 		tg3_flag_set(tp, HW_TSO_3);
16291 	else if (tg3_flag(tp, 5755_PLUS) ||
16292 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16293 		tg3_flag_set(tp, HW_TSO_2);
16294 	else if (tg3_flag(tp, 5750_PLUS)) {
16295 		tg3_flag_set(tp, HW_TSO_1);
16296 		tg3_flag_set(tp, TSO_BUG);
16297 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16298 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16299 			tg3_flag_clear(tp, TSO_BUG);
16300 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16301 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16302 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16303 		tg3_flag_set(tp, FW_TSO);
16304 		tg3_flag_set(tp, TSO_BUG);
16305 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16306 			tp->fw_needed = FIRMWARE_TG3TSO5;
16307 		else
16308 			tp->fw_needed = FIRMWARE_TG3TSO;
16309 	}
16310 
16311 	/* Selectively allow TSO based on operating conditions */
16312 	if (tg3_flag(tp, HW_TSO_1) ||
16313 	    tg3_flag(tp, HW_TSO_2) ||
16314 	    tg3_flag(tp, HW_TSO_3) ||
16315 	    tg3_flag(tp, FW_TSO)) {
16316 		/* For firmware TSO, assume ASF is disabled.
16317 		 * We'll disable TSO later if we discover ASF
16318 		 * is enabled in tg3_get_eeprom_hw_cfg().
16319 		 */
16320 		tg3_flag_set(tp, TSO_CAPABLE);
16321 	} else {
16322 		tg3_flag_clear(tp, TSO_CAPABLE);
16323 		tg3_flag_clear(tp, TSO_BUG);
16324 		tp->fw_needed = NULL;
16325 	}
16326 
16327 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16328 		tp->fw_needed = FIRMWARE_TG3;
16329 
16330 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16331 		tp->fw_needed = FIRMWARE_TG357766;
16332 
16333 	tp->irq_max = 1;
16334 
16335 	if (tg3_flag(tp, 5750_PLUS)) {
16336 		tg3_flag_set(tp, SUPPORT_MSI);
16337 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16338 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16339 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16340 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16341 		     tp->pdev_peer == tp->pdev))
16342 			tg3_flag_clear(tp, SUPPORT_MSI);
16343 
16344 		if (tg3_flag(tp, 5755_PLUS) ||
16345 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16346 			tg3_flag_set(tp, 1SHOT_MSI);
16347 		}
16348 
16349 		if (tg3_flag(tp, 57765_PLUS)) {
16350 			tg3_flag_set(tp, SUPPORT_MSIX);
16351 			tp->irq_max = TG3_IRQ_MAX_VECS;
16352 		}
16353 	}
16354 
16355 	tp->txq_max = 1;
16356 	tp->rxq_max = 1;
16357 	if (tp->irq_max > 1) {
16358 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16359 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16360 
16361 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16362 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16363 			tp->txq_max = tp->irq_max - 1;
16364 	}
16365 
16366 	if (tg3_flag(tp, 5755_PLUS) ||
16367 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16368 		tg3_flag_set(tp, SHORT_DMA_BUG);
16369 
16370 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16371 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16372 
16373 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16374 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16375 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16376 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16377 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16378 
16379 	if (tg3_flag(tp, 57765_PLUS) &&
16380 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16381 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16382 
16383 	if (!tg3_flag(tp, 5705_PLUS) ||
16384 	    tg3_flag(tp, 5780_CLASS) ||
16385 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16386 		tg3_flag_set(tp, JUMBO_CAPABLE);
16387 
16388 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16389 			      &pci_state_reg);
16390 
16391 	if (pci_is_pcie(tp->pdev)) {
16392 		u16 lnkctl;
16393 
16394 		tg3_flag_set(tp, PCI_EXPRESS);
16395 
16396 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16397 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16398 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16399 				tg3_flag_clear(tp, HW_TSO_2);
16400 				tg3_flag_clear(tp, TSO_CAPABLE);
16401 			}
16402 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16403 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16404 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16405 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16406 				tg3_flag_set(tp, CLKREQ_BUG);
16407 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16408 			tg3_flag_set(tp, L1PLLPD_EN);
16409 		}
16410 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16411 		/* BCM5785 devices are effectively PCIe devices, and should
16412 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16413 		 * section.
16414 		 */
16415 		tg3_flag_set(tp, PCI_EXPRESS);
16416 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16417 		   tg3_flag(tp, 5780_CLASS)) {
16418 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16419 		if (!tp->pcix_cap) {
16420 			dev_err(&tp->pdev->dev,
16421 				"Cannot find PCI-X capability, aborting\n");
16422 			return -EIO;
16423 		}
16424 
16425 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16426 			tg3_flag_set(tp, PCIX_MODE);
16427 	}
16428 
16429 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16430 	 * reordering to the mailbox registers done by the host
16431 	 * controller can cause major troubles.  We read back from
16432 	 * every mailbox register write to force the writes to be
16433 	 * posted to the chip in order.
16434 	 */
16435 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16436 	    !tg3_flag(tp, PCI_EXPRESS))
16437 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
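
	/* A sketch of the flush-on-write accessor this flag selects below
	 * (tg3_write_flush_reg32(), defined earlier in this file):
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);
	 *
	 * The read back forces the write to be posted to the chip before
	 * any later access can be reordered ahead of it.
	 */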
16438 
16439 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16440 			     &tp->pci_cacheline_sz);
16441 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16442 			     &tp->pci_lat_timer);
16443 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16444 	    tp->pci_lat_timer < 64) {
16445 		tp->pci_lat_timer = 64;
16446 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16447 				      tp->pci_lat_timer);
16448 	}
16449 
16450 	/* Important! -- It is critical that the PCI-X hw workaround
16451 	 * situation is decided before the first MMIO register access.
16452 	 */
16453 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16454 		/* 5700 BX chips need to have their TX producer index
16455 		 * mailboxes written twice to workaround a bug.
16456 		 */
16457 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16458 
16459 		/* If we are in PCI-X mode, enable register write workaround.
16460 		 *
16461 		 * The workaround is to use indirect register accesses
16462 		 * for all chip writes except those to the mailbox registers.
16463 		 */
16464 		if (tg3_flag(tp, PCIX_MODE)) {
16465 			u32 pm_reg;
16466 
16467 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16468 
16469 			/* The chip can have its power management PCI config
16470 			 * space registers clobbered due to this bug.
16471 			 * So explicitly force the chip into D0 here.
16472 			 */
16473 			pci_read_config_dword(tp->pdev,
16474 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16475 					      &pm_reg);
16476 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16477 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16478 			pci_write_config_dword(tp->pdev,
16479 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16480 					       pm_reg);
16481 
16482 			/* Also, force SERR#/PERR# in PCI command. */
16483 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16484 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16485 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16486 		}
16487 	}
16488 
16489 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16490 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16491 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16492 		tg3_flag_set(tp, PCI_32BIT);
16493 
16494 	/* Chip-specific fixup from Broadcom driver */
16495 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16496 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16497 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16498 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16499 	}
16500 
16501 	/* Default fast path register access methods */
16502 	tp->read32 = tg3_read32;
16503 	tp->write32 = tg3_write32;
16504 	tp->read32_mbox = tg3_read32;
16505 	tp->write32_mbox = tg3_write32;
16506 	tp->write32_tx_mbox = tg3_write32;
16507 	tp->write32_rx_mbox = tg3_write32;
16508 
16509 	/* Various workaround register access methods */
16510 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16511 		tp->write32 = tg3_write_indirect_reg32;
16512 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16513 		 (tg3_flag(tp, PCI_EXPRESS) &&
16514 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16515 		/*
16516 		 * Back to back register writes can cause problems on these
16517 		 * chips; the workaround is to read back all reg writes
16518 		 * except those to mailbox regs.
16519 		 *
16520 		 * See tg3_write_indirect_reg32().
16521 		 */
16522 		tp->write32 = tg3_write_flush_reg32;
16523 	}
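
	/* Indirect accesses bypass MMIO and go through the PCI config
	 * window instead.  A sketch of tg3_write_indirect_reg32() (the
	 * real helper, defined earlier, also takes tp->indirect_lock):
	 *
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	 */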
16524 
16525 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16526 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16527 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16528 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16529 	}
16530 
16531 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16532 		tp->read32 = tg3_read_indirect_reg32;
16533 		tp->write32 = tg3_write_indirect_reg32;
16534 		tp->read32_mbox = tg3_read_indirect_mbox;
16535 		tp->write32_mbox = tg3_write_indirect_mbox;
16536 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16537 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16538 
16539 		iounmap(tp->regs);
16540 		tp->regs = NULL;
16541 
16542 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16543 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16544 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16545 	}
16546 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16547 		tp->read32_mbox = tg3_read32_mbox_5906;
16548 		tp->write32_mbox = tg3_write32_mbox_5906;
16549 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16550 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16551 	}
16552 
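	/* When indirect register writes are in use, or on 5700/5701 in
	 * PCI-X mode, direct MMIO access to the chip's SRAM is unreliable,
	 * so tg3_read_mem()/tg3_write_mem() must go through the PCI
	 * configuration space window instead.
	 */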
16553 	if (tp->write32 == tg3_write_indirect_reg32 ||
16554 	    (tg3_flag(tp, PCIX_MODE) &&
16555 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16556 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16557 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16558 
16559 	/* The memory arbiter has to be enabled in order for SRAM accesses
16560 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16561 	 * sure it is enabled, but other entities such as system netboot
16562 	 * code might disable it.
16563 	 */
16564 	val = tr32(MEMARB_MODE);
16565 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16566 
16567 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16568 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16569 	    tg3_flag(tp, 5780_CLASS)) {
16570 		if (tg3_flag(tp, PCIX_MODE)) {
16571 			pci_read_config_dword(tp->pdev,
16572 					      tp->pcix_cap + PCI_X_STATUS,
16573 					      &val);
16574 			tp->pci_fn = val & 0x7;
16575 		}
16576 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16577 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16578 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16579 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16580 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16581 			val = tr32(TG3_CPMU_STATUS);
16582 
16583 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16584 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16585 		else
16586 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16587 				     TG3_CPMU_STATUS_FSHFT_5719;
16588 	}
16589 
16590 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16591 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16592 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16593 	}
16594 
16595 	/* Get eeprom hw config before calling tg3_set_power_state().
16596 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16597 	 * determined before calling tg3_set_power_state() so that
16598 	 * we know whether or not to switch out of Vaux power.
16599 	 * When the flag is clear, it means that GPIO1 is used for eeprom
16600 	 * write protect and also implies that it is a LOM where GPIOs
16601 	 * are not used to switch power.
16602 	 */
16603 	tg3_get_eeprom_hw_cfg(tp);
16604 
16605 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16606 		tg3_flag_clear(tp, TSO_CAPABLE);
16607 		tg3_flag_clear(tp, TSO_BUG);
16608 		tp->fw_needed = NULL;
16609 	}
16610 
16611 	if (tg3_flag(tp, ENABLE_APE)) {
16612 		/* Allow reads and writes to the
16613 		 * APE register and memory space.
16614 		 */
16615 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16616 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16617 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16618 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16619 				       pci_state_reg);
16620 
16621 		tg3_ape_lock_init(tp);
16622 		tp->ape_hb_interval =
16623 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16624 	}
16625 
16626 	/* Set up tp->grc_local_ctrl before calling
16627 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16628 	 * will bring 5700's external PHY out of reset.
16629 	 * It is also used as eeprom write protect on LOMs.
16630 	 */
16631 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16632 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16633 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16634 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16635 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16636 	/* Unused GPIO3 must be driven as output on 5752 because there
16637 	 * are no pull-up resistors on unused GPIO pins.
16638 	 */
16639 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16640 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16641 
16642 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16643 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16644 	    tg3_flag(tp, 57765_CLASS))
16645 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646 
16647 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16648 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16649 		/* Turn off the debug UART. */
16650 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16651 		if (tg3_flag(tp, IS_NIC))
16652 			/* Keep VMain power. */
16653 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16654 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16655 	}
16656 
16657 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16658 		tp->grc_local_ctrl |=
16659 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16660 
16661 	/* Switch out of Vaux if it is a NIC */
16662 	tg3_pwrsrc_switch_to_vmain(tp);
16663 
16664 	/* Derive initial jumbo mode from MTU assigned in
16665 	 * ether_setup() via the alloc_etherdev() call
16666 	 */
16667 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16668 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16669 
16670 	/* Determine WakeOnLan speed to use. */
16671 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16673 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16674 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16675 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16676 	} else {
16677 		tg3_flag_set(tp, WOL_SPEED_100MB);
16678 	}
16679 
16680 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16681 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16682 
16683 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16684 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16685 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16686 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16687 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16688 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16689 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16690 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16691 
16692 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16693 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16694 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16695 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16696 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16697 
16698 	if (tg3_flag(tp, 5705_PLUS) &&
16699 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16700 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16701 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16702 	    !tg3_flag(tp, 57765_PLUS)) {
16703 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16704 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16705 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16706 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16707 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16708 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16709 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16710 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16711 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16712 		} else
16713 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16714 	}
16715 
16716 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16717 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16718 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16719 		if (tp->phy_otp == 0)
16720 			tp->phy_otp = TG3_OTP_DEFAULT;
16721 	}
16722 
16723 	if (tg3_flag(tp, CPMU_PRESENT))
16724 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16725 	else
16726 		tp->mi_mode = MAC_MI_MODE_BASE;
16727 
16728 	tp->coalesce_mode = 0;
16729 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16730 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16731 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16732 
16733 	/* Set these bits to enable statistics workaround. */
16734 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16735 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16736 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16737 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16738 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16739 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16740 	}
16741 
16742 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16743 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16744 		tg3_flag_set(tp, USE_PHYLIB);
16745 
16746 	err = tg3_mdio_init(tp);
16747 	if (err)
16748 		return err;
16749 
16750 	/* Initialize data/descriptor byte/word swapping. */
16751 	val = tr32(GRC_MODE);
16752 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16753 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16754 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16755 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16756 			GRC_MODE_B2HRX_ENABLE |
16757 			GRC_MODE_HTX2B_ENABLE |
16758 			GRC_MODE_HOST_STACKUP);
16759 	else
16760 		val &= GRC_MODE_HOST_STACKUP;
16761 
16762 	tw32(GRC_MODE, val | tp->grc_mode);
16763 
16764 	tg3_switch_clocks(tp);
16765 
16766 	/* Clear this out for sanity. */
16767 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16768 
16769 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16770 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16771 
16772 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16773 			      &pci_state_reg);
16774 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16775 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16776 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16777 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16778 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16779 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16780 			void __iomem *sram_base;
16781 
16782 			/* Write some dummy words into the SRAM status block
16783 			 * area and see if they read back correctly.  If the
16784 			 * read-back value is bad, force-enable the PCI-X workaround.
16785 			 */
16786 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16787 
16788 			writel(0x00000000, sram_base);
16789 			writel(0x00000000, sram_base + 4);
16790 			writel(0xffffffff, sram_base + 4);
16791 			if (readl(sram_base) != 0x00000000)
16792 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16793 		}
16794 	}
16795 
16796 	udelay(50);
16797 	tg3_nvram_init(tp);
16798 
16799 	/* If the device has an NVRAM, no need to load patch firmware */
16800 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16801 	    !tg3_flag(tp, NO_NVRAM))
16802 		tp->fw_needed = NULL;
16803 
16804 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16805 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16806 
16807 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16808 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16809 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16810 		tg3_flag_set(tp, IS_5788);
16811 
16812 	if (!tg3_flag(tp, IS_5788) &&
16813 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16814 		tg3_flag_set(tp, TAGGED_STATUS);
16815 	if (tg3_flag(tp, TAGGED_STATUS)) {
16816 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16817 				      HOSTCC_MODE_CLRTICK_TXBD);
16818 
16819 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16820 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16821 				       tp->misc_host_ctrl);
16822 	}
16823 
16824 	/* Preserve the APE MAC_MODE bits */
16825 	if (tg3_flag(tp, ENABLE_APE))
16826 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16827 	else
16828 		tp->mac_mode = 0;
16829 
16830 	if (tg3_10_100_only_device(tp, ent))
16831 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16832 
16833 	err = tg3_phy_probe(tp);
16834 	if (err) {
16835 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16836 		/* ... but do not return immediately ... */
16837 		tg3_mdio_fini(tp);
16838 	}
16839 
16840 	tg3_read_vpd(tp);
16841 	tg3_read_fw_ver(tp);
16842 
16843 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16844 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845 	} else {
16846 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16847 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16848 		else
16849 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16850 	}
16851 
16852 	/* 5700 {AX,BX} chips have a broken status block link
16853 	 * change bit implementation, so we must use the
16854 	 * status register in those cases.
16855 	 */
16856 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16857 		tg3_flag_set(tp, USE_LINKCHG_REG);
16858 	else
16859 		tg3_flag_clear(tp, USE_LINKCHG_REG);
16860 
16861 	/* The led_ctrl is set during tg3_phy_probe; here we might
16862 	 * have to force the link status polling mechanism based
16863 	 * upon subsystem IDs.
16864 	 */
16865 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16866 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16867 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16868 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16869 		tg3_flag_set(tp, USE_LINKCHG_REG);
16870 	}
16871 
16872 	/* For all SERDES we poll the MAC status register. */
16873 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16874 		tg3_flag_set(tp, POLL_SERDES);
16875 	else
16876 		tg3_flag_clear(tp, POLL_SERDES);
16877 
16878 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16879 		tg3_flag_set(tp, POLL_CPMU_LINK);
16880 
16881 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16882 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16883 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16884 	    tg3_flag(tp, PCIX_MODE)) {
16885 		tp->rx_offset = NET_SKB_PAD;
16886 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
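		/* Max out the copy threshold so every received packet is
		 * copied into a properly aligned skb when the host cannot
		 * do efficient unaligned loads.
		 */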
16887 		tp->rx_copy_thresh = ~(u16)0;
16888 #endif
16889 	}
16890 
16891 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16892 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16893 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16894 
16895 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16896 
16897 	/* Increment the rx prod index on the rx std ring by at most
16898 	 * 8 for these chips to work around hw errata.
16899 	 */
16900 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16901 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16902 	    tg3_asic_rev(tp) == ASIC_REV_5755)
16903 		tp->rx_std_max_post = 8;
16904 
16905 	if (tg3_flag(tp, ASPM_WORKAROUND))
16906 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16907 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16908 
16909 	return err;
16910 }
16911 
16912 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
16913 {
16914 	u32 hi, lo, mac_offset;
16915 	int addr_ok = 0;
16916 	int err;
16917 
16918 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
16919 		return 0;
16920 
16921 	if (tg3_flag(tp, IS_SSB_CORE)) {
16922 		err = ssb_gige_get_macaddr(tp->pdev, addr);
16923 		if (!err && is_valid_ether_addr(addr))
16924 			return 0;
16925 	}
16926 
16927 	mac_offset = 0x7c;
16928 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16929 	    tg3_flag(tp, 5780_CLASS)) {
16930 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16931 			mac_offset = 0xcc;
16932 		if (tg3_nvram_lock(tp))
16933 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16934 		else
16935 			tg3_nvram_unlock(tp);
16936 	} else if (tg3_flag(tp, 5717_PLUS)) {
16937 		if (tp->pci_fn & 1)
16938 			mac_offset = 0xcc;
16939 		if (tp->pci_fn > 1)
16940 			mac_offset += 0x18c;
16941 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16942 		mac_offset = 0x10;
16943 
16944 	/* First try to get it from MAC address mailbox. */
16945 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
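	/* Bootcode marks a valid mailbox address with the ASCII signature
	 * "HK" (0x48 0x4b) in the upper 16 bits; e.g. hi = 0x484b0011 and
	 * lo = 0x22334455 decode to 00:11:22:33:44:55.
	 */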
16946 	if ((hi >> 16) == 0x484b) {
16947 		addr[0] = (hi >>  8) & 0xff;
16948 		addr[1] = (hi >>  0) & 0xff;
16949 
16950 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16951 		addr[2] = (lo >> 24) & 0xff;
16952 		addr[3] = (lo >> 16) & 0xff;
16953 		addr[4] = (lo >>  8) & 0xff;
16954 		addr[5] = (lo >>  0) & 0xff;
16955 
16956 		/* Some old bootcode may report a 0 MAC address in SRAM */
16957 		addr_ok = is_valid_ether_addr(addr);
16958 	}
16959 	if (!addr_ok) {
16960 		/* Next, try NVRAM. */
16961 		if (!tg3_flag(tp, NO_NVRAM) &&
16962 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16963 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16964 			memcpy(&addr[0], ((char *)&hi) + 2, 2);
16965 			memcpy(&addr[2], (char *)&lo, sizeof(lo));
16966 		}
16967 		/* Finally just fetch it out of the MAC control regs. */
16968 		else {
16969 			hi = tr32(MAC_ADDR_0_HIGH);
16970 			lo = tr32(MAC_ADDR_0_LOW);
16971 
16972 			addr[5] = lo & 0xff;
16973 			addr[4] = (lo >> 8) & 0xff;
16974 			addr[3] = (lo >> 16) & 0xff;
16975 			addr[2] = (lo >> 24) & 0xff;
16976 			addr[1] = hi & 0xff;
16977 			addr[0] = (hi >> 8) & 0xff;
16978 		}
16979 	}
16980 
16981 	if (!is_valid_ether_addr(addr))
16982 		return -EINVAL;
16983 	return 0;
16984 }
16985 
16986 #define BOUNDARY_SINGLE_CACHELINE	1
16987 #define BOUNDARY_MULTI_CACHELINE	2
16988 
16989 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16990 {
16991 	int cacheline_size;
16992 	u8 byte;
16993 	int goal;
16994 
16995 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16996 	if (byte == 0)
16997 		cacheline_size = 1024;
16998 	else
16999 		cacheline_size = (int) byte * 4;
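
	/* PCI_CACHE_LINE_SIZE is expressed in units of 32-bit words, hence
	 * the multiply by four above; a value of zero (never programmed)
	 * falls back to the largest 1024-byte boundary.
	 */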
17000 
17001 	/* On 5703 and later chips, the boundary bits have no
17002 	 * effect.
17003 	 */
17004 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17005 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17006 	    !tg3_flag(tp, PCI_EXPRESS))
17007 		goto out;
17008 
17009 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17010 	goal = BOUNDARY_MULTI_CACHELINE;
17011 #else
17012 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17013 	goal = BOUNDARY_SINGLE_CACHELINE;
17014 #else
17015 	goal = 0;
17016 #endif
17017 #endif
17018 
17019 	if (tg3_flag(tp, 57765_PLUS)) {
17020 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17021 		goto out;
17022 	}
17023 
17024 	if (!goal)
17025 		goto out;
17026 
17027 	/* PCI controllers on most RISC systems tend to disconnect
17028 	 * when a device tries to burst across a cache-line boundary.
17029 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17030 	 *
17031 	 * Unfortunately, for PCI-E there are only limited
17032 	 * write-side controls for this, and thus for reads
17033 	 * we will still get the disconnects.  We'll also waste
17034 	 * these PCI cycles for both read and write for chips
17035 	 * other than 5700 and 5701, which do not implement the
17036 	 * boundary bits.
17037 	 */
17038 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17039 		switch (cacheline_size) {
17040 		case 16:
17041 		case 32:
17042 		case 64:
17043 		case 128:
17044 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17045 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17046 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17047 			} else {
17048 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17049 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17050 			}
17051 			break;
17052 
17053 		case 256:
17054 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17055 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17056 			break;
17057 
17058 		default:
17059 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17060 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17061 			break;
17062 		}
17063 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17064 		switch (cacheline_size) {
17065 		case 16:
17066 		case 32:
17067 		case 64:
17068 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17069 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17070 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17071 				break;
17072 			}
17073 			fallthrough;
17074 		case 128:
17075 		default:
17076 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17077 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17078 			break;
17079 		}
17080 	} else {
17081 		switch (cacheline_size) {
17082 		case 16:
17083 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17084 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17085 					DMA_RWCTRL_WRITE_BNDRY_16);
17086 				break;
17087 			}
17088 			fallthrough;
17089 		case 32:
17090 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17091 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17092 					DMA_RWCTRL_WRITE_BNDRY_32);
17093 				break;
17094 			}
17095 			fallthrough;
17096 		case 64:
17097 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17098 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17099 					DMA_RWCTRL_WRITE_BNDRY_64);
17100 				break;
17101 			}
17102 			fallthrough;
17103 		case 128:
17104 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17105 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17106 					DMA_RWCTRL_WRITE_BNDRY_128);
17107 				break;
17108 			}
17109 			fallthrough;
17110 		case 256:
17111 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17112 				DMA_RWCTRL_WRITE_BNDRY_256);
17113 			break;
17114 		case 512:
17115 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17116 				DMA_RWCTRL_WRITE_BNDRY_512);
17117 			break;
17118 		case 1024:
17119 		default:
17120 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17121 				DMA_RWCTRL_WRITE_BNDRY_1024);
17122 			break;
17123 		}
17124 	}
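
	/* Worked example: with 64-byte cache lines and
	 * goal == BOUNDARY_SINGLE_CACHELINE, the PCI-X branch above selects
	 * the 128-byte read/write boundary bits, while the plain-PCI branch
	 * selects the 64-byte boundaries.
	 */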
17125 
17126 out:
17127 	return val;
17128 }
17129 
17130 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17131 			   int size, bool to_device)
17132 {
17133 	struct tg3_internal_buffer_desc test_desc;
17134 	u32 sram_dma_descs;
17135 	int i, ret;
17136 
17137 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17138 
17139 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17140 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17141 	tw32(RDMAC_STATUS, 0);
17142 	tw32(WDMAC_STATUS, 0);
17143 
17144 	tw32(BUFMGR_MODE, 0);
17145 	tw32(FTQ_RESET, 0);
17146 
17147 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17148 	test_desc.addr_lo = buf_dma & 0xffffffff;
17149 	test_desc.nic_mbuf = 0x00002100;
17150 	test_desc.len = size;
17151 
17152 	/*
17153 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17154 	 * the *second* time the tg3 driver was getting loaded after an
17155 	 * initial scan.
17156 	 *
17157 	 * Broadcom tells me:
17158 	 *   ...the DMA engine is connected to the GRC block and a DMA
17159 	 *   reset may affect the GRC block in some unpredictable way...
17160 	 *   The behavior of resets to individual blocks has not been tested.
17161 	 *
17162 	 * Broadcom noted the GRC reset will also reset all sub-components.
17163 	 */
17164 	if (to_device) {
17165 		test_desc.cqid_sqid = (13 << 8) | 2;
17166 
17167 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17168 		udelay(40);
17169 	} else {
17170 		test_desc.cqid_sqid = (16 << 8) | 7;
17171 
17172 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17173 		udelay(40);
17174 	}
17175 	test_desc.flags = 0x00000005;
17176 
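	/* Poke the descriptor into NIC SRAM one word at a time through the
	 * PCI memory window (the base/data register pair in config space),
	 * then close the window by resetting the base address.
	 */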
17177 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17178 		u32 val;
17179 
17180 		val = *(((u32 *)&test_desc) + i);
17181 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17182 				       sram_dma_descs + (i * sizeof(u32)));
17183 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17184 	}
17185 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17186 
17187 	if (to_device)
17188 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17189 	else
17190 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17191 
17192 	ret = -ENODEV;
17193 	for (i = 0; i < 40; i++) {
17194 		u32 val;
17195 
17196 		if (to_device)
17197 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17198 		else
17199 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17200 		if ((val & 0xffff) == sram_dma_descs) {
17201 			ret = 0;
17202 			break;
17203 		}
17204 
17205 		udelay(100);
17206 	}
17207 
17208 	return ret;
17209 }
17210 
17211 #define TEST_BUFFER_SIZE	0x2000
17212 
17213 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17214 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17215 	{ },
17216 };
17217 
17218 static int tg3_test_dma(struct tg3 *tp)
17219 {
17220 	dma_addr_t buf_dma;
17221 	u32 *buf, saved_dma_rwctrl;
17222 	int ret = 0;
17223 
17224 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17225 				 &buf_dma, GFP_KERNEL);
17226 	if (!buf) {
17227 		ret = -ENOMEM;
17228 		goto out_nofree;
17229 	}
17230 
17231 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17232 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17233 
17234 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17235 
17236 	if (tg3_flag(tp, 57765_PLUS))
17237 		goto out;
17238 
17239 	if (tg3_flag(tp, PCI_EXPRESS)) {
17240 		/* DMA read watermark not used on PCIE */
17241 		tp->dma_rwctrl |= 0x00180000;
17242 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17243 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17244 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17245 			tp->dma_rwctrl |= 0x003f0000;
17246 		else
17247 			tp->dma_rwctrl |= 0x003f000f;
17248 	} else {
17249 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17250 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17251 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17252 			u32 read_water = 0x7;
17253 
17254 			/* If the 5704 is behind the EPB bridge, we can
17255 			 * do the less restrictive ONE_DMA workaround for
17256 			 * better performance.
17257 			 */
17258 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17259 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17260 				tp->dma_rwctrl |= 0x8000;
17261 			else if (ccval == 0x6 || ccval == 0x7)
17262 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17263 
17264 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17265 				read_water = 4;
17266 			/* Set bit 23 to enable PCIX hw bug fix */
17267 			tp->dma_rwctrl |=
17268 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17269 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17270 				(1 << 23);
17271 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17272 			/* 5780 always in PCIX mode */
17273 			tp->dma_rwctrl |= 0x00144000;
17274 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17275 			/* 5714 always in PCIX mode */
17276 			tp->dma_rwctrl |= 0x00148000;
17277 		} else {
17278 			tp->dma_rwctrl |= 0x001b000f;
17279 		}
17280 	}
17281 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17282 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17283 
17284 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17285 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17286 		tp->dma_rwctrl &= 0xfffffff0;
17287 
17288 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17289 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17290 		/* Remove this if it causes problems for some boards. */
17291 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17292 
17293 		/* On 5700/5701 chips, we need to set this bit.
17294 		 * Otherwise the chip will issue cacheline transactions
17295 		 * to streamable DMA memory without all of the byte
17296 		 * enables turned on.  This is an error on several
17297 		 * RISC PCI controllers, in particular sparc64.
17298 		 *
17299 		 * On 5703/5704 chips, this bit has been reassigned
17300 		 * a different meaning.  In particular, it is used
17301 		 * on those chips to enable a PCI-X workaround.
17302 		 */
17303 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17304 	}
17305 
17306 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17307 
17308 
17309 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17310 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17311 		goto out;
17312 
17313 	/* It is best to perform the DMA test with maximum write burst size
17314 	 * to expose the 5700/5701 write DMA bug.
17315 	 */
17316 	saved_dma_rwctrl = tp->dma_rwctrl;
17317 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17318 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17319 
17320 	while (1) {
17321 		u32 *p = buf, i;
17322 
17323 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17324 			p[i] = i;
17325 
17326 		/* Send the buffer to the chip. */
17327 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17328 		if (ret) {
17329 			dev_err(&tp->pdev->dev,
17330 				"%s: Buffer write failed. err = %d\n",
17331 				__func__, ret);
17332 			break;
17333 		}
17334 
17335 		/* Now read it back. */
17336 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17337 		if (ret) {
17338 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17339 				"err = %d\n", __func__, ret);
17340 			break;
17341 		}
17342 
17343 		/* Verify it. */
17344 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17345 			if (p[i] == i)
17346 				continue;
17347 
17348 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17349 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17350 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17351 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17352 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17353 				break;
17354 			} else {
17355 				dev_err(&tp->pdev->dev,
17356 					"%s: Buffer corrupted on read back! "
17357 					"(%d != %d)\n", __func__, p[i], i);
17358 				ret = -ENODEV;
17359 				goto out;
17360 			}
17361 		}
17362 
17363 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17364 			/* Success. */
17365 			ret = 0;
17366 			break;
17367 		}
17368 	}
17369 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17370 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17371 		/* DMA test passed without adjusting the DMA boundary;
17372 		 * now look for chipsets that are known to expose the
17373 		 * DMA bug without failing the test.
17374 		 */
17375 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17376 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17377 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17378 		} else {
17379 			/* Safe to use the calculated DMA boundary. */
17380 			tp->dma_rwctrl = saved_dma_rwctrl;
17381 		}
17382 
17383 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17384 	}
17385 
17386 out:
17387 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17388 out_nofree:
17389 	return ret;
17390 }
17391 
17392 static void tg3_init_bufmgr_config(struct tg3 *tp)
17393 {
17394 	if (tg3_flag(tp, 57765_PLUS)) {
17395 		tp->bufmgr_config.mbuf_read_dma_low_water =
17396 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17397 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17398 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17399 		tp->bufmgr_config.mbuf_high_water =
17400 			DEFAULT_MB_HIGH_WATER_57765;
17401 
17402 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17403 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17404 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17405 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17406 		tp->bufmgr_config.mbuf_high_water_jumbo =
17407 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17408 	} else if (tg3_flag(tp, 5705_PLUS)) {
17409 		tp->bufmgr_config.mbuf_read_dma_low_water =
17410 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17411 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17412 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17413 		tp->bufmgr_config.mbuf_high_water =
17414 			DEFAULT_MB_HIGH_WATER_5705;
17415 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17416 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17417 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17418 			tp->bufmgr_config.mbuf_high_water =
17419 				DEFAULT_MB_HIGH_WATER_5906;
17420 		}
17421 
17422 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17423 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17424 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17425 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17426 		tp->bufmgr_config.mbuf_high_water_jumbo =
17427 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17428 	} else {
17429 		tp->bufmgr_config.mbuf_read_dma_low_water =
17430 			DEFAULT_MB_RDMA_LOW_WATER;
17431 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17432 			DEFAULT_MB_MACRX_LOW_WATER;
17433 		tp->bufmgr_config.mbuf_high_water =
17434 			DEFAULT_MB_HIGH_WATER;
17435 
17436 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17437 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17438 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17439 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17440 		tp->bufmgr_config.mbuf_high_water_jumbo =
17441 			DEFAULT_MB_HIGH_WATER_JUMBO;
17442 	}
17443 
17444 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17445 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17446 }
17447 
17448 static char *tg3_phy_string(struct tg3 *tp)
17449 {
17450 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17451 	case TG3_PHY_ID_BCM5400:	return "5400";
17452 	case TG3_PHY_ID_BCM5401:	return "5401";
17453 	case TG3_PHY_ID_BCM5411:	return "5411";
17454 	case TG3_PHY_ID_BCM5701:	return "5701";
17455 	case TG3_PHY_ID_BCM5703:	return "5703";
17456 	case TG3_PHY_ID_BCM5704:	return "5704";
17457 	case TG3_PHY_ID_BCM5705:	return "5705";
17458 	case TG3_PHY_ID_BCM5750:	return "5750";
17459 	case TG3_PHY_ID_BCM5752:	return "5752";
17460 	case TG3_PHY_ID_BCM5714:	return "5714";
17461 	case TG3_PHY_ID_BCM5780:	return "5780";
17462 	case TG3_PHY_ID_BCM5755:	return "5755";
17463 	case TG3_PHY_ID_BCM5787:	return "5787";
17464 	case TG3_PHY_ID_BCM5784:	return "5784";
17465 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17466 	case TG3_PHY_ID_BCM5906:	return "5906";
17467 	case TG3_PHY_ID_BCM5761:	return "5761";
17468 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17469 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17470 	case TG3_PHY_ID_BCM57765:	return "57765";
17471 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17472 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17473 	case TG3_PHY_ID_BCM5762:	return "5762C";
17474 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17475 	case 0:			return "serdes";
17476 	default:		return "unknown";
17477 	}
17478 }
17479 
17480 static char *tg3_bus_string(struct tg3 *tp, char *str)
17481 {
17482 	if (tg3_flag(tp, PCI_EXPRESS)) {
17483 		strcpy(str, "PCI Express");
17484 		return str;
17485 	} else if (tg3_flag(tp, PCIX_MODE)) {
17486 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17487 
17488 		strcpy(str, "PCIX:");
17489 
17490 		if ((clock_ctrl == 7) ||
17491 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17492 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17493 			strcat(str, "133MHz");
17494 		else if (clock_ctrl == 0)
17495 			strcat(str, "33MHz");
17496 		else if (clock_ctrl == 2)
17497 			strcat(str, "50MHz");
17498 		else if (clock_ctrl == 4)
17499 			strcat(str, "66MHz");
17500 		else if (clock_ctrl == 6)
17501 			strcat(str, "100MHz");
17502 	} else {
17503 		strcpy(str, "PCI:");
17504 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17505 			strcat(str, "66MHz");
17506 		else
17507 			strcat(str, "33MHz");
17508 	}
17509 	if (tg3_flag(tp, PCI_32BIT))
17510 		strcat(str, ":32-bit");
17511 	else
17512 		strcat(str, ":64-bit");
17513 	return str;
17514 }
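
/* A minimal usage sketch for tg3_bus_string(); the caller supplies the
 * scratch buffer, which must be large enough for the longest result
 * (e.g. "PCIX:133MHz:64-bit") -- the probe path uses char str[40].
 * tg3_log_bus_info() is hypothetical and not compiled in.
 */
#if 0
static void tg3_log_bus_info(struct tg3 *tp)
{
	char str[40];

	netdev_info(tp->dev, "bus: %s\n", tg3_bus_string(tp, str));
}
#endif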
17515 
17516 static void tg3_init_coal(struct tg3 *tp)
17517 {
17518 	struct ethtool_coalesce *ec = &tp->coal;
17519 
17520 	memset(ec, 0, sizeof(*ec));
17521 	ec->cmd = ETHTOOL_GCOALESCE;
17522 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17523 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17524 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17525 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17526 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17527 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17528 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17529 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17530 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17531 
17532 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17533 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17534 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17535 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17536 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17537 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17538 	}
17539 
17540 	if (tg3_flag(tp, 5705_PLUS)) {
17541 		ec->rx_coalesce_usecs_irq = 0;
17542 		ec->tx_coalesce_usecs_irq = 0;
17543 		ec->stats_block_coalesce_usecs = 0;
17544 	}
17545 }
17546 
17547 static int tg3_init_one(struct pci_dev *pdev,
17548 				  const struct pci_device_id *ent)
17549 {
17550 	struct net_device *dev;
17551 	struct tg3 *tp;
17552 	int i, err;
17553 	u32 sndmbx, rcvmbx, intmbx;
17554 	char str[40];
17555 	u64 dma_mask, persist_dma_mask;
17556 	netdev_features_t features = 0;
17557 	u8 addr[ETH_ALEN] __aligned(2);
17558 
17559 	err = pci_enable_device(pdev);
17560 	if (err) {
17561 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17562 		return err;
17563 	}
17564 
17565 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17566 	if (err) {
17567 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17568 		goto err_out_disable_pdev;
17569 	}
17570 
17571 	pci_set_master(pdev);
17572 
17573 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17574 	if (!dev) {
17575 		err = -ENOMEM;
17576 		goto err_out_free_res;
17577 	}
17578 
17579 	SET_NETDEV_DEV(dev, &pdev->dev);
17580 
17581 	tp = netdev_priv(dev);
17582 	tp->pdev = pdev;
17583 	tp->dev = dev;
17584 	tp->rx_mode = TG3_DEF_RX_MODE;
17585 	tp->tx_mode = TG3_DEF_TX_MODE;
17586 	tp->irq_sync = 1;
17587 	tp->pcierr_recovery = false;
17588 
17589 	if (tg3_debug > 0)
17590 		tp->msg_enable = tg3_debug;
17591 	else
17592 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17593 
17594 	if (pdev_is_ssb_gige_core(pdev)) {
17595 		tg3_flag_set(tp, IS_SSB_CORE);
17596 		if (ssb_gige_must_flush_posted_writes(pdev))
17597 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17598 		if (ssb_gige_one_dma_at_once(pdev))
17599 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17600 		if (ssb_gige_have_roboswitch(pdev)) {
17601 			tg3_flag_set(tp, USE_PHYLIB);
17602 			tg3_flag_set(tp, ROBOSWITCH);
17603 		}
17604 		if (ssb_gige_is_rgmii(pdev))
17605 			tg3_flag_set(tp, RGMII_MODE);
17606 	}
17607 
17608 	/* The word/byte swap controls here control register access byte
17609 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17610 	 * setting below.
17611 	 */
17612 	tp->misc_host_ctrl =
17613 		MISC_HOST_CTRL_MASK_PCI_INT |
17614 		MISC_HOST_CTRL_WORD_SWAP |
17615 		MISC_HOST_CTRL_INDIR_ACCESS |
17616 		MISC_HOST_CTRL_PCISTATE_RW;
17617 
17618 	/* The NONFRM (non-frame) byte/word swap controls take effect
17619 	 * on descriptor entries, i.e. anything which isn't packet data.
17620 	 *
17621 	 * The StrongARM chips on the board (one for tx, one for rx)
17622 	 * are running in big-endian mode.
17623 	 */
17624 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17625 			GRC_MODE_WSWAP_NONFRM_DATA);
17626 #ifdef __BIG_ENDIAN
17627 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17628 #endif
17629 	spin_lock_init(&tp->lock);
17630 	spin_lock_init(&tp->indirect_lock);
17631 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17632 
17633 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17634 	if (!tp->regs) {
17635 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17636 		err = -ENOMEM;
17637 		goto err_out_free_dev;
17638 	}
17639 
17640 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17641 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17642 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17643 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17644 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17645 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17646 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17647 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17648 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17649 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17650 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17651 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17652 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17653 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17654 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17655 		tg3_flag_set(tp, ENABLE_APE);
17656 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17657 		if (!tp->aperegs) {
17658 			dev_err(&pdev->dev,
17659 				"Cannot map APE registers, aborting\n");
17660 			err = -ENOMEM;
17661 			goto err_out_iounmap;
17662 		}
17663 	}
17664 
17665 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17666 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17667 
17668 	dev->ethtool_ops = &tg3_ethtool_ops;
17669 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17670 	dev->netdev_ops = &tg3_netdev_ops;
17671 	dev->irq = pdev->irq;
17672 
17673 	err = tg3_get_invariants(tp, ent);
17674 	if (err) {
17675 		dev_err(&pdev->dev,
17676 			"Problem fetching invariants of chip, aborting\n");
17677 		goto err_out_apeunmap;
17678 	}
17679 
17680 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17681 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17682 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17683 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17684 	 * do DMA address check in tg3_start_xmit().
17685 	 */
17686 	if (tg3_flag(tp, IS_5788))
17687 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17688 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17689 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17690 #ifdef CONFIG_HIGHMEM
17691 		dma_mask = DMA_BIT_MASK(64);
17692 #endif
17693 	} else
17694 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17695 
17696 	/* Configure DMA attributes. */
17697 	if (dma_mask > DMA_BIT_MASK(32)) {
17698 		err = dma_set_mask(&pdev->dev, dma_mask);
17699 		if (!err) {
17700 			features |= NETIF_F_HIGHDMA;
17701 			err = dma_set_coherent_mask(&pdev->dev,
17702 						    persist_dma_mask);
17703 			if (err < 0) {
17704 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17705 					"DMA for consistent allocations\n");
17706 				goto err_out_apeunmap;
17707 			}
17708 		}
17709 	}
17710 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17711 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17712 		if (err) {
17713 			dev_err(&pdev->dev,
17714 				"No usable DMA configuration, aborting\n");
17715 			goto err_out_apeunmap;
17716 		}
17717 	}
17718 
17719 	tg3_init_bufmgr_config(tp);
17720 
17721 	/* 5700 B0 chips do not support checksumming correctly due
17722 	 * to hardware bugs.
17723 	 */
17724 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17725 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17726 
17727 		if (tg3_flag(tp, 5755_PLUS))
17728 			features |= NETIF_F_IPV6_CSUM;
17729 	}
17730 
17731 	/* TSO is on by default on chips that support hardware TSO.
17732 	 * Firmware TSO on older chips gives lower performance, so it
17733 	 * is off by default, but can be enabled using ethtool.
17734 	 */
17735 	if ((tg3_flag(tp, HW_TSO_1) ||
17736 	     tg3_flag(tp, HW_TSO_2) ||
17737 	     tg3_flag(tp, HW_TSO_3)) &&
17738 	    (features & NETIF_F_IP_CSUM))
17739 		features |= NETIF_F_TSO;
17740 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17741 		if (features & NETIF_F_IPV6_CSUM)
17742 			features |= NETIF_F_TSO6;
17743 		if (tg3_flag(tp, HW_TSO_3) ||
17744 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17745 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17746 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17747 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17748 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17749 			features |= NETIF_F_TSO_ECN;
17750 	}
17751 
17752 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17753 			 NETIF_F_HW_VLAN_CTAG_RX;
17754 	dev->vlan_features |= features;
17755 
17756 	/*
17757 	 * Add loopback capability only for a subset of devices that support
17758 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17759 	 * loopback for the remaining devices.
17760 	 */
17761 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17762 	    !tg3_flag(tp, CPMU_PRESENT))
17763 		/* Add the loopback capability */
17764 		features |= NETIF_F_LOOPBACK;
17765 
17766 	dev->hw_features |= features;
17767 	dev->priv_flags |= IFF_UNICAST_FLT;
17768 
17769 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17770 	dev->min_mtu = TG3_MIN_MTU;
17771 	dev->max_mtu = TG3_MAX_MTU(tp);
17772 
17773 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17774 	    !tg3_flag(tp, TSO_CAPABLE) &&
17775 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17776 		tg3_flag_set(tp, MAX_RXPEND_64);
17777 		tp->rx_pending = 63;
17778 	}
17779 
17780 	err = tg3_get_device_address(tp, addr);
17781 	if (err) {
17782 		dev_err(&pdev->dev,
17783 			"Could not obtain valid ethernet address, aborting\n");
17784 		goto err_out_apeunmap;
17785 	}
17786 	eth_hw_addr_set(dev, addr);
17787 
17788 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17789 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17790 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17791 	for (i = 0; i < tp->irq_max; i++) {
17792 		struct tg3_napi *tnapi = &tp->napi[i];
17793 
17794 		tnapi->tp = tp;
17795 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17796 
17797 		tnapi->int_mbox = intmbx;
17798 		if (i <= 4)
17799 			intmbx += 0x8;
17800 		else
17801 			intmbx += 0x4;
17802 
17803 		tnapi->consmbox = rcvmbx;
17804 		tnapi->prodmbox = sndmbx;
17805 
17806 		if (i)
17807 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17808 		else
17809 			tnapi->coal_now = HOSTCC_MODE_NOW;
17810 
17811 		if (!tg3_flag(tp, SUPPORT_MSIX))
17812 			break;
17813 
17814 		/*
17815 		 * If we support MSIX, we'll be using RSS.  If we're using
17816 		 * RSS, the first vector only handles link interrupts and the
17817 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17818 		 * mailbox values for the next iteration.  The values we setup
17819 		 * mailbox values for the next iteration.  The values we set up
17820 		 * above are still useful for the single-vector mode.
17821 		if (!i)
17822 			continue;
17823 
17824 		rcvmbx += 0x8;
17825 
17826 		if (sndmbx & 0x4)
17827 			sndmbx -= 0x4;
17828 		else
17829 			sndmbx += 0xc;
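
		/* A sketch of the resulting walk, assuming the usual
		 * TG3_64BIT_REG_LOW == 0x4: successive vectors take the low
		 * and then the high 32-bit half of each 64-bit send mailbox
		 * (base + 0x4, base + 0x0, base + 0xc, base + 0x8, ...), so
		 * one 64-bit mailbox register serves two vectors.
		 */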
17830 	}
17831 
17832 	/*
17833 	 * Reset the chip in case a UNDI or EFI driver did not shut it down.
17834 	 * Otherwise the DMA self test will enable WDMAC and we'll see
17835 	 * (spurious) pending DMA on the PCI bus at that point.
17836 	 */
17837 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17838 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17839 		tg3_full_lock(tp, 0);
17840 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17841 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17842 		tg3_full_unlock(tp);
17843 	}
17844 
17845 	err = tg3_test_dma(tp);
17846 	if (err) {
17847 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17848 		goto err_out_apeunmap;
17849 	}
17850 
17851 	tg3_init_coal(tp);
17852 
17853 	pci_set_drvdata(pdev, dev);
17854 
17855 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17856 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17857 	    tg3_asic_rev(tp) == ASIC_REV_5762)
17858 		tg3_flag_set(tp, PTP_CAPABLE);
17859 
17860 	tg3_timer_init(tp);
17861 
17862 	tg3_carrier_off(tp);
17863 
17864 	err = register_netdev(dev);
17865 	if (err) {
17866 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17867 		goto err_out_apeunmap;
17868 	}
17869 
17870 	if (tg3_flag(tp, PTP_CAPABLE)) {
17871 		tg3_ptp_init(tp);
17872 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17873 						   &tp->pdev->dev);
17874 		if (IS_ERR(tp->ptp_clock))
17875 			tp->ptp_clock = NULL;
17876 	}
17877 
17878 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17879 		    tp->board_part_number,
17880 		    tg3_chip_rev_id(tp),
17881 		    tg3_bus_string(tp, str),
17882 		    dev->dev_addr);
17883 
17884 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17885 		char *ethtype;
17886 
17887 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17888 			ethtype = "10/100Base-TX";
17889 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17890 			ethtype = "1000Base-SX";
17891 		else
17892 			ethtype = "10/100/1000Base-T";
17893 
17894 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17895 			    "(WireSpeed[%d], EEE[%d])\n",
17896 			    tg3_phy_string(tp), ethtype,
17897 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17898 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17899 	}
17900 
17901 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17902 		    (dev->features & NETIF_F_RXCSUM) != 0,
17903 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17904 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17905 		    tg3_flag(tp, ENABLE_ASF) != 0,
17906 		    tg3_flag(tp, TSO_CAPABLE) != 0);
17907 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17908 		    tp->dma_rwctrl,
17909 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17910 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17911 
17912 	pci_save_state(pdev);
17913 
17914 	return 0;
17915 
17916 err_out_apeunmap:
17917 	if (tp->aperegs) {
17918 		iounmap(tp->aperegs);
17919 		tp->aperegs = NULL;
17920 	}
17921 
17922 err_out_iounmap:
17923 	if (tp->regs) {
17924 		iounmap(tp->regs);
17925 		tp->regs = NULL;
17926 	}
17927 
17928 err_out_free_dev:
17929 	free_netdev(dev);
17930 
17931 err_out_free_res:
17932 	pci_release_regions(pdev);
17933 
17934 err_out_disable_pdev:
17935 	if (pci_is_enabled(pdev))
17936 		pci_disable_device(pdev);
17937 	return err;
17938 }
17939 
17940 static void tg3_remove_one(struct pci_dev *pdev)
17941 {
17942 	struct net_device *dev = pci_get_drvdata(pdev);
17943 
17944 	if (dev) {
17945 		struct tg3 *tp = netdev_priv(dev);
17946 
		tg3_ptp_fini(tp);

		release_firmware(tp->fw);

		tg3_reset_task_cancel(tp);

		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}

		unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

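	/*
	 * If preparing for power-down fails, restart the hardware so
	 * the device is left in a usable state; this mirrors what the
	 * resume path does.
	 */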
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;

		tg3_timer_start(tp);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

unlock:
	rtnl_unlock();
	return err;
}

static int tg3_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	rtnl_lock();

	if (!netif_running(dev))
		goto unlock;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp,
			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
	if (err)
		goto out;

	tg3_timer_start(tp);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

unlock:
	rtnl_unlock();
	return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
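
/*
 * A rough sketch of what the macro above generates (the exact kernel
 * definition lives in <linux/pm.h>): the same suspend/resume pair is
 * reused for all system-sleep transitions, e.g.
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend	= tg3_suspend,
 *		.resume		= tg3_resume,
 *		.freeze		= tg3_suspend,
 *		.thaw		= tg3_resume,
 *		.poweroff	= tg3_suspend,
 *		.restore	= tg3_resume,
 *	};
 */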

static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	tg3_reset_task_cancel(tp);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		dev_close(dev);

	tg3_power_down(tp);

	rtnl_unlock();

	pci_disable_device(pdev);
}

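/*
 * PCI error recovery.  The PCI error-recovery core drives the callbacks
 * below in sequence: ->error_detected() runs first, then (after the
 * slot has been reset) ->slot_reset(), and finally ->resume() once
 * traffic may flow again.  The optional ->mmio_enabled() step is not
 * implemented by this driver.
 */
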
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* Make sure the reset task does not run concurrently */
	tg3_reset_task_cancel(tp);

	rtnl_lock();

	/* This could be a second call, or the netdev may not exist yet */
	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
		goto done;

	/* Only mark recovery in progress for recoverable (frozen)
	 * errors; permanent failures are handled in the done path.
	 */
	if (state == pci_channel_io_frozen)
		tp->pcierr_recovery = true;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_timer_stop(tp);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by the BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
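	/*
	 * Restore the config space snapshot taken at probe time, then
	 * save it again right away so a subsequent reset can be
	 * recovered from the same known-good state.
	 */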
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it is OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tg3_timer_start(tp);

	tg3_netif_start(tp);

	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
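	/* Recovery is complete (or was never attempted); allow resets again */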
	tp->pcierr_recovery = false;
	rtnl_unlock();
}

static const struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= tg3_remove_one,
	.err_handler	= &tg3_err_handler,
	.driver.pm	= &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};

module_pci_driver(tg3_driver);
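
/*
 * module_pci_driver() replaces the usual module init/exit boilerplate.
 * Roughly (a sketch; the exact definition is in <linux/pci.h>):
 *
 *	static int __init tg3_driver_init(void)
 *	{
 *		return pci_register_driver(&tg3_driver);
 *	}
 *	module_init(tg3_driver_init);
 *
 *	static void __exit tg3_driver_exit(void)
 *	{
 *		pci_unregister_driver(&tg3_driver);
 *	}
 *	module_exit(tg3_driver_exit);
 */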