xref: /linux/drivers/net/ethernet/broadcom/tg3.c (revision df2e3152f1cb798ed8ffa7e488c50261e6dc50e3)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 
59 #include <net/checksum.h>
60 #include <net/gso.h>
61 #include <net/ip.h>
62 
63 #include <linux/io.h>
64 #include <asm/byteorder.h>
65 #include <linux/uaccess.h>
66 
67 #include <uapi/linux/net_tstamp.h>
68 #include <linux/ptp_clock_kernel.h>
69 
70 #define BAR_0	0
71 #define BAR_2	2
72 
73 #include "tg3.h"
74 
75 /* Functions & macros to verify TG3_FLAGS types */
76 
77 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79 	return test_bit(flag, bits);
80 }
81 
82 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84 	set_bit(flag, bits);
85 }
86 
87 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 {
89 	clear_bit(flag, bits);
90 }
91 
92 #define tg3_flag(tp, flag)				\
93 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define tg3_flag_set(tp, flag)				\
95 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
96 #define tg3_flag_clear(tp, flag)			\
97 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
98 
99 #define DRV_MODULE_NAME		"tg3"
100 /* DO NOT UPDATE TG3_*_NUM defines */
101 #define TG3_MAJ_NUM			3
102 #define TG3_MIN_NUM			137
103 
104 #define RESET_KIND_SHUTDOWN	0
105 #define RESET_KIND_INIT		1
106 #define RESET_KIND_SUSPEND	2
107 
108 #define TG3_DEF_RX_MODE		0
109 #define TG3_DEF_TX_MODE		0
110 #define TG3_DEF_MSG_ENABLE	  \
111 	(NETIF_MSG_DRV		| \
112 	 NETIF_MSG_PROBE	| \
113 	 NETIF_MSG_LINK		| \
114 	 NETIF_MSG_TIMER	| \
115 	 NETIF_MSG_IFDOWN	| \
116 	 NETIF_MSG_IFUP		| \
117 	 NETIF_MSG_RX_ERR	| \
118 	 NETIF_MSG_TX_ERR)
119 
120 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
121 
122 /* length of time before we decide the hardware is borked,
123  * and dev->tx_timeout() should be called to fix the problem
124  */
125 
126 #define TG3_TX_TIMEOUT			(5 * HZ)
127 
128 /* hardware minimum and maximum for a single frame's data payload */
129 #define TG3_MIN_MTU			ETH_ZLEN
130 #define TG3_MAX_MTU(tp)	\
131 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
132 
133 /* These numbers seem to be hard coded in the NIC firmware somehow.
134  * You can't change the ring sizes, but you can change where you place
135  * them in the NIC onboard memory.
136  */
137 #define TG3_RX_STD_RING_SIZE(tp) \
138 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
140 #define TG3_DEF_RX_RING_PENDING		200
141 #define TG3_RX_JMB_RING_SIZE(tp) \
142 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
143 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
144 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
145 
146 /* Do not place this n-ring entries value into the tp struct itself,
147  * we really want to expose these constants to GCC so that modulo et
148  * al.  operations are done with shifts and masks instead of with
149  * hw multiply/modulo instructions.  Another solution would be to
150  * replace things like '% foo' with '& (foo - 1)'.
151  */
152 
153 #define TG3_TX_RING_SIZE		512
154 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
155 
156 #define TG3_RX_STD_RING_BYTES(tp) \
157 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
158 #define TG3_RX_JMB_RING_BYTES(tp) \
159 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
160 #define TG3_RX_RCB_RING_BYTES(tp) \
161 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
162 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
163 				 TG3_TX_RING_SIZE)
164 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
165 
166 #define TG3_DMA_BYTE_ENAB		64
167 
168 #define TG3_RX_STD_DMA_SZ		1536
169 #define TG3_RX_JMB_DMA_SZ		9046
170 
171 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
172 
173 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
174 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
175 
176 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
177 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
178 
179 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
180 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
181 
182 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
183  * that are at least dword aligned when used in PCIX mode.  The driver
184  * works around this bug by double copying the packet.  This workaround
185  * is built into the normal double copy length check for efficiency.
186  *
187  * However, the double copy is only necessary on those architectures
188  * where unaligned memory accesses are inefficient.  For those architectures
189  * where unaligned memory accesses incur little penalty, we can reintegrate
190  * the 5701 in the normal rx path.  Doing so saves a device structure
191  * dereference by hardcoding the double copy threshold in place.
192  */
193 #define TG3_RX_COPY_THRESHOLD		256
194 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
195 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
196 #else
197 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
198 #endif
199 
200 #if (NET_IP_ALIGN != 0)
201 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
202 #else
203 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
204 #endif
205 
206 /* minimum number of free TX descriptors required to wake up TX process */
207 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
208 #define TG3_TX_BD_DMA_MAX_2K		2048
209 #define TG3_TX_BD_DMA_MAX_4K		4096
210 
211 #define TG3_RAW_IP_ALIGN 2
212 
213 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
214 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
215 
216 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
217 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
218 
219 #define FIRMWARE_TG3		"tigon/tg3.bin"
220 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
221 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
222 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
223 
224 MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
225 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
226 MODULE_LICENSE("GPL");
227 MODULE_FIRMWARE(FIRMWARE_TG3);
228 MODULE_FIRMWARE(FIRMWARE_TG357766);
229 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
231 
232 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
233 module_param(tg3_debug, int, 0);
234 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
235 
236 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
237 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
238 
239 static const struct pci_device_id tg3_pci_tbl[] = {
240 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
259 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 			TG3_DRV_DATA_FLAG_5705_10_100},
261 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
262 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 			TG3_DRV_DATA_FLAG_5705_10_100},
264 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
266 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 			TG3_DRV_DATA_FLAG_5705_10_100},
268 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
274 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
275 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
280 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
288 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
289 			PCI_VENDOR_ID_LENOVO,
290 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
291 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
294 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
295 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
313 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
315 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
317 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
318 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
322 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
332 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
334 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
354 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
355 	{}
356 };
357 
358 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
359 
360 static const struct {
361 	const char string[ETH_GSTRING_LEN];
362 } ethtool_stats_keys[] = {
363 	{ "rx_octets" },
364 	{ "rx_fragments" },
365 	{ "rx_ucast_packets" },
366 	{ "rx_mcast_packets" },
367 	{ "rx_bcast_packets" },
368 	{ "rx_fcs_errors" },
369 	{ "rx_align_errors" },
370 	{ "rx_xon_pause_rcvd" },
371 	{ "rx_xoff_pause_rcvd" },
372 	{ "rx_mac_ctrl_rcvd" },
373 	{ "rx_xoff_entered" },
374 	{ "rx_frame_too_long_errors" },
375 	{ "rx_jabbers" },
376 	{ "rx_undersize_packets" },
377 	{ "rx_in_length_errors" },
378 	{ "rx_out_length_errors" },
379 	{ "rx_64_or_less_octet_packets" },
380 	{ "rx_65_to_127_octet_packets" },
381 	{ "rx_128_to_255_octet_packets" },
382 	{ "rx_256_to_511_octet_packets" },
383 	{ "rx_512_to_1023_octet_packets" },
384 	{ "rx_1024_to_1522_octet_packets" },
385 	{ "rx_1523_to_2047_octet_packets" },
386 	{ "rx_2048_to_4095_octet_packets" },
387 	{ "rx_4096_to_8191_octet_packets" },
388 	{ "rx_8192_to_9022_octet_packets" },
389 
390 	{ "tx_octets" },
391 	{ "tx_collisions" },
392 
393 	{ "tx_xon_sent" },
394 	{ "tx_xoff_sent" },
395 	{ "tx_flow_control" },
396 	{ "tx_mac_errors" },
397 	{ "tx_single_collisions" },
398 	{ "tx_mult_collisions" },
399 	{ "tx_deferred" },
400 	{ "tx_excessive_collisions" },
401 	{ "tx_late_collisions" },
402 	{ "tx_collide_2times" },
403 	{ "tx_collide_3times" },
404 	{ "tx_collide_4times" },
405 	{ "tx_collide_5times" },
406 	{ "tx_collide_6times" },
407 	{ "tx_collide_7times" },
408 	{ "tx_collide_8times" },
409 	{ "tx_collide_9times" },
410 	{ "tx_collide_10times" },
411 	{ "tx_collide_11times" },
412 	{ "tx_collide_12times" },
413 	{ "tx_collide_13times" },
414 	{ "tx_collide_14times" },
415 	{ "tx_collide_15times" },
416 	{ "tx_ucast_packets" },
417 	{ "tx_mcast_packets" },
418 	{ "tx_bcast_packets" },
419 	{ "tx_carrier_sense_errors" },
420 	{ "tx_discards" },
421 	{ "tx_errors" },
422 
423 	{ "dma_writeq_full" },
424 	{ "dma_write_prioq_full" },
425 	{ "rxbds_empty" },
426 	{ "rx_discards" },
427 	{ "rx_errors" },
428 	{ "rx_threshold_hit" },
429 
430 	{ "dma_readq_full" },
431 	{ "dma_read_prioq_full" },
432 	{ "tx_comp_queue_full" },
433 
434 	{ "ring_set_send_prod_index" },
435 	{ "ring_status_update" },
436 	{ "nic_irqs" },
437 	{ "nic_avoided_irqs" },
438 	{ "nic_tx_threshold_hit" },
439 
440 	{ "mbuf_lwm_thresh_hit" },
441 };
442 
443 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
444 #define TG3_NVRAM_TEST		0
445 #define TG3_LINK_TEST		1
446 #define TG3_REGISTER_TEST	2
447 #define TG3_MEMORY_TEST		3
448 #define TG3_MAC_LOOPB_TEST	4
449 #define TG3_PHY_LOOPB_TEST	5
450 #define TG3_EXT_LOOPB_TEST	6
451 #define TG3_INTERRUPT_TEST	7
452 
453 
454 static const struct {
455 	const char string[ETH_GSTRING_LEN];
456 } ethtool_test_keys[] = {
457 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
458 	[TG3_LINK_TEST]		= { "link test         (online) " },
459 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
460 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
461 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
462 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
463 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
464 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
465 };
466 
467 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
468 
469 
470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
471 {
472 	writel(val, tp->regs + off);
473 }
474 
475 static u32 tg3_read32(struct tg3 *tp, u32 off)
476 {
477 	return readl(tp->regs + off);
478 }
479 
480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
481 {
482 	writel(val, tp->aperegs + off);
483 }
484 
485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
486 {
487 	return readl(tp->aperegs + off);
488 }
489 
490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
491 {
492 	unsigned long flags;
493 
494 	spin_lock_irqsave(&tp->indirect_lock, flags);
495 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
496 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
497 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
498 }
499 
500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
501 {
502 	writel(val, tp->regs + off);
503 	readl(tp->regs + off);
504 }
505 
506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
507 {
508 	unsigned long flags;
509 	u32 val;
510 
511 	spin_lock_irqsave(&tp->indirect_lock, flags);
512 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
513 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
514 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 	return val;
516 }
517 
518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
519 {
520 	unsigned long flags;
521 
522 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
523 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
524 				       TG3_64BIT_REG_LOW, val);
525 		return;
526 	}
527 	if (off == TG3_RX_STD_PROD_IDX_REG) {
528 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
529 				       TG3_64BIT_REG_LOW, val);
530 		return;
531 	}
532 
533 	spin_lock_irqsave(&tp->indirect_lock, flags);
534 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
535 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
536 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
537 
538 	/* In indirect mode when disabling interrupts, we also need
539 	 * to clear the interrupt bit in the GRC local ctrl register.
540 	 */
541 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
542 	    (val == 0x1)) {
543 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
544 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
545 	}
546 }
547 
548 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
549 {
550 	unsigned long flags;
551 	u32 val;
552 
553 	spin_lock_irqsave(&tp->indirect_lock, flags);
554 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
555 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
556 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 	return val;
558 }
559 
560 /* usec_wait specifies the wait time in usec when writing to certain registers
561  * where it is unsafe to read back the register without some delay.
562  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
563  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
564  */
565 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
566 {
567 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
568 		/* Non-posted methods */
569 		tp->write32(tp, off, val);
570 	else {
571 		/* Posted method */
572 		tg3_write32(tp, off, val);
573 		if (usec_wait)
574 			udelay(usec_wait);
575 		tp->read32(tp, off);
576 	}
577 	/* Wait again after the read for the posted method to guarantee that
578 	 * the wait time is met.
579 	 */
580 	if (usec_wait)
581 		udelay(usec_wait);
582 }
583 
584 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
585 {
586 	tp->write32_mbox(tp, off, val);
587 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
588 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
589 	     !tg3_flag(tp, ICH_WORKAROUND)))
590 		tp->read32_mbox(tp, off);
591 }
592 
593 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
594 {
595 	void __iomem *mbox = tp->regs + off;
596 	writel(val, mbox);
597 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
598 		writel(val, mbox);
599 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
600 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
601 		readl(mbox);
602 }
603 
604 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
605 {
606 	return readl(tp->regs + off + GRCMBOX_BASE);
607 }
608 
609 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
610 {
611 	writel(val, tp->regs + off + GRCMBOX_BASE);
612 }
613 
614 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
615 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
616 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
617 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
618 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
619 
620 #define tw32(reg, val)			tp->write32(tp, reg, val)
621 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
622 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
623 #define tr32(reg)			tp->read32(tp, reg)
624 
625 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
626 {
627 	unsigned long flags;
628 
629 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
630 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
631 		return;
632 
633 	spin_lock_irqsave(&tp->indirect_lock, flags);
634 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
635 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
636 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
637 
638 		/* Always leave this as zero. */
639 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 	} else {
641 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
642 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
643 
644 		/* Always leave this as zero. */
645 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
646 	}
647 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
648 }
649 
650 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
651 {
652 	unsigned long flags;
653 
654 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
655 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
656 		*val = 0;
657 		return;
658 	}
659 
660 	spin_lock_irqsave(&tp->indirect_lock, flags);
661 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
662 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
663 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
664 
665 		/* Always leave this as zero. */
666 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 	} else {
668 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
669 		*val = tr32(TG3PCI_MEM_WIN_DATA);
670 
671 		/* Always leave this as zero. */
672 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
673 	}
674 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
675 }
676 
677 static void tg3_ape_lock_init(struct tg3 *tp)
678 {
679 	int i;
680 	u32 regbase, bit;
681 
682 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
683 		regbase = TG3_APE_LOCK_GRANT;
684 	else
685 		regbase = TG3_APE_PER_LOCK_GRANT;
686 
687 	/* Make sure the driver hasn't any stale locks. */
688 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
689 		switch (i) {
690 		case TG3_APE_LOCK_PHY0:
691 		case TG3_APE_LOCK_PHY1:
692 		case TG3_APE_LOCK_PHY2:
693 		case TG3_APE_LOCK_PHY3:
694 			bit = APE_LOCK_GRANT_DRIVER;
695 			break;
696 		default:
697 			if (!tp->pci_fn)
698 				bit = APE_LOCK_GRANT_DRIVER;
699 			else
700 				bit = 1 << tp->pci_fn;
701 		}
702 		tg3_ape_write32(tp, regbase + 4 * i, bit);
703 	}
704 
705 }
706 
707 static int tg3_ape_lock(struct tg3 *tp, int locknum)
708 {
709 	int i, off;
710 	int ret = 0;
711 	u32 status, req, gnt, bit;
712 
713 	if (!tg3_flag(tp, ENABLE_APE))
714 		return 0;
715 
716 	switch (locknum) {
717 	case TG3_APE_LOCK_GPIO:
718 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
719 			return 0;
720 		fallthrough;
721 	case TG3_APE_LOCK_GRC:
722 	case TG3_APE_LOCK_MEM:
723 		if (!tp->pci_fn)
724 			bit = APE_LOCK_REQ_DRIVER;
725 		else
726 			bit = 1 << tp->pci_fn;
727 		break;
728 	case TG3_APE_LOCK_PHY0:
729 	case TG3_APE_LOCK_PHY1:
730 	case TG3_APE_LOCK_PHY2:
731 	case TG3_APE_LOCK_PHY3:
732 		bit = APE_LOCK_REQ_DRIVER;
733 		break;
734 	default:
735 		return -EINVAL;
736 	}
737 
738 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739 		req = TG3_APE_LOCK_REQ;
740 		gnt = TG3_APE_LOCK_GRANT;
741 	} else {
742 		req = TG3_APE_PER_LOCK_REQ;
743 		gnt = TG3_APE_PER_LOCK_GRANT;
744 	}
745 
746 	off = 4 * locknum;
747 
748 	tg3_ape_write32(tp, req + off, bit);
749 
750 	/* Wait for up to 1 millisecond to acquire lock. */
751 	for (i = 0; i < 100; i++) {
752 		status = tg3_ape_read32(tp, gnt + off);
753 		if (status == bit)
754 			break;
755 		if (pci_channel_offline(tp->pdev))
756 			break;
757 
758 		udelay(10);
759 	}
760 
761 	if (status != bit) {
762 		/* Revoke the lock request. */
763 		tg3_ape_write32(tp, gnt + off, bit);
764 		ret = -EBUSY;
765 	}
766 
767 	return ret;
768 }
769 
770 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771 {
772 	u32 gnt, bit;
773 
774 	if (!tg3_flag(tp, ENABLE_APE))
775 		return;
776 
777 	switch (locknum) {
778 	case TG3_APE_LOCK_GPIO:
779 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
780 			return;
781 		fallthrough;
782 	case TG3_APE_LOCK_GRC:
783 	case TG3_APE_LOCK_MEM:
784 		if (!tp->pci_fn)
785 			bit = APE_LOCK_GRANT_DRIVER;
786 		else
787 			bit = 1 << tp->pci_fn;
788 		break;
789 	case TG3_APE_LOCK_PHY0:
790 	case TG3_APE_LOCK_PHY1:
791 	case TG3_APE_LOCK_PHY2:
792 	case TG3_APE_LOCK_PHY3:
793 		bit = APE_LOCK_GRANT_DRIVER;
794 		break;
795 	default:
796 		return;
797 	}
798 
799 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
800 		gnt = TG3_APE_LOCK_GRANT;
801 	else
802 		gnt = TG3_APE_PER_LOCK_GRANT;
803 
804 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
805 }
806 
807 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
808 {
809 	u32 apedata;
810 
811 	while (timeout_us) {
812 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
813 			return -EBUSY;
814 
815 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
816 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817 			break;
818 
819 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
820 
821 		udelay(10);
822 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
823 	}
824 
825 	return timeout_us ? 0 : -EBUSY;
826 }
827 
828 #ifdef CONFIG_TIGON3_HWMON
829 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
830 {
831 	u32 i, apedata;
832 
833 	for (i = 0; i < timeout_us / 10; i++) {
834 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
835 
836 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
837 			break;
838 
839 		udelay(10);
840 	}
841 
842 	return i == timeout_us / 10;
843 }
844 
845 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
846 				   u32 len)
847 {
848 	int err;
849 	u32 i, bufoff, msgoff, maxlen, apedata;
850 
851 	if (!tg3_flag(tp, APE_HAS_NCSI))
852 		return 0;
853 
854 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 	if (apedata != APE_SEG_SIG_MAGIC)
856 		return -ENODEV;
857 
858 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 	if (!(apedata & APE_FW_STATUS_READY))
860 		return -EAGAIN;
861 
862 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
863 		 TG3_APE_SHMEM_BASE;
864 	msgoff = bufoff + 2 * sizeof(u32);
865 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
866 
867 	while (len) {
868 		u32 length;
869 
870 		/* Cap xfer sizes to scratchpad limits. */
871 		length = (len > maxlen) ? maxlen : len;
872 		len -= length;
873 
874 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
875 		if (!(apedata & APE_FW_STATUS_READY))
876 			return -EAGAIN;
877 
878 		/* Wait for up to 1 msec for APE to service previous event. */
879 		err = tg3_ape_event_lock(tp, 1000);
880 		if (err)
881 			return err;
882 
883 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
884 			  APE_EVENT_STATUS_SCRTCHPD_READ |
885 			  APE_EVENT_STATUS_EVENT_PENDING;
886 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
887 
888 		tg3_ape_write32(tp, bufoff, base_off);
889 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
890 
891 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
892 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
893 
894 		base_off += length;
895 
896 		if (tg3_ape_wait_for_event(tp, 30000))
897 			return -EAGAIN;
898 
899 		for (i = 0; length; i += 4, length -= 4) {
900 			u32 val = tg3_ape_read32(tp, msgoff + i);
901 			memcpy(data, &val, sizeof(u32));
902 			data++;
903 		}
904 	}
905 
906 	return 0;
907 }
908 #endif
909 
910 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
911 {
912 	int err;
913 	u32 apedata;
914 
915 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
916 	if (apedata != APE_SEG_SIG_MAGIC)
917 		return -EAGAIN;
918 
919 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
920 	if (!(apedata & APE_FW_STATUS_READY))
921 		return -EAGAIN;
922 
923 	/* Wait for up to 20 millisecond for APE to service previous event. */
924 	err = tg3_ape_event_lock(tp, 20000);
925 	if (err)
926 		return err;
927 
928 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
929 			event | APE_EVENT_STATUS_EVENT_PENDING);
930 
931 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
932 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
933 
934 	return 0;
935 }
936 
937 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
938 {
939 	u32 event;
940 	u32 apedata;
941 
942 	if (!tg3_flag(tp, ENABLE_APE))
943 		return;
944 
945 	switch (kind) {
946 	case RESET_KIND_INIT:
947 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
948 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
949 				APE_HOST_SEG_SIG_MAGIC);
950 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
951 				APE_HOST_SEG_LEN_MAGIC);
952 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
953 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
954 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
955 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
956 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
957 				APE_HOST_BEHAV_NO_PHYLOCK);
958 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
959 				    TG3_APE_HOST_DRVR_STATE_START);
960 
961 		event = APE_EVENT_STATUS_STATE_START;
962 		break;
963 	case RESET_KIND_SHUTDOWN:
964 		if (device_may_wakeup(&tp->pdev->dev) &&
965 		    tg3_flag(tp, WOL_ENABLE)) {
966 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
967 					    TG3_APE_HOST_WOL_SPEED_AUTO);
968 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
969 		} else
970 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
971 
972 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
973 
974 		event = APE_EVENT_STATUS_STATE_UNLOAD;
975 		break;
976 	default:
977 		return;
978 	}
979 
980 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
981 
982 	tg3_ape_send_event(tp, event);
983 }
984 
985 static void tg3_send_ape_heartbeat(struct tg3 *tp,
986 				   unsigned long interval)
987 {
988 	/* Check if hb interval has exceeded */
989 	if (!tg3_flag(tp, ENABLE_APE) ||
990 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
991 		return;
992 
993 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
994 	tp->ape_hb_jiffies = jiffies;
995 }
996 
997 static void tg3_disable_ints(struct tg3 *tp)
998 {
999 	int i;
1000 
1001 	tw32(TG3PCI_MISC_HOST_CTRL,
1002 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1003 	for (i = 0; i < tp->irq_max; i++)
1004 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1005 }
1006 
1007 static void tg3_enable_ints(struct tg3 *tp)
1008 {
1009 	int i;
1010 
1011 	tp->irq_sync = 0;
1012 	wmb();
1013 
1014 	tw32(TG3PCI_MISC_HOST_CTRL,
1015 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1016 
1017 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1018 	for (i = 0; i < tp->irq_cnt; i++) {
1019 		struct tg3_napi *tnapi = &tp->napi[i];
1020 
1021 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 		if (tg3_flag(tp, 1SHOT_MSI))
1023 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024 
1025 		tp->coal_now |= tnapi->coal_now;
1026 	}
1027 
1028 	/* Force an initial interrupt */
1029 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1030 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1031 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1032 	else
1033 		tw32(HOSTCC_MODE, tp->coal_now);
1034 
1035 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1036 }
1037 
1038 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1039 {
1040 	struct tg3 *tp = tnapi->tp;
1041 	struct tg3_hw_status *sblk = tnapi->hw_status;
1042 	unsigned int work_exists = 0;
1043 
1044 	/* check for phy events */
1045 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1046 		if (sblk->status & SD_STATUS_LINK_CHG)
1047 			work_exists = 1;
1048 	}
1049 
1050 	/* check for TX work to do */
1051 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1052 		work_exists = 1;
1053 
1054 	/* check for RX work to do */
1055 	if (tnapi->rx_rcb_prod_idx &&
1056 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1057 		work_exists = 1;
1058 
1059 	return work_exists;
1060 }
1061 
1062 /* tg3_int_reenable
1063  *  similar to tg3_enable_ints, but it accurately determines whether there
1064  *  is new work pending and can return without flushing the PIO write
1065  *  which reenables interrupts
1066  */
1067 static void tg3_int_reenable(struct tg3_napi *tnapi)
1068 {
1069 	struct tg3 *tp = tnapi->tp;
1070 
1071 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1072 
1073 	/* When doing tagged status, this work check is unnecessary.
1074 	 * The last_tag we write above tells the chip which piece of
1075 	 * work we've completed.
1076 	 */
1077 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1078 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1079 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1080 }
1081 
1082 static void tg3_switch_clocks(struct tg3 *tp)
1083 {
1084 	u32 clock_ctrl;
1085 	u32 orig_clock_ctrl;
1086 
1087 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1088 		return;
1089 
1090 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1091 
1092 	orig_clock_ctrl = clock_ctrl;
1093 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1094 		       CLOCK_CTRL_CLKRUN_OENABLE |
1095 		       0x1f);
1096 	tp->pci_clock_ctrl = clock_ctrl;
1097 
1098 	if (tg3_flag(tp, 5705_PLUS)) {
1099 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1100 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1102 		}
1103 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1104 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 			    clock_ctrl |
1106 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1107 			    40);
1108 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1110 			    40);
1111 	}
1112 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1113 }
1114 
1115 #define PHY_BUSY_LOOPS	5000
1116 
1117 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1118 			 u32 *val)
1119 {
1120 	u32 frame_val;
1121 	unsigned int loops;
1122 	int ret;
1123 
1124 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1125 		tw32_f(MAC_MI_MODE,
1126 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1127 		udelay(80);
1128 	}
1129 
1130 	tg3_ape_lock(tp, tp->phy_ape_lock);
1131 
1132 	*val = 0x0;
1133 
1134 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1135 		      MI_COM_PHY_ADDR_MASK);
1136 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1137 		      MI_COM_REG_ADDR_MASK);
1138 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1139 
1140 	tw32_f(MAC_MI_COM, frame_val);
1141 
1142 	loops = PHY_BUSY_LOOPS;
1143 	while (loops != 0) {
1144 		udelay(10);
1145 		frame_val = tr32(MAC_MI_COM);
1146 
1147 		if ((frame_val & MI_COM_BUSY) == 0) {
1148 			udelay(5);
1149 			frame_val = tr32(MAC_MI_COM);
1150 			break;
1151 		}
1152 		loops -= 1;
1153 	}
1154 
1155 	ret = -EBUSY;
1156 	if (loops != 0) {
1157 		*val = frame_val & MI_COM_DATA_MASK;
1158 		ret = 0;
1159 	}
1160 
1161 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1162 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1163 		udelay(80);
1164 	}
1165 
1166 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1167 
1168 	return ret;
1169 }
1170 
1171 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1172 {
1173 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1174 }
1175 
1176 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1177 			  u32 val)
1178 {
1179 	u32 frame_val;
1180 	unsigned int loops;
1181 	int ret;
1182 
1183 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1184 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1185 		return 0;
1186 
1187 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188 		tw32_f(MAC_MI_MODE,
1189 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1190 		udelay(80);
1191 	}
1192 
1193 	tg3_ape_lock(tp, tp->phy_ape_lock);
1194 
1195 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1196 		      MI_COM_PHY_ADDR_MASK);
1197 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1198 		      MI_COM_REG_ADDR_MASK);
1199 	frame_val |= (val & MI_COM_DATA_MASK);
1200 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1201 
1202 	tw32_f(MAC_MI_COM, frame_val);
1203 
1204 	loops = PHY_BUSY_LOOPS;
1205 	while (loops != 0) {
1206 		udelay(10);
1207 		frame_val = tr32(MAC_MI_COM);
1208 		if ((frame_val & MI_COM_BUSY) == 0) {
1209 			udelay(5);
1210 			frame_val = tr32(MAC_MI_COM);
1211 			break;
1212 		}
1213 		loops -= 1;
1214 	}
1215 
1216 	ret = -EBUSY;
1217 	if (loops != 0)
1218 		ret = 0;
1219 
1220 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1221 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1222 		udelay(80);
1223 	}
1224 
1225 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1226 
1227 	return ret;
1228 }
1229 
1230 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1231 {
1232 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1233 }
1234 
1235 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1236 {
1237 	int err;
1238 
1239 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1240 	if (err)
1241 		goto done;
1242 
1243 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1244 	if (err)
1245 		goto done;
1246 
1247 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1248 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1249 	if (err)
1250 		goto done;
1251 
1252 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1253 
1254 done:
1255 	return err;
1256 }
1257 
1258 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1259 {
1260 	int err;
1261 
1262 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1263 	if (err)
1264 		goto done;
1265 
1266 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1267 	if (err)
1268 		goto done;
1269 
1270 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1271 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1272 	if (err)
1273 		goto done;
1274 
1275 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1276 
1277 done:
1278 	return err;
1279 }
1280 
1281 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1282 {
1283 	int err;
1284 
1285 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286 	if (!err)
1287 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1288 
1289 	return err;
1290 }
1291 
1292 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1293 {
1294 	int err;
1295 
1296 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1297 	if (!err)
1298 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1299 
1300 	return err;
1301 }
1302 
1303 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1304 {
1305 	int err;
1306 
1307 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1308 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1309 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1310 	if (!err)
1311 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1312 
1313 	return err;
1314 }
1315 
1316 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1317 {
1318 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1319 		set |= MII_TG3_AUXCTL_MISC_WREN;
1320 
1321 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 }
1323 
1324 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1325 {
1326 	u32 val;
1327 	int err;
1328 
1329 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1330 
1331 	if (err)
1332 		return err;
1333 
1334 	if (enable)
1335 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336 	else
1337 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1338 
1339 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1340 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1341 
1342 	return err;
1343 }
1344 
1345 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1346 {
1347 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1348 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1349 }
1350 
1351 static int tg3_bmcr_reset(struct tg3 *tp)
1352 {
1353 	u32 phy_control;
1354 	int limit, err;
1355 
1356 	/* OK, reset it, and poll the BMCR_RESET bit until it
1357 	 * clears or we time out.
1358 	 */
1359 	phy_control = BMCR_RESET;
1360 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1361 	if (err != 0)
1362 		return -EBUSY;
1363 
1364 	limit = 5000;
1365 	while (limit--) {
1366 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1367 		if (err != 0)
1368 			return -EBUSY;
1369 
1370 		if ((phy_control & BMCR_RESET) == 0) {
1371 			udelay(40);
1372 			break;
1373 		}
1374 		udelay(10);
1375 	}
1376 	if (limit < 0)
1377 		return -EBUSY;
1378 
1379 	return 0;
1380 }
1381 
1382 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1383 {
1384 	struct tg3 *tp = bp->priv;
1385 	u32 val;
1386 
1387 	spin_lock_bh(&tp->lock);
1388 
1389 	if (__tg3_readphy(tp, mii_id, reg, &val))
1390 		val = -EIO;
1391 
1392 	spin_unlock_bh(&tp->lock);
1393 
1394 	return val;
1395 }
1396 
1397 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1398 {
1399 	struct tg3 *tp = bp->priv;
1400 	u32 ret = 0;
1401 
1402 	spin_lock_bh(&tp->lock);
1403 
1404 	if (__tg3_writephy(tp, mii_id, reg, val))
1405 		ret = -EIO;
1406 
1407 	spin_unlock_bh(&tp->lock);
1408 
1409 	return ret;
1410 }
1411 
1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414 	u32 val;
1415 	struct phy_device *phydev;
1416 
1417 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419 	case PHY_ID_BCM50610:
1420 	case PHY_ID_BCM50610M:
1421 		val = MAC_PHYCFG2_50610_LED_MODES;
1422 		break;
1423 	case PHY_ID_BCMAC131:
1424 		val = MAC_PHYCFG2_AC131_LED_MODES;
1425 		break;
1426 	case PHY_ID_RTL8211C:
1427 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428 		break;
1429 	case PHY_ID_RTL8201E:
1430 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431 		break;
1432 	default:
1433 		return;
1434 	}
1435 
1436 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437 		tw32(MAC_PHYCFG2, val);
1438 
1439 		val = tr32(MAC_PHYCFG1);
1440 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1441 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443 		tw32(MAC_PHYCFG1, val);
1444 
1445 		return;
1446 	}
1447 
1448 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1451 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1452 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1453 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1454 		       MAC_PHYCFG2_INBAND_ENABLE;
1455 
1456 	tw32(MAC_PHYCFG2, val);
1457 
1458 	val = tr32(MAC_PHYCFG1);
1459 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466 	}
1467 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469 	tw32(MAC_PHYCFG1, val);
1470 
1471 	val = tr32(MAC_EXT_RGMII_MODE);
1472 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473 		 MAC_RGMII_MODE_RX_QUALITY |
1474 		 MAC_RGMII_MODE_RX_ACTIVITY |
1475 		 MAC_RGMII_MODE_RX_ENG_DET |
1476 		 MAC_RGMII_MODE_TX_ENABLE |
1477 		 MAC_RGMII_MODE_TX_LOWPWR |
1478 		 MAC_RGMII_MODE_TX_RESET);
1479 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481 			val |= MAC_RGMII_MODE_RX_INT_B |
1482 			       MAC_RGMII_MODE_RX_QUALITY |
1483 			       MAC_RGMII_MODE_RX_ACTIVITY |
1484 			       MAC_RGMII_MODE_RX_ENG_DET;
1485 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486 			val |= MAC_RGMII_MODE_TX_ENABLE |
1487 			       MAC_RGMII_MODE_TX_LOWPWR |
1488 			       MAC_RGMII_MODE_TX_RESET;
1489 	}
1490 	tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492 
1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 	udelay(80);
1498 
1499 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1500 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1501 		tg3_mdio_config_5785(tp);
1502 }
1503 
1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506 	int i;
1507 	u32 reg;
1508 	struct phy_device *phydev;
1509 
1510 	if (tg3_flag(tp, 5717_PLUS)) {
1511 		u32 is_serdes;
1512 
1513 		tp->phy_addr = tp->pci_fn + 1;
1514 
1515 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517 		else
1518 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 		if (is_serdes)
1521 			tp->phy_addr += 7;
1522 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 		int addr;
1524 
1525 		addr = ssb_gige_get_phyaddr(tp->pdev);
1526 		if (addr < 0)
1527 			return addr;
1528 		tp->phy_addr = addr;
1529 	} else
1530 		tp->phy_addr = TG3_PHY_MII_ADDR;
1531 
1532 	tg3_mdio_start(tp);
1533 
1534 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 		return 0;
1536 
1537 	tp->mdio_bus = mdiobus_alloc();
1538 	if (tp->mdio_bus == NULL)
1539 		return -ENOMEM;
1540 
1541 	tp->mdio_bus->name     = "tg3 mdio bus";
1542 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1543 	tp->mdio_bus->priv     = tp;
1544 	tp->mdio_bus->parent   = &tp->pdev->dev;
1545 	tp->mdio_bus->read     = &tg3_mdio_read;
1546 	tp->mdio_bus->write    = &tg3_mdio_write;
1547 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548 
1549 	/* The bus registration will look for all the PHYs on the mdio bus.
1550 	 * Unfortunately, it does not ensure the PHY is powered up before
1551 	 * accessing the PHY ID registers.  A chip reset is the
1552 	 * quickest way to bring the device back to an operational state..
1553 	 */
1554 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555 		tg3_bmcr_reset(tp);
1556 
1557 	i = mdiobus_register(tp->mdio_bus);
1558 	if (i) {
1559 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 		mdiobus_free(tp->mdio_bus);
1561 		return i;
1562 	}
1563 
1564 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565 
1566 	if (!phydev || !phydev->drv) {
1567 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 		mdiobus_unregister(tp->mdio_bus);
1569 		mdiobus_free(tp->mdio_bus);
1570 		return -ENODEV;
1571 	}
1572 
1573 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 	case PHY_ID_BCM57780:
1575 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 		break;
1578 	case PHY_ID_BCM50610:
1579 	case PHY_ID_BCM50610M:
1580 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 				     PHY_BRCM_RX_REFCLK_UNUSED |
1582 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 		fallthrough;
1585 	case PHY_ID_RTL8211C:
1586 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587 		break;
1588 	case PHY_ID_RTL8201E:
1589 	case PHY_ID_BCMAC131:
1590 		phydev->interface = PHY_INTERFACE_MODE_MII;
1591 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593 		break;
1594 	}
1595 
1596 	tg3_flag_set(tp, MDIOBUS_INITED);
1597 
1598 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599 		tg3_mdio_config_5785(tp);
1600 
1601 	return 0;
1602 }
1603 
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1607 		tg3_flag_clear(tp, MDIOBUS_INITED);
1608 		mdiobus_unregister(tp->mdio_bus);
1609 		mdiobus_free(tp->mdio_bus);
1610 	}
1611 }
1612 
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616 	u32 val;
1617 
1618 	val = tr32(GRC_RX_CPU_EVENT);
1619 	val |= GRC_RX_CPU_DRIVER_EVENT;
1620 	tw32_f(GRC_RX_CPU_EVENT, val);
1621 
1622 	tp->last_event_jiffies = jiffies;
1623 }
1624 
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626 
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630 	int i;
1631 	unsigned int delay_cnt;
1632 	long time_remain;
1633 
1634 	/* If enough time has passed, no wait is necessary. */
1635 	time_remain = (long)(tp->last_event_jiffies + 1 +
1636 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637 		      (long)jiffies;
1638 	if (time_remain < 0)
1639 		return;
1640 
1641 	/* Check if we can shorten the wait time. */
1642 	delay_cnt = jiffies_to_usecs(time_remain);
1643 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645 	delay_cnt = (delay_cnt >> 3) + 1;
1646 
1647 	for (i = 0; i < delay_cnt; i++) {
1648 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649 			break;
1650 		if (pci_channel_offline(tp->pdev))
1651 			break;
1652 
1653 		udelay(8);
1654 	}
1655 }
1656 
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660 	u32 reg, val;
1661 
1662 	val = 0;
1663 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1664 		val = reg << 16;
1665 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1666 		val |= (reg & 0xffff);
1667 	*data++ = val;
1668 
1669 	val = 0;
1670 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671 		val = reg << 16;
1672 	if (!tg3_readphy(tp, MII_LPA, &reg))
1673 		val |= (reg & 0xffff);
1674 	*data++ = val;
1675 
1676 	val = 0;
1677 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679 			val = reg << 16;
1680 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681 			val |= (reg & 0xffff);
1682 	}
1683 	*data++ = val;
1684 
1685 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686 		val = reg << 16;
1687 	else
1688 		val = 0;
1689 	*data++ = val;
1690 }
1691 
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695 	u32 data[4];
1696 
1697 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698 		return;
1699 
1700 	tg3_phy_gather_ump_data(tp, data);
1701 
1702 	tg3_wait_for_event_ack(tp);
1703 
1704 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710 
1711 	tg3_generate_fw_event(tp);
1712 }
1713 
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718 		/* Wait for RX cpu to ACK the previous event. */
1719 		tg3_wait_for_event_ack(tp);
1720 
1721 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722 
1723 		tg3_generate_fw_event(tp);
1724 
1725 		/* Wait for RX cpu to ACK this event. */
1726 		tg3_wait_for_event_ack(tp);
1727 	}
1728 }
1729 
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735 
1736 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 		switch (kind) {
1738 		case RESET_KIND_INIT:
1739 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740 				      DRV_STATE_START);
1741 			break;
1742 
1743 		case RESET_KIND_SHUTDOWN:
1744 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 				      DRV_STATE_UNLOAD);
1746 			break;
1747 
1748 		case RESET_KIND_SUSPEND:
1749 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 				      DRV_STATE_SUSPEND);
1751 			break;
1752 
1753 		default:
1754 			break;
1755 		}
1756 	}
1757 }
1758 
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 		switch (kind) {
1764 		case RESET_KIND_INIT:
1765 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 				      DRV_STATE_START_DONE);
1767 			break;
1768 
1769 		case RESET_KIND_SHUTDOWN:
1770 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 				      DRV_STATE_UNLOAD_DONE);
1772 			break;
1773 
1774 		default:
1775 			break;
1776 		}
1777 	}
1778 }
1779 
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783 	if (tg3_flag(tp, ENABLE_ASF)) {
1784 		switch (kind) {
1785 		case RESET_KIND_INIT:
1786 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 				      DRV_STATE_START);
1788 			break;
1789 
1790 		case RESET_KIND_SHUTDOWN:
1791 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 				      DRV_STATE_UNLOAD);
1793 			break;
1794 
1795 		case RESET_KIND_SUSPEND:
1796 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 				      DRV_STATE_SUSPEND);
1798 			break;
1799 
1800 		default:
1801 			break;
1802 		}
1803 	}
1804 }
1805 
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808 	int i;
1809 	u32 val;
1810 
1811 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 		return 0;
1813 
1814 	if (tg3_flag(tp, IS_SSB_CORE)) {
1815 		/* We don't use firmware. */
1816 		return 0;
1817 	}
1818 
1819 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820 		/* Wait up to 20ms for init done. */
1821 		for (i = 0; i < 200; i++) {
1822 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 				return 0;
1824 			if (pci_channel_offline(tp->pdev))
1825 				return -ENODEV;
1826 
1827 			udelay(100);
1828 		}
1829 		return -ENODEV;
1830 	}
1831 
1832 	/* Wait for firmware initialization to complete. */
1833 	for (i = 0; i < 100000; i++) {
1834 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 			break;
1837 		if (pci_channel_offline(tp->pdev)) {
1838 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1840 				netdev_info(tp->dev, "No firmware running\n");
1841 			}
1842 
1843 			break;
1844 		}
1845 
1846 		udelay(10);
1847 	}
1848 
1849 	/* Chip might not be fitted with firmware.  Some Sun onboard
1850 	 * parts are configured like that.  So don't signal the timeout
1851 	 * of the above loop as an error, but do report the lack of
1852 	 * running firmware once.
1853 	 */
1854 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1856 
1857 		netdev_info(tp->dev, "No firmware running\n");
1858 	}
1859 
1860 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861 		/* The 57765 A0 needs a little more
1862 		 * time to do some important work.
1863 		 */
1864 		mdelay(10);
1865 	}
1866 
1867 	return 0;
1868 }
1869 
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872 	if (!netif_carrier_ok(tp->dev)) {
1873 		netif_info(tp, link, tp->dev, "Link is down\n");
1874 		tg3_ump_link_report(tp);
1875 	} else if (netif_msg_link(tp)) {
1876 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877 			    (tp->link_config.active_speed == SPEED_1000 ?
1878 			     1000 :
1879 			     (tp->link_config.active_speed == SPEED_100 ?
1880 			      100 : 10)),
1881 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 			     "full" : "half"));
1883 
1884 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 			    "on" : "off",
1887 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 			    "on" : "off");
1889 
1890 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891 			netdev_info(tp->dev, "EEE is %s\n",
1892 				    tp->setlpicnt ? "enabled" : "disabled");
1893 
1894 		tg3_ump_link_report(tp);
1895 	}
1896 
1897 	tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899 
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902 	u32 flowctrl = 0;
1903 
1904 	if (adv & ADVERTISE_PAUSE_CAP) {
1905 		flowctrl |= FLOW_CTRL_RX;
1906 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1907 			flowctrl |= FLOW_CTRL_TX;
1908 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1909 		flowctrl |= FLOW_CTRL_TX;
1910 
1911 	return flowctrl;
1912 }
1913 
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916 	u16 miireg;
1917 
1918 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919 		miireg = ADVERTISE_1000XPAUSE;
1920 	else if (flow_ctrl & FLOW_CTRL_TX)
1921 		miireg = ADVERTISE_1000XPSE_ASYM;
1922 	else if (flow_ctrl & FLOW_CTRL_RX)
1923 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924 	else
1925 		miireg = 0;
1926 
1927 	return miireg;
1928 }
1929 
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932 	u32 flowctrl = 0;
1933 
1934 	if (adv & ADVERTISE_1000XPAUSE) {
1935 		flowctrl |= FLOW_CTRL_RX;
1936 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937 			flowctrl |= FLOW_CTRL_TX;
1938 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1939 		flowctrl |= FLOW_CTRL_TX;
1940 
1941 	return flowctrl;
1942 }
1943 
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946 	u8 cap = 0;
1947 
1948 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951 		if (lcladv & ADVERTISE_1000XPAUSE)
1952 			cap = FLOW_CTRL_RX;
1953 		if (rmtadv & ADVERTISE_1000XPAUSE)
1954 			cap = FLOW_CTRL_TX;
1955 	}
1956 
1957 	return cap;
1958 }
1959 
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962 	u8 autoneg;
1963 	u8 flowctrl = 0;
1964 	u32 old_rx_mode = tp->rx_mode;
1965 	u32 old_tx_mode = tp->tx_mode;
1966 
1967 	if (tg3_flag(tp, USE_PHYLIB))
1968 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969 	else
1970 		autoneg = tp->link_config.autoneg;
1971 
1972 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975 		else
1976 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977 	} else
1978 		flowctrl = tp->link_config.flowctrl;
1979 
1980 	tp->link_config.active_flowctrl = flowctrl;
1981 
1982 	if (flowctrl & FLOW_CTRL_RX)
1983 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984 	else
1985 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986 
1987 	if (old_rx_mode != tp->rx_mode)
1988 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1989 
1990 	if (flowctrl & FLOW_CTRL_TX)
1991 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992 	else
1993 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994 
1995 	if (old_tx_mode != tp->tx_mode)
1996 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998 
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001 	u8 oldflowctrl, linkmesg = 0;
2002 	u32 mac_mode, lcl_adv, rmt_adv;
2003 	struct tg3 *tp = netdev_priv(dev);
2004 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005 
2006 	spin_lock_bh(&tp->lock);
2007 
2008 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009 				    MAC_MODE_HALF_DUPLEX);
2010 
2011 	oldflowctrl = tp->link_config.active_flowctrl;
2012 
2013 	if (phydev->link) {
2014 		lcl_adv = 0;
2015 		rmt_adv = 0;
2016 
2017 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2019 		else if (phydev->speed == SPEED_1000 ||
2020 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2021 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022 		else
2023 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2024 
2025 		if (phydev->duplex == DUPLEX_HALF)
2026 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2027 		else {
2028 			lcl_adv = mii_advertise_flowctrl(
2029 				  tp->link_config.flowctrl);
2030 
2031 			if (phydev->pause)
2032 				rmt_adv = LPA_PAUSE_CAP;
2033 			if (phydev->asym_pause)
2034 				rmt_adv |= LPA_PAUSE_ASYM;
2035 		}
2036 
2037 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038 	} else
2039 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040 
2041 	if (mac_mode != tp->mac_mode) {
2042 		tp->mac_mode = mac_mode;
2043 		tw32_f(MAC_MODE, tp->mac_mode);
2044 		udelay(40);
2045 	}
2046 
2047 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048 		if (phydev->speed == SPEED_10)
2049 			tw32(MAC_MI_STAT,
2050 			     MAC_MI_STAT_10MBPS_MODE |
2051 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 		else
2053 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 	}
2055 
2056 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057 		tw32(MAC_TX_LENGTHS,
2058 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2060 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061 	else
2062 		tw32(MAC_TX_LENGTHS,
2063 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2065 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066 
2067 	if (phydev->link != tp->old_link ||
2068 	    phydev->speed != tp->link_config.active_speed ||
2069 	    phydev->duplex != tp->link_config.active_duplex ||
2070 	    oldflowctrl != tp->link_config.active_flowctrl)
2071 		linkmesg = 1;
2072 
2073 	tp->old_link = phydev->link;
2074 	tp->link_config.active_speed = phydev->speed;
2075 	tp->link_config.active_duplex = phydev->duplex;
2076 
2077 	spin_unlock_bh(&tp->lock);
2078 
2079 	if (linkmesg)
2080 		tg3_link_report(tp);
2081 }
2082 
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085 	struct phy_device *phydev;
2086 
2087 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088 		return 0;
2089 
2090 	/* Bring the PHY back to a known state. */
2091 	tg3_bmcr_reset(tp);
2092 
2093 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094 
2095 	/* Attach the MAC to the PHY. */
2096 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2097 			     tg3_adjust_link, phydev->interface);
2098 	if (IS_ERR(phydev)) {
2099 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100 		return PTR_ERR(phydev);
2101 	}
2102 
2103 	/* Mask with MAC supported features. */
2104 	switch (phydev->interface) {
2105 	case PHY_INTERFACE_MODE_GMII:
2106 	case PHY_INTERFACE_MODE_RGMII:
2107 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108 			phy_set_max_speed(phydev, SPEED_1000);
2109 			phy_support_asym_pause(phydev);
2110 			break;
2111 		}
2112 		fallthrough;
2113 	case PHY_INTERFACE_MODE_MII:
2114 		phy_set_max_speed(phydev, SPEED_100);
2115 		phy_support_asym_pause(phydev);
2116 		break;
2117 	default:
2118 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119 		return -EINVAL;
2120 	}
2121 
2122 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123 
2124 	phy_attached_info(phydev);
2125 
2126 	return 0;
2127 }
2128 
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 	struct phy_device *phydev;
2132 
2133 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 		return;
2135 
2136 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137 
2138 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 		phydev->speed = tp->link_config.speed;
2141 		phydev->duplex = tp->link_config.duplex;
2142 		phydev->autoneg = tp->link_config.autoneg;
2143 		ethtool_convert_legacy_u32_to_link_mode(
2144 			phydev->advertising, tp->link_config.advertising);
2145 	}
2146 
2147 	phy_start(phydev);
2148 
2149 	phy_start_aneg(phydev);
2150 }
2151 
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 		return;
2156 
2157 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159 
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165 	}
2166 }
2167 
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170 	int err;
2171 	u32 val;
2172 
2173 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174 		return 0;
2175 
2176 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177 		/* Cannot do read-modify-write on 5401 */
2178 		err = tg3_phy_auxctl_write(tp,
2179 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181 					   0x4c20);
2182 		goto done;
2183 	}
2184 
2185 	err = tg3_phy_auxctl_read(tp,
2186 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187 	if (err)
2188 		return err;
2189 
2190 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191 	err = tg3_phy_auxctl_write(tp,
2192 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193 
2194 done:
2195 	return err;
2196 }
2197 
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200 	u32 phytest;
2201 
2202 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203 		u32 phy;
2204 
2205 		tg3_writephy(tp, MII_TG3_FET_TEST,
2206 			     phytest | MII_TG3_FET_SHADOW_EN);
2207 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 			if (enable)
2209 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 			else
2211 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 		}
2214 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215 	}
2216 }
2217 
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220 	u32 reg;
2221 
2222 	if (!tg3_flag(tp, 5705_PLUS) ||
2223 	    (tg3_flag(tp, 5717_PLUS) &&
2224 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225 		return;
2226 
2227 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228 		tg3_phy_fet_toggle_apd(tp, enable);
2229 		return;
2230 	}
2231 
2232 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2235 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2236 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238 
2239 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240 
2241 
2242 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243 	if (enable)
2244 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245 
2246 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
2248 
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251 	u32 phy;
2252 
2253 	if (!tg3_flag(tp, 5705_PLUS) ||
2254 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255 		return;
2256 
2257 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258 		u32 ephy;
2259 
2260 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262 
2263 			tg3_writephy(tp, MII_TG3_FET_TEST,
2264 				     ephy | MII_TG3_FET_SHADOW_EN);
2265 			if (!tg3_readphy(tp, reg, &phy)) {
2266 				if (enable)
2267 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268 				else
2269 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 				tg3_writephy(tp, reg, phy);
2271 			}
2272 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273 		}
2274 	} else {
2275 		int ret;
2276 
2277 		ret = tg3_phy_auxctl_read(tp,
2278 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279 		if (!ret) {
2280 			if (enable)
2281 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282 			else
2283 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 			tg3_phy_auxctl_write(tp,
2285 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286 		}
2287 	}
2288 }
2289 
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292 	int ret;
2293 	u32 val;
2294 
2295 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296 		return;
2297 
2298 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299 	if (!ret)
2300 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303 
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306 	u32 otp, phy;
2307 
2308 	if (!tp->phy_otp)
2309 		return;
2310 
2311 	otp = tp->phy_otp;
2312 
2313 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314 		return;
2315 
2316 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319 
2320 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323 
2324 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327 
2328 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330 
2331 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333 
2334 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337 
2338 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340 
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2342 {
2343 	u32 val;
2344 	struct ethtool_keee *dest = &tp->eee;
2345 
2346 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347 		return;
2348 
2349 	if (eee)
2350 		dest = eee;
2351 
2352 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353 		return;
2354 
2355 	/* Pull eee_active */
2356 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 		dest->eee_active = 1;
2359 	} else
2360 		dest->eee_active = 0;
2361 
2362 	/* Pull lp advertised settings */
2363 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 		return;
2365 	mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2366 
2367 	/* Pull advertised and eee_enabled settings */
2368 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 		return;
2370 	dest->eee_enabled = !!val;
2371 	mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2372 
2373 	/* Pull tx_lpi_enabled */
2374 	val = tr32(TG3_CPMU_EEE_MODE);
2375 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376 
2377 	/* Pull lpi timer value */
2378 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
2380 
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383 	u32 val;
2384 
2385 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386 		return;
2387 
2388 	tp->setlpicnt = 0;
2389 
2390 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 	    current_link_up &&
2392 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2393 	    (tp->link_config.active_speed == SPEED_100 ||
2394 	     tp->link_config.active_speed == SPEED_1000)) {
2395 		u32 eeectl;
2396 
2397 		if (tp->link_config.active_speed == SPEED_1000)
2398 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 		else
2400 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401 
2402 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403 
2404 		tg3_eee_pull_config(tp, NULL);
2405 		if (tp->eee.eee_active)
2406 			tp->setlpicnt = 2;
2407 	}
2408 
2409 	if (!tp->setlpicnt) {
2410 		if (current_link_up &&
2411 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2414 		}
2415 
2416 		val = tr32(TG3_CPMU_EEE_MODE);
2417 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418 	}
2419 }
2420 
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423 	u32 val;
2424 
2425 	if (tp->link_config.active_speed == SPEED_1000 &&
2426 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 	     tg3_flag(tp, 57765_CLASS)) &&
2429 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 		val = MII_TG3_DSP_TAP26_ALNOKO |
2431 		      MII_TG3_DSP_TAP26_RMRXSTO;
2432 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 	}
2435 
2436 	val = tr32(TG3_CPMU_EEE_MODE);
2437 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439 
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2441 {
2442 	int limit = 100;
2443 
2444 	while (limit--) {
2445 		u32 tmp32;
2446 
2447 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 			if ((tmp32 & 0x1000) == 0)
2449 				break;
2450 		}
2451 	}
2452 	if (limit < 0)
2453 		return -EBUSY;
2454 
2455 	return 0;
2456 }
2457 
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460 	static const u32 test_pat[4][6] = {
2461 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465 	};
2466 	int chan;
2467 
2468 	for (chan = 0; chan < 4; chan++) {
2469 		int i;
2470 
2471 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 			     (chan * 0x2000) | 0x0200);
2473 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474 
2475 		for (i = 0; i < 6; i++)
2476 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477 				     test_pat[chan][i]);
2478 
2479 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 		if (tg3_wait_macro_done(tp)) {
2481 			*resetp = 1;
2482 			return -EBUSY;
2483 		}
2484 
2485 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 			     (chan * 0x2000) | 0x0200);
2487 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 		if (tg3_wait_macro_done(tp)) {
2489 			*resetp = 1;
2490 			return -EBUSY;
2491 		}
2492 
2493 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 		if (tg3_wait_macro_done(tp)) {
2495 			*resetp = 1;
2496 			return -EBUSY;
2497 		}
2498 
2499 		for (i = 0; i < 6; i += 2) {
2500 			u32 low, high;
2501 
2502 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 			    tg3_wait_macro_done(tp)) {
2505 				*resetp = 1;
2506 				return -EBUSY;
2507 			}
2508 			low &= 0x7fff;
2509 			high &= 0x000f;
2510 			if (low != test_pat[chan][i] ||
2511 			    high != test_pat[chan][i+1]) {
2512 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515 
2516 				return -EBUSY;
2517 			}
2518 		}
2519 	}
2520 
2521 	return 0;
2522 }
2523 
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526 	int chan;
2527 
2528 	for (chan = 0; chan < 4; chan++) {
2529 		int i;
2530 
2531 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 			     (chan * 0x2000) | 0x0200);
2533 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 		for (i = 0; i < 6; i++)
2535 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 		if (tg3_wait_macro_done(tp))
2538 			return -EBUSY;
2539 	}
2540 
2541 	return 0;
2542 }
2543 
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546 	u32 reg32, phy9_orig;
2547 	int retries, do_phy_reset, err;
2548 
2549 	retries = 10;
2550 	do_phy_reset = 1;
2551 	do {
2552 		if (do_phy_reset) {
2553 			err = tg3_bmcr_reset(tp);
2554 			if (err)
2555 				return err;
2556 			do_phy_reset = 0;
2557 		}
2558 
2559 		/* Disable transmitter and interrupt.  */
2560 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561 			continue;
2562 
2563 		reg32 |= 0x3000;
2564 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565 
2566 		/* Set full-duplex, 1000 mbps.  */
2567 		tg3_writephy(tp, MII_BMCR,
2568 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2569 
2570 		/* Set to master mode.  */
2571 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572 			continue;
2573 
2574 		tg3_writephy(tp, MII_CTRL1000,
2575 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576 
2577 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578 		if (err)
2579 			return err;
2580 
2581 		/* Block the PHY control access.  */
2582 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2583 
2584 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585 		if (!err)
2586 			break;
2587 	} while (--retries);
2588 
2589 	err = tg3_phy_reset_chanpat(tp);
2590 	if (err)
2591 		return err;
2592 
2593 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2594 
2595 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597 
2598 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2599 
2600 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601 
2602 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603 	if (err)
2604 		return err;
2605 
2606 	reg32 &= ~0x3000;
2607 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608 
2609 	return 0;
2610 }
2611 
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614 	netif_carrier_off(tp->dev);
2615 	tp->link_up = false;
2616 }
2617 
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620 	if (tg3_flag(tp, ENABLE_ASF))
2621 		netdev_warn(tp->dev,
2622 			    "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624 
2625 /* This will reset the tigon3 PHY if there is no valid
2626  * link unless the FORCE argument is non-zero.
2627  */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630 	u32 val, cpmuctrl;
2631 	int err;
2632 
2633 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 		val = tr32(GRC_MISC_CFG);
2635 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636 		udelay(40);
2637 	}
2638 	err  = tg3_readphy(tp, MII_BMSR, &val);
2639 	err |= tg3_readphy(tp, MII_BMSR, &val);
2640 	if (err != 0)
2641 		return -EBUSY;
2642 
2643 	if (netif_running(tp->dev) && tp->link_up) {
2644 		netif_carrier_off(tp->dev);
2645 		tg3_link_report(tp);
2646 	}
2647 
2648 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 		err = tg3_phy_reset_5703_4_5(tp);
2652 		if (err)
2653 			return err;
2654 		goto out;
2655 	}
2656 
2657 	cpmuctrl = 0;
2658 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662 			tw32(TG3_CPMU_CTRL,
2663 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664 	}
2665 
2666 	err = tg3_bmcr_reset(tp);
2667 	if (err)
2668 		return err;
2669 
2670 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673 
2674 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2675 	}
2676 
2677 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 			udelay(40);
2684 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685 		}
2686 	}
2687 
2688 	if (tg3_flag(tp, 5717_PLUS) &&
2689 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690 		return 0;
2691 
2692 	tg3_phy_apply_otp(tp);
2693 
2694 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 		tg3_phy_toggle_apd(tp, true);
2696 	else
2697 		tg3_phy_toggle_apd(tp, false);
2698 
2699 out:
2700 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2705 	}
2706 
2707 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 	}
2711 
2712 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 		}
2719 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 				tg3_writephy(tp, MII_TG3_TEST1,
2725 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2726 			} else
2727 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728 
2729 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2730 		}
2731 	}
2732 
2733 	/* Set Extended packet length bit (bit 14) on all chips that */
2734 	/* support jumbo frames */
2735 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 		/* Cannot do read-modify-write on 5401 */
2737 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 		/* Set bit 14 with read-modify-write to preserve other bits */
2740 		err = tg3_phy_auxctl_read(tp,
2741 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 		if (!err)
2743 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745 	}
2746 
2747 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 	 * jumbo frames transmission.
2749 	 */
2750 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754 	}
2755 
2756 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 		/* adjust output voltage */
2758 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759 	}
2760 
2761 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2763 
2764 	tg3_phy_toggle_automdix(tp, true);
2765 	tg3_phy_set_wirespeed(tp);
2766 	return 0;
2767 }
2768 
2769 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2771 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2772 					  TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778 
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2784 
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787 	u32 status, shift;
2788 
2789 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2791 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 	else
2793 		status = tr32(TG3_CPMU_DRV_STATUS);
2794 
2795 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 	status |= (newstat << shift);
2798 
2799 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2801 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 	else
2803 		tw32(TG3_CPMU_DRV_STATUS, status);
2804 
2805 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
2807 
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810 	if (!tg3_flag(tp, IS_NIC))
2811 		return 0;
2812 
2813 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817 			return -EIO;
2818 
2819 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820 
2821 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2823 
2824 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 	} else {
2826 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 	}
2829 
2830 	return 0;
2831 }
2832 
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835 	u32 grc_local_ctrl;
2836 
2837 	if (!tg3_flag(tp, IS_NIC) ||
2838 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2840 		return;
2841 
2842 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843 
2844 	tw32_wait_f(GRC_LOCAL_CTRL,
2845 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2847 
2848 	tw32_wait_f(GRC_LOCAL_CTRL,
2849 		    grc_local_ctrl,
2850 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2851 
2852 	tw32_wait_f(GRC_LOCAL_CTRL,
2853 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856 
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859 	if (!tg3_flag(tp, IS_NIC))
2860 		return;
2861 
2862 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 			    (GRC_LCLCTRL_GPIO_OE0 |
2866 			     GRC_LCLCTRL_GPIO_OE1 |
2867 			     GRC_LCLCTRL_GPIO_OE2 |
2868 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2870 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 				     GRC_LCLCTRL_GPIO_OE1 |
2876 				     GRC_LCLCTRL_GPIO_OE2 |
2877 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 				     tp->grc_local_ctrl;
2880 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2882 
2883 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2886 
2887 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 	} else {
2891 		u32 no_gpio2;
2892 		u32 grc_local_ctrl = 0;
2893 
2894 		/* Workaround to prevent overdrawing Amps. */
2895 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 				    grc_local_ctrl,
2899 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 		}
2901 
2902 		/* On 5753 and variants, GPIO2 cannot be used. */
2903 		no_gpio2 = tp->nic_sram_data_cfg &
2904 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2905 
2906 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 				  GRC_LCLCTRL_GPIO_OE1 |
2908 				  GRC_LCLCTRL_GPIO_OE2 |
2909 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2911 		if (no_gpio2) {
2912 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2914 		}
2915 		tw32_wait_f(GRC_LOCAL_CTRL,
2916 			    tp->grc_local_ctrl | grc_local_ctrl,
2917 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2918 
2919 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920 
2921 		tw32_wait_f(GRC_LOCAL_CTRL,
2922 			    tp->grc_local_ctrl | grc_local_ctrl,
2923 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2924 
2925 		if (!no_gpio2) {
2926 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 			tw32_wait_f(GRC_LOCAL_CTRL,
2928 				    tp->grc_local_ctrl | grc_local_ctrl,
2929 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 		}
2931 	}
2932 }
2933 
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936 	u32 msg = 0;
2937 
2938 	/* Serialize power state transitions */
2939 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940 		return;
2941 
2942 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 		msg = TG3_GPIO_MSG_NEED_VAUX;
2944 
2945 	msg = tg3_set_function_status(tp, msg);
2946 
2947 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948 		goto done;
2949 
2950 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 		tg3_pwrsrc_switch_to_vaux(tp);
2952 	else
2953 		tg3_pwrsrc_die_with_vmain(tp);
2954 
2955 done:
2956 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958 
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961 	bool need_vaux = false;
2962 
2963 	/* The GPIOs do something completely different on 57765. */
2964 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965 		return;
2966 
2967 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 		tg3_frob_aux_power_5717(tp, include_wol ?
2971 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2972 		return;
2973 	}
2974 
2975 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 		struct net_device *dev_peer;
2977 
2978 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2979 
2980 		/* remove_one() may have been run on the peer. */
2981 		if (dev_peer) {
2982 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2983 
2984 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2985 				return;
2986 
2987 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 			    tg3_flag(tp_peer, ENABLE_ASF))
2989 				need_vaux = true;
2990 		}
2991 	}
2992 
2993 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 	    tg3_flag(tp, ENABLE_ASF))
2995 		need_vaux = true;
2996 
2997 	if (need_vaux)
2998 		tg3_pwrsrc_switch_to_vaux(tp);
2999 	else
3000 		tg3_pwrsrc_die_with_vmain(tp);
3001 }
3002 
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 		return 1;
3007 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 		if (speed != SPEED_10)
3009 			return 1;
3010 	} else if (speed == SPEED_10)
3011 		return 1;
3012 
3013 	return 0;
3014 }
3015 
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018 	switch (tg3_asic_rev(tp)) {
3019 	case ASIC_REV_5700:
3020 	case ASIC_REV_5704:
3021 		return true;
3022 	case ASIC_REV_5780:
3023 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 			return true;
3025 		return false;
3026 	case ASIC_REV_5717:
3027 		if (!tp->pci_fn)
3028 			return true;
3029 		return false;
3030 	case ASIC_REV_5719:
3031 	case ASIC_REV_5720:
3032 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 		    !tp->pci_fn)
3034 			return true;
3035 		return false;
3036 	}
3037 
3038 	return false;
3039 }
3040 
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043 	switch (tg3_asic_rev(tp)) {
3044 	case ASIC_REV_5719:
3045 	case ASIC_REV_5720:
3046 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047 		    !tp->pci_fn)
3048 			return true;
3049 		return false;
3050 	}
3051 
3052 	return false;
3053 }
3054 
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057 	u32 val;
3058 
3059 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060 		return;
3061 
3062 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066 
3067 			sg_dig_ctrl |=
3068 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071 		}
3072 		return;
3073 	}
3074 
3075 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 		tg3_bmcr_reset(tp);
3077 		val = tr32(GRC_MISC_CFG);
3078 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079 		udelay(40);
3080 		return;
3081 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 		u32 phytest;
3083 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084 			u32 phy;
3085 
3086 			tg3_writephy(tp, MII_ADVERTISE, 0);
3087 			tg3_writephy(tp, MII_BMCR,
3088 				     BMCR_ANENABLE | BMCR_ANRESTART);
3089 
3090 			tg3_writephy(tp, MII_TG3_FET_TEST,
3091 				     phytest | MII_TG3_FET_SHADOW_EN);
3092 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094 				tg3_writephy(tp,
3095 					     MII_TG3_FET_SHDW_AUXMODE4,
3096 					     phy);
3097 			}
3098 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099 		}
3100 		return;
3101 	} else if (do_low_power) {
3102 		if (!tg3_phy_led_bug(tp))
3103 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105 
3106 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110 	}
3111 
3112 	/* The PHY should not be powered down on some chips because
3113 	 * of bugs.
3114 	 */
3115 	if (tg3_phy_power_bug(tp))
3116 		return;
3117 
3118 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124 	}
3125 
3126 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128 
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132 	if (tg3_flag(tp, NVRAM)) {
3133 		int i;
3134 
3135 		if (tp->nvram_lock_cnt == 0) {
3136 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 			for (i = 0; i < 8000; i++) {
3138 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139 					break;
3140 				udelay(20);
3141 			}
3142 			if (i == 8000) {
3143 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144 				return -ENODEV;
3145 			}
3146 		}
3147 		tp->nvram_lock_cnt++;
3148 	}
3149 	return 0;
3150 }
3151 
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155 	if (tg3_flag(tp, NVRAM)) {
3156 		if (tp->nvram_lock_cnt > 0)
3157 			tp->nvram_lock_cnt--;
3158 		if (tp->nvram_lock_cnt == 0)
3159 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 	}
3161 }
3162 
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 		u32 nvaccess = tr32(NVRAM_ACCESS);
3168 
3169 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170 	}
3171 }
3172 
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 		u32 nvaccess = tr32(NVRAM_ACCESS);
3178 
3179 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180 	}
3181 }
3182 
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 					u32 offset, u32 *val)
3185 {
3186 	u32 tmp;
3187 	int i;
3188 
3189 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190 		return -EINVAL;
3191 
3192 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 					EEPROM_ADDR_DEVID_MASK |
3194 					EEPROM_ADDR_READ);
3195 	tw32(GRC_EEPROM_ADDR,
3196 	     tmp |
3197 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 	      EEPROM_ADDR_ADDR_MASK) |
3200 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201 
3202 	for (i = 0; i < 1000; i++) {
3203 		tmp = tr32(GRC_EEPROM_ADDR);
3204 
3205 		if (tmp & EEPROM_ADDR_COMPLETE)
3206 			break;
3207 		msleep(1);
3208 	}
3209 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3210 		return -EBUSY;
3211 
3212 	tmp = tr32(GRC_EEPROM_DATA);
3213 
3214 	/*
3215 	 * The data will always be opposite the native endian
3216 	 * format.  Perform a blind byteswap to compensate.
3217 	 */
3218 	*val = swab32(tmp);
3219 
3220 	return 0;
3221 }
3222 
3223 #define NVRAM_CMD_TIMEOUT 10000
3224 
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227 	int i;
3228 
3229 	tw32(NVRAM_CMD, nvram_cmd);
3230 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 		usleep_range(10, 40);
3232 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233 			udelay(10);
3234 			break;
3235 		}
3236 	}
3237 
3238 	if (i == NVRAM_CMD_TIMEOUT)
3239 		return -EBUSY;
3240 
3241 	return 0;
3242 }
3243 
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246 	if (tg3_flag(tp, NVRAM) &&
3247 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3248 	    tg3_flag(tp, FLASH) &&
3249 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3251 
3252 		addr = ((addr / tp->nvram_pagesize) <<
3253 			ATMEL_AT45DB0X1B_PAGE_POS) +
3254 		       (addr % tp->nvram_pagesize);
3255 
3256 	return addr;
3257 }
3258 
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261 	if (tg3_flag(tp, NVRAM) &&
3262 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3263 	    tg3_flag(tp, FLASH) &&
3264 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3266 
3267 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 			tp->nvram_pagesize) +
3269 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270 
3271 	return addr;
3272 }
3273 
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275  * the byteswapping settings for all other register accesses.
3276  * tg3 devices are BE devices, so on a BE machine, the data
3277  * returned will be exactly as it is seen in NVRAM.  On a LE
3278  * machine, the 32-bit value will be byteswapped.
3279  */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282 	int ret;
3283 
3284 	if (!tg3_flag(tp, NVRAM))
3285 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3286 
3287 	offset = tg3_nvram_phys_addr(tp, offset);
3288 
3289 	if (offset > NVRAM_ADDR_MSK)
3290 		return -EINVAL;
3291 
3292 	ret = tg3_nvram_lock(tp);
3293 	if (ret)
3294 		return ret;
3295 
3296 	tg3_enable_nvram_access(tp);
3297 
3298 	tw32(NVRAM_ADDR, offset);
3299 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301 
3302 	if (ret == 0)
3303 		*val = tr32(NVRAM_RDDATA);
3304 
3305 	tg3_disable_nvram_access(tp);
3306 
3307 	tg3_nvram_unlock(tp);
3308 
3309 	return ret;
3310 }
3311 
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315 	u32 v;
3316 	int res = tg3_nvram_read(tp, offset, &v);
3317 	if (!res)
3318 		*val = cpu_to_be32(v);
3319 	return res;
3320 }
3321 
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 				    u32 offset, u32 len, u8 *buf)
3324 {
3325 	int i, j, rc = 0;
3326 	u32 val;
3327 
3328 	for (i = 0; i < len; i += 4) {
3329 		u32 addr;
3330 		__be32 data;
3331 
3332 		addr = offset + i;
3333 
3334 		memcpy(&data, buf + i, 4);
3335 
3336 		/*
3337 		 * The SEEPROM interface expects the data to always be opposite
3338 		 * the native endian format.  We accomplish this by reversing
3339 		 * all the operations that would have been performed on the
3340 		 * data from a call to tg3_nvram_read_be32().
3341 		 */
3342 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343 
3344 		val = tr32(GRC_EEPROM_ADDR);
3345 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346 
3347 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 			EEPROM_ADDR_READ);
3349 		tw32(GRC_EEPROM_ADDR, val |
3350 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 			(addr & EEPROM_ADDR_ADDR_MASK) |
3352 			EEPROM_ADDR_START |
3353 			EEPROM_ADDR_WRITE);
3354 
3355 		for (j = 0; j < 1000; j++) {
3356 			val = tr32(GRC_EEPROM_ADDR);
3357 
3358 			if (val & EEPROM_ADDR_COMPLETE)
3359 				break;
3360 			msleep(1);
3361 		}
3362 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3363 			rc = -EBUSY;
3364 			break;
3365 		}
3366 	}
3367 
3368 	return rc;
3369 }
3370 
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373 		u8 *buf)
3374 {
3375 	int ret = 0;
3376 	u32 pagesize = tp->nvram_pagesize;
3377 	u32 pagemask = pagesize - 1;
3378 	u32 nvram_cmd;
3379 	u8 *tmp;
3380 
3381 	tmp = kmalloc(pagesize, GFP_KERNEL);
3382 	if (tmp == NULL)
3383 		return -ENOMEM;
3384 
3385 	while (len) {
3386 		int j;
3387 		u32 phy_addr, page_off, size;
3388 
3389 		phy_addr = offset & ~pagemask;
3390 
3391 		for (j = 0; j < pagesize; j += 4) {
3392 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 						  (__be32 *) (tmp + j));
3394 			if (ret)
3395 				break;
3396 		}
3397 		if (ret)
3398 			break;
3399 
3400 		page_off = offset & pagemask;
3401 		size = pagesize;
3402 		if (len < size)
3403 			size = len;
3404 
3405 		len -= size;
3406 
3407 		memcpy(tmp + page_off, buf, size);
3408 
3409 		offset = offset + (pagesize - page_off);
3410 
3411 		tg3_enable_nvram_access(tp);
3412 
3413 		/*
3414 		 * Before we can erase the flash page, we need
3415 		 * to issue a special "write enable" command.
3416 		 */
3417 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418 
3419 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 			break;
3421 
3422 		/* Erase the target page */
3423 		tw32(NVRAM_ADDR, phy_addr);
3424 
3425 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427 
3428 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429 			break;
3430 
3431 		/* Issue another write enable to start the write. */
3432 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433 
3434 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 			break;
3436 
3437 		for (j = 0; j < pagesize; j += 4) {
3438 			__be32 data;
3439 
3440 			data = *((__be32 *) (tmp + j));
3441 
3442 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443 
3444 			tw32(NVRAM_ADDR, phy_addr + j);
3445 
3446 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447 				NVRAM_CMD_WR;
3448 
3449 			if (j == 0)
3450 				nvram_cmd |= NVRAM_CMD_FIRST;
3451 			else if (j == (pagesize - 4))
3452 				nvram_cmd |= NVRAM_CMD_LAST;
3453 
3454 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455 			if (ret)
3456 				break;
3457 		}
3458 		if (ret)
3459 			break;
3460 	}
3461 
3462 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3464 
3465 	kfree(tmp);
3466 
3467 	return ret;
3468 }
3469 
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472 		u8 *buf)
3473 {
3474 	int i, ret = 0;
3475 
3476 	for (i = 0; i < len; i += 4, offset += 4) {
3477 		u32 page_off, phy_addr, nvram_cmd;
3478 		__be32 data;
3479 
3480 		memcpy(&data, buf + i, 4);
3481 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482 
3483 		page_off = offset % tp->nvram_pagesize;
3484 
3485 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3486 
3487 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488 
3489 		if (page_off == 0 || i == 0)
3490 			nvram_cmd |= NVRAM_CMD_FIRST;
3491 		if (page_off == (tp->nvram_pagesize - 4))
3492 			nvram_cmd |= NVRAM_CMD_LAST;
3493 
3494 		if (i == (len - 4))
3495 			nvram_cmd |= NVRAM_CMD_LAST;
3496 
3497 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 		    !tg3_flag(tp, FLASH) ||
3499 		    !tg3_flag(tp, 57765_PLUS))
3500 			tw32(NVRAM_ADDR, phy_addr);
3501 
3502 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 		    !tg3_flag(tp, 5755_PLUS) &&
3504 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3505 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3506 			u32 cmd;
3507 
3508 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 			ret = tg3_nvram_exec_cmd(tp, cmd);
3510 			if (ret)
3511 				break;
3512 		}
3513 		if (!tg3_flag(tp, FLASH)) {
3514 			/* We always do complete word writes to eeprom. */
3515 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516 		}
3517 
3518 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519 		if (ret)
3520 			break;
3521 	}
3522 	return ret;
3523 }
3524 
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528 	int ret;
3529 
3530 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533 		udelay(40);
3534 	}
3535 
3536 	if (!tg3_flag(tp, NVRAM)) {
3537 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538 	} else {
3539 		u32 grc_mode;
3540 
3541 		ret = tg3_nvram_lock(tp);
3542 		if (ret)
3543 			return ret;
3544 
3545 		tg3_enable_nvram_access(tp);
3546 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 			tw32(NVRAM_WRITE1, 0x406);
3548 
3549 		grc_mode = tr32(GRC_MODE);
3550 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551 
3552 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554 				buf);
3555 		} else {
3556 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557 				buf);
3558 		}
3559 
3560 		grc_mode = tr32(GRC_MODE);
3561 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562 
3563 		tg3_disable_nvram_access(tp);
3564 		tg3_nvram_unlock(tp);
3565 	}
3566 
3567 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569 		udelay(40);
3570 	}
3571 
3572 	return ret;
3573 }
3574 
3575 #define RX_CPU_SCRATCH_BASE	0x30000
3576 #define RX_CPU_SCRATCH_SIZE	0x04000
3577 #define TX_CPU_SCRATCH_BASE	0x34000
3578 #define TX_CPU_SCRATCH_SIZE	0x04000
3579 
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 	int i;
3584 	const int iters = 10000;
3585 
3586 	for (i = 0; i < iters; i++) {
3587 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3589 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 			break;
3591 		if (pci_channel_offline(tp->pdev))
3592 			return -EBUSY;
3593 	}
3594 
3595 	return (i == iters) ? -EBUSY : 0;
3596 }
3597 
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602 
3603 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3605 	udelay(10);
3606 
3607 	return rc;
3608 }
3609 
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615 
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3621 }
3622 
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626 	tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628 
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632 	int rc;
3633 
3634 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635 
3636 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638 
3639 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640 		return 0;
3641 	}
3642 	if (cpu_base == RX_CPU_BASE) {
3643 		rc = tg3_rxcpu_pause(tp);
3644 	} else {
3645 		/*
3646 		 * There is only an Rx CPU for the 5750 derivative in the
3647 		 * BCM4785.
3648 		 */
3649 		if (tg3_flag(tp, IS_SSB_CORE))
3650 			return 0;
3651 
3652 		rc = tg3_txcpu_pause(tp);
3653 	}
3654 
3655 	if (rc) {
3656 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658 		return -ENODEV;
3659 	}
3660 
3661 	/* Clear firmware's nvram arbitration. */
3662 	if (tg3_flag(tp, NVRAM))
3663 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664 	return 0;
3665 }
3666 
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 			   const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670 	int fw_len;
3671 
3672 	/* Non fragmented firmware have one firmware header followed by a
3673 	 * contiguous chunk of data to be written. The length field in that
3674 	 * header is not the length of data to be written but the complete
3675 	 * length of the bss. The data length is determined based on
3676 	 * tp->fw->size minus headers.
3677 	 *
3678 	 * Fragmented firmware have a main header followed by multiple
3679 	 * fragments. Each fragment is identical to non fragmented firmware
3680 	 * with a firmware header followed by a contiguous chunk of data. In
3681 	 * the main header, the length field is unused and set to 0xffffffff.
3682 	 * In each fragment header the length is the entire size of that
3683 	 * fragment i.e. fragment data + header length. Data length is
3684 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3685 	 */
3686 	if (tp->fw_len == 0xffffffff)
3687 		fw_len = be32_to_cpu(fw_hdr->len);
3688 	else
3689 		fw_len = tp->fw->size;
3690 
3691 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
3693 
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 				 u32 cpu_scratch_base, int cpu_scratch_size,
3697 				 const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699 	int err, i;
3700 	void (*write_op)(struct tg3 *, u32, u32);
3701 	int total_len = tp->fw->size;
3702 
3703 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 		netdev_err(tp->dev,
3705 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3706 			   __func__);
3707 		return -EINVAL;
3708 	}
3709 
3710 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 		write_op = tg3_write_mem;
3712 	else
3713 		write_op = tg3_write_indirect_reg32;
3714 
3715 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 		/* It is possible that bootcode is still loading at this point.
3717 		 * Get the nvram lock first before halting the cpu.
3718 		 */
3719 		int lock_err = tg3_nvram_lock(tp);
3720 		err = tg3_halt_cpu(tp, cpu_base);
3721 		if (!lock_err)
3722 			tg3_nvram_unlock(tp);
3723 		if (err)
3724 			goto out;
3725 
3726 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 			write_op(tp, cpu_scratch_base + i, 0);
3728 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 		tw32(cpu_base + CPU_MODE,
3730 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 	} else {
3732 		/* Subtract additional main header for fragmented firmware and
3733 		 * advance to the first fragment
3734 		 */
3735 		total_len -= TG3_FW_HDR_LEN;
3736 		fw_hdr++;
3737 	}
3738 
3739 	do {
3740 		__be32 *fw_data = (__be32 *)(fw_hdr + 1);
3741 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 			write_op(tp, cpu_scratch_base +
3743 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 				     (i * sizeof(u32)),
3745 				 be32_to_cpu(fw_data[i]));
3746 
3747 		total_len -= be32_to_cpu(fw_hdr->len);
3748 
3749 		/* Advance to next fragment */
3750 		fw_hdr = (struct tg3_firmware_hdr *)
3751 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 	} while (total_len > 0);
3753 
3754 	err = 0;
3755 
3756 out:
3757 	return err;
3758 }
3759 
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763 	int i;
3764 	const int iters = 5;
3765 
3766 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 	tw32_f(cpu_base + CPU_PC, pc);
3768 
3769 	for (i = 0; i < iters; i++) {
3770 		if (tr32(cpu_base + CPU_PC) == pc)
3771 			break;
3772 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3774 		tw32_f(cpu_base + CPU_PC, pc);
3775 		udelay(1000);
3776 	}
3777 
3778 	return (i == iters) ? -EBUSY : 0;
3779 }
3780 
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784 	const struct tg3_firmware_hdr *fw_hdr;
3785 	int err;
3786 
3787 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788 
3789 	/* Firmware blob starts with version numbers, followed by
3790 	   start address and length. We are setting complete length.
3791 	   length = end_address_of_bss - start_address_of_text.
3792 	   Remainder is the blob to be loaded contiguously
3793 	   from start address. */
3794 
3795 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 				    fw_hdr);
3798 	if (err)
3799 		return err;
3800 
3801 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 				    fw_hdr);
3804 	if (err)
3805 		return err;
3806 
3807 	/* Now startup only the RX cpu. */
3808 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 				       be32_to_cpu(fw_hdr->base_addr));
3810 	if (err) {
3811 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 			   "should be %08x\n", __func__,
3813 			   tr32(RX_CPU_BASE + CPU_PC),
3814 				be32_to_cpu(fw_hdr->base_addr));
3815 		return -ENODEV;
3816 	}
3817 
3818 	tg3_rxcpu_resume(tp);
3819 
3820 	return 0;
3821 }
3822 
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825 	const int iters = 1000;
3826 	int i;
3827 	u32 val;
3828 
3829 	/* Wait for boot code to complete initialization and enter service
3830 	 * loop. It is then safe to download service patches
3831 	 */
3832 	for (i = 0; i < iters; i++) {
3833 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834 			break;
3835 
3836 		udelay(10);
3837 	}
3838 
3839 	if (i == iters) {
3840 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841 		return -EBUSY;
3842 	}
3843 
3844 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 	if (val & 0xff) {
3846 		netdev_warn(tp->dev,
3847 			    "Other patches exist. Not downloading EEE patch\n");
3848 		return -EEXIST;
3849 	}
3850 
3851 	return 0;
3852 }
3853 
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857 	struct tg3_firmware_hdr *fw_hdr;
3858 
3859 	if (!tg3_flag(tp, NO_NVRAM))
3860 		return;
3861 
3862 	if (tg3_validate_rxcpu_state(tp))
3863 		return;
3864 
3865 	if (!tp->fw)
3866 		return;
3867 
3868 	/* This firmware blob has a different format than older firmware
3869 	 * releases as given below. The main difference is we have fragmented
3870 	 * data to be written to non-contiguous locations.
3871 	 *
3872 	 * In the beginning we have a firmware header identical to other
3873 	 * firmware which consists of version, base addr and length. The length
3874 	 * here is unused and set to 0xffffffff.
3875 	 *
3876 	 * This is followed by a series of firmware fragments which are
3877 	 * individually identical to previous firmware. i.e. they have the
3878 	 * firmware header and followed by data for that fragment. The version
3879 	 * field of the individual fragment header is unused.
3880 	 */
3881 
3882 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884 		return;
3885 
3886 	if (tg3_rxcpu_pause(tp))
3887 		return;
3888 
3889 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891 
3892 	tg3_rxcpu_resume(tp);
3893 }
3894 
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898 	const struct tg3_firmware_hdr *fw_hdr;
3899 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900 	int err;
3901 
3902 	if (!tg3_flag(tp, FW_TSO))
3903 		return 0;
3904 
3905 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906 
3907 	/* Firmware blob starts with version numbers, followed by
3908 	   start address and length. We are setting complete length.
3909 	   length = end_address_of_bss - start_address_of_text.
3910 	   Remainder is the blob to be loaded contiguously
3911 	   from start address. */
3912 
3913 	cpu_scratch_size = tp->fw_len;
3914 
3915 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 		cpu_base = RX_CPU_BASE;
3917 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 	} else {
3919 		cpu_base = TX_CPU_BASE;
3920 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922 	}
3923 
3924 	err = tg3_load_firmware_cpu(tp, cpu_base,
3925 				    cpu_scratch_base, cpu_scratch_size,
3926 				    fw_hdr);
3927 	if (err)
3928 		return err;
3929 
3930 	/* Now startup the cpu. */
3931 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 				       be32_to_cpu(fw_hdr->base_addr));
3933 	if (err) {
3934 		netdev_err(tp->dev,
3935 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3936 			   __func__, tr32(cpu_base + CPU_PC),
3937 			   be32_to_cpu(fw_hdr->base_addr));
3938 		return -ENODEV;
3939 	}
3940 
3941 	tg3_resume_cpu(tp, cpu_base);
3942 	return 0;
3943 }
3944 
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947 				   int index)
3948 {
3949 	u32 addr_high, addr_low;
3950 
3951 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 		    (mac_addr[4] <<  8) | mac_addr[5]);
3954 
3955 	if (index < 4) {
3956 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 	} else {
3959 		index -= 4;
3960 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962 	}
3963 }
3964 
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968 	u32 addr_high;
3969 	int i;
3970 
3971 	for (i = 0; i < 4; i++) {
3972 		if (i == 1 && skip_mac_1)
3973 			continue;
3974 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 	}
3976 
3977 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 		for (i = 4; i < 16; i++)
3980 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 	}
3982 
3983 	addr_high = (tp->dev->dev_addr[0] +
3984 		     tp->dev->dev_addr[1] +
3985 		     tp->dev->dev_addr[2] +
3986 		     tp->dev->dev_addr[3] +
3987 		     tp->dev->dev_addr[4] +
3988 		     tp->dev->dev_addr[5]) &
3989 		TX_BACKOFF_SEED_MASK;
3990 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
3992 
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995 	/*
3996 	 * Make sure register accesses (indirect or otherwise) will function
3997 	 * correctly.
3998 	 */
3999 	pci_write_config_dword(tp->pdev,
4000 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002 
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005 	int err;
4006 
4007 	tg3_enable_register_access(tp);
4008 
4009 	err = pci_set_power_state(tp->pdev, PCI_D0);
4010 	if (!err) {
4011 		/* Switch out of Vaux if it is a NIC */
4012 		tg3_pwrsrc_switch_to_vmain(tp);
4013 	} else {
4014 		netdev_err(tp->dev, "Transition to D0 failed\n");
4015 	}
4016 
4017 	return err;
4018 }
4019 
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021 
4022 static void tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024 	u32 misc_host_ctrl;
4025 	bool device_should_wake, do_low_power;
4026 
4027 	tg3_enable_register_access(tp);
4028 
4029 	/* Restore the CLKREQ setting. */
4030 	if (tg3_flag(tp, CLKREQ_BUG))
4031 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4033 
4034 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 	tw32(TG3PCI_MISC_HOST_CTRL,
4036 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037 
4038 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 			     tg3_flag(tp, WOL_ENABLE);
4040 
4041 	if (tg3_flag(tp, USE_PHYLIB)) {
4042 		do_low_power = false;
4043 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 			struct phy_device *phydev;
4047 			u32 phyid;
4048 
4049 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050 
4051 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052 
4053 			tp->link_config.speed = phydev->speed;
4054 			tp->link_config.duplex = phydev->duplex;
4055 			tp->link_config.autoneg = phydev->autoneg;
4056 			ethtool_convert_link_mode_to_legacy_u32(
4057 				&tp->link_config.advertising,
4058 				phydev->advertising);
4059 
4060 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 					 advertising);
4063 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 					 advertising);
4065 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 					 advertising);
4067 
4068 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 							 advertising);
4072 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 							 advertising);
4074 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 							 advertising);
4076 				} else {
4077 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 							 advertising);
4079 				}
4080 			}
4081 
4082 			linkmode_copy(phydev->advertising, advertising);
4083 			phy_start_aneg(phydev);
4084 
4085 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 			if (phyid != PHY_ID_BCMAC131) {
4087 				phyid &= PHY_BCM_OUI_MASK;
4088 				if (phyid == PHY_BCM_OUI_1 ||
4089 				    phyid == PHY_BCM_OUI_2 ||
4090 				    phyid == PHY_BCM_OUI_3)
4091 					do_low_power = true;
4092 			}
4093 		}
4094 	} else {
4095 		do_low_power = true;
4096 
4097 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099 
4100 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 			tg3_setup_phy(tp, false);
4102 	}
4103 
4104 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 		u32 val;
4106 
4107 		val = tr32(GRC_VCPU_EXT_CTRL);
4108 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 		int i;
4111 		u32 val;
4112 
4113 		for (i = 0; i < 200; i++) {
4114 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 				break;
4117 			msleep(1);
4118 		}
4119 	}
4120 	if (tg3_flag(tp, WOL_CAP))
4121 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 						     WOL_DRV_STATE_SHUTDOWN |
4123 						     WOL_DRV_WOL |
4124 						     WOL_SET_MAGIC_PKT);
4125 
4126 	if (device_should_wake) {
4127 		u32 mac_mode;
4128 
4129 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 			if (do_low_power &&
4131 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 				tg3_phy_auxctl_write(tp,
4133 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 				udelay(40);
4138 			}
4139 
4140 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 			else if (tp->phy_flags &
4143 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 				if (tp->link_config.active_speed == SPEED_1000)
4145 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 				else
4147 					mac_mode = MAC_MODE_PORT_MODE_MII;
4148 			} else
4149 				mac_mode = MAC_MODE_PORT_MODE_MII;
4150 
4151 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 					     SPEED_100 : SPEED_10;
4155 				if (tg3_5700_link_polarity(tp, speed))
4156 					mac_mode |= MAC_MODE_LINK_POLARITY;
4157 				else
4158 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 			}
4160 		} else {
4161 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 		}
4163 
4164 		if (!tg3_flag(tp, 5750_PLUS))
4165 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4166 
4167 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171 
4172 		if (tg3_flag(tp, ENABLE_APE))
4173 			mac_mode |= MAC_MODE_APE_TX_EN |
4174 				    MAC_MODE_APE_RX_EN |
4175 				    MAC_MODE_TDE_ENABLE;
4176 
4177 		tw32_f(MAC_MODE, mac_mode);
4178 		udelay(100);
4179 
4180 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 		udelay(10);
4182 	}
4183 
4184 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 		u32 base_val;
4188 
4189 		base_val = tp->pci_clock_ctrl;
4190 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 			     CLOCK_CTRL_TXCLK_DISABLE);
4192 
4193 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 	} else if (tg3_flag(tp, 5780_CLASS) ||
4196 		   tg3_flag(tp, CPMU_PRESENT) ||
4197 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 		/* do nothing */
4199 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 		u32 newbits1, newbits2;
4201 
4202 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 				    CLOCK_CTRL_TXCLK_DISABLE |
4206 				    CLOCK_CTRL_ALTCLK);
4207 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 		} else if (tg3_flag(tp, 5705_PLUS)) {
4209 			newbits1 = CLOCK_CTRL_625_CORE;
4210 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 		} else {
4212 			newbits1 = CLOCK_CTRL_ALTCLK;
4213 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 		}
4215 
4216 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 			    40);
4218 
4219 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 			    40);
4221 
4222 		if (!tg3_flag(tp, 5705_PLUS)) {
4223 			u32 newbits3;
4224 
4225 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 					    CLOCK_CTRL_TXCLK_DISABLE |
4229 					    CLOCK_CTRL_44MHZ_CORE);
4230 			} else {
4231 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 			}
4233 
4234 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 				    tp->pci_clock_ctrl | newbits3, 40);
4236 		}
4237 	}
4238 
4239 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 		tg3_power_down_phy(tp, do_low_power);
4241 
4242 	tg3_frob_aux_power(tp, true);
4243 
4244 	/* Workaround for unstable PLL clock */
4245 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 		u32 val = tr32(0x7d00);
4249 
4250 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 		tw32(0x7d00, val);
4252 		if (!tg3_flag(tp, ENABLE_ASF)) {
4253 			int err;
4254 
4255 			err = tg3_nvram_lock(tp);
4256 			tg3_halt_cpu(tp, RX_CPU_BASE);
4257 			if (!err)
4258 				tg3_nvram_unlock(tp);
4259 		}
4260 	}
4261 
4262 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263 
4264 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265 
4266 	return;
4267 }
4268 
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 	pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274 
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 	case MII_TG3_AUX_STAT_10HALF:
4279 		*speed = SPEED_10;
4280 		*duplex = DUPLEX_HALF;
4281 		break;
4282 
4283 	case MII_TG3_AUX_STAT_10FULL:
4284 		*speed = SPEED_10;
4285 		*duplex = DUPLEX_FULL;
4286 		break;
4287 
4288 	case MII_TG3_AUX_STAT_100HALF:
4289 		*speed = SPEED_100;
4290 		*duplex = DUPLEX_HALF;
4291 		break;
4292 
4293 	case MII_TG3_AUX_STAT_100FULL:
4294 		*speed = SPEED_100;
4295 		*duplex = DUPLEX_FULL;
4296 		break;
4297 
4298 	case MII_TG3_AUX_STAT_1000HALF:
4299 		*speed = SPEED_1000;
4300 		*duplex = DUPLEX_HALF;
4301 		break;
4302 
4303 	case MII_TG3_AUX_STAT_1000FULL:
4304 		*speed = SPEED_1000;
4305 		*duplex = DUPLEX_FULL;
4306 		break;
4307 
4308 	default:
4309 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 				 SPEED_10;
4312 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 				  DUPLEX_HALF;
4314 			break;
4315 		}
4316 		*speed = SPEED_UNKNOWN;
4317 		*duplex = DUPLEX_UNKNOWN;
4318 		break;
4319 	}
4320 }
4321 
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 	int err = 0;
4325 	u32 val, new_adv;
4326 
4327 	new_adv = ADVERTISE_CSMA;
4328 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 	new_adv |= mii_advertise_flowctrl(flowctrl);
4330 
4331 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 	if (err)
4333 		goto done;
4334 
4335 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337 
4338 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341 
4342 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 		if (err)
4344 			goto done;
4345 	}
4346 
4347 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 		goto done;
4349 
4350 	tw32(TG3_CPMU_EEE_MODE,
4351 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352 
4353 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 	if (!err) {
4355 		u32 err2;
4356 
4357 		if (!tp->eee.eee_enabled)
4358 			val = 0;
4359 		else
4360 			val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4361 
4362 		mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4363 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4364 		if (err)
4365 			val = 0;
4366 
4367 		switch (tg3_asic_rev(tp)) {
4368 		case ASIC_REV_5717:
4369 		case ASIC_REV_57765:
4370 		case ASIC_REV_57766:
4371 		case ASIC_REV_5719:
4372 			/* If we advertised any eee advertisements above... */
4373 			if (val)
4374 				val = MII_TG3_DSP_TAP26_ALNOKO |
4375 				      MII_TG3_DSP_TAP26_RMRXSTO |
4376 				      MII_TG3_DSP_TAP26_OPCSINPT;
4377 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4378 			fallthrough;
4379 		case ASIC_REV_5720:
4380 		case ASIC_REV_5762:
4381 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4382 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4383 						 MII_TG3_DSP_CH34TP2_HIBW01);
4384 		}
4385 
4386 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4387 		if (!err)
4388 			err = err2;
4389 	}
4390 
4391 done:
4392 	return err;
4393 }
4394 
4395 static void tg3_phy_copper_begin(struct tg3 *tp)
4396 {
4397 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4398 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4399 		u32 adv, fc;
4400 
4401 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4402 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4403 			adv = ADVERTISED_10baseT_Half |
4404 			      ADVERTISED_10baseT_Full;
4405 			if (tg3_flag(tp, WOL_SPEED_100MB))
4406 				adv |= ADVERTISED_100baseT_Half |
4407 				       ADVERTISED_100baseT_Full;
4408 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4409 				if (!(tp->phy_flags &
4410 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4411 					adv |= ADVERTISED_1000baseT_Half;
4412 				adv |= ADVERTISED_1000baseT_Full;
4413 			}
4414 
4415 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4416 		} else {
4417 			adv = tp->link_config.advertising;
4418 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4419 				adv &= ~(ADVERTISED_1000baseT_Half |
4420 					 ADVERTISED_1000baseT_Full);
4421 
4422 			fc = tp->link_config.flowctrl;
4423 		}
4424 
4425 		tg3_phy_autoneg_cfg(tp, adv, fc);
4426 
4427 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4428 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4429 			/* Normally during power down we want to autonegotiate
4430 			 * the lowest possible speed for WOL. However, to avoid
4431 			 * link flap, we leave it untouched.
4432 			 */
4433 			return;
4434 		}
4435 
4436 		tg3_writephy(tp, MII_BMCR,
4437 			     BMCR_ANENABLE | BMCR_ANRESTART);
4438 	} else {
4439 		int i;
4440 		u32 bmcr, orig_bmcr;
4441 
4442 		tp->link_config.active_speed = tp->link_config.speed;
4443 		tp->link_config.active_duplex = tp->link_config.duplex;
4444 
4445 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4446 			/* With autoneg disabled, 5715 only links up when the
4447 			 * advertisement register has the configured speed
4448 			 * enabled.
4449 			 */
4450 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4451 		}
4452 
4453 		bmcr = 0;
4454 		switch (tp->link_config.speed) {
4455 		default:
4456 		case SPEED_10:
4457 			break;
4458 
4459 		case SPEED_100:
4460 			bmcr |= BMCR_SPEED100;
4461 			break;
4462 
4463 		case SPEED_1000:
4464 			bmcr |= BMCR_SPEED1000;
4465 			break;
4466 		}
4467 
4468 		if (tp->link_config.duplex == DUPLEX_FULL)
4469 			bmcr |= BMCR_FULLDPLX;
4470 
4471 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4472 		    (bmcr != orig_bmcr)) {
4473 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4474 			for (i = 0; i < 1500; i++) {
4475 				u32 tmp;
4476 
4477 				udelay(10);
4478 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4479 				    tg3_readphy(tp, MII_BMSR, &tmp))
4480 					continue;
4481 				if (!(tmp & BMSR_LSTATUS)) {
4482 					udelay(40);
4483 					break;
4484 				}
4485 			}
4486 			tg3_writephy(tp, MII_BMCR, bmcr);
4487 			udelay(40);
4488 		}
4489 	}
4490 }
4491 
4492 static int tg3_phy_pull_config(struct tg3 *tp)
4493 {
4494 	int err;
4495 	u32 val;
4496 
4497 	err = tg3_readphy(tp, MII_BMCR, &val);
4498 	if (err)
4499 		goto done;
4500 
4501 	if (!(val & BMCR_ANENABLE)) {
4502 		tp->link_config.autoneg = AUTONEG_DISABLE;
4503 		tp->link_config.advertising = 0;
4504 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4505 
4506 		err = -EIO;
4507 
4508 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4509 		case 0:
4510 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511 				goto done;
4512 
4513 			tp->link_config.speed = SPEED_10;
4514 			break;
4515 		case BMCR_SPEED100:
4516 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517 				goto done;
4518 
4519 			tp->link_config.speed = SPEED_100;
4520 			break;
4521 		case BMCR_SPEED1000:
4522 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4523 				tp->link_config.speed = SPEED_1000;
4524 				break;
4525 			}
4526 			fallthrough;
4527 		default:
4528 			goto done;
4529 		}
4530 
4531 		if (val & BMCR_FULLDPLX)
4532 			tp->link_config.duplex = DUPLEX_FULL;
4533 		else
4534 			tp->link_config.duplex = DUPLEX_HALF;
4535 
4536 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4537 
4538 		err = 0;
4539 		goto done;
4540 	}
4541 
4542 	tp->link_config.autoneg = AUTONEG_ENABLE;
4543 	tp->link_config.advertising = ADVERTISED_Autoneg;
4544 	tg3_flag_set(tp, PAUSE_AUTONEG);
4545 
4546 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4547 		u32 adv;
4548 
4549 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4550 		if (err)
4551 			goto done;
4552 
4553 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4554 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4555 
4556 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4557 	} else {
4558 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4559 	}
4560 
4561 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4562 		u32 adv;
4563 
4564 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4565 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4566 			if (err)
4567 				goto done;
4568 
4569 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4570 		} else {
4571 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572 			if (err)
4573 				goto done;
4574 
4575 			adv = tg3_decode_flowctrl_1000X(val);
4576 			tp->link_config.flowctrl = adv;
4577 
4578 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4579 			adv = mii_adv_to_ethtool_adv_x(val);
4580 		}
4581 
4582 		tp->link_config.advertising |= adv;
4583 	}
4584 
4585 done:
4586 	return err;
4587 }
4588 
4589 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4590 {
4591 	int err;
4592 
4593 	/* Turn off tap power management. */
4594 	/* Set Extended packet length bit */
4595 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4596 
4597 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4598 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4599 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4600 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4601 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4602 
4603 	udelay(40);
4604 
4605 	return err;
4606 }
4607 
4608 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4609 {
4610 	struct ethtool_keee eee = {};
4611 
4612 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4613 		return true;
4614 
4615 	tg3_eee_pull_config(tp, &eee);
4616 
4617 	if (tp->eee.eee_enabled) {
4618 		if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4619 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4620 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4621 			return false;
4622 	} else {
4623 		/* EEE is disabled but we're advertising */
4624 		if (!linkmode_empty(eee.advertised))
4625 			return false;
4626 	}
4627 
4628 	return true;
4629 }
4630 
4631 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4632 {
4633 	u32 advmsk, tgtadv, advertising;
4634 
4635 	advertising = tp->link_config.advertising;
4636 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4637 
4638 	advmsk = ADVERTISE_ALL;
4639 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4640 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4641 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4642 	}
4643 
4644 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4645 		return false;
4646 
4647 	if ((*lcladv & advmsk) != tgtadv)
4648 		return false;
4649 
4650 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4651 		u32 tg3_ctrl;
4652 
4653 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4654 
4655 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4656 			return false;
4657 
4658 		if (tgtadv &&
4659 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4660 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4661 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4662 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4663 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4664 		} else {
4665 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4666 		}
4667 
4668 		if (tg3_ctrl != tgtadv)
4669 			return false;
4670 	}
4671 
4672 	return true;
4673 }
4674 
4675 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4676 {
4677 	u32 lpeth = 0;
4678 
4679 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4680 		u32 val;
4681 
4682 		if (tg3_readphy(tp, MII_STAT1000, &val))
4683 			return false;
4684 
4685 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4686 	}
4687 
4688 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4689 		return false;
4690 
4691 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4692 	tp->link_config.rmt_adv = lpeth;
4693 
4694 	return true;
4695 }
4696 
4697 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4698 {
4699 	if (curr_link_up != tp->link_up) {
4700 		if (curr_link_up) {
4701 			netif_carrier_on(tp->dev);
4702 		} else {
4703 			netif_carrier_off(tp->dev);
4704 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4705 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706 		}
4707 
4708 		tg3_link_report(tp);
4709 		return true;
4710 	}
4711 
4712 	return false;
4713 }
4714 
4715 static void tg3_clear_mac_status(struct tg3 *tp)
4716 {
4717 	tw32(MAC_EVENT, 0);
4718 
4719 	tw32_f(MAC_STATUS,
4720 	       MAC_STATUS_SYNC_CHANGED |
4721 	       MAC_STATUS_CFG_CHANGED |
4722 	       MAC_STATUS_MI_COMPLETION |
4723 	       MAC_STATUS_LNKSTATE_CHANGED);
4724 	udelay(40);
4725 }
4726 
4727 static void tg3_setup_eee(struct tg3 *tp)
4728 {
4729 	u32 val;
4730 
4731 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4732 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4733 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4734 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4735 
4736 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4737 
4738 	tw32_f(TG3_CPMU_EEE_CTRL,
4739 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4740 
4741 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4742 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4743 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4744 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4745 
4746 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4747 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4748 
4749 	if (tg3_flag(tp, ENABLE_APE))
4750 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4751 
4752 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4753 
4754 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4755 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4756 	       (tp->eee.tx_lpi_timer & 0xffff));
4757 
4758 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4759 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4760 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4761 }
4762 
4763 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4764 {
4765 	bool current_link_up;
4766 	u32 bmsr, val;
4767 	u32 lcl_adv, rmt_adv;
4768 	u32 current_speed;
4769 	u8 current_duplex;
4770 	int i, err;
4771 
4772 	tg3_clear_mac_status(tp);
4773 
4774 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4775 		tw32_f(MAC_MI_MODE,
4776 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4777 		udelay(80);
4778 	}
4779 
4780 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4781 
4782 	/* Some third-party PHYs need to be reset on link going
4783 	 * down.
4784 	 */
4785 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4786 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4787 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4788 	    tp->link_up) {
4789 		tg3_readphy(tp, MII_BMSR, &bmsr);
4790 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4791 		    !(bmsr & BMSR_LSTATUS))
4792 			force_reset = true;
4793 	}
4794 	if (force_reset)
4795 		tg3_phy_reset(tp);
4796 
4797 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4798 		tg3_readphy(tp, MII_BMSR, &bmsr);
4799 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4800 		    !tg3_flag(tp, INIT_COMPLETE))
4801 			bmsr = 0;
4802 
4803 		if (!(bmsr & BMSR_LSTATUS)) {
4804 			err = tg3_init_5401phy_dsp(tp);
4805 			if (err)
4806 				return err;
4807 
4808 			tg3_readphy(tp, MII_BMSR, &bmsr);
4809 			for (i = 0; i < 1000; i++) {
4810 				udelay(10);
4811 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 				    (bmsr & BMSR_LSTATUS)) {
4813 					udelay(40);
4814 					break;
4815 				}
4816 			}
4817 
4818 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4819 			    TG3_PHY_REV_BCM5401_B0 &&
4820 			    !(bmsr & BMSR_LSTATUS) &&
4821 			    tp->link_config.active_speed == SPEED_1000) {
4822 				err = tg3_phy_reset(tp);
4823 				if (!err)
4824 					err = tg3_init_5401phy_dsp(tp);
4825 				if (err)
4826 					return err;
4827 			}
4828 		}
4829 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4830 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4831 		/* 5701 {A0,B0} CRC bug workaround */
4832 		tg3_writephy(tp, 0x15, 0x0a75);
4833 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4835 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4836 	}
4837 
4838 	/* Clear pending interrupts... */
4839 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4840 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841 
4842 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4843 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4844 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4845 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4846 
4847 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4848 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4849 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4850 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4851 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4852 		else
4853 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4854 	}
4855 
4856 	current_link_up = false;
4857 	current_speed = SPEED_UNKNOWN;
4858 	current_duplex = DUPLEX_UNKNOWN;
4859 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4860 	tp->link_config.rmt_adv = 0;
4861 
4862 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4863 		err = tg3_phy_auxctl_read(tp,
4864 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4865 					  &val);
4866 		if (!err && !(val & (1 << 10))) {
4867 			tg3_phy_auxctl_write(tp,
4868 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4869 					     val | (1 << 10));
4870 			goto relink;
4871 		}
4872 	}
4873 
4874 	bmsr = 0;
4875 	for (i = 0; i < 100; i++) {
4876 		tg3_readphy(tp, MII_BMSR, &bmsr);
4877 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4878 		    (bmsr & BMSR_LSTATUS))
4879 			break;
4880 		udelay(40);
4881 	}
4882 
4883 	if (bmsr & BMSR_LSTATUS) {
4884 		u32 aux_stat, bmcr;
4885 
4886 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4887 		for (i = 0; i < 2000; i++) {
4888 			udelay(10);
4889 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4890 			    aux_stat)
4891 				break;
4892 		}
4893 
4894 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4895 					     &current_speed,
4896 					     &current_duplex);
4897 
4898 		bmcr = 0;
4899 		for (i = 0; i < 200; i++) {
4900 			tg3_readphy(tp, MII_BMCR, &bmcr);
4901 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4902 				continue;
4903 			if (bmcr && bmcr != 0x7fff)
4904 				break;
4905 			udelay(10);
4906 		}
4907 
4908 		lcl_adv = 0;
4909 		rmt_adv = 0;
4910 
4911 		tp->link_config.active_speed = current_speed;
4912 		tp->link_config.active_duplex = current_duplex;
4913 
4914 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4915 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4916 
4917 			if ((bmcr & BMCR_ANENABLE) &&
4918 			    eee_config_ok &&
4919 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4920 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4921 				current_link_up = true;
4922 
4923 			/* EEE settings changes take effect only after a phy
4924 			 * reset.  If we have skipped a reset due to Link Flap
4925 			 * Avoidance being enabled, do it now.
4926 			 */
4927 			if (!eee_config_ok &&
4928 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4929 			    !force_reset) {
4930 				tg3_setup_eee(tp);
4931 				tg3_phy_reset(tp);
4932 			}
4933 		} else {
4934 			if (!(bmcr & BMCR_ANENABLE) &&
4935 			    tp->link_config.speed == current_speed &&
4936 			    tp->link_config.duplex == current_duplex) {
4937 				current_link_up = true;
4938 			}
4939 		}
4940 
4941 		if (current_link_up &&
4942 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4943 			u32 reg, bit;
4944 
4945 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4946 				reg = MII_TG3_FET_GEN_STAT;
4947 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4948 			} else {
4949 				reg = MII_TG3_EXT_STAT;
4950 				bit = MII_TG3_EXT_STAT_MDIX;
4951 			}
4952 
4953 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4954 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4955 
4956 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4957 		}
4958 	}
4959 
4960 relink:
4961 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4962 		tg3_phy_copper_begin(tp);
4963 
4964 		if (tg3_flag(tp, ROBOSWITCH)) {
4965 			current_link_up = true;
4966 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4967 			current_speed = SPEED_1000;
4968 			current_duplex = DUPLEX_FULL;
4969 			tp->link_config.active_speed = current_speed;
4970 			tp->link_config.active_duplex = current_duplex;
4971 		}
4972 
4973 		tg3_readphy(tp, MII_BMSR, &bmsr);
4974 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4975 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4976 			current_link_up = true;
4977 	}
4978 
4979 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4980 	if (current_link_up) {
4981 		if (tp->link_config.active_speed == SPEED_100 ||
4982 		    tp->link_config.active_speed == SPEED_10)
4983 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4984 		else
4985 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4986 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4987 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4988 	else
4989 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4990 
4991 	/* In order for the 5750 core in BCM4785 chip to work properly
4992 	 * in RGMII mode, the Led Control Register must be set up.
4993 	 */
4994 	if (tg3_flag(tp, RGMII_MODE)) {
4995 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4996 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4997 
4998 		if (tp->link_config.active_speed == SPEED_10)
4999 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5000 		else if (tp->link_config.active_speed == SPEED_100)
5001 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5002 				     LED_CTRL_100MBPS_ON);
5003 		else if (tp->link_config.active_speed == SPEED_1000)
5004 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005 				     LED_CTRL_1000MBPS_ON);
5006 
5007 		tw32(MAC_LED_CTRL, led_ctrl);
5008 		udelay(40);
5009 	}
5010 
5011 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5013 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014 
5015 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5016 		if (current_link_up &&
5017 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5018 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5019 		else
5020 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5021 	}
5022 
5023 	/* ??? Without this setting Netgear GA302T PHY does not
5024 	 * ??? send/receive packets...
5025 	 */
5026 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5027 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5028 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5029 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5030 		udelay(80);
5031 	}
5032 
5033 	tw32_f(MAC_MODE, tp->mac_mode);
5034 	udelay(40);
5035 
5036 	tg3_phy_eee_adjust(tp, current_link_up);
5037 
5038 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5039 		/* Polled via timer. */
5040 		tw32_f(MAC_EVENT, 0);
5041 	} else {
5042 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5043 	}
5044 	udelay(40);
5045 
5046 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5047 	    current_link_up &&
5048 	    tp->link_config.active_speed == SPEED_1000 &&
5049 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5050 		udelay(120);
5051 		tw32_f(MAC_STATUS,
5052 		     (MAC_STATUS_SYNC_CHANGED |
5053 		      MAC_STATUS_CFG_CHANGED));
5054 		udelay(40);
5055 		tg3_write_mem(tp,
5056 			      NIC_SRAM_FIRMWARE_MBOX,
5057 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5058 	}
5059 
5060 	/* Prevent send BD corruption. */
5061 	if (tg3_flag(tp, CLKREQ_BUG)) {
5062 		if (tp->link_config.active_speed == SPEED_100 ||
5063 		    tp->link_config.active_speed == SPEED_10)
5064 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5065 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5066 		else
5067 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5068 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 	}
5070 
5071 	tg3_test_and_report_link_chg(tp, current_link_up);
5072 
5073 	return 0;
5074 }
5075 
5076 struct tg3_fiber_aneginfo {
5077 	int state;
5078 #define ANEG_STATE_UNKNOWN		0
5079 #define ANEG_STATE_AN_ENABLE		1
5080 #define ANEG_STATE_RESTART_INIT		2
5081 #define ANEG_STATE_RESTART		3
5082 #define ANEG_STATE_DISABLE_LINK_OK	4
5083 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5084 #define ANEG_STATE_ABILITY_DETECT	6
5085 #define ANEG_STATE_ACK_DETECT_INIT	7
5086 #define ANEG_STATE_ACK_DETECT		8
5087 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5088 #define ANEG_STATE_COMPLETE_ACK		10
5089 #define ANEG_STATE_IDLE_DETECT_INIT	11
5090 #define ANEG_STATE_IDLE_DETECT		12
5091 #define ANEG_STATE_LINK_OK		13
5092 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5093 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5094 
5095 	u32 flags;
5096 #define MR_AN_ENABLE		0x00000001
5097 #define MR_RESTART_AN		0x00000002
5098 #define MR_AN_COMPLETE		0x00000004
5099 #define MR_PAGE_RX		0x00000008
5100 #define MR_NP_LOADED		0x00000010
5101 #define MR_TOGGLE_TX		0x00000020
5102 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5103 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5104 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5105 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5106 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5107 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5108 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5109 #define MR_TOGGLE_RX		0x00002000
5110 #define MR_NP_RX		0x00004000
5111 
5112 #define MR_LINK_OK		0x80000000
5113 
5114 	unsigned long link_time, cur_time;
5115 
5116 	u32 ability_match_cfg;
5117 	int ability_match_count;
5118 
5119 	char ability_match, idle_match, ack_match;
5120 
5121 	u32 txconfig, rxconfig;
5122 #define ANEG_CFG_NP		0x00000080
5123 #define ANEG_CFG_ACK		0x00000040
5124 #define ANEG_CFG_RF2		0x00000020
5125 #define ANEG_CFG_RF1		0x00000010
5126 #define ANEG_CFG_PS2		0x00000001
5127 #define ANEG_CFG_PS1		0x00008000
5128 #define ANEG_CFG_HD		0x00004000
5129 #define ANEG_CFG_FD		0x00002000
5130 #define ANEG_CFG_INVAL		0x00001f06
5131 
5132 };
5133 #define ANEG_OK		0
5134 #define ANEG_DONE	1
5135 #define ANEG_TIMER_ENAB	2
5136 #define ANEG_FAILED	-1
5137 
5138 #define ANEG_STATE_SETTLE_TIME	10000
5139 
5140 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5141 				   struct tg3_fiber_aneginfo *ap)
5142 {
5143 	u16 flowctrl;
5144 	unsigned long delta;
5145 	u32 rx_cfg_reg;
5146 	int ret;
5147 
5148 	if (ap->state == ANEG_STATE_UNKNOWN) {
5149 		ap->rxconfig = 0;
5150 		ap->link_time = 0;
5151 		ap->cur_time = 0;
5152 		ap->ability_match_cfg = 0;
5153 		ap->ability_match_count = 0;
5154 		ap->ability_match = 0;
5155 		ap->idle_match = 0;
5156 		ap->ack_match = 0;
5157 	}
5158 	ap->cur_time++;
5159 
5160 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5161 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5162 
5163 		if (rx_cfg_reg != ap->ability_match_cfg) {
5164 			ap->ability_match_cfg = rx_cfg_reg;
5165 			ap->ability_match = 0;
5166 			ap->ability_match_count = 0;
5167 		} else {
5168 			if (++ap->ability_match_count > 1) {
5169 				ap->ability_match = 1;
5170 				ap->ability_match_cfg = rx_cfg_reg;
5171 			}
5172 		}
5173 		if (rx_cfg_reg & ANEG_CFG_ACK)
5174 			ap->ack_match = 1;
5175 		else
5176 			ap->ack_match = 0;
5177 
5178 		ap->idle_match = 0;
5179 	} else {
5180 		ap->idle_match = 1;
5181 		ap->ability_match_cfg = 0;
5182 		ap->ability_match_count = 0;
5183 		ap->ability_match = 0;
5184 		ap->ack_match = 0;
5185 
5186 		rx_cfg_reg = 0;
5187 	}
5188 
5189 	ap->rxconfig = rx_cfg_reg;
5190 	ret = ANEG_OK;
5191 
5192 	switch (ap->state) {
5193 	case ANEG_STATE_UNKNOWN:
5194 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5195 			ap->state = ANEG_STATE_AN_ENABLE;
5196 
5197 		fallthrough;
5198 	case ANEG_STATE_AN_ENABLE:
5199 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5200 		if (ap->flags & MR_AN_ENABLE) {
5201 			ap->link_time = 0;
5202 			ap->cur_time = 0;
5203 			ap->ability_match_cfg = 0;
5204 			ap->ability_match_count = 0;
5205 			ap->ability_match = 0;
5206 			ap->idle_match = 0;
5207 			ap->ack_match = 0;
5208 
5209 			ap->state = ANEG_STATE_RESTART_INIT;
5210 		} else {
5211 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5212 		}
5213 		break;
5214 
5215 	case ANEG_STATE_RESTART_INIT:
5216 		ap->link_time = ap->cur_time;
5217 		ap->flags &= ~(MR_NP_LOADED);
5218 		ap->txconfig = 0;
5219 		tw32(MAC_TX_AUTO_NEG, 0);
5220 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5221 		tw32_f(MAC_MODE, tp->mac_mode);
5222 		udelay(40);
5223 
5224 		ret = ANEG_TIMER_ENAB;
5225 		ap->state = ANEG_STATE_RESTART;
5226 
5227 		fallthrough;
5228 	case ANEG_STATE_RESTART:
5229 		delta = ap->cur_time - ap->link_time;
5230 		if (delta > ANEG_STATE_SETTLE_TIME)
5231 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5232 		else
5233 			ret = ANEG_TIMER_ENAB;
5234 		break;
5235 
5236 	case ANEG_STATE_DISABLE_LINK_OK:
5237 		ret = ANEG_DONE;
5238 		break;
5239 
5240 	case ANEG_STATE_ABILITY_DETECT_INIT:
5241 		ap->flags &= ~(MR_TOGGLE_TX);
5242 		ap->txconfig = ANEG_CFG_FD;
5243 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5244 		if (flowctrl & ADVERTISE_1000XPAUSE)
5245 			ap->txconfig |= ANEG_CFG_PS1;
5246 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5247 			ap->txconfig |= ANEG_CFG_PS2;
5248 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5249 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5250 		tw32_f(MAC_MODE, tp->mac_mode);
5251 		udelay(40);
5252 
5253 		ap->state = ANEG_STATE_ABILITY_DETECT;
5254 		break;
5255 
5256 	case ANEG_STATE_ABILITY_DETECT:
5257 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5258 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5259 		break;
5260 
5261 	case ANEG_STATE_ACK_DETECT_INIT:
5262 		ap->txconfig |= ANEG_CFG_ACK;
5263 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265 		tw32_f(MAC_MODE, tp->mac_mode);
5266 		udelay(40);
5267 
5268 		ap->state = ANEG_STATE_ACK_DETECT;
5269 
5270 		fallthrough;
5271 	case ANEG_STATE_ACK_DETECT:
5272 		if (ap->ack_match != 0) {
5273 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5274 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5275 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5276 			} else {
5277 				ap->state = ANEG_STATE_AN_ENABLE;
5278 			}
5279 		} else if (ap->ability_match != 0 &&
5280 			   ap->rxconfig == 0) {
5281 			ap->state = ANEG_STATE_AN_ENABLE;
5282 		}
5283 		break;
5284 
5285 	case ANEG_STATE_COMPLETE_ACK_INIT:
5286 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5287 			ret = ANEG_FAILED;
5288 			break;
5289 		}
5290 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5291 			       MR_LP_ADV_HALF_DUPLEX |
5292 			       MR_LP_ADV_SYM_PAUSE |
5293 			       MR_LP_ADV_ASYM_PAUSE |
5294 			       MR_LP_ADV_REMOTE_FAULT1 |
5295 			       MR_LP_ADV_REMOTE_FAULT2 |
5296 			       MR_LP_ADV_NEXT_PAGE |
5297 			       MR_TOGGLE_RX |
5298 			       MR_NP_RX);
5299 		if (ap->rxconfig & ANEG_CFG_FD)
5300 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5301 		if (ap->rxconfig & ANEG_CFG_HD)
5302 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5303 		if (ap->rxconfig & ANEG_CFG_PS1)
5304 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5305 		if (ap->rxconfig & ANEG_CFG_PS2)
5306 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5307 		if (ap->rxconfig & ANEG_CFG_RF1)
5308 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5309 		if (ap->rxconfig & ANEG_CFG_RF2)
5310 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5311 		if (ap->rxconfig & ANEG_CFG_NP)
5312 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5313 
5314 		ap->link_time = ap->cur_time;
5315 
5316 		ap->flags ^= (MR_TOGGLE_TX);
5317 		if (ap->rxconfig & 0x0008)
5318 			ap->flags |= MR_TOGGLE_RX;
5319 		if (ap->rxconfig & ANEG_CFG_NP)
5320 			ap->flags |= MR_NP_RX;
5321 		ap->flags |= MR_PAGE_RX;
5322 
5323 		ap->state = ANEG_STATE_COMPLETE_ACK;
5324 		ret = ANEG_TIMER_ENAB;
5325 		break;
5326 
5327 	case ANEG_STATE_COMPLETE_ACK:
5328 		if (ap->ability_match != 0 &&
5329 		    ap->rxconfig == 0) {
5330 			ap->state = ANEG_STATE_AN_ENABLE;
5331 			break;
5332 		}
5333 		delta = ap->cur_time - ap->link_time;
5334 		if (delta > ANEG_STATE_SETTLE_TIME) {
5335 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5336 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5337 			} else {
5338 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5339 				    !(ap->flags & MR_NP_RX)) {
5340 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5341 				} else {
5342 					ret = ANEG_FAILED;
5343 				}
5344 			}
5345 		}
5346 		break;
5347 
5348 	case ANEG_STATE_IDLE_DETECT_INIT:
5349 		ap->link_time = ap->cur_time;
5350 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5351 		tw32_f(MAC_MODE, tp->mac_mode);
5352 		udelay(40);
5353 
5354 		ap->state = ANEG_STATE_IDLE_DETECT;
5355 		ret = ANEG_TIMER_ENAB;
5356 		break;
5357 
5358 	case ANEG_STATE_IDLE_DETECT:
5359 		if (ap->ability_match != 0 &&
5360 		    ap->rxconfig == 0) {
5361 			ap->state = ANEG_STATE_AN_ENABLE;
5362 			break;
5363 		}
5364 		delta = ap->cur_time - ap->link_time;
5365 		if (delta > ANEG_STATE_SETTLE_TIME) {
5366 			/* XXX another gem from the Broadcom driver :( */
5367 			ap->state = ANEG_STATE_LINK_OK;
5368 		}
5369 		break;
5370 
5371 	case ANEG_STATE_LINK_OK:
5372 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5373 		ret = ANEG_DONE;
5374 		break;
5375 
5376 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5377 		/* ??? unimplemented */
5378 		break;
5379 
5380 	case ANEG_STATE_NEXT_PAGE_WAIT:
5381 		/* ??? unimplemented */
5382 		break;
5383 
5384 	default:
5385 		ret = ANEG_FAILED;
5386 		break;
5387 	}
5388 
5389 	return ret;
5390 }
5391 
5392 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5393 {
5394 	int res = 0;
5395 	struct tg3_fiber_aneginfo aninfo;
5396 	int status = ANEG_FAILED;
5397 	unsigned int tick;
5398 	u32 tmp;
5399 
5400 	tw32_f(MAC_TX_AUTO_NEG, 0);
5401 
5402 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5403 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5404 	udelay(40);
5405 
5406 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5407 	udelay(40);
5408 
5409 	memset(&aninfo, 0, sizeof(aninfo));
5410 	aninfo.flags |= MR_AN_ENABLE;
5411 	aninfo.state = ANEG_STATE_UNKNOWN;
5412 	aninfo.cur_time = 0;
5413 	tick = 0;
5414 	while (++tick < 195000) {
5415 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5416 		if (status == ANEG_DONE || status == ANEG_FAILED)
5417 			break;
5418 
5419 		udelay(1);
5420 	}
5421 
5422 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5423 	tw32_f(MAC_MODE, tp->mac_mode);
5424 	udelay(40);
5425 
5426 	*txflags = aninfo.txconfig;
5427 	*rxflags = aninfo.flags;
5428 
5429 	if (status == ANEG_DONE &&
5430 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5431 			     MR_LP_ADV_FULL_DUPLEX)))
5432 		res = 1;
5433 
5434 	return res;
5435 }
5436 
5437 static void tg3_init_bcm8002(struct tg3 *tp)
5438 {
5439 	u32 mac_status = tr32(MAC_STATUS);
5440 	int i;
5441 
5442 	/* Reset when initting first time or we have a link. */
5443 	if (tg3_flag(tp, INIT_COMPLETE) &&
5444 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5445 		return;
5446 
5447 	/* Set PLL lock range. */
5448 	tg3_writephy(tp, 0x16, 0x8007);
5449 
5450 	/* SW reset */
5451 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5452 
5453 	/* Wait for reset to complete. */
5454 	/* XXX schedule_timeout() ... */
5455 	for (i = 0; i < 500; i++)
5456 		udelay(10);
5457 
5458 	/* Config mode; select PMA/Ch 1 regs. */
5459 	tg3_writephy(tp, 0x10, 0x8411);
5460 
5461 	/* Enable auto-lock and comdet, select txclk for tx. */
5462 	tg3_writephy(tp, 0x11, 0x0a10);
5463 
5464 	tg3_writephy(tp, 0x18, 0x00a0);
5465 	tg3_writephy(tp, 0x16, 0x41ff);
5466 
5467 	/* Assert and deassert POR. */
5468 	tg3_writephy(tp, 0x13, 0x0400);
5469 	udelay(40);
5470 	tg3_writephy(tp, 0x13, 0x0000);
5471 
5472 	tg3_writephy(tp, 0x11, 0x0a50);
5473 	udelay(40);
5474 	tg3_writephy(tp, 0x11, 0x0a10);
5475 
5476 	/* Wait for signal to stabilize */
5477 	/* XXX schedule_timeout() ... */
5478 	for (i = 0; i < 15000; i++)
5479 		udelay(10);
5480 
5481 	/* Deselect the channel register so we can read the PHYID
5482 	 * later.
5483 	 */
5484 	tg3_writephy(tp, 0x10, 0x8011);
5485 }
5486 
5487 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5488 {
5489 	u16 flowctrl;
5490 	bool current_link_up;
5491 	u32 sg_dig_ctrl, sg_dig_status;
5492 	u32 serdes_cfg, expected_sg_dig_ctrl;
5493 	int workaround, port_a;
5494 
5495 	serdes_cfg = 0;
5496 	workaround = 0;
5497 	port_a = 1;
5498 	current_link_up = false;
5499 
5500 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5501 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5502 		workaround = 1;
5503 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504 			port_a = 0;
5505 
5506 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5507 		/* preserve bits 20-23 for voltage regulator */
5508 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509 	}
5510 
5511 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5512 
5513 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5514 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5515 			if (workaround) {
5516 				u32 val = serdes_cfg;
5517 
5518 				if (port_a)
5519 					val |= 0xc010000;
5520 				else
5521 					val |= 0x4010000;
5522 				tw32_f(MAC_SERDES_CFG, val);
5523 			}
5524 
5525 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5526 		}
5527 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5528 			tg3_setup_flow_control(tp, 0, 0);
5529 			current_link_up = true;
5530 		}
5531 		goto out;
5532 	}
5533 
5534 	/* Want auto-negotiation.  */
5535 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5536 
5537 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5538 	if (flowctrl & ADVERTISE_1000XPAUSE)
5539 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5540 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5541 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5542 
5543 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5544 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5545 		    tp->serdes_counter &&
5546 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5547 				    MAC_STATUS_RCVD_CFG)) ==
5548 		     MAC_STATUS_PCS_SYNCED)) {
5549 			tp->serdes_counter--;
5550 			current_link_up = true;
5551 			goto out;
5552 		}
5553 restart_autoneg:
5554 		if (workaround)
5555 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5556 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5557 		udelay(5);
5558 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5559 
5560 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5561 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5562 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5563 				 MAC_STATUS_SIGNAL_DET)) {
5564 		sg_dig_status = tr32(SG_DIG_STATUS);
5565 		mac_status = tr32(MAC_STATUS);
5566 
5567 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5568 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5569 			u32 local_adv = 0, remote_adv = 0;
5570 
5571 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5572 				local_adv |= ADVERTISE_1000XPAUSE;
5573 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5574 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5575 
5576 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5577 				remote_adv |= LPA_1000XPAUSE;
5578 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5579 				remote_adv |= LPA_1000XPAUSE_ASYM;
5580 
5581 			tp->link_config.rmt_adv =
5582 					   mii_adv_to_ethtool_adv_x(remote_adv);
5583 
5584 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5585 			current_link_up = true;
5586 			tp->serdes_counter = 0;
5587 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5588 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5589 			if (tp->serdes_counter)
5590 				tp->serdes_counter--;
5591 			else {
5592 				if (workaround) {
5593 					u32 val = serdes_cfg;
5594 
5595 					if (port_a)
5596 						val |= 0xc010000;
5597 					else
5598 						val |= 0x4010000;
5599 
5600 					tw32_f(MAC_SERDES_CFG, val);
5601 				}
5602 
5603 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604 				udelay(40);
5605 
5606 				/* Link parallel detection - link is up */
5607 				/* only if we have PCS_SYNC and not */
5608 				/* receiving config code words */
5609 				mac_status = tr32(MAC_STATUS);
5610 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5611 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5612 					tg3_setup_flow_control(tp, 0, 0);
5613 					current_link_up = true;
5614 					tp->phy_flags |=
5615 						TG3_PHYFLG_PARALLEL_DETECT;
5616 					tp->serdes_counter =
5617 						SERDES_PARALLEL_DET_TIMEOUT;
5618 				} else
5619 					goto restart_autoneg;
5620 			}
5621 		}
5622 	} else {
5623 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5624 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5625 	}
5626 
5627 out:
5628 	return current_link_up;
5629 }
5630 
5631 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5632 {
5633 	bool current_link_up = false;
5634 
5635 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636 		goto out;
5637 
5638 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5639 		u32 txflags, rxflags;
5640 		int i;
5641 
5642 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5643 			u32 local_adv = 0, remote_adv = 0;
5644 
5645 			if (txflags & ANEG_CFG_PS1)
5646 				local_adv |= ADVERTISE_1000XPAUSE;
5647 			if (txflags & ANEG_CFG_PS2)
5648 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5649 
5650 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5651 				remote_adv |= LPA_1000XPAUSE;
5652 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5653 				remote_adv |= LPA_1000XPAUSE_ASYM;
5654 
5655 			tp->link_config.rmt_adv =
5656 					   mii_adv_to_ethtool_adv_x(remote_adv);
5657 
5658 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5659 
5660 			current_link_up = true;
5661 		}
5662 		for (i = 0; i < 30; i++) {
5663 			udelay(20);
5664 			tw32_f(MAC_STATUS,
5665 			       (MAC_STATUS_SYNC_CHANGED |
5666 				MAC_STATUS_CFG_CHANGED));
5667 			udelay(40);
5668 			if ((tr32(MAC_STATUS) &
5669 			     (MAC_STATUS_SYNC_CHANGED |
5670 			      MAC_STATUS_CFG_CHANGED)) == 0)
5671 				break;
5672 		}
5673 
5674 		mac_status = tr32(MAC_STATUS);
5675 		if (!current_link_up &&
5676 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5677 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5678 			current_link_up = true;
5679 	} else {
5680 		tg3_setup_flow_control(tp, 0, 0);
5681 
5682 		/* Forcing 1000FD link up. */
5683 		current_link_up = true;
5684 
5685 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686 		udelay(40);
5687 
5688 		tw32_f(MAC_MODE, tp->mac_mode);
5689 		udelay(40);
5690 	}
5691 
5692 out:
5693 	return current_link_up;
5694 }
5695 
5696 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 {
5698 	u32 orig_pause_cfg;
5699 	u32 orig_active_speed;
5700 	u8 orig_active_duplex;
5701 	u32 mac_status;
5702 	bool current_link_up;
5703 	int i;
5704 
5705 	orig_pause_cfg = tp->link_config.active_flowctrl;
5706 	orig_active_speed = tp->link_config.active_speed;
5707 	orig_active_duplex = tp->link_config.active_duplex;
5708 
5709 	if (!tg3_flag(tp, HW_AUTONEG) &&
5710 	    tp->link_up &&
5711 	    tg3_flag(tp, INIT_COMPLETE)) {
5712 		mac_status = tr32(MAC_STATUS);
5713 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5714 			       MAC_STATUS_SIGNAL_DET |
5715 			       MAC_STATUS_CFG_CHANGED |
5716 			       MAC_STATUS_RCVD_CFG);
5717 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5718 				   MAC_STATUS_SIGNAL_DET)) {
5719 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5720 					    MAC_STATUS_CFG_CHANGED));
5721 			return 0;
5722 		}
5723 	}
5724 
5725 	tw32_f(MAC_TX_AUTO_NEG, 0);
5726 
5727 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5728 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5729 	tw32_f(MAC_MODE, tp->mac_mode);
5730 	udelay(40);
5731 
5732 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5733 		tg3_init_bcm8002(tp);
5734 
5735 	/* Enable link change event even when serdes polling.  */
5736 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737 	udelay(40);
5738 
5739 	tp->link_config.rmt_adv = 0;
5740 	mac_status = tr32(MAC_STATUS);
5741 
5742 	if (tg3_flag(tp, HW_AUTONEG))
5743 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5744 	else
5745 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5746 
5747 	tp->napi[0].hw_status->status =
5748 		(SD_STATUS_UPDATED |
5749 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5750 
5751 	for (i = 0; i < 100; i++) {
5752 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5753 				    MAC_STATUS_CFG_CHANGED));
5754 		udelay(5);
5755 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5756 					 MAC_STATUS_CFG_CHANGED |
5757 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5758 			break;
5759 	}
5760 
5761 	mac_status = tr32(MAC_STATUS);
5762 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5763 		current_link_up = false;
5764 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5765 		    tp->serdes_counter == 0) {
5766 			tw32_f(MAC_MODE, (tp->mac_mode |
5767 					  MAC_MODE_SEND_CONFIGS));
5768 			udelay(1);
5769 			tw32_f(MAC_MODE, tp->mac_mode);
5770 		}
5771 	}
5772 
5773 	if (current_link_up) {
5774 		tp->link_config.active_speed = SPEED_1000;
5775 		tp->link_config.active_duplex = DUPLEX_FULL;
5776 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5777 				    LED_CTRL_LNKLED_OVERRIDE |
5778 				    LED_CTRL_1000MBPS_ON));
5779 	} else {
5780 		tp->link_config.active_speed = SPEED_UNKNOWN;
5781 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5782 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5783 				    LED_CTRL_LNKLED_OVERRIDE |
5784 				    LED_CTRL_TRAFFIC_OVERRIDE));
5785 	}
5786 
5787 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5788 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5789 		if (orig_pause_cfg != now_pause_cfg ||
5790 		    orig_active_speed != tp->link_config.active_speed ||
5791 		    orig_active_duplex != tp->link_config.active_duplex)
5792 			tg3_link_report(tp);
5793 	}
5794 
5795 	return 0;
5796 }
5797 
5798 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5799 {
5800 	int err = 0;
5801 	u32 bmsr, bmcr;
5802 	u32 current_speed = SPEED_UNKNOWN;
5803 	u8 current_duplex = DUPLEX_UNKNOWN;
5804 	bool current_link_up = false;
5805 	u32 local_adv, remote_adv, sgsr;
5806 
5807 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5808 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5809 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5810 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5811 
5812 		if (force_reset)
5813 			tg3_phy_reset(tp);
5814 
5815 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5816 
5817 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5818 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5819 		} else {
5820 			current_link_up = true;
5821 			if (sgsr & SERDES_TG3_SPEED_1000) {
5822 				current_speed = SPEED_1000;
5823 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5825 				current_speed = SPEED_100;
5826 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 			} else {
5828 				current_speed = SPEED_10;
5829 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830 			}
5831 
5832 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5833 				current_duplex = DUPLEX_FULL;
5834 			else
5835 				current_duplex = DUPLEX_HALF;
5836 		}
5837 
5838 		tw32_f(MAC_MODE, tp->mac_mode);
5839 		udelay(40);
5840 
5841 		tg3_clear_mac_status(tp);
5842 
5843 		goto fiber_setup_done;
5844 	}
5845 
5846 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 	tw32_f(MAC_MODE, tp->mac_mode);
5848 	udelay(40);
5849 
5850 	tg3_clear_mac_status(tp);
5851 
5852 	if (force_reset)
5853 		tg3_phy_reset(tp);
5854 
5855 	tp->link_config.rmt_adv = 0;
5856 
5857 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5860 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5861 			bmsr |= BMSR_LSTATUS;
5862 		else
5863 			bmsr &= ~BMSR_LSTATUS;
5864 	}
5865 
5866 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5867 
5868 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5869 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5870 		/* do nothing, just check for link up at the end */
5871 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5872 		u32 adv, newadv;
5873 
5874 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5875 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5876 				 ADVERTISE_1000XPAUSE |
5877 				 ADVERTISE_1000XPSE_ASYM |
5878 				 ADVERTISE_SLCT);
5879 
5880 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5881 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5882 
5883 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5884 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5885 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5886 			tg3_writephy(tp, MII_BMCR, bmcr);
5887 
5888 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5889 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5890 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5891 
5892 			return err;
5893 		}
5894 	} else {
5895 		u32 new_bmcr;
5896 
5897 		bmcr &= ~BMCR_SPEED1000;
5898 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5899 
5900 		if (tp->link_config.duplex == DUPLEX_FULL)
5901 			new_bmcr |= BMCR_FULLDPLX;
5902 
5903 		if (new_bmcr != bmcr) {
5904 			/* BMCR_SPEED1000 is a reserved bit that needs
5905 			 * to be set on write.
5906 			 */
5907 			new_bmcr |= BMCR_SPEED1000;
5908 
5909 			/* Force a linkdown */
5910 			if (tp->link_up) {
5911 				u32 adv;
5912 
5913 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5914 				adv &= ~(ADVERTISE_1000XFULL |
5915 					 ADVERTISE_1000XHALF |
5916 					 ADVERTISE_SLCT);
5917 				tg3_writephy(tp, MII_ADVERTISE, adv);
5918 				tg3_writephy(tp, MII_BMCR, bmcr |
5919 							   BMCR_ANRESTART |
5920 							   BMCR_ANENABLE);
5921 				udelay(10);
5922 				tg3_carrier_off(tp);
5923 			}
5924 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5925 			bmcr = new_bmcr;
5926 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5929 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5930 					bmsr |= BMSR_LSTATUS;
5931 				else
5932 					bmsr &= ~BMSR_LSTATUS;
5933 			}
5934 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5935 		}
5936 	}
5937 
5938 	if (bmsr & BMSR_LSTATUS) {
5939 		current_speed = SPEED_1000;
5940 		current_link_up = true;
5941 		if (bmcr & BMCR_FULLDPLX)
5942 			current_duplex = DUPLEX_FULL;
5943 		else
5944 			current_duplex = DUPLEX_HALF;
5945 
5946 		local_adv = 0;
5947 		remote_adv = 0;
5948 
5949 		if (bmcr & BMCR_ANENABLE) {
5950 			u32 common;
5951 
5952 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5953 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5954 			common = local_adv & remote_adv;
5955 			if (common & (ADVERTISE_1000XHALF |
5956 				      ADVERTISE_1000XFULL)) {
5957 				if (common & ADVERTISE_1000XFULL)
5958 					current_duplex = DUPLEX_FULL;
5959 				else
5960 					current_duplex = DUPLEX_HALF;
5961 
5962 				tp->link_config.rmt_adv =
5963 					   mii_adv_to_ethtool_adv_x(remote_adv);
5964 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5965 				/* Link is up via parallel detect */
5966 			} else {
5967 				current_link_up = false;
5968 			}
5969 		}
5970 	}
5971 
5972 fiber_setup_done:
5973 	if (current_link_up && current_duplex == DUPLEX_FULL)
5974 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5975 
5976 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5977 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5978 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5979 
5980 	tw32_f(MAC_MODE, tp->mac_mode);
5981 	udelay(40);
5982 
5983 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5984 
5985 	tp->link_config.active_speed = current_speed;
5986 	tp->link_config.active_duplex = current_duplex;
5987 
5988 	tg3_test_and_report_link_chg(tp, current_link_up);
5989 	return err;
5990 }
5991 
5992 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5993 {
5994 	if (tp->serdes_counter) {
5995 		/* Give autoneg time to complete. */
5996 		tp->serdes_counter--;
5997 		return;
5998 	}
5999 
6000 	if (!tp->link_up &&
6001 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002 		u32 bmcr;
6003 
6004 		tg3_readphy(tp, MII_BMCR, &bmcr);
6005 		if (bmcr & BMCR_ANENABLE) {
6006 			u32 phy1, phy2;
6007 
6008 			/* Select shadow register 0x1f */
6009 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6010 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6011 
6012 			/* Select expansion interrupt status register */
6013 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6014 					 MII_TG3_DSP_EXP1_INT_STAT);
6015 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017 
6018 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6019 				/* We have signal detect and not receiving
6020 				 * config code words, link is up by parallel
6021 				 * detection.
6022 				 */
6023 
6024 				bmcr &= ~BMCR_ANENABLE;
6025 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6026 				tg3_writephy(tp, MII_BMCR, bmcr);
6027 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6028 			}
6029 		}
6030 	} else if (tp->link_up &&
6031 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6032 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033 		u32 phy2;
6034 
6035 		/* Select expansion interrupt status register */
6036 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 				 MII_TG3_DSP_EXP1_INT_STAT);
6038 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 		if (phy2 & 0x20) {
6040 			u32 bmcr;
6041 
6042 			/* Config code words received, turn on autoneg. */
6043 			tg3_readphy(tp, MII_BMCR, &bmcr);
6044 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6045 
6046 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6047 
6048 		}
6049 	}
6050 }
6051 
6052 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6053 {
6054 	u32 val;
6055 	int err;
6056 
6057 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6058 		err = tg3_setup_fiber_phy(tp, force_reset);
6059 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6060 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6061 	else
6062 		err = tg3_setup_copper_phy(tp, force_reset);
6063 
6064 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6065 		u32 scale;
6066 
6067 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6068 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6069 			scale = 65;
6070 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6071 			scale = 6;
6072 		else
6073 			scale = 12;
6074 
6075 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6076 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6077 		tw32(GRC_MISC_CFG, val);
6078 	}
6079 
6080 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6081 	      (6 << TX_LENGTHS_IPG_SHIFT);
6082 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6083 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6084 		val |= tr32(MAC_TX_LENGTHS) &
6085 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6086 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6087 
6088 	if (tp->link_config.active_speed == SPEED_1000 &&
6089 	    tp->link_config.active_duplex == DUPLEX_HALF)
6090 		tw32(MAC_TX_LENGTHS, val |
6091 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 	else
6093 		tw32(MAC_TX_LENGTHS, val |
6094 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6095 
6096 	if (!tg3_flag(tp, 5705_PLUS)) {
6097 		if (tp->link_up) {
6098 			tw32(HOSTCC_STAT_COAL_TICKS,
6099 			     tp->coal.stats_block_coalesce_usecs);
6100 		} else {
6101 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6102 		}
6103 	}
6104 
6105 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6106 		val = tr32(PCIE_PWR_MGMT_THRESH);
6107 		if (!tp->link_up)
6108 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109 			      tp->pwrmgmt_thresh;
6110 		else
6111 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6112 		tw32(PCIE_PWR_MGMT_THRESH, val);
6113 	}
6114 
6115 	return err;
6116 }
6117 
6118 /* tp->lock must be held */
6119 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6120 {
6121 	u64 stamp;
6122 
6123 	ptp_read_system_prets(sts);
6124 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6125 	ptp_read_system_postts(sts);
6126 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6127 
6128 	return stamp;
6129 }
6130 
6131 /* tp->lock must be held */
6132 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6133 {
6134 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6135 
6136 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6137 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6138 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6139 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6140 }
6141 
6142 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6143 static inline void tg3_full_unlock(struct tg3 *tp);
6144 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6145 {
6146 	struct tg3 *tp = netdev_priv(dev);
6147 
6148 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6149 
6150 	if (tg3_flag(tp, PTP_CAPABLE)) {
6151 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152 					SOF_TIMESTAMPING_RX_HARDWARE |
6153 					SOF_TIMESTAMPING_RAW_HARDWARE;
6154 	}
6155 
6156 	if (tp->ptp_clock)
6157 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6158 
6159 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6160 
6161 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6162 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6163 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6164 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6165 	return 0;
6166 }
6167 
6168 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6169 {
6170 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6171 	u64 correction;
6172 	bool neg_adj;
6173 
6174 	/* Frequency adjustment is performed using hardware with a 24 bit
6175 	 * accumulator and a programmable correction value. On each clk, the
6176 	 * correction value gets added to the accumulator and when it
6177 	 * overflows, the time counter is incremented/decremented.
6178 	 */
6179 	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6180 
6181 	tg3_full_lock(tp, 0);
6182 
6183 	if (correction)
6184 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6185 		     TG3_EAV_REF_CLK_CORRECT_EN |
6186 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6187 		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6188 	else
6189 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6190 
6191 	tg3_full_unlock(tp);
6192 
6193 	return 0;
6194 }
6195 
6196 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6197 {
6198 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6199 
6200 	tg3_full_lock(tp, 0);
6201 	tp->ptp_adjust += delta;
6202 	tg3_full_unlock(tp);
6203 
6204 	return 0;
6205 }
6206 
6207 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6208 			    struct ptp_system_timestamp *sts)
6209 {
6210 	u64 ns;
6211 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212 
6213 	tg3_full_lock(tp, 0);
6214 	ns = tg3_refclk_read(tp, sts);
6215 	ns += tp->ptp_adjust;
6216 	tg3_full_unlock(tp);
6217 
6218 	*ts = ns_to_timespec64(ns);
6219 
6220 	return 0;
6221 }
6222 
6223 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6224 			   const struct timespec64 *ts)
6225 {
6226 	u64 ns;
6227 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 
6229 	ns = timespec64_to_ns(ts);
6230 
6231 	tg3_full_lock(tp, 0);
6232 	tg3_refclk_write(tp, ns);
6233 	tp->ptp_adjust = 0;
6234 	tg3_full_unlock(tp);
6235 
6236 	return 0;
6237 }
6238 
6239 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6240 			  struct ptp_clock_request *rq, int on)
6241 {
6242 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243 	u32 clock_ctl;
6244 	int rval = 0;
6245 
6246 	switch (rq->type) {
6247 	case PTP_CLK_REQ_PEROUT:
6248 		/* Reject requests with unsupported flags */
6249 		if (rq->perout.flags)
6250 			return -EOPNOTSUPP;
6251 
6252 		if (rq->perout.index != 0)
6253 			return -EINVAL;
6254 
6255 		tg3_full_lock(tp, 0);
6256 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6258 
6259 		if (on) {
6260 			u64 nsec;
6261 
6262 			nsec = rq->perout.start.sec * 1000000000ULL +
6263 			       rq->perout.start.nsec;
6264 
6265 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 				netdev_warn(tp->dev,
6267 					    "Device supports only a one-shot timesync output, period must be 0\n");
6268 				rval = -EINVAL;
6269 				goto err_out;
6270 			}
6271 
6272 			if (nsec & (1ULL << 63)) {
6273 				netdev_warn(tp->dev,
6274 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6275 				rval = -EINVAL;
6276 				goto err_out;
6277 			}
6278 
6279 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 			tw32(TG3_EAV_WATCHDOG0_MSB,
6281 			     TG3_EAV_WATCHDOG0_EN |
6282 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6283 
6284 			tw32(TG3_EAV_REF_CLCK_CTL,
6285 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6286 		} else {
6287 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6289 		}
6290 
6291 err_out:
6292 		tg3_full_unlock(tp);
6293 		return rval;
6294 
6295 	default:
6296 		break;
6297 	}
6298 
6299 	return -EOPNOTSUPP;
6300 }
6301 
6302 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6303 				     struct skb_shared_hwtstamps *timestamp)
6304 {
6305 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6306 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6307 					   tp->ptp_adjust);
6308 }
6309 
6310 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6311 {
6312 	*hwclock = tr32(TG3_TX_TSTAMP_LSB);
6313 	*hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6314 }
6315 
6316 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6317 {
6318 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6319 	struct skb_shared_hwtstamps timestamp;
6320 	u64 hwclock;
6321 
6322 	if (tp->ptp_txts_retrycnt > 2)
6323 		goto done;
6324 
6325 	tg3_read_tx_tstamp(tp, &hwclock);
6326 
6327 	if (hwclock != tp->pre_tx_ts) {
6328 		tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6329 		skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6330 		goto done;
6331 	}
6332 	tp->ptp_txts_retrycnt++;
6333 	return HZ / 10;
6334 done:
6335 	dev_consume_skb_any(tp->tx_tstamp_skb);
6336 	tp->tx_tstamp_skb = NULL;
6337 	tp->ptp_txts_retrycnt = 0;
6338 	tp->pre_tx_ts = 0;
6339 	return -1;
6340 }
6341 
6342 static const struct ptp_clock_info tg3_ptp_caps = {
6343 	.owner		= THIS_MODULE,
6344 	.name		= "tg3 clock",
6345 	.max_adj	= 250000000,
6346 	.n_alarm	= 0,
6347 	.n_ext_ts	= 0,
6348 	.n_per_out	= 1,
6349 	.n_pins		= 0,
6350 	.pps		= 0,
6351 	.adjfine	= tg3_ptp_adjfine,
6352 	.adjtime	= tg3_ptp_adjtime,
6353 	.do_aux_work	= tg3_ptp_ts_aux_work,
6354 	.gettimex64	= tg3_ptp_gettimex,
6355 	.settime64	= tg3_ptp_settime,
6356 	.enable		= tg3_ptp_enable,
6357 };
6358 
6359 /* tp->lock must be held */
6360 static void tg3_ptp_init(struct tg3 *tp)
6361 {
6362 	if (!tg3_flag(tp, PTP_CAPABLE))
6363 		return;
6364 
6365 	/* Initialize the hardware clock to the system time. */
6366 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6367 	tp->ptp_adjust = 0;
6368 	tp->ptp_info = tg3_ptp_caps;
6369 }
6370 
6371 /* tp->lock must be held */
6372 static void tg3_ptp_resume(struct tg3 *tp)
6373 {
6374 	if (!tg3_flag(tp, PTP_CAPABLE))
6375 		return;
6376 
6377 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6378 	tp->ptp_adjust = 0;
6379 }
6380 
6381 static void tg3_ptp_fini(struct tg3 *tp)
6382 {
6383 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6384 		return;
6385 
6386 	ptp_clock_unregister(tp->ptp_clock);
6387 	tp->ptp_clock = NULL;
6388 	tp->ptp_adjust = 0;
6389 	dev_consume_skb_any(tp->tx_tstamp_skb);
6390 	tp->tx_tstamp_skb = NULL;
6391 }
6392 
6393 static inline int tg3_irq_sync(struct tg3 *tp)
6394 {
6395 	return tp->irq_sync;
6396 }
6397 
6398 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6399 {
6400 	int i;
6401 
6402 	dst = (u32 *)((u8 *)dst + off);
6403 	for (i = 0; i < len; i += sizeof(u32))
6404 		*dst++ = tr32(off + i);
6405 }
6406 
6407 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6408 {
6409 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6410 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6411 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6412 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6413 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6414 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6415 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6416 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6417 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6418 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6419 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6420 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6421 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6422 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6423 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6424 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6425 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6426 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6427 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6428 
6429 	if (tg3_flag(tp, SUPPORT_MSIX))
6430 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6431 
6432 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6433 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6434 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6435 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6436 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6437 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6438 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6439 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6440 
6441 	if (!tg3_flag(tp, 5705_PLUS)) {
6442 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6443 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6444 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6445 	}
6446 
6447 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6448 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6449 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6450 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6451 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6452 
6453 	if (tg3_flag(tp, NVRAM))
6454 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6455 }
6456 
6457 static void tg3_dump_state(struct tg3 *tp)
6458 {
6459 	int i;
6460 	u32 *regs;
6461 
6462 	/* If it is a PCI error, all registers will be 0xffff,
6463 	 * we don't dump them out, just report the error and return
6464 	 */
6465 	if (tp->pdev->error_state != pci_channel_io_normal) {
6466 		netdev_err(tp->dev, "PCI channel ERROR!\n");
6467 		return;
6468 	}
6469 
6470 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6471 	if (!regs)
6472 		return;
6473 
6474 	if (tg3_flag(tp, PCI_EXPRESS)) {
6475 		/* Read up to but not including private PCI registers */
6476 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6477 			regs[i / sizeof(u32)] = tr32(i);
6478 	} else
6479 		tg3_dump_legacy_regs(tp, regs);
6480 
6481 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6482 		if (!regs[i + 0] && !regs[i + 1] &&
6483 		    !regs[i + 2] && !regs[i + 3])
6484 			continue;
6485 
6486 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6487 			   i * 4,
6488 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6489 	}
6490 
6491 	kfree(regs);
6492 
6493 	for (i = 0; i < tp->irq_cnt; i++) {
6494 		struct tg3_napi *tnapi = &tp->napi[i];
6495 
6496 		/* SW status block */
6497 		netdev_err(tp->dev,
6498 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6499 			   i,
6500 			   tnapi->hw_status->status,
6501 			   tnapi->hw_status->status_tag,
6502 			   tnapi->hw_status->rx_jumbo_consumer,
6503 			   tnapi->hw_status->rx_consumer,
6504 			   tnapi->hw_status->rx_mini_consumer,
6505 			   tnapi->hw_status->idx[0].rx_producer,
6506 			   tnapi->hw_status->idx[0].tx_consumer);
6507 
6508 		netdev_err(tp->dev,
6509 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6510 			   i,
6511 			   tnapi->last_tag, tnapi->last_irq_tag,
6512 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6513 			   tnapi->rx_rcb_ptr,
6514 			   tnapi->prodring.rx_std_prod_idx,
6515 			   tnapi->prodring.rx_std_cons_idx,
6516 			   tnapi->prodring.rx_jmb_prod_idx,
6517 			   tnapi->prodring.rx_jmb_cons_idx);
6518 	}
6519 }
6520 
6521 /* This is called whenever we suspect that the system chipset is re-
6522  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6523  * is bogus tx completions. We try to recover by setting the
6524  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6525  * in the workqueue.
6526  */
6527 static void tg3_tx_recover(struct tg3 *tp)
6528 {
6529 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6530 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6531 
6532 	netdev_warn(tp->dev,
6533 		    "The system may be re-ordering memory-mapped I/O "
6534 		    "cycles to the network device, attempting to recover. "
6535 		    "Please report the problem to the driver maintainer "
6536 		    "and include system chipset information.\n");
6537 
6538 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6539 }
6540 
6541 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6542 {
6543 	/* Tell compiler to fetch tx indices from memory. */
6544 	barrier();
6545 	return tnapi->tx_pending -
6546 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6547 }
6548 
6549 /* Tigon3 never reports partial packet sends.  So we do not
6550  * need special logic to handle SKBs that have not had all
6551  * of their frags sent yet, like SunGEM does.
6552  */
6553 static void tg3_tx(struct tg3_napi *tnapi)
6554 {
6555 	struct tg3 *tp = tnapi->tp;
6556 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6557 	u32 sw_idx = tnapi->tx_cons;
6558 	struct netdev_queue *txq;
6559 	int index = tnapi - tp->napi;
6560 	unsigned int pkts_compl = 0, bytes_compl = 0;
6561 
6562 	if (tg3_flag(tp, ENABLE_TSS))
6563 		index--;
6564 
6565 	txq = netdev_get_tx_queue(tp->dev, index);
6566 
6567 	while (sw_idx != hw_idx) {
6568 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6569 		bool complete_skb_later = false;
6570 		struct sk_buff *skb = ri->skb;
6571 		int i, tx_bug = 0;
6572 
6573 		if (unlikely(skb == NULL)) {
6574 			tg3_tx_recover(tp);
6575 			return;
6576 		}
6577 
6578 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6579 			struct skb_shared_hwtstamps timestamp;
6580 			u64 hwclock;
6581 
6582 			tg3_read_tx_tstamp(tp, &hwclock);
6583 			if (hwclock != tp->pre_tx_ts) {
6584 				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6585 				skb_tstamp_tx(skb, &timestamp);
6586 				tp->pre_tx_ts = 0;
6587 			} else {
6588 				tp->tx_tstamp_skb = skb;
6589 				complete_skb_later = true;
6590 			}
6591 		}
6592 
6593 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6594 				 skb_headlen(skb), DMA_TO_DEVICE);
6595 
6596 		ri->skb = NULL;
6597 
6598 		while (ri->fragmented) {
6599 			ri->fragmented = false;
6600 			sw_idx = NEXT_TX(sw_idx);
6601 			ri = &tnapi->tx_buffers[sw_idx];
6602 		}
6603 
6604 		sw_idx = NEXT_TX(sw_idx);
6605 
6606 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6607 			ri = &tnapi->tx_buffers[sw_idx];
6608 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6609 				tx_bug = 1;
6610 
6611 			dma_unmap_page(&tp->pdev->dev,
6612 				       dma_unmap_addr(ri, mapping),
6613 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6614 				       DMA_TO_DEVICE);
6615 
6616 			while (ri->fragmented) {
6617 				ri->fragmented = false;
6618 				sw_idx = NEXT_TX(sw_idx);
6619 				ri = &tnapi->tx_buffers[sw_idx];
6620 			}
6621 
6622 			sw_idx = NEXT_TX(sw_idx);
6623 		}
6624 
6625 		pkts_compl++;
6626 		bytes_compl += skb->len;
6627 
6628 		if (!complete_skb_later)
6629 			dev_consume_skb_any(skb);
6630 		else
6631 			ptp_schedule_worker(tp->ptp_clock, 0);
6632 
6633 		if (unlikely(tx_bug)) {
6634 			tg3_tx_recover(tp);
6635 			return;
6636 		}
6637 	}
6638 
6639 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6640 
6641 	tnapi->tx_cons = sw_idx;
6642 
6643 	/* Need to make the tx_cons update visible to __tg3_start_xmit()
6644 	 * before checking for netif_queue_stopped().  Without the
6645 	 * memory barrier, there is a small possibility that __tg3_start_xmit()
6646 	 * will miss it and cause the queue to be stopped forever.
6647 	 */
6648 	smp_mb();
6649 
6650 	if (unlikely(netif_tx_queue_stopped(txq) &&
6651 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6652 		__netif_tx_lock(txq, smp_processor_id());
6653 		if (netif_tx_queue_stopped(txq) &&
6654 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6655 			netif_tx_wake_queue(txq);
6656 		__netif_tx_unlock(txq);
6657 	}
6658 }
6659 
6660 static void tg3_frag_free(bool is_frag, void *data)
6661 {
6662 	if (is_frag)
6663 		skb_free_frag(data);
6664 	else
6665 		kfree(data);
6666 }
6667 
6668 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6669 {
6670 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6671 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6672 
6673 	if (!ri->data)
6674 		return;
6675 
6676 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6677 			 DMA_FROM_DEVICE);
6678 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6679 	ri->data = NULL;
6680 }
6681 
6682 
6683 /* Returns size of skb allocated or < 0 on error.
6684  *
6685  * We only need to fill in the address because the other members
6686  * of the RX descriptor are invariant, see tg3_init_rings.
6687  *
6688  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6689  * posting buffers we only dirty the first cache line of the RX
6690  * descriptor (containing the address).  Whereas for the RX status
6691  * buffers the cpu only reads the last cacheline of the RX descriptor
6692  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6693  */
6694 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6695 			     u32 opaque_key, u32 dest_idx_unmasked,
6696 			     unsigned int *frag_size)
6697 {
6698 	struct tg3_rx_buffer_desc *desc;
6699 	struct ring_info *map;
6700 	u8 *data;
6701 	dma_addr_t mapping;
6702 	int skb_size, data_size, dest_idx;
6703 
6704 	switch (opaque_key) {
6705 	case RXD_OPAQUE_RING_STD:
6706 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6707 		desc = &tpr->rx_std[dest_idx];
6708 		map = &tpr->rx_std_buffers[dest_idx];
6709 		data_size = tp->rx_pkt_map_sz;
6710 		break;
6711 
6712 	case RXD_OPAQUE_RING_JUMBO:
6713 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6714 		desc = &tpr->rx_jmb[dest_idx].std;
6715 		map = &tpr->rx_jmb_buffers[dest_idx];
6716 		data_size = TG3_RX_JMB_MAP_SZ;
6717 		break;
6718 
6719 	default:
6720 		return -EINVAL;
6721 	}
6722 
6723 	/* Do not overwrite any of the map or rp information
6724 	 * until we are sure we can commit to a new buffer.
6725 	 *
6726 	 * Callers depend upon this behavior and assume that
6727 	 * we leave everything unchanged if we fail.
6728 	 */
6729 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6730 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6731 	if (skb_size <= PAGE_SIZE) {
6732 		data = napi_alloc_frag(skb_size);
6733 		*frag_size = skb_size;
6734 	} else {
6735 		data = kmalloc(skb_size, GFP_ATOMIC);
6736 		*frag_size = 0;
6737 	}
6738 	if (!data)
6739 		return -ENOMEM;
6740 
6741 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6742 				 data_size, DMA_FROM_DEVICE);
6743 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6744 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6745 		return -EIO;
6746 	}
6747 
6748 	map->data = data;
6749 	dma_unmap_addr_set(map, mapping, mapping);
6750 
6751 	desc->addr_hi = ((u64)mapping >> 32);
6752 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6753 
6754 	return data_size;
6755 }
6756 
6757 /* We only need to move over in the address because the other
6758  * members of the RX descriptor are invariant.  See notes above
6759  * tg3_alloc_rx_data for full details.
6760  */
6761 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6762 			   struct tg3_rx_prodring_set *dpr,
6763 			   u32 opaque_key, int src_idx,
6764 			   u32 dest_idx_unmasked)
6765 {
6766 	struct tg3 *tp = tnapi->tp;
6767 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6768 	struct ring_info *src_map, *dest_map;
6769 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6770 	int dest_idx;
6771 
6772 	switch (opaque_key) {
6773 	case RXD_OPAQUE_RING_STD:
6774 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6775 		dest_desc = &dpr->rx_std[dest_idx];
6776 		dest_map = &dpr->rx_std_buffers[dest_idx];
6777 		src_desc = &spr->rx_std[src_idx];
6778 		src_map = &spr->rx_std_buffers[src_idx];
6779 		break;
6780 
6781 	case RXD_OPAQUE_RING_JUMBO:
6782 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6783 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6784 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6785 		src_desc = &spr->rx_jmb[src_idx].std;
6786 		src_map = &spr->rx_jmb_buffers[src_idx];
6787 		break;
6788 
6789 	default:
6790 		return;
6791 	}
6792 
6793 	dest_map->data = src_map->data;
6794 	dma_unmap_addr_set(dest_map, mapping,
6795 			   dma_unmap_addr(src_map, mapping));
6796 	dest_desc->addr_hi = src_desc->addr_hi;
6797 	dest_desc->addr_lo = src_desc->addr_lo;
6798 
6799 	/* Ensure that the update to the skb happens after the physical
6800 	 * addresses have been transferred to the new BD location.
6801 	 */
6802 	smp_wmb();
6803 
6804 	src_map->data = NULL;
6805 }
6806 
6807 /* The RX ring scheme is composed of multiple rings which post fresh
6808  * buffers to the chip, and one special ring the chip uses to report
6809  * status back to the host.
6810  *
6811  * The special ring reports the status of received packets to the
6812  * host.  The chip does not write into the original descriptor the
6813  * RX buffer was obtained from.  The chip simply takes the original
6814  * descriptor as provided by the host, updates the status and length
6815  * field, then writes this into the next status ring entry.
6816  *
6817  * Each ring the host uses to post buffers to the chip is described
6818  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6819  * it is first placed into the on-chip ram.  When the packet's length
6820  * is known, it walks down the TG3_BDINFO entries to select the ring.
6821  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6822  * which is within the range of the new packet's length is chosen.
6823  *
6824  * The "separate ring for rx status" scheme may sound queer, but it makes
6825  * sense from a cache coherency perspective.  If only the host writes
6826  * to the buffer post rings, and only the chip writes to the rx status
6827  * rings, then cache lines never move beyond shared-modified state.
6828  * If both the host and chip were to write into the same ring, cache line
6829  * eviction could occur since both entities want it in an exclusive state.
6830  */
6831 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6832 {
6833 	struct tg3 *tp = tnapi->tp;
6834 	u32 work_mask, rx_std_posted = 0;
6835 	u32 std_prod_idx, jmb_prod_idx;
6836 	u32 sw_idx = tnapi->rx_rcb_ptr;
6837 	u16 hw_idx;
6838 	int received;
6839 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6840 
6841 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6842 	/*
6843 	 * We need to order the read of hw_idx and the read of
6844 	 * the opaque cookie.
6845 	 */
6846 	rmb();
6847 	work_mask = 0;
6848 	received = 0;
6849 	std_prod_idx = tpr->rx_std_prod_idx;
6850 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6851 	while (sw_idx != hw_idx && budget > 0) {
6852 		struct ring_info *ri;
6853 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6854 		unsigned int len;
6855 		struct sk_buff *skb;
6856 		dma_addr_t dma_addr;
6857 		u32 opaque_key, desc_idx, *post_ptr;
6858 		u8 *data;
6859 		u64 tstamp = 0;
6860 
6861 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6862 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6863 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6864 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6865 			dma_addr = dma_unmap_addr(ri, mapping);
6866 			data = ri->data;
6867 			post_ptr = &std_prod_idx;
6868 			rx_std_posted++;
6869 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6870 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6871 			dma_addr = dma_unmap_addr(ri, mapping);
6872 			data = ri->data;
6873 			post_ptr = &jmb_prod_idx;
6874 		} else
6875 			goto next_pkt_nopost;
6876 
6877 		work_mask |= opaque_key;
6878 
6879 		if (desc->err_vlan & RXD_ERR_MASK) {
6880 		drop_it:
6881 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6882 				       desc_idx, *post_ptr);
6883 		drop_it_no_recycle:
6884 			/* Other statistics kept track of by card. */
6885 			tnapi->rx_dropped++;
6886 			goto next_pkt;
6887 		}
6888 
6889 		prefetch(data + TG3_RX_OFFSET(tp));
6890 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6891 		      ETH_FCS_LEN;
6892 
6893 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6894 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6895 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6896 		     RXD_FLAG_PTPSTAT_PTPV2) {
6897 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6898 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6899 		}
6900 
6901 		if (len > TG3_RX_COPY_THRESH(tp)) {
6902 			int skb_size;
6903 			unsigned int frag_size;
6904 
6905 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6906 						    *post_ptr, &frag_size);
6907 			if (skb_size < 0)
6908 				goto drop_it;
6909 
6910 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6911 					 DMA_FROM_DEVICE);
6912 
6913 			/* Ensure that the update to the data happens
6914 			 * after the usage of the old DMA mapping.
6915 			 */
6916 			smp_wmb();
6917 
6918 			ri->data = NULL;
6919 
6920 			if (frag_size)
6921 				skb = build_skb(data, frag_size);
6922 			else
6923 				skb = slab_build_skb(data);
6924 			if (!skb) {
6925 				tg3_frag_free(frag_size != 0, data);
6926 				goto drop_it_no_recycle;
6927 			}
6928 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6929 		} else {
6930 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6931 				       desc_idx, *post_ptr);
6932 
6933 			skb = netdev_alloc_skb(tp->dev,
6934 					       len + TG3_RAW_IP_ALIGN);
6935 			if (skb == NULL)
6936 				goto drop_it_no_recycle;
6937 
6938 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6939 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6940 						DMA_FROM_DEVICE);
6941 			memcpy(skb->data,
6942 			       data + TG3_RX_OFFSET(tp),
6943 			       len);
6944 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6945 						   len, DMA_FROM_DEVICE);
6946 		}
6947 
6948 		skb_put(skb, len);
6949 		if (tstamp)
6950 			tg3_hwclock_to_timestamp(tp, tstamp,
6951 						 skb_hwtstamps(skb));
6952 
6953 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6954 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6955 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6956 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6957 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6958 		else
6959 			skb_checksum_none_assert(skb);
6960 
6961 		skb->protocol = eth_type_trans(skb, tp->dev);
6962 
6963 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6964 		    skb->protocol != htons(ETH_P_8021Q) &&
6965 		    skb->protocol != htons(ETH_P_8021AD)) {
6966 			dev_kfree_skb_any(skb);
6967 			goto drop_it_no_recycle;
6968 		}
6969 
6970 		if (desc->type_flags & RXD_FLAG_VLAN &&
6971 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6972 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6973 					       desc->err_vlan & RXD_VLAN_MASK);
6974 
6975 		napi_gro_receive(&tnapi->napi, skb);
6976 
6977 		received++;
6978 		budget--;
6979 
6980 next_pkt:
6981 		(*post_ptr)++;
6982 
6983 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6984 			tpr->rx_std_prod_idx = std_prod_idx &
6985 					       tp->rx_std_ring_mask;
6986 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6987 				     tpr->rx_std_prod_idx);
6988 			work_mask &= ~RXD_OPAQUE_RING_STD;
6989 			rx_std_posted = 0;
6990 		}
6991 next_pkt_nopost:
6992 		sw_idx++;
6993 		sw_idx &= tp->rx_ret_ring_mask;
6994 
6995 		/* Refresh hw_idx to see if there is new work */
6996 		if (sw_idx == hw_idx) {
6997 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6998 			rmb();
6999 		}
7000 	}
7001 
7002 	/* ACK the status ring. */
7003 	tnapi->rx_rcb_ptr = sw_idx;
7004 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
7005 
7006 	/* Refill RX ring(s). */
7007 	if (!tg3_flag(tp, ENABLE_RSS)) {
7008 		/* Sync BD data before updating mailbox */
7009 		wmb();
7010 
7011 		if (work_mask & RXD_OPAQUE_RING_STD) {
7012 			tpr->rx_std_prod_idx = std_prod_idx &
7013 					       tp->rx_std_ring_mask;
7014 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7015 				     tpr->rx_std_prod_idx);
7016 		}
7017 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7018 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
7019 					       tp->rx_jmb_ring_mask;
7020 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7021 				     tpr->rx_jmb_prod_idx);
7022 		}
7023 	} else if (work_mask) {
7024 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7025 		 * updated before the producer indices can be updated.
7026 		 */
7027 		smp_wmb();
7028 
7029 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7030 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7031 
7032 		if (tnapi != &tp->napi[1]) {
7033 			tp->rx_refill = true;
7034 			napi_schedule(&tp->napi[1].napi);
7035 		}
7036 	}
7037 
7038 	return received;
7039 }
7040 
7041 static void tg3_poll_link(struct tg3 *tp)
7042 {
7043 	/* handle link change and other phy events */
7044 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7045 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7046 
7047 		if (sblk->status & SD_STATUS_LINK_CHG) {
7048 			sblk->status = SD_STATUS_UPDATED |
7049 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7050 			spin_lock(&tp->lock);
7051 			if (tg3_flag(tp, USE_PHYLIB)) {
7052 				tw32_f(MAC_STATUS,
7053 				     (MAC_STATUS_SYNC_CHANGED |
7054 				      MAC_STATUS_CFG_CHANGED |
7055 				      MAC_STATUS_MI_COMPLETION |
7056 				      MAC_STATUS_LNKSTATE_CHANGED));
7057 				udelay(40);
7058 			} else
7059 				tg3_setup_phy(tp, false);
7060 			spin_unlock(&tp->lock);
7061 		}
7062 	}
7063 }
7064 
7065 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7066 				struct tg3_rx_prodring_set *dpr,
7067 				struct tg3_rx_prodring_set *spr)
7068 {
7069 	u32 si, di, cpycnt, src_prod_idx;
7070 	int i, err = 0;
7071 
7072 	while (1) {
7073 		src_prod_idx = spr->rx_std_prod_idx;
7074 
7075 		/* Make sure updates to the rx_std_buffers[] entries and the
7076 		 * standard producer index are seen in the correct order.
7077 		 */
7078 		smp_rmb();
7079 
7080 		if (spr->rx_std_cons_idx == src_prod_idx)
7081 			break;
7082 
7083 		if (spr->rx_std_cons_idx < src_prod_idx)
7084 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7085 		else
7086 			cpycnt = tp->rx_std_ring_mask + 1 -
7087 				 spr->rx_std_cons_idx;
7088 
7089 		cpycnt = min(cpycnt,
7090 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7091 
7092 		si = spr->rx_std_cons_idx;
7093 		di = dpr->rx_std_prod_idx;
7094 
7095 		for (i = di; i < di + cpycnt; i++) {
7096 			if (dpr->rx_std_buffers[i].data) {
7097 				cpycnt = i - di;
7098 				err = -ENOSPC;
7099 				break;
7100 			}
7101 		}
7102 
7103 		if (!cpycnt)
7104 			break;
7105 
7106 		/* Ensure that updates to the rx_std_buffers ring and the
7107 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7108 		 * ordered correctly WRT the skb check above.
7109 		 */
7110 		smp_rmb();
7111 
7112 		memcpy(&dpr->rx_std_buffers[di],
7113 		       &spr->rx_std_buffers[si],
7114 		       cpycnt * sizeof(struct ring_info));
7115 
7116 		for (i = 0; i < cpycnt; i++, di++, si++) {
7117 			struct tg3_rx_buffer_desc *sbd, *dbd;
7118 			sbd = &spr->rx_std[si];
7119 			dbd = &dpr->rx_std[di];
7120 			dbd->addr_hi = sbd->addr_hi;
7121 			dbd->addr_lo = sbd->addr_lo;
7122 		}
7123 
7124 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7125 				       tp->rx_std_ring_mask;
7126 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7127 				       tp->rx_std_ring_mask;
7128 	}
7129 
7130 	while (1) {
7131 		src_prod_idx = spr->rx_jmb_prod_idx;
7132 
7133 		/* Make sure updates to the rx_jmb_buffers[] entries and
7134 		 * the jumbo producer index are seen in the correct order.
7135 		 */
7136 		smp_rmb();
7137 
7138 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7139 			break;
7140 
7141 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7142 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7143 		else
7144 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7145 				 spr->rx_jmb_cons_idx;
7146 
7147 		cpycnt = min(cpycnt,
7148 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7149 
7150 		si = spr->rx_jmb_cons_idx;
7151 		di = dpr->rx_jmb_prod_idx;
7152 
7153 		for (i = di; i < di + cpycnt; i++) {
7154 			if (dpr->rx_jmb_buffers[i].data) {
7155 				cpycnt = i - di;
7156 				err = -ENOSPC;
7157 				break;
7158 			}
7159 		}
7160 
7161 		if (!cpycnt)
7162 			break;
7163 
7164 		/* Ensure that updates to the rx_jmb_buffers ring and the
7165 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7166 		 * ordered correctly WRT the skb check above.
7167 		 */
7168 		smp_rmb();
7169 
7170 		memcpy(&dpr->rx_jmb_buffers[di],
7171 		       &spr->rx_jmb_buffers[si],
7172 		       cpycnt * sizeof(struct ring_info));
7173 
7174 		for (i = 0; i < cpycnt; i++, di++, si++) {
7175 			struct tg3_rx_buffer_desc *sbd, *dbd;
7176 			sbd = &spr->rx_jmb[si].std;
7177 			dbd = &dpr->rx_jmb[di].std;
7178 			dbd->addr_hi = sbd->addr_hi;
7179 			dbd->addr_lo = sbd->addr_lo;
7180 		}
7181 
7182 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7183 				       tp->rx_jmb_ring_mask;
7184 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7185 				       tp->rx_jmb_ring_mask;
7186 	}
7187 
7188 	return err;
7189 }
7190 
7191 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7192 {
7193 	struct tg3 *tp = tnapi->tp;
7194 
7195 	/* run TX completion thread */
7196 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7197 		tg3_tx(tnapi);
7198 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7199 			return work_done;
7200 	}
7201 
7202 	if (!tnapi->rx_rcb_prod_idx)
7203 		return work_done;
7204 
7205 	/* run RX thread, within the bounds set by NAPI.
7206 	 * All RX "locking" is done by ensuring outside
7207 	 * code synchronizes with tg3->napi.poll()
7208 	 */
7209 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7210 		work_done += tg3_rx(tnapi, budget - work_done);
7211 
7212 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7213 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7214 		int i, err = 0;
7215 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7216 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7217 
7218 		tp->rx_refill = false;
7219 		for (i = 1; i <= tp->rxq_cnt; i++)
7220 			err |= tg3_rx_prodring_xfer(tp, dpr,
7221 						    &tp->napi[i].prodring);
7222 
7223 		wmb();
7224 
7225 		if (std_prod_idx != dpr->rx_std_prod_idx)
7226 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7227 				     dpr->rx_std_prod_idx);
7228 
7229 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7230 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7231 				     dpr->rx_jmb_prod_idx);
7232 
7233 		if (err)
7234 			tw32_f(HOSTCC_MODE, tp->coal_now);
7235 	}
7236 
7237 	return work_done;
7238 }
7239 
7240 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7241 {
7242 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7243 		schedule_work(&tp->reset_task);
7244 }
7245 
7246 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7247 {
7248 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7249 		cancel_work_sync(&tp->reset_task);
7250 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7251 }
7252 
7253 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7254 {
7255 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7256 	struct tg3 *tp = tnapi->tp;
7257 	int work_done = 0;
7258 	struct tg3_hw_status *sblk = tnapi->hw_status;
7259 
7260 	while (1) {
7261 		work_done = tg3_poll_work(tnapi, work_done, budget);
7262 
7263 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7264 			goto tx_recovery;
7265 
7266 		if (unlikely(work_done >= budget))
7267 			break;
7268 
7269 		/* tp->last_tag is used in tg3_int_reenable() below
7270 		 * to tell the hw how much work has been processed,
7271 		 * so we must read it before checking for more work.
7272 		 */
7273 		tnapi->last_tag = sblk->status_tag;
7274 		tnapi->last_irq_tag = tnapi->last_tag;
7275 		rmb();
7276 
7277 		/* check for RX/TX work to do */
7278 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7279 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7280 
7281 			/* This test here is not race free, but will reduce
7282 			 * the number of interrupts by looping again.
7283 			 */
7284 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7285 				continue;
7286 
7287 			napi_complete_done(napi, work_done);
7288 			/* Reenable interrupts. */
7289 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7290 
7291 			/* This test here is synchronized by napi_schedule()
7292 			 * and napi_complete() to close the race condition.
7293 			 */
7294 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7295 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7296 						  HOSTCC_MODE_ENABLE |
7297 						  tnapi->coal_now);
7298 			}
7299 			break;
7300 		}
7301 	}
7302 
7303 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7304 	return work_done;
7305 
7306 tx_recovery:
7307 	/* work_done is guaranteed to be less than budget. */
7308 	napi_complete(napi);
7309 	tg3_reset_task_schedule(tp);
7310 	return work_done;
7311 }
7312 
7313 static void tg3_process_error(struct tg3 *tp)
7314 {
7315 	u32 val;
7316 	bool real_error = false;
7317 
7318 	if (tg3_flag(tp, ERROR_PROCESSED))
7319 		return;
7320 
7321 	/* Check Flow Attention register */
7322 	val = tr32(HOSTCC_FLOW_ATTN);
7323 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7324 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7325 		real_error = true;
7326 	}
7327 
7328 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7329 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7330 		real_error = true;
7331 	}
7332 
7333 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7334 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7335 		real_error = true;
7336 	}
7337 
7338 	if (!real_error)
7339 		return;
7340 
7341 	tg3_dump_state(tp);
7342 
7343 	tg3_flag_set(tp, ERROR_PROCESSED);
7344 	tg3_reset_task_schedule(tp);
7345 }
7346 
7347 static int tg3_poll(struct napi_struct *napi, int budget)
7348 {
7349 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7350 	struct tg3 *tp = tnapi->tp;
7351 	int work_done = 0;
7352 	struct tg3_hw_status *sblk = tnapi->hw_status;
7353 
7354 	while (1) {
7355 		if (sblk->status & SD_STATUS_ERROR)
7356 			tg3_process_error(tp);
7357 
7358 		tg3_poll_link(tp);
7359 
7360 		work_done = tg3_poll_work(tnapi, work_done, budget);
7361 
7362 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7363 			goto tx_recovery;
7364 
7365 		if (unlikely(work_done >= budget))
7366 			break;
7367 
7368 		if (tg3_flag(tp, TAGGED_STATUS)) {
7369 			/* tp->last_tag is used in tg3_int_reenable() below
7370 			 * to tell the hw how much work has been processed,
7371 			 * so we must read it before checking for more work.
7372 			 */
7373 			tnapi->last_tag = sblk->status_tag;
7374 			tnapi->last_irq_tag = tnapi->last_tag;
7375 			rmb();
7376 		} else
7377 			sblk->status &= ~SD_STATUS_UPDATED;
7378 
7379 		if (likely(!tg3_has_work(tnapi))) {
7380 			napi_complete_done(napi, work_done);
7381 			tg3_int_reenable(tnapi);
7382 			break;
7383 		}
7384 	}
7385 
7386 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7387 	return work_done;
7388 
7389 tx_recovery:
7390 	/* work_done is guaranteed to be less than budget. */
7391 	napi_complete(napi);
7392 	tg3_reset_task_schedule(tp);
7393 	return work_done;
7394 }
7395 
7396 static void tg3_napi_disable(struct tg3 *tp)
7397 {
7398 	int txq_idx = tp->txq_cnt - 1;
7399 	int rxq_idx = tp->rxq_cnt - 1;
7400 	struct tg3_napi *tnapi;
7401 	int i;
7402 
7403 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
7404 		tnapi = &tp->napi[i];
7405 		if (tnapi->tx_buffers) {
7406 			netif_queue_set_napi(tp->dev, txq_idx,
7407 					     NETDEV_QUEUE_TYPE_TX, NULL);
7408 			txq_idx--;
7409 		}
7410 		if (tnapi->rx_rcb) {
7411 			netif_queue_set_napi(tp->dev, rxq_idx,
7412 					     NETDEV_QUEUE_TYPE_RX, NULL);
7413 			rxq_idx--;
7414 		}
7415 		napi_disable(&tnapi->napi);
7416 	}
7417 }
7418 
7419 static void tg3_napi_enable(struct tg3 *tp)
7420 {
7421 	int txq_idx = 0, rxq_idx = 0;
7422 	struct tg3_napi *tnapi;
7423 	int i;
7424 
7425 	for (i = 0; i < tp->irq_cnt; i++) {
7426 		tnapi = &tp->napi[i];
7427 		napi_enable_locked(&tnapi->napi);
7428 		if (tnapi->tx_buffers) {
7429 			netif_queue_set_napi(tp->dev, txq_idx,
7430 					     NETDEV_QUEUE_TYPE_TX,
7431 					     &tnapi->napi);
7432 			txq_idx++;
7433 		}
7434 		if (tnapi->rx_rcb) {
7435 			netif_queue_set_napi(tp->dev, rxq_idx,
7436 					     NETDEV_QUEUE_TYPE_RX,
7437 					     &tnapi->napi);
7438 			rxq_idx++;
7439 		}
7440 	}
7441 }
7442 
7443 static void tg3_napi_init(struct tg3 *tp)
7444 {
7445 	int i;
7446 
7447 	for (i = 0; i < tp->irq_cnt; i++) {
7448 		netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
7449 				      i ? tg3_poll_msix : tg3_poll);
7450 		netif_napi_set_irq_locked(&tp->napi[i].napi,
7451 					  tp->napi[i].irq_vec);
7452 	}
7453 }
7454 
7455 static void tg3_napi_fini(struct tg3 *tp)
7456 {
7457 	int i;
7458 
7459 	for (i = 0; i < tp->irq_cnt; i++)
7460 		netif_napi_del(&tp->napi[i].napi);
7461 }
7462 
7463 static inline void tg3_netif_stop(struct tg3 *tp)
7464 {
7465 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7466 	tg3_napi_disable(tp);
7467 	netif_carrier_off(tp->dev);
7468 	netif_tx_disable(tp->dev);
7469 }
7470 
7471 /* tp->lock must be held */
7472 static inline void tg3_netif_start(struct tg3 *tp)
7473 {
7474 	tg3_ptp_resume(tp);
7475 
7476 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7477 	 * appropriate so long as all callers are assured to
7478 	 * have free tx slots (such as after tg3_init_hw)
7479 	 */
7480 	netif_tx_wake_all_queues(tp->dev);
7481 
7482 	if (tp->link_up)
7483 		netif_carrier_on(tp->dev);
7484 
7485 	tg3_napi_enable(tp);
7486 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7487 	tg3_enable_ints(tp);
7488 }
7489 
7490 static void tg3_irq_quiesce(struct tg3 *tp)
7491 	__releases(tp->lock)
7492 	__acquires(tp->lock)
7493 {
7494 	int i;
7495 
7496 	BUG_ON(tp->irq_sync);
7497 
7498 	tp->irq_sync = 1;
7499 	smp_mb();
7500 
7501 	spin_unlock_bh(&tp->lock);
7502 
7503 	for (i = 0; i < tp->irq_cnt; i++)
7504 		synchronize_irq(tp->napi[i].irq_vec);
7505 
7506 	spin_lock_bh(&tp->lock);
7507 }
7508 
7509 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7510  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7511  * with as well.  Most of the time, this is not necessary except when
7512  * shutting down the device.
7513  */
7514 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7515 {
7516 	spin_lock_bh(&tp->lock);
7517 	if (irq_sync)
7518 		tg3_irq_quiesce(tp);
7519 }
7520 
7521 static inline void tg3_full_unlock(struct tg3 *tp)
7522 {
7523 	spin_unlock_bh(&tp->lock);
7524 }
7525 
7526 /* One-shot MSI handler - Chip automatically disables interrupt
7527  * after sending MSI so driver doesn't have to do it.
7528  */
7529 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7530 {
7531 	struct tg3_napi *tnapi = dev_id;
7532 	struct tg3 *tp = tnapi->tp;
7533 
7534 	prefetch(tnapi->hw_status);
7535 	if (tnapi->rx_rcb)
7536 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7537 
7538 	if (likely(!tg3_irq_sync(tp)))
7539 		napi_schedule(&tnapi->napi);
7540 
7541 	return IRQ_HANDLED;
7542 }
7543 
7544 /* MSI ISR - No need to check for interrupt sharing and no need to
7545  * flush status block and interrupt mailbox. PCI ordering rules
7546  * guarantee that MSI will arrive after the status block.
7547  */
7548 static irqreturn_t tg3_msi(int irq, void *dev_id)
7549 {
7550 	struct tg3_napi *tnapi = dev_id;
7551 	struct tg3 *tp = tnapi->tp;
7552 
7553 	prefetch(tnapi->hw_status);
7554 	if (tnapi->rx_rcb)
7555 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7556 	/*
7557 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7558 	 * chip-internal interrupt pending events.
7559 	 * Writing non-zero to intr-mbox-0 additional tells the
7560 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7561 	 * event coalescing.
7562 	 */
7563 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7564 	if (likely(!tg3_irq_sync(tp)))
7565 		napi_schedule(&tnapi->napi);
7566 
7567 	return IRQ_RETVAL(1);
7568 }
7569 
7570 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7571 {
7572 	struct tg3_napi *tnapi = dev_id;
7573 	struct tg3 *tp = tnapi->tp;
7574 	struct tg3_hw_status *sblk = tnapi->hw_status;
7575 	unsigned int handled = 1;
7576 
7577 	/* In INTx mode, it is possible for the interrupt to arrive at
7578 	 * the CPU before the status block posted prior to the interrupt.
7579 	 * Reading the PCI State register will confirm whether the
7580 	 * interrupt is ours and will flush the status block.
7581 	 */
7582 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7583 		if (tg3_flag(tp, CHIP_RESETTING) ||
7584 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7585 			handled = 0;
7586 			goto out;
7587 		}
7588 	}
7589 
7590 	/*
7591 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7592 	 * chip-internal interrupt pending events.
7593 	 * Writing non-zero to intr-mbox-0 additional tells the
7594 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7595 	 * event coalescing.
7596 	 *
7597 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7598 	 * spurious interrupts.  The flush impacts performance but
7599 	 * excessive spurious interrupts can be worse in some cases.
7600 	 */
7601 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7602 	if (tg3_irq_sync(tp))
7603 		goto out;
7604 	sblk->status &= ~SD_STATUS_UPDATED;
7605 	if (likely(tg3_has_work(tnapi))) {
7606 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7607 		napi_schedule(&tnapi->napi);
7608 	} else {
7609 		/* No work, shared interrupt perhaps?  re-enable
7610 		 * interrupts, and flush that PCI write
7611 		 */
7612 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7613 			       0x00000000);
7614 	}
7615 out:
7616 	return IRQ_RETVAL(handled);
7617 }
7618 
7619 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7620 {
7621 	struct tg3_napi *tnapi = dev_id;
7622 	struct tg3 *tp = tnapi->tp;
7623 	struct tg3_hw_status *sblk = tnapi->hw_status;
7624 	unsigned int handled = 1;
7625 
7626 	/* In INTx mode, it is possible for the interrupt to arrive at
7627 	 * the CPU before the status block posted prior to the interrupt.
7628 	 * Reading the PCI State register will confirm whether the
7629 	 * interrupt is ours and will flush the status block.
7630 	 */
7631 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7632 		if (tg3_flag(tp, CHIP_RESETTING) ||
7633 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7634 			handled = 0;
7635 			goto out;
7636 		}
7637 	}
7638 
7639 	/*
7640 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7641 	 * chip-internal interrupt pending events.
7642 	 * writing non-zero to intr-mbox-0 additional tells the
7643 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7644 	 * event coalescing.
7645 	 *
7646 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7647 	 * spurious interrupts.  The flush impacts performance but
7648 	 * excessive spurious interrupts can be worse in some cases.
7649 	 */
7650 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7651 
7652 	/*
7653 	 * In a shared interrupt configuration, sometimes other devices'
7654 	 * interrupts will scream.  We record the current status tag here
7655 	 * so that the above check can report that the screaming interrupts
7656 	 * are unhandled.  Eventually they will be silenced.
7657 	 */
7658 	tnapi->last_irq_tag = sblk->status_tag;
7659 
7660 	if (tg3_irq_sync(tp))
7661 		goto out;
7662 
7663 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7664 
7665 	napi_schedule(&tnapi->napi);
7666 
7667 out:
7668 	return IRQ_RETVAL(handled);
7669 }
7670 
7671 /* ISR for interrupt test */
7672 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7673 {
7674 	struct tg3_napi *tnapi = dev_id;
7675 	struct tg3 *tp = tnapi->tp;
7676 	struct tg3_hw_status *sblk = tnapi->hw_status;
7677 
7678 	if ((sblk->status & SD_STATUS_UPDATED) ||
7679 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7680 		tg3_disable_ints(tp);
7681 		return IRQ_RETVAL(1);
7682 	}
7683 	return IRQ_RETVAL(0);
7684 }
7685 
7686 #ifdef CONFIG_NET_POLL_CONTROLLER
7687 static void tg3_poll_controller(struct net_device *dev)
7688 {
7689 	int i;
7690 	struct tg3 *tp = netdev_priv(dev);
7691 
7692 	if (tg3_irq_sync(tp))
7693 		return;
7694 
7695 	for (i = 0; i < tp->irq_cnt; i++)
7696 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7697 }
7698 #endif
7699 
7700 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7701 {
7702 	struct tg3 *tp = netdev_priv(dev);
7703 
7704 	if (netif_msg_tx_err(tp)) {
7705 		netdev_err(dev, "transmit timed out, resetting\n");
7706 		tg3_dump_state(tp);
7707 	}
7708 
7709 	tg3_reset_task_schedule(tp);
7710 }
7711 
7712 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7713 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7714 {
7715 	u32 base = (u32) mapping & 0xffffffff;
7716 
7717 	return base + len + 8 < base;
7718 }
7719 
7720 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7721  * of any 4GB boundaries: 4G, 8G, etc
7722  */
7723 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7724 					   u32 len, u32 mss)
7725 {
7726 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7727 		u32 base = (u32) mapping & 0xffffffff;
7728 
7729 		return ((base + len + (mss & 0x3fff)) < base);
7730 	}
7731 	return 0;
7732 }
7733 
7734 /* Test for DMA addresses > 40-bit */
7735 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7736 					  int len)
7737 {
7738 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7739 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7740 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7741 	return 0;
7742 #else
7743 	return 0;
7744 #endif
7745 }
7746 
7747 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7748 				 dma_addr_t mapping, u32 len, u32 flags,
7749 				 u32 mss, u32 vlan)
7750 {
7751 	txbd->addr_hi = ((u64) mapping >> 32);
7752 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7753 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7754 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7755 }
7756 
7757 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7758 			    dma_addr_t map, u32 len, u32 flags,
7759 			    u32 mss, u32 vlan)
7760 {
7761 	struct tg3 *tp = tnapi->tp;
7762 	bool hwbug = false;
7763 
7764 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7765 		hwbug = true;
7766 
7767 	if (tg3_4g_overflow_test(map, len))
7768 		hwbug = true;
7769 
7770 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7771 		hwbug = true;
7772 
7773 	if (tg3_40bit_overflow_test(tp, map, len))
7774 		hwbug = true;
7775 
7776 	if (tp->dma_limit) {
7777 		u32 prvidx = *entry;
7778 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7779 		while (len > tp->dma_limit && *budget) {
7780 			u32 frag_len = tp->dma_limit;
7781 			len -= tp->dma_limit;
7782 
7783 			/* Avoid the 8byte DMA problem */
7784 			if (len <= 8) {
7785 				len += tp->dma_limit / 2;
7786 				frag_len = tp->dma_limit / 2;
7787 			}
7788 
7789 			tnapi->tx_buffers[*entry].fragmented = true;
7790 
7791 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7792 				      frag_len, tmp_flag, mss, vlan);
7793 			*budget -= 1;
7794 			prvidx = *entry;
7795 			*entry = NEXT_TX(*entry);
7796 
7797 			map += frag_len;
7798 		}
7799 
7800 		if (len) {
7801 			if (*budget) {
7802 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7803 					      len, flags, mss, vlan);
7804 				*budget -= 1;
7805 				*entry = NEXT_TX(*entry);
7806 			} else {
7807 				hwbug = true;
7808 				tnapi->tx_buffers[prvidx].fragmented = false;
7809 			}
7810 		}
7811 	} else {
7812 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7813 			      len, flags, mss, vlan);
7814 		*entry = NEXT_TX(*entry);
7815 	}
7816 
7817 	return hwbug;
7818 }
7819 
7820 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7821 {
7822 	int i;
7823 	struct sk_buff *skb;
7824 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7825 
7826 	skb = txb->skb;
7827 	txb->skb = NULL;
7828 
7829 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7830 			 skb_headlen(skb), DMA_TO_DEVICE);
7831 
7832 	while (txb->fragmented) {
7833 		txb->fragmented = false;
7834 		entry = NEXT_TX(entry);
7835 		txb = &tnapi->tx_buffers[entry];
7836 	}
7837 
7838 	for (i = 0; i <= last; i++) {
7839 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7840 
7841 		entry = NEXT_TX(entry);
7842 		txb = &tnapi->tx_buffers[entry];
7843 
7844 		dma_unmap_page(&tnapi->tp->pdev->dev,
7845 			       dma_unmap_addr(txb, mapping),
7846 			       skb_frag_size(frag), DMA_TO_DEVICE);
7847 
7848 		while (txb->fragmented) {
7849 			txb->fragmented = false;
7850 			entry = NEXT_TX(entry);
7851 			txb = &tnapi->tx_buffers[entry];
7852 		}
7853 	}
7854 }
7855 
7856 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7857 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7858 				       struct sk_buff **pskb,
7859 				       u32 *entry, u32 *budget,
7860 				       u32 base_flags, u32 mss, u32 vlan)
7861 {
7862 	struct tg3 *tp = tnapi->tp;
7863 	struct sk_buff *new_skb, *skb = *pskb;
7864 	dma_addr_t new_addr = 0;
7865 	int ret = 0;
7866 
7867 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7868 		new_skb = skb_copy(skb, GFP_ATOMIC);
7869 	else {
7870 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7871 
7872 		new_skb = skb_copy_expand(skb,
7873 					  skb_headroom(skb) + more_headroom,
7874 					  skb_tailroom(skb), GFP_ATOMIC);
7875 	}
7876 
7877 	if (!new_skb) {
7878 		ret = -1;
7879 	} else {
7880 		/* New SKB is guaranteed to be linear. */
7881 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7882 					  new_skb->len, DMA_TO_DEVICE);
7883 		/* Make sure the mapping succeeded */
7884 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7885 			dev_kfree_skb_any(new_skb);
7886 			ret = -1;
7887 		} else {
7888 			u32 save_entry = *entry;
7889 
7890 			base_flags |= TXD_FLAG_END;
7891 
7892 			tnapi->tx_buffers[*entry].skb = new_skb;
7893 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7894 					   mapping, new_addr);
7895 
7896 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7897 					    new_skb->len, base_flags,
7898 					    mss, vlan)) {
7899 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7900 				dev_kfree_skb_any(new_skb);
7901 				ret = -1;
7902 			}
7903 		}
7904 	}
7905 
7906 	dev_consume_skb_any(skb);
7907 	*pskb = new_skb;
7908 	return ret;
7909 }
7910 
7911 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7912 {
7913 	/* Check if we will never have enough descriptors,
7914 	 * as gso_segs can be more than current ring size
7915 	 */
7916 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7917 }
7918 
7919 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7920 
7921 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7922  * indicated in tg3_tx_frag_set()
7923  */
7924 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7925 		       struct netdev_queue *txq, struct sk_buff *skb)
7926 {
7927 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7928 	struct sk_buff *segs, *seg, *next;
7929 
7930 	/* Estimate the number of fragments in the worst case */
7931 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7932 		netif_tx_stop_queue(txq);
7933 
7934 		/* netif_tx_stop_queue() must be done before checking
7935 		 * checking tx index in tg3_tx_avail() below, because in
7936 		 * tg3_tx(), we update tx index before checking for
7937 		 * netif_tx_queue_stopped().
7938 		 */
7939 		smp_mb();
7940 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7941 			return NETDEV_TX_BUSY;
7942 
7943 		netif_tx_wake_queue(txq);
7944 	}
7945 
7946 	segs = skb_gso_segment(skb, tp->dev->features &
7947 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7948 	if (IS_ERR(segs) || !segs) {
7949 		tnapi->tx_dropped++;
7950 		goto tg3_tso_bug_end;
7951 	}
7952 
7953 	skb_list_walk_safe(segs, seg, next) {
7954 		skb_mark_not_on_list(seg);
7955 		__tg3_start_xmit(seg, tp->dev);
7956 	}
7957 
7958 tg3_tso_bug_end:
7959 	dev_consume_skb_any(skb);
7960 
7961 	return NETDEV_TX_OK;
7962 }
7963 
7964 /* hard_start_xmit for all devices */
7965 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7966 {
7967 	struct tg3 *tp = netdev_priv(dev);
7968 	u32 len, entry, base_flags, mss, vlan = 0;
7969 	u32 budget;
7970 	int i = -1, would_hit_hwbug;
7971 	dma_addr_t mapping;
7972 	struct tg3_napi *tnapi;
7973 	struct netdev_queue *txq;
7974 	unsigned int last;
7975 	struct iphdr *iph = NULL;
7976 	struct tcphdr *tcph = NULL;
7977 	__sum16 tcp_csum = 0, ip_csum = 0;
7978 	__be16 ip_tot_len = 0;
7979 
7980 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7981 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7982 	if (tg3_flag(tp, ENABLE_TSS))
7983 		tnapi++;
7984 
7985 	budget = tg3_tx_avail(tnapi);
7986 
7987 	/* We are running in BH disabled context with netif_tx_lock
7988 	 * and TX reclaim runs via tp->napi.poll inside of a software
7989 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7990 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7991 	 */
7992 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7993 		if (!netif_tx_queue_stopped(txq)) {
7994 			netif_tx_stop_queue(txq);
7995 
7996 			/* This is a hard error, log it. */
7997 			netdev_err(dev,
7998 				   "BUG! Tx Ring full when queue awake!\n");
7999 		}
8000 		return NETDEV_TX_BUSY;
8001 	}
8002 
8003 	entry = tnapi->tx_prod;
8004 	base_flags = 0;
8005 
8006 	mss = skb_shinfo(skb)->gso_size;
8007 	if (mss) {
8008 		u32 tcp_opt_len, hdr_len;
8009 
8010 		if (skb_cow_head(skb, 0))
8011 			goto drop;
8012 
8013 		iph = ip_hdr(skb);
8014 		tcp_opt_len = tcp_optlen(skb);
8015 
8016 		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
8017 
8018 		/* HW/FW can not correctly segment packets that have been
8019 		 * vlan encapsulated.
8020 		 */
8021 		if (skb->protocol == htons(ETH_P_8021Q) ||
8022 		    skb->protocol == htons(ETH_P_8021AD)) {
8023 			if (tg3_tso_bug_gso_check(tnapi, skb))
8024 				return tg3_tso_bug(tp, tnapi, txq, skb);
8025 			goto drop;
8026 		}
8027 
8028 		if (!skb_is_gso_v6(skb)) {
8029 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8030 			    tg3_flag(tp, TSO_BUG)) {
8031 				if (tg3_tso_bug_gso_check(tnapi, skb))
8032 					return tg3_tso_bug(tp, tnapi, txq, skb);
8033 				goto drop;
8034 			}
8035 			ip_csum = iph->check;
8036 			ip_tot_len = iph->tot_len;
8037 			iph->check = 0;
8038 			iph->tot_len = htons(mss + hdr_len);
8039 		}
8040 
8041 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8042 			       TXD_FLAG_CPU_POST_DMA);
8043 
8044 		tcph = tcp_hdr(skb);
8045 		tcp_csum = tcph->check;
8046 
8047 		if (tg3_flag(tp, HW_TSO_1) ||
8048 		    tg3_flag(tp, HW_TSO_2) ||
8049 		    tg3_flag(tp, HW_TSO_3)) {
8050 			tcph->check = 0;
8051 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8052 		} else {
8053 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8054 							 0, IPPROTO_TCP, 0);
8055 		}
8056 
8057 		if (tg3_flag(tp, HW_TSO_3)) {
8058 			mss |= (hdr_len & 0xc) << 12;
8059 			if (hdr_len & 0x10)
8060 				base_flags |= 0x00000010;
8061 			base_flags |= (hdr_len & 0x3e0) << 5;
8062 		} else if (tg3_flag(tp, HW_TSO_2))
8063 			mss |= hdr_len << 9;
8064 		else if (tg3_flag(tp, HW_TSO_1) ||
8065 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8066 			if (tcp_opt_len || iph->ihl > 5) {
8067 				int tsflags;
8068 
8069 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8070 				mss |= (tsflags << 11);
8071 			}
8072 		} else {
8073 			if (tcp_opt_len || iph->ihl > 5) {
8074 				int tsflags;
8075 
8076 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8077 				base_flags |= tsflags << 12;
8078 			}
8079 		}
8080 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8081 		/* HW/FW can not correctly checksum packets that have been
8082 		 * vlan encapsulated.
8083 		 */
8084 		if (skb->protocol == htons(ETH_P_8021Q) ||
8085 		    skb->protocol == htons(ETH_P_8021AD)) {
8086 			if (skb_checksum_help(skb))
8087 				goto drop;
8088 		} else  {
8089 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8090 		}
8091 	}
8092 
8093 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8094 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8095 		base_flags |= TXD_FLAG_JMB_PKT;
8096 
8097 	if (skb_vlan_tag_present(skb)) {
8098 		base_flags |= TXD_FLAG_VLAN;
8099 		vlan = skb_vlan_tag_get(skb);
8100 	}
8101 
8102 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8103 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8104 		tg3_full_lock(tp, 0);
8105 		if (!tp->pre_tx_ts) {
8106 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8107 			base_flags |= TXD_FLAG_HWTSTAMP;
8108 			tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8109 		}
8110 		tg3_full_unlock(tp);
8111 	}
8112 
8113 	len = skb_headlen(skb);
8114 
8115 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8116 				 DMA_TO_DEVICE);
8117 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8118 		goto drop;
8119 
8120 
8121 	tnapi->tx_buffers[entry].skb = skb;
8122 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8123 
8124 	would_hit_hwbug = 0;
8125 
8126 	if (tg3_flag(tp, 5701_DMA_BUG))
8127 		would_hit_hwbug = 1;
8128 
8129 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8130 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8131 			    mss, vlan)) {
8132 		would_hit_hwbug = 1;
8133 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8134 		u32 tmp_mss = mss;
8135 
8136 		if (!tg3_flag(tp, HW_TSO_1) &&
8137 		    !tg3_flag(tp, HW_TSO_2) &&
8138 		    !tg3_flag(tp, HW_TSO_3))
8139 			tmp_mss = 0;
8140 
8141 		/* Now loop through additional data
8142 		 * fragments, and queue them.
8143 		 */
8144 		last = skb_shinfo(skb)->nr_frags - 1;
8145 		for (i = 0; i <= last; i++) {
8146 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8147 
8148 			len = skb_frag_size(frag);
8149 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8150 						   len, DMA_TO_DEVICE);
8151 
8152 			tnapi->tx_buffers[entry].skb = NULL;
8153 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8154 					   mapping);
8155 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8156 				goto dma_error;
8157 
8158 			if (!budget ||
8159 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8160 					    len, base_flags |
8161 					    ((i == last) ? TXD_FLAG_END : 0),
8162 					    tmp_mss, vlan)) {
8163 				would_hit_hwbug = 1;
8164 				break;
8165 			}
8166 		}
8167 	}
8168 
8169 	if (would_hit_hwbug) {
8170 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8171 
8172 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8173 			/* If it's a TSO packet, do GSO instead of
8174 			 * allocating and copying to a large linear SKB
8175 			 */
8176 			if (ip_tot_len) {
8177 				iph->check = ip_csum;
8178 				iph->tot_len = ip_tot_len;
8179 			}
8180 			tcph->check = tcp_csum;
8181 			return tg3_tso_bug(tp, tnapi, txq, skb);
8182 		}
8183 
8184 		/* If the workaround fails due to memory/mapping
8185 		 * failure, silently drop this packet.
8186 		 */
8187 		entry = tnapi->tx_prod;
8188 		budget = tg3_tx_avail(tnapi);
8189 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8190 						base_flags, mss, vlan))
8191 			goto drop_nofree;
8192 	}
8193 
8194 	skb_tx_timestamp(skb);
8195 	netdev_tx_sent_queue(txq, skb->len);
8196 
8197 	/* Sync BD data before updating mailbox */
8198 	wmb();
8199 
8200 	tnapi->tx_prod = entry;
8201 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8202 		netif_tx_stop_queue(txq);
8203 
8204 		/* netif_tx_stop_queue() must be done before checking
8205 		 * checking tx index in tg3_tx_avail() below, because in
8206 		 * tg3_tx(), we update tx index before checking for
8207 		 * netif_tx_queue_stopped().
8208 		 */
8209 		smp_mb();
8210 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8211 			netif_tx_wake_queue(txq);
8212 	}
8213 
8214 	return NETDEV_TX_OK;
8215 
8216 dma_error:
8217 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8218 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8219 drop:
8220 	dev_kfree_skb_any(skb);
8221 drop_nofree:
8222 	tnapi->tx_dropped++;
8223 	return NETDEV_TX_OK;
8224 }
8225 
8226 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8227 {
8228 	struct netdev_queue *txq;
8229 	u16 skb_queue_mapping;
8230 	netdev_tx_t ret;
8231 
8232 	skb_queue_mapping = skb_get_queue_mapping(skb);
8233 	txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8234 
8235 	ret = __tg3_start_xmit(skb, dev);
8236 
8237 	/* Notify the hardware that packets are ready by updating the TX ring
8238 	 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8239 	 * the hardware for every packet. To guarantee forward progress the TX
8240 	 * ring must be drained when it is full as indicated by
8241 	 * netif_xmit_stopped(). This needs to happen even when the current
8242 	 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8243 	 * queued by previous __tg3_start_xmit() calls might get stuck in
8244 	 * the queue forever.
8245 	 */
8246 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8247 		struct tg3_napi *tnapi;
8248 		struct tg3 *tp;
8249 
8250 		tp = netdev_priv(dev);
8251 		tnapi = &tp->napi[skb_queue_mapping];
8252 
8253 		if (tg3_flag(tp, ENABLE_TSS))
8254 			tnapi++;
8255 
8256 		tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8257 	}
8258 
8259 	return ret;
8260 }
8261 
8262 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8263 {
8264 	if (enable) {
8265 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8266 				  MAC_MODE_PORT_MODE_MASK);
8267 
8268 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8269 
8270 		if (!tg3_flag(tp, 5705_PLUS))
8271 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8272 
8273 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8274 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8275 		else
8276 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8277 	} else {
8278 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8279 
8280 		if (tg3_flag(tp, 5705_PLUS) ||
8281 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8282 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8283 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8284 	}
8285 
8286 	tw32(MAC_MODE, tp->mac_mode);
8287 	udelay(40);
8288 }
8289 
8290 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8291 {
8292 	u32 val, bmcr, mac_mode, ptest = 0;
8293 
8294 	tg3_phy_toggle_apd(tp, false);
8295 	tg3_phy_toggle_automdix(tp, false);
8296 
8297 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8298 		return -EIO;
8299 
8300 	bmcr = BMCR_FULLDPLX;
8301 	switch (speed) {
8302 	case SPEED_10:
8303 		break;
8304 	case SPEED_100:
8305 		bmcr |= BMCR_SPEED100;
8306 		break;
8307 	case SPEED_1000:
8308 	default:
8309 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8310 			speed = SPEED_100;
8311 			bmcr |= BMCR_SPEED100;
8312 		} else {
8313 			speed = SPEED_1000;
8314 			bmcr |= BMCR_SPEED1000;
8315 		}
8316 	}
8317 
8318 	if (extlpbk) {
8319 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8320 			tg3_readphy(tp, MII_CTRL1000, &val);
8321 			val |= CTL1000_AS_MASTER |
8322 			       CTL1000_ENABLE_MASTER;
8323 			tg3_writephy(tp, MII_CTRL1000, val);
8324 		} else {
8325 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8326 				MII_TG3_FET_PTEST_TRIM_2;
8327 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8328 		}
8329 	} else
8330 		bmcr |= BMCR_LOOPBACK;
8331 
8332 	tg3_writephy(tp, MII_BMCR, bmcr);
8333 
8334 	/* The write needs to be flushed for the FETs */
8335 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8336 		tg3_readphy(tp, MII_BMCR, &bmcr);
8337 
8338 	udelay(40);
8339 
8340 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8341 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8342 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8343 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8344 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8345 
8346 		/* The write needs to be flushed for the AC131 */
8347 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8348 	}
8349 
8350 	/* Reset to prevent losing 1st rx packet intermittently */
8351 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8352 	    tg3_flag(tp, 5780_CLASS)) {
8353 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8354 		udelay(10);
8355 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8356 	}
8357 
8358 	mac_mode = tp->mac_mode &
8359 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8360 	if (speed == SPEED_1000)
8361 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8362 	else
8363 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8364 
8365 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8366 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8367 
8368 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8369 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8370 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8371 			mac_mode |= MAC_MODE_LINK_POLARITY;
8372 
8373 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8374 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8375 	}
8376 
8377 	tw32(MAC_MODE, mac_mode);
8378 	udelay(40);
8379 
8380 	return 0;
8381 }
8382 
8383 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8384 {
8385 	struct tg3 *tp = netdev_priv(dev);
8386 
8387 	if (features & NETIF_F_LOOPBACK) {
8388 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8389 			return;
8390 
8391 		spin_lock_bh(&tp->lock);
8392 		tg3_mac_loopback(tp, true);
8393 		netif_carrier_on(tp->dev);
8394 		spin_unlock_bh(&tp->lock);
8395 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8396 	} else {
8397 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8398 			return;
8399 
8400 		spin_lock_bh(&tp->lock);
8401 		tg3_mac_loopback(tp, false);
8402 		/* Force link status check */
8403 		tg3_setup_phy(tp, true);
8404 		spin_unlock_bh(&tp->lock);
8405 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8406 	}
8407 }
8408 
8409 static netdev_features_t tg3_fix_features(struct net_device *dev,
8410 	netdev_features_t features)
8411 {
8412 	struct tg3 *tp = netdev_priv(dev);
8413 
8414 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8415 		features &= ~NETIF_F_ALL_TSO;
8416 
8417 	return features;
8418 }
8419 
8420 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8421 {
8422 	netdev_features_t changed = dev->features ^ features;
8423 
8424 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8425 		tg3_set_loopback(dev, features);
8426 
8427 	return 0;
8428 }
8429 
8430 static void tg3_rx_prodring_free(struct tg3 *tp,
8431 				 struct tg3_rx_prodring_set *tpr)
8432 {
8433 	int i;
8434 
8435 	if (tpr != &tp->napi[0].prodring) {
8436 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8437 		     i = (i + 1) & tp->rx_std_ring_mask)
8438 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8439 					tp->rx_pkt_map_sz);
8440 
8441 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8442 			for (i = tpr->rx_jmb_cons_idx;
8443 			     i != tpr->rx_jmb_prod_idx;
8444 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8445 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8446 						TG3_RX_JMB_MAP_SZ);
8447 			}
8448 		}
8449 
8450 		return;
8451 	}
8452 
8453 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8454 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8455 				tp->rx_pkt_map_sz);
8456 
8457 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8458 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8459 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8460 					TG3_RX_JMB_MAP_SZ);
8461 	}
8462 }
8463 
8464 /* Initialize rx rings for packet processing.
8465  *
8466  * The chip has been shut down and the driver detached from
8467  * the networking, so no interrupts or new tx packets will
8468  * end up in the driver.  tp->{tx,}lock are held and thus
8469  * we may not sleep.
8470  */
8471 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8472 				 struct tg3_rx_prodring_set *tpr)
8473 {
8474 	u32 i, rx_pkt_dma_sz;
8475 
8476 	tpr->rx_std_cons_idx = 0;
8477 	tpr->rx_std_prod_idx = 0;
8478 	tpr->rx_jmb_cons_idx = 0;
8479 	tpr->rx_jmb_prod_idx = 0;
8480 
8481 	if (tpr != &tp->napi[0].prodring) {
8482 		memset(&tpr->rx_std_buffers[0], 0,
8483 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8484 		if (tpr->rx_jmb_buffers)
8485 			memset(&tpr->rx_jmb_buffers[0], 0,
8486 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8487 		goto done;
8488 	}
8489 
8490 	/* Zero out all descriptors. */
8491 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8492 
8493 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8494 	if (tg3_flag(tp, 5780_CLASS) &&
8495 	    tp->dev->mtu > ETH_DATA_LEN)
8496 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8497 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8498 
8499 	/* Initialize invariants of the rings, we only set this
8500 	 * stuff once.  This works because the card does not
8501 	 * write into the rx buffer posting rings.
8502 	 */
8503 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8504 		struct tg3_rx_buffer_desc *rxd;
8505 
8506 		rxd = &tpr->rx_std[i];
8507 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8508 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8509 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8510 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8511 	}
8512 
8513 	/* Now allocate fresh SKBs for each rx ring. */
8514 	for (i = 0; i < tp->rx_pending; i++) {
8515 		unsigned int frag_size;
8516 
8517 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8518 				      &frag_size) < 0) {
8519 			netdev_warn(tp->dev,
8520 				    "Using a smaller RX standard ring. Only "
8521 				    "%d out of %d buffers were allocated "
8522 				    "successfully\n", i, tp->rx_pending);
8523 			if (i == 0)
8524 				goto initfail;
8525 			tp->rx_pending = i;
8526 			break;
8527 		}
8528 	}
8529 
8530 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8531 		goto done;
8532 
8533 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8534 
8535 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8536 		goto done;
8537 
8538 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8539 		struct tg3_rx_buffer_desc *rxd;
8540 
8541 		rxd = &tpr->rx_jmb[i].std;
8542 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8543 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8544 				  RXD_FLAG_JUMBO;
8545 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8546 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8547 	}
8548 
8549 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8550 		unsigned int frag_size;
8551 
8552 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8553 				      &frag_size) < 0) {
8554 			netdev_warn(tp->dev,
8555 				    "Using a smaller RX jumbo ring. Only %d "
8556 				    "out of %d buffers were allocated "
8557 				    "successfully\n", i, tp->rx_jumbo_pending);
8558 			if (i == 0)
8559 				goto initfail;
8560 			tp->rx_jumbo_pending = i;
8561 			break;
8562 		}
8563 	}
8564 
8565 done:
8566 	return 0;
8567 
8568 initfail:
8569 	tg3_rx_prodring_free(tp, tpr);
8570 	return -ENOMEM;
8571 }
8572 
8573 static void tg3_rx_prodring_fini(struct tg3 *tp,
8574 				 struct tg3_rx_prodring_set *tpr)
8575 {
8576 	kfree(tpr->rx_std_buffers);
8577 	tpr->rx_std_buffers = NULL;
8578 	kfree(tpr->rx_jmb_buffers);
8579 	tpr->rx_jmb_buffers = NULL;
8580 	if (tpr->rx_std) {
8581 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8582 				  tpr->rx_std, tpr->rx_std_mapping);
8583 		tpr->rx_std = NULL;
8584 	}
8585 	if (tpr->rx_jmb) {
8586 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8587 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8588 		tpr->rx_jmb = NULL;
8589 	}
8590 }
8591 
8592 static int tg3_rx_prodring_init(struct tg3 *tp,
8593 				struct tg3_rx_prodring_set *tpr)
8594 {
8595 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8596 				      GFP_KERNEL);
8597 	if (!tpr->rx_std_buffers)
8598 		return -ENOMEM;
8599 
8600 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8601 					 TG3_RX_STD_RING_BYTES(tp),
8602 					 &tpr->rx_std_mapping,
8603 					 GFP_KERNEL);
8604 	if (!tpr->rx_std)
8605 		goto err_out;
8606 
8607 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8608 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8609 					      GFP_KERNEL);
8610 		if (!tpr->rx_jmb_buffers)
8611 			goto err_out;
8612 
8613 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8614 						 TG3_RX_JMB_RING_BYTES(tp),
8615 						 &tpr->rx_jmb_mapping,
8616 						 GFP_KERNEL);
8617 		if (!tpr->rx_jmb)
8618 			goto err_out;
8619 	}
8620 
8621 	return 0;
8622 
8623 err_out:
8624 	tg3_rx_prodring_fini(tp, tpr);
8625 	return -ENOMEM;
8626 }
8627 
8628 /* Free up pending packets in all rx/tx rings.
8629  *
8630  * The chip has been shut down and the driver detached from
8631  * the networking, so no interrupts or new tx packets will
8632  * end up in the driver.  tp->{tx,}lock is not held and we are not
8633  * in an interrupt context and thus may sleep.
8634  */
8635 static void tg3_free_rings(struct tg3 *tp)
8636 {
8637 	int i, j;
8638 
8639 	for (j = 0; j < tp->irq_cnt; j++) {
8640 		struct tg3_napi *tnapi = &tp->napi[j];
8641 
8642 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8643 
8644 		if (!tnapi->tx_buffers)
8645 			continue;
8646 
8647 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8648 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8649 
8650 			if (!skb)
8651 				continue;
8652 
8653 			tg3_tx_skb_unmap(tnapi, i,
8654 					 skb_shinfo(skb)->nr_frags - 1);
8655 
8656 			dev_consume_skb_any(skb);
8657 		}
8658 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8659 	}
8660 }
8661 
8662 /* Initialize tx/rx rings for packet processing.
8663  *
8664  * The chip has been shut down and the driver detached from
8665  * the networking, so no interrupts or new tx packets will
8666  * end up in the driver.  tp->{tx,}lock are held and thus
8667  * we may not sleep.
8668  */
8669 static int tg3_init_rings(struct tg3 *tp)
8670 {
8671 	int i;
8672 
8673 	/* Free up all the SKBs. */
8674 	tg3_free_rings(tp);
8675 
8676 	for (i = 0; i < tp->irq_cnt; i++) {
8677 		struct tg3_napi *tnapi = &tp->napi[i];
8678 
8679 		tnapi->last_tag = 0;
8680 		tnapi->last_irq_tag = 0;
8681 		tnapi->hw_status->status = 0;
8682 		tnapi->hw_status->status_tag = 0;
8683 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8684 
8685 		tnapi->tx_prod = 0;
8686 		tnapi->tx_cons = 0;
8687 		if (tnapi->tx_ring)
8688 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8689 
8690 		tnapi->rx_rcb_ptr = 0;
8691 		if (tnapi->rx_rcb)
8692 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8693 
8694 		if (tnapi->prodring.rx_std &&
8695 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8696 			tg3_free_rings(tp);
8697 			return -ENOMEM;
8698 		}
8699 	}
8700 
8701 	return 0;
8702 }
8703 
8704 static void tg3_mem_tx_release(struct tg3 *tp)
8705 {
8706 	int i;
8707 
8708 	for (i = 0; i < tp->irq_max; i++) {
8709 		struct tg3_napi *tnapi = &tp->napi[i];
8710 
8711 		if (tnapi->tx_ring) {
8712 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8713 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8714 			tnapi->tx_ring = NULL;
8715 		}
8716 
8717 		kfree(tnapi->tx_buffers);
8718 		tnapi->tx_buffers = NULL;
8719 	}
8720 }
8721 
8722 static int tg3_mem_tx_acquire(struct tg3 *tp)
8723 {
8724 	int i;
8725 	struct tg3_napi *tnapi = &tp->napi[0];
8726 
8727 	/* If multivector TSS is enabled, vector 0 does not handle
8728 	 * tx interrupts.  Don't allocate any resources for it.
8729 	 */
8730 	if (tg3_flag(tp, ENABLE_TSS))
8731 		tnapi++;
8732 
8733 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8734 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8735 					    sizeof(struct tg3_tx_ring_info),
8736 					    GFP_KERNEL);
8737 		if (!tnapi->tx_buffers)
8738 			goto err_out;
8739 
8740 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8741 						    TG3_TX_RING_BYTES,
8742 						    &tnapi->tx_desc_mapping,
8743 						    GFP_KERNEL);
8744 		if (!tnapi->tx_ring)
8745 			goto err_out;
8746 	}
8747 
8748 	return 0;
8749 
8750 err_out:
8751 	tg3_mem_tx_release(tp);
8752 	return -ENOMEM;
8753 }
8754 
8755 static void tg3_mem_rx_release(struct tg3 *tp)
8756 {
8757 	int i;
8758 
8759 	for (i = 0; i < tp->irq_max; i++) {
8760 		struct tg3_napi *tnapi = &tp->napi[i];
8761 
8762 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8763 
8764 		if (!tnapi->rx_rcb)
8765 			continue;
8766 
8767 		dma_free_coherent(&tp->pdev->dev,
8768 				  TG3_RX_RCB_RING_BYTES(tp),
8769 				  tnapi->rx_rcb,
8770 				  tnapi->rx_rcb_mapping);
8771 		tnapi->rx_rcb = NULL;
8772 	}
8773 }
8774 
8775 static int tg3_mem_rx_acquire(struct tg3 *tp)
8776 {
8777 	unsigned int i, limit;
8778 
8779 	limit = tp->rxq_cnt;
8780 
8781 	/* If RSS is enabled, we need a (dummy) producer ring
8782 	 * set on vector zero.  This is the true hw prodring.
8783 	 */
8784 	if (tg3_flag(tp, ENABLE_RSS))
8785 		limit++;
8786 
8787 	for (i = 0; i < limit; i++) {
8788 		struct tg3_napi *tnapi = &tp->napi[i];
8789 
8790 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8791 			goto err_out;
8792 
8793 		/* If multivector RSS is enabled, vector 0
8794 		 * does not handle rx or tx interrupts.
8795 		 * Don't allocate any resources for it.
8796 		 */
8797 		if (!i && tg3_flag(tp, ENABLE_RSS))
8798 			continue;
8799 
8800 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8801 						   TG3_RX_RCB_RING_BYTES(tp),
8802 						   &tnapi->rx_rcb_mapping,
8803 						   GFP_KERNEL);
8804 		if (!tnapi->rx_rcb)
8805 			goto err_out;
8806 	}
8807 
8808 	return 0;
8809 
8810 err_out:
8811 	tg3_mem_rx_release(tp);
8812 	return -ENOMEM;
8813 }
8814 
8815 /*
8816  * Must not be invoked with interrupt sources disabled and
8817  * the hardware shutdown down.
8818  */
8819 static void tg3_free_consistent(struct tg3 *tp)
8820 {
8821 	int i;
8822 
8823 	for (i = 0; i < tp->irq_cnt; i++) {
8824 		struct tg3_napi *tnapi = &tp->napi[i];
8825 
8826 		if (tnapi->hw_status) {
8827 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8828 					  tnapi->hw_status,
8829 					  tnapi->status_mapping);
8830 			tnapi->hw_status = NULL;
8831 		}
8832 	}
8833 
8834 	tg3_mem_rx_release(tp);
8835 	tg3_mem_tx_release(tp);
8836 
8837 	/* tp->hw_stats can be referenced safely:
8838 	 *     1. under rtnl_lock
8839 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8840 	 */
8841 	if (tp->hw_stats) {
8842 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8843 				  tp->hw_stats, tp->stats_mapping);
8844 		tp->hw_stats = NULL;
8845 	}
8846 }
8847 
8848 /*
8849  * Must not be invoked with interrupt sources disabled and
8850  * the hardware shutdown down.  Can sleep.
8851  */
8852 static int tg3_alloc_consistent(struct tg3 *tp)
8853 {
8854 	int i;
8855 
8856 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8857 					  sizeof(struct tg3_hw_stats),
8858 					  &tp->stats_mapping, GFP_KERNEL);
8859 	if (!tp->hw_stats)
8860 		goto err_out;
8861 
8862 	for (i = 0; i < tp->irq_cnt; i++) {
8863 		struct tg3_napi *tnapi = &tp->napi[i];
8864 		struct tg3_hw_status *sblk;
8865 
8866 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8867 						      TG3_HW_STATUS_SIZE,
8868 						      &tnapi->status_mapping,
8869 						      GFP_KERNEL);
8870 		if (!tnapi->hw_status)
8871 			goto err_out;
8872 
8873 		sblk = tnapi->hw_status;
8874 
8875 		if (tg3_flag(tp, ENABLE_RSS)) {
8876 			u16 *prodptr = NULL;
8877 
8878 			/*
8879 			 * When RSS is enabled, the status block format changes
8880 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8881 			 * and "rx_mini_consumer" members get mapped to the
8882 			 * other three rx return ring producer indexes.
8883 			 */
8884 			switch (i) {
8885 			case 1:
8886 				prodptr = &sblk->idx[0].rx_producer;
8887 				break;
8888 			case 2:
8889 				prodptr = &sblk->rx_jumbo_consumer;
8890 				break;
8891 			case 3:
8892 				prodptr = &sblk->reserved;
8893 				break;
8894 			case 4:
8895 				prodptr = &sblk->rx_mini_consumer;
8896 				break;
8897 			}
8898 			tnapi->rx_rcb_prod_idx = prodptr;
8899 		} else {
8900 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8901 		}
8902 	}
8903 
8904 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8905 		goto err_out;
8906 
8907 	return 0;
8908 
8909 err_out:
8910 	tg3_free_consistent(tp);
8911 	return -ENOMEM;
8912 }
8913 
8914 #define MAX_WAIT_CNT 1000
8915 
8916 /* To stop a block, clear the enable bit and poll till it
8917  * clears.  tp->lock is held.
8918  */
8919 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8920 {
8921 	unsigned int i;
8922 	u32 val;
8923 
8924 	if (tg3_flag(tp, 5705_PLUS)) {
8925 		switch (ofs) {
8926 		case RCVLSC_MODE:
8927 		case DMAC_MODE:
8928 		case MBFREE_MODE:
8929 		case BUFMGR_MODE:
8930 		case MEMARB_MODE:
8931 			/* We can't enable/disable these bits of the
8932 			 * 5705/5750, just say success.
8933 			 */
8934 			return 0;
8935 
8936 		default:
8937 			break;
8938 		}
8939 	}
8940 
8941 	val = tr32(ofs);
8942 	val &= ~enable_bit;
8943 	tw32_f(ofs, val);
8944 
8945 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8946 		if (pci_channel_offline(tp->pdev)) {
8947 			dev_err(&tp->pdev->dev,
8948 				"tg3_stop_block device offline, "
8949 				"ofs=%lx enable_bit=%x\n",
8950 				ofs, enable_bit);
8951 			return -ENODEV;
8952 		}
8953 
8954 		udelay(100);
8955 		val = tr32(ofs);
8956 		if ((val & enable_bit) == 0)
8957 			break;
8958 	}
8959 
8960 	if (i == MAX_WAIT_CNT && !silent) {
8961 		dev_err(&tp->pdev->dev,
8962 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8963 			ofs, enable_bit);
8964 		return -ENODEV;
8965 	}
8966 
8967 	return 0;
8968 }
8969 
8970 /* tp->lock is held. */
8971 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8972 {
8973 	int i, err;
8974 
8975 	tg3_disable_ints(tp);
8976 
8977 	if (pci_channel_offline(tp->pdev)) {
8978 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8979 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8980 		err = -ENODEV;
8981 		goto err_no_dev;
8982 	}
8983 
8984 	tp->rx_mode &= ~RX_MODE_ENABLE;
8985 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8986 	udelay(10);
8987 
8988 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8989 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8990 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8991 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8992 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8993 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8994 
8995 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8996 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8997 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8998 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8999 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
9000 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
9001 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
9002 
9003 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
9004 	tw32_f(MAC_MODE, tp->mac_mode);
9005 	udelay(40);
9006 
9007 	tp->tx_mode &= ~TX_MODE_ENABLE;
9008 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9009 
9010 	for (i = 0; i < MAX_WAIT_CNT; i++) {
9011 		udelay(100);
9012 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
9013 			break;
9014 	}
9015 	if (i >= MAX_WAIT_CNT) {
9016 		dev_err(&tp->pdev->dev,
9017 			"%s timed out, TX_MODE_ENABLE will not clear "
9018 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9019 		err |= -ENODEV;
9020 	}
9021 
9022 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9023 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9024 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9025 
9026 	tw32(FTQ_RESET, 0xffffffff);
9027 	tw32(FTQ_RESET, 0x00000000);
9028 
9029 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9030 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9031 
9032 err_no_dev:
9033 	for (i = 0; i < tp->irq_cnt; i++) {
9034 		struct tg3_napi *tnapi = &tp->napi[i];
9035 		if (tnapi->hw_status)
9036 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9037 	}
9038 
9039 	return err;
9040 }
9041 
9042 /* Save PCI command register before chip reset */
9043 static void tg3_save_pci_state(struct tg3 *tp)
9044 {
9045 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9046 }
9047 
9048 /* Restore PCI state after chip reset */
9049 static void tg3_restore_pci_state(struct tg3 *tp)
9050 {
9051 	u32 val;
9052 
9053 	/* Re-enable indirect register accesses. */
9054 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9055 			       tp->misc_host_ctrl);
9056 
9057 	/* Set MAX PCI retry to zero. */
9058 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9059 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9060 	    tg3_flag(tp, PCIX_MODE))
9061 		val |= PCISTATE_RETRY_SAME_DMA;
9062 	/* Allow reads and writes to the APE register and memory space. */
9063 	if (tg3_flag(tp, ENABLE_APE))
9064 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9065 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9066 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9067 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9068 
9069 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9070 
9071 	if (!tg3_flag(tp, PCI_EXPRESS)) {
9072 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9073 				      tp->pci_cacheline_sz);
9074 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9075 				      tp->pci_lat_timer);
9076 	}
9077 
9078 	/* Make sure PCI-X relaxed ordering bit is clear. */
9079 	if (tg3_flag(tp, PCIX_MODE)) {
9080 		u16 pcix_cmd;
9081 
9082 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9083 				     &pcix_cmd);
9084 		pcix_cmd &= ~PCI_X_CMD_ERO;
9085 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9086 				      pcix_cmd);
9087 	}
9088 
9089 	if (tg3_flag(tp, 5780_CLASS)) {
9090 
9091 		/* Chip reset on 5780 will reset MSI enable bit,
9092 		 * so need to restore it.
9093 		 */
9094 		if (tg3_flag(tp, USING_MSI)) {
9095 			u16 ctrl;
9096 
9097 			pci_read_config_word(tp->pdev,
9098 					     tp->msi_cap + PCI_MSI_FLAGS,
9099 					     &ctrl);
9100 			pci_write_config_word(tp->pdev,
9101 					      tp->msi_cap + PCI_MSI_FLAGS,
9102 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9103 			val = tr32(MSGINT_MODE);
9104 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9105 		}
9106 	}
9107 }
9108 
9109 static void tg3_override_clk(struct tg3 *tp)
9110 {
9111 	u32 val;
9112 
9113 	switch (tg3_asic_rev(tp)) {
9114 	case ASIC_REV_5717:
9115 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9116 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9117 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9118 		break;
9119 
9120 	case ASIC_REV_5719:
9121 	case ASIC_REV_5720:
9122 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9123 		break;
9124 
9125 	default:
9126 		return;
9127 	}
9128 }
9129 
9130 static void tg3_restore_clk(struct tg3 *tp)
9131 {
9132 	u32 val;
9133 
9134 	switch (tg3_asic_rev(tp)) {
9135 	case ASIC_REV_5717:
9136 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9137 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9138 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9139 		break;
9140 
9141 	case ASIC_REV_5719:
9142 	case ASIC_REV_5720:
9143 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9144 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9145 		break;
9146 
9147 	default:
9148 		return;
9149 	}
9150 }
9151 
9152 /* tp->lock is held. */
9153 static int tg3_chip_reset(struct tg3 *tp)
9154 	__releases(tp->lock)
9155 	__acquires(tp->lock)
9156 {
9157 	u32 val;
9158 	void (*write_op)(struct tg3 *, u32, u32);
9159 	int i, err;
9160 
9161 	if (!pci_device_is_present(tp->pdev))
9162 		return -ENODEV;
9163 
9164 	tg3_nvram_lock(tp);
9165 
9166 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9167 
9168 	/* No matching tg3_nvram_unlock() after this because
9169 	 * chip reset below will undo the nvram lock.
9170 	 */
9171 	tp->nvram_lock_cnt = 0;
9172 
9173 	/* GRC_MISC_CFG core clock reset will clear the memory
9174 	 * enable bit in PCI register 4 and the MSI enable bit
9175 	 * on some chips, so we save relevant registers here.
9176 	 */
9177 	tg3_save_pci_state(tp);
9178 
9179 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9180 	    tg3_flag(tp, 5755_PLUS))
9181 		tw32(GRC_FASTBOOT_PC, 0);
9182 
9183 	/*
9184 	 * We must avoid the readl() that normally takes place.
9185 	 * It locks machines, causes machine checks, and other
9186 	 * fun things.  So, temporarily disable the 5701
9187 	 * hardware workaround, while we do the reset.
9188 	 */
9189 	write_op = tp->write32;
9190 	if (write_op == tg3_write_flush_reg32)
9191 		tp->write32 = tg3_write32;
9192 
9193 	/* Prevent the irq handler from reading or writing PCI registers
9194 	 * during chip reset when the memory enable bit in the PCI command
9195 	 * register may be cleared.  The chip does not generate interrupt
9196 	 * at this time, but the irq handler may still be called due to irq
9197 	 * sharing or irqpoll.
9198 	 */
9199 	tg3_flag_set(tp, CHIP_RESETTING);
9200 	for (i = 0; i < tp->irq_cnt; i++) {
9201 		struct tg3_napi *tnapi = &tp->napi[i];
9202 		if (tnapi->hw_status) {
9203 			tnapi->hw_status->status = 0;
9204 			tnapi->hw_status->status_tag = 0;
9205 		}
9206 		tnapi->last_tag = 0;
9207 		tnapi->last_irq_tag = 0;
9208 	}
9209 	smp_mb();
9210 
9211 	tg3_full_unlock(tp);
9212 
9213 	for (i = 0; i < tp->irq_cnt; i++)
9214 		synchronize_irq(tp->napi[i].irq_vec);
9215 
9216 	tg3_full_lock(tp, 0);
9217 
9218 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9219 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9220 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9221 	}
9222 
9223 	/* do the reset */
9224 	val = GRC_MISC_CFG_CORECLK_RESET;
9225 
9226 	if (tg3_flag(tp, PCI_EXPRESS)) {
9227 		/* Force PCIe 1.0a mode */
9228 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9229 		    !tg3_flag(tp, 57765_PLUS) &&
9230 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9231 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9232 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9233 
9234 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9235 			tw32(GRC_MISC_CFG, (1 << 29));
9236 			val |= (1 << 29);
9237 		}
9238 	}
9239 
9240 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9241 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9242 		tw32(GRC_VCPU_EXT_CTRL,
9243 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9244 	}
9245 
9246 	/* Set the clock to the highest frequency to avoid timeouts. With link
9247 	 * aware mode, the clock speed could be slow and bootcode does not
9248 	 * complete within the expected time. Override the clock to allow the
9249 	 * bootcode to finish sooner and then restore it.
9250 	 */
9251 	tg3_override_clk(tp);
9252 
9253 	/* Manage gphy power for all CPMU absent PCIe devices. */
9254 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9255 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9256 
9257 	tw32(GRC_MISC_CFG, val);
9258 
9259 	/* restore 5701 hardware bug workaround write method */
9260 	tp->write32 = write_op;
9261 
9262 	/* Unfortunately, we have to delay before the PCI read back.
9263 	 * Some 575X chips even will not respond to a PCI cfg access
9264 	 * when the reset command is given to the chip.
9265 	 *
9266 	 * How do these hardware designers expect things to work
9267 	 * properly if the PCI write is posted for a long period
9268 	 * of time?  It is always necessary to have some method by
9269 	 * which a register read back can occur to push the write
9270 	 * out which does the reset.
9271 	 *
9272 	 * For most tg3 variants the trick below was working.
9273 	 * Ho hum...
9274 	 */
9275 	udelay(120);
9276 
9277 	/* Flush PCI posted writes.  The normal MMIO registers
9278 	 * are inaccessible at this time so this is the only
9279 	 * way to make this reliably (actually, this is no longer
9280 	 * the case, see above).  I tried to use indirect
9281 	 * register read/write but this upset some 5701 variants.
9282 	 */
9283 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9284 
9285 	udelay(120);
9286 
9287 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9288 		u16 val16;
9289 
9290 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9291 			int j;
9292 			u32 cfg_val;
9293 
9294 			/* Wait for link training to complete.  */
9295 			for (j = 0; j < 5000; j++)
9296 				udelay(100);
9297 
9298 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9299 			pci_write_config_dword(tp->pdev, 0xc4,
9300 					       cfg_val | (1 << 15));
9301 		}
9302 
9303 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9304 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9305 		/*
9306 		 * Older PCIe devices only support the 128 byte
9307 		 * MPS setting.  Enforce the restriction.
9308 		 */
9309 		if (!tg3_flag(tp, CPMU_PRESENT))
9310 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9311 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9312 
9313 		/* Clear error status */
9314 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9315 				      PCI_EXP_DEVSTA_CED |
9316 				      PCI_EXP_DEVSTA_NFED |
9317 				      PCI_EXP_DEVSTA_FED |
9318 				      PCI_EXP_DEVSTA_URD);
9319 	}
9320 
9321 	tg3_restore_pci_state(tp);
9322 
9323 	tg3_flag_clear(tp, CHIP_RESETTING);
9324 	tg3_flag_clear(tp, ERROR_PROCESSED);
9325 
9326 	val = 0;
9327 	if (tg3_flag(tp, 5780_CLASS))
9328 		val = tr32(MEMARB_MODE);
9329 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9330 
9331 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9332 		tg3_stop_fw(tp);
9333 		tw32(0x5000, 0x400);
9334 	}
9335 
9336 	if (tg3_flag(tp, IS_SSB_CORE)) {
9337 		/*
9338 		 * BCM4785: In order to avoid repercussions from using
9339 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9340 		 * which is not required.
9341 		 */
9342 		tg3_stop_fw(tp);
9343 		tg3_halt_cpu(tp, RX_CPU_BASE);
9344 	}
9345 
9346 	err = tg3_poll_fw(tp);
9347 	if (err)
9348 		return err;
9349 
9350 	tw32(GRC_MODE, tp->grc_mode);
9351 
9352 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9353 		val = tr32(0xc4);
9354 
9355 		tw32(0xc4, val | (1 << 15));
9356 	}
9357 
9358 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9359 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9360 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9361 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9362 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9363 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9364 	}
9365 
9366 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9367 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9368 		val = tp->mac_mode;
9369 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9370 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9371 		val = tp->mac_mode;
9372 	} else
9373 		val = 0;
9374 
9375 	tw32_f(MAC_MODE, val);
9376 	udelay(40);
9377 
9378 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9379 
9380 	tg3_mdio_start(tp);
9381 
9382 	if (tg3_flag(tp, PCI_EXPRESS) &&
9383 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9384 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9385 	    !tg3_flag(tp, 57765_PLUS)) {
9386 		val = tr32(0x7c00);
9387 
9388 		tw32(0x7c00, val | (1 << 25));
9389 	}
9390 
9391 	tg3_restore_clk(tp);
9392 
9393 	/* Increase the core clock speed to fix tx timeout issue for 5762
9394 	 * with 100Mbps link speed.
9395 	 */
9396 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9397 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9398 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9399 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9400 	}
9401 
9402 	/* Reprobe ASF enable state.  */
9403 	tg3_flag_clear(tp, ENABLE_ASF);
9404 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9405 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9406 
9407 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9408 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9409 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9410 		u32 nic_cfg;
9411 
9412 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9413 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9414 			tg3_flag_set(tp, ENABLE_ASF);
9415 			tp->last_event_jiffies = jiffies;
9416 			if (tg3_flag(tp, 5750_PLUS))
9417 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9418 
9419 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9420 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9421 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9422 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9423 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9424 		}
9425 	}
9426 
9427 	return 0;
9428 }
9429 
9430 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9431 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9432 static void __tg3_set_rx_mode(struct net_device *);
9433 
9434 /* tp->lock is held. */
9435 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9436 {
9437 	int err, i;
9438 
9439 	tg3_stop_fw(tp);
9440 
9441 	tg3_write_sig_pre_reset(tp, kind);
9442 
9443 	tg3_abort_hw(tp, silent);
9444 	err = tg3_chip_reset(tp);
9445 
9446 	__tg3_set_mac_addr(tp, false);
9447 
9448 	tg3_write_sig_legacy(tp, kind);
9449 	tg3_write_sig_post_reset(tp, kind);
9450 
9451 	if (tp->hw_stats) {
9452 		/* Save the stats across chip resets... */
9453 		tg3_get_nstats(tp, &tp->net_stats_prev);
9454 		tg3_get_estats(tp, &tp->estats_prev);
9455 
9456 		/* And make sure the next sample is new data */
9457 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9458 
9459 		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9460 			struct tg3_napi *tnapi = &tp->napi[i];
9461 
9462 			tnapi->rx_dropped = 0;
9463 			tnapi->tx_dropped = 0;
9464 		}
9465 	}
9466 
9467 	return err;
9468 }
9469 
9470 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9471 {
9472 	struct tg3 *tp = netdev_priv(dev);
9473 	struct sockaddr *addr = p;
9474 	int err = 0;
9475 	bool skip_mac_1 = false;
9476 
9477 	if (!is_valid_ether_addr(addr->sa_data))
9478 		return -EADDRNOTAVAIL;
9479 
9480 	eth_hw_addr_set(dev, addr->sa_data);
9481 
9482 	if (!netif_running(dev))
9483 		return 0;
9484 
9485 	if (tg3_flag(tp, ENABLE_ASF)) {
9486 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9487 
9488 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9489 		addr0_low = tr32(MAC_ADDR_0_LOW);
9490 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9491 		addr1_low = tr32(MAC_ADDR_1_LOW);
9492 
9493 		/* Skip MAC addr 1 if ASF is using it. */
9494 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9495 		    !(addr1_high == 0 && addr1_low == 0))
9496 			skip_mac_1 = true;
9497 	}
9498 	spin_lock_bh(&tp->lock);
9499 	__tg3_set_mac_addr(tp, skip_mac_1);
9500 	__tg3_set_rx_mode(dev);
9501 	spin_unlock_bh(&tp->lock);
9502 
9503 	return err;
9504 }
9505 
9506 /* tp->lock is held. */
9507 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9508 			   dma_addr_t mapping, u32 maxlen_flags,
9509 			   u32 nic_addr)
9510 {
9511 	tg3_write_mem(tp,
9512 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9513 		      ((u64) mapping >> 32));
9514 	tg3_write_mem(tp,
9515 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9516 		      ((u64) mapping & 0xffffffff));
9517 	tg3_write_mem(tp,
9518 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9519 		       maxlen_flags);
9520 
9521 	if (!tg3_flag(tp, 5705_PLUS))
9522 		tg3_write_mem(tp,
9523 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9524 			      nic_addr);
9525 }
9526 
9527 
9528 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9529 {
9530 	int i = 0;
9531 
9532 	if (!tg3_flag(tp, ENABLE_TSS)) {
9533 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9534 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9535 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9536 	} else {
9537 		tw32(HOSTCC_TXCOL_TICKS, 0);
9538 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9539 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9540 
9541 		for (; i < tp->txq_cnt; i++) {
9542 			u32 reg;
9543 
9544 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9545 			tw32(reg, ec->tx_coalesce_usecs);
9546 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9547 			tw32(reg, ec->tx_max_coalesced_frames);
9548 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9549 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9550 		}
9551 	}
9552 
9553 	for (; i < tp->irq_max - 1; i++) {
9554 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9555 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9556 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9557 	}
9558 }
9559 
9560 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9561 {
9562 	int i = 0;
9563 	u32 limit = tp->rxq_cnt;
9564 
9565 	if (!tg3_flag(tp, ENABLE_RSS)) {
9566 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9567 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9568 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9569 		limit--;
9570 	} else {
9571 		tw32(HOSTCC_RXCOL_TICKS, 0);
9572 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9573 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9574 	}
9575 
9576 	for (; i < limit; i++) {
9577 		u32 reg;
9578 
9579 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9580 		tw32(reg, ec->rx_coalesce_usecs);
9581 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9582 		tw32(reg, ec->rx_max_coalesced_frames);
9583 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9584 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9585 	}
9586 
9587 	for (; i < tp->irq_max - 1; i++) {
9588 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9589 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9590 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9591 	}
9592 }
9593 
9594 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9595 {
9596 	tg3_coal_tx_init(tp, ec);
9597 	tg3_coal_rx_init(tp, ec);
9598 
9599 	if (!tg3_flag(tp, 5705_PLUS)) {
9600 		u32 val = ec->stats_block_coalesce_usecs;
9601 
9602 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9603 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9604 
9605 		if (!tp->link_up)
9606 			val = 0;
9607 
9608 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9609 	}
9610 }
9611 
9612 /* tp->lock is held. */
9613 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9614 {
9615 	u32 txrcb, limit;
9616 
9617 	/* Disable all transmit rings but the first. */
9618 	if (!tg3_flag(tp, 5705_PLUS))
9619 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9620 	else if (tg3_flag(tp, 5717_PLUS))
9621 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9622 	else if (tg3_flag(tp, 57765_CLASS) ||
9623 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9624 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9625 	else
9626 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9627 
9628 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9629 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9630 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9631 			      BDINFO_FLAGS_DISABLED);
9632 }
9633 
9634 /* tp->lock is held. */
9635 static void tg3_tx_rcbs_init(struct tg3 *tp)
9636 {
9637 	int i = 0;
9638 	u32 txrcb = NIC_SRAM_SEND_RCB;
9639 
9640 	if (tg3_flag(tp, ENABLE_TSS))
9641 		i++;
9642 
9643 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9644 		struct tg3_napi *tnapi = &tp->napi[i];
9645 
9646 		if (!tnapi->tx_ring)
9647 			continue;
9648 
9649 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9650 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9651 			       NIC_SRAM_TX_BUFFER_DESC);
9652 	}
9653 }
9654 
9655 /* tp->lock is held. */
9656 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9657 {
9658 	u32 rxrcb, limit;
9659 
9660 	/* Disable all receive return rings but the first. */
9661 	if (tg3_flag(tp, 5717_PLUS))
9662 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9663 	else if (!tg3_flag(tp, 5705_PLUS))
9664 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9665 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9666 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9667 		 tg3_flag(tp, 57765_CLASS))
9668 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9669 	else
9670 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9671 
9672 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9673 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9674 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9675 			      BDINFO_FLAGS_DISABLED);
9676 }
9677 
9678 /* tp->lock is held. */
9679 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9680 {
9681 	int i = 0;
9682 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9683 
9684 	if (tg3_flag(tp, ENABLE_RSS))
9685 		i++;
9686 
9687 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9688 		struct tg3_napi *tnapi = &tp->napi[i];
9689 
9690 		if (!tnapi->rx_rcb)
9691 			continue;
9692 
9693 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9694 			       (tp->rx_ret_ring_mask + 1) <<
9695 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9696 	}
9697 }
9698 
9699 /* tp->lock is held. */
9700 static void tg3_rings_reset(struct tg3 *tp)
9701 {
9702 	int i;
9703 	u32 stblk;
9704 	struct tg3_napi *tnapi = &tp->napi[0];
9705 
9706 	tg3_tx_rcbs_disable(tp);
9707 
9708 	tg3_rx_ret_rcbs_disable(tp);
9709 
9710 	/* Disable interrupts */
9711 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9712 	tp->napi[0].chk_msi_cnt = 0;
9713 	tp->napi[0].last_rx_cons = 0;
9714 	tp->napi[0].last_tx_cons = 0;
9715 
9716 	/* Zero mailbox registers. */
9717 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9718 		for (i = 1; i < tp->irq_max; i++) {
9719 			tp->napi[i].tx_prod = 0;
9720 			tp->napi[i].tx_cons = 0;
9721 			if (tg3_flag(tp, ENABLE_TSS))
9722 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9723 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9724 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9725 			tp->napi[i].chk_msi_cnt = 0;
9726 			tp->napi[i].last_rx_cons = 0;
9727 			tp->napi[i].last_tx_cons = 0;
9728 		}
9729 		if (!tg3_flag(tp, ENABLE_TSS))
9730 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9731 	} else {
9732 		tp->napi[0].tx_prod = 0;
9733 		tp->napi[0].tx_cons = 0;
9734 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9735 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9736 	}
9737 
9738 	/* Make sure the NIC-based send BD rings are disabled. */
9739 	if (!tg3_flag(tp, 5705_PLUS)) {
9740 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9741 		for (i = 0; i < 16; i++)
9742 			tw32_tx_mbox(mbox + i * 8, 0);
9743 	}
9744 
9745 	/* Clear status block in ram. */
9746 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9747 
9748 	/* Set status block DMA address */
9749 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9750 	     ((u64) tnapi->status_mapping >> 32));
9751 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9752 	     ((u64) tnapi->status_mapping & 0xffffffff));
9753 
9754 	stblk = HOSTCC_STATBLCK_RING1;
9755 
9756 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9757 		u64 mapping = (u64)tnapi->status_mapping;
9758 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9759 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9760 		stblk += 8;
9761 
9762 		/* Clear status block in ram. */
9763 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9764 	}
9765 
9766 	tg3_tx_rcbs_init(tp);
9767 	tg3_rx_ret_rcbs_init(tp);
9768 }
9769 
9770 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9771 {
9772 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9773 
9774 	if (!tg3_flag(tp, 5750_PLUS) ||
9775 	    tg3_flag(tp, 5780_CLASS) ||
9776 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9777 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9778 	    tg3_flag(tp, 57765_PLUS))
9779 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9780 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9781 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9782 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9783 	else
9784 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9785 
9786 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9787 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9788 
9789 	val = min(nic_rep_thresh, host_rep_thresh);
9790 	tw32(RCVBDI_STD_THRESH, val);
9791 
9792 	if (tg3_flag(tp, 57765_PLUS))
9793 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9794 
9795 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9796 		return;
9797 
9798 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9799 
9800 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9801 
9802 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9803 	tw32(RCVBDI_JUMBO_THRESH, val);
9804 
9805 	if (tg3_flag(tp, 57765_PLUS))
9806 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9807 }
9808 
9809 static inline u32 calc_crc(unsigned char *buf, int len)
9810 {
9811 	u32 reg;
9812 	u32 tmp;
9813 	int j, k;
9814 
9815 	reg = 0xffffffff;
9816 
9817 	for (j = 0; j < len; j++) {
9818 		reg ^= buf[j];
9819 
9820 		for (k = 0; k < 8; k++) {
9821 			tmp = reg & 0x01;
9822 
9823 			reg >>= 1;
9824 
9825 			if (tmp)
9826 				reg ^= CRC32_POLY_LE;
9827 		}
9828 	}
9829 
9830 	return ~reg;
9831 }
9832 
9833 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9834 {
9835 	/* accept or reject all multicast frames */
9836 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9837 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9838 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9839 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9840 }
9841 
9842 static void __tg3_set_rx_mode(struct net_device *dev)
9843 {
9844 	struct tg3 *tp = netdev_priv(dev);
9845 	u32 rx_mode;
9846 
9847 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9848 				  RX_MODE_KEEP_VLAN_TAG);
9849 
9850 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9851 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9852 	 * flag clear.
9853 	 */
9854 	if (!tg3_flag(tp, ENABLE_ASF))
9855 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9856 #endif
9857 
9858 	if (dev->flags & IFF_PROMISC) {
9859 		/* Promiscuous mode. */
9860 		rx_mode |= RX_MODE_PROMISC;
9861 	} else if (dev->flags & IFF_ALLMULTI) {
9862 		/* Accept all multicast. */
9863 		tg3_set_multi(tp, 1);
9864 	} else if (netdev_mc_empty(dev)) {
9865 		/* Reject all multicast. */
9866 		tg3_set_multi(tp, 0);
9867 	} else {
9868 		/* Accept one or more multicast(s). */
9869 		struct netdev_hw_addr *ha;
9870 		u32 mc_filter[4] = { 0, };
9871 		u32 regidx;
9872 		u32 bit;
9873 		u32 crc;
9874 
9875 		netdev_for_each_mc_addr(ha, dev) {
9876 			crc = calc_crc(ha->addr, ETH_ALEN);
9877 			bit = ~crc & 0x7f;
9878 			regidx = (bit & 0x60) >> 5;
9879 			bit &= 0x1f;
9880 			mc_filter[regidx] |= (1 << bit);
9881 		}
9882 
9883 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9884 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9885 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9886 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9887 	}
9888 
9889 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9890 		rx_mode |= RX_MODE_PROMISC;
9891 	} else if (!(dev->flags & IFF_PROMISC)) {
9892 		/* Add all entries into to the mac addr filter list */
9893 		int i = 0;
9894 		struct netdev_hw_addr *ha;
9895 
9896 		netdev_for_each_uc_addr(ha, dev) {
9897 			__tg3_set_one_mac_addr(tp, ha->addr,
9898 					       i + TG3_UCAST_ADDR_IDX(tp));
9899 			i++;
9900 		}
9901 	}
9902 
9903 	if (rx_mode != tp->rx_mode) {
9904 		tp->rx_mode = rx_mode;
9905 		tw32_f(MAC_RX_MODE, rx_mode);
9906 		udelay(10);
9907 	}
9908 }
9909 
9910 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9911 {
9912 	int i;
9913 
9914 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9915 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9916 }
9917 
9918 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9919 {
9920 	int i;
9921 
9922 	if (!tg3_flag(tp, SUPPORT_MSIX))
9923 		return;
9924 
9925 	if (tp->rxq_cnt == 1) {
9926 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9927 		return;
9928 	}
9929 
9930 	/* Validate table against current IRQ count */
9931 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9932 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9933 			break;
9934 	}
9935 
9936 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9937 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9938 }
9939 
9940 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9941 {
9942 	int i = 0;
9943 	u32 reg = MAC_RSS_INDIR_TBL_0;
9944 
9945 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9946 		u32 val = tp->rss_ind_tbl[i];
9947 		i++;
9948 		for (; i % 8; i++) {
9949 			val <<= 4;
9950 			val |= tp->rss_ind_tbl[i];
9951 		}
9952 		tw32(reg, val);
9953 		reg += 4;
9954 	}
9955 }
9956 
9957 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9958 {
9959 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9960 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9961 	else
9962 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9963 }
9964 
9965 /* tp->lock is held. */
9966 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9967 {
9968 	u32 val, rdmac_mode;
9969 	int i, err, limit;
9970 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9971 
9972 	tg3_disable_ints(tp);
9973 
9974 	tg3_stop_fw(tp);
9975 
9976 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9977 
9978 	if (tg3_flag(tp, INIT_COMPLETE))
9979 		tg3_abort_hw(tp, 1);
9980 
9981 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9982 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9983 		tg3_phy_pull_config(tp);
9984 		tg3_eee_pull_config(tp, NULL);
9985 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9986 	}
9987 
9988 	/* Enable MAC control of LPI */
9989 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9990 		tg3_setup_eee(tp);
9991 
9992 	if (reset_phy)
9993 		tg3_phy_reset(tp);
9994 
9995 	err = tg3_chip_reset(tp);
9996 	if (err)
9997 		return err;
9998 
9999 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
10000 
10001 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
10002 		val = tr32(TG3_CPMU_CTRL);
10003 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
10004 		tw32(TG3_CPMU_CTRL, val);
10005 
10006 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10007 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10008 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10009 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10010 
10011 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
10012 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
10013 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
10014 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
10015 
10016 		val = tr32(TG3_CPMU_HST_ACC);
10017 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
10018 		val |= CPMU_HST_ACC_MACCLK_6_25;
10019 		tw32(TG3_CPMU_HST_ACC, val);
10020 	}
10021 
10022 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10023 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10024 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10025 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
10026 		tw32(PCIE_PWR_MGMT_THRESH, val);
10027 
10028 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10029 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10030 
10031 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10032 
10033 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10034 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10035 	}
10036 
10037 	if (tg3_flag(tp, L1PLLPD_EN)) {
10038 		u32 grc_mode = tr32(GRC_MODE);
10039 
10040 		/* Access the lower 1K of PL PCIE block registers. */
10041 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10042 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10043 
10044 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10045 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10046 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10047 
10048 		tw32(GRC_MODE, grc_mode);
10049 	}
10050 
10051 	if (tg3_flag(tp, 57765_CLASS)) {
10052 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10053 			u32 grc_mode = tr32(GRC_MODE);
10054 
10055 			/* Access the lower 1K of PL PCIE block registers. */
10056 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10057 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10058 
10059 			val = tr32(TG3_PCIE_TLDLPL_PORT +
10060 				   TG3_PCIE_PL_LO_PHYCTL5);
10061 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10062 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10063 
10064 			tw32(GRC_MODE, grc_mode);
10065 		}
10066 
10067 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10068 			u32 grc_mode;
10069 
10070 			/* Fix transmit hangs */
10071 			val = tr32(TG3_CPMU_PADRNG_CTL);
10072 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10073 			tw32(TG3_CPMU_PADRNG_CTL, val);
10074 
10075 			grc_mode = tr32(GRC_MODE);
10076 
10077 			/* Access the lower 1K of DL PCIE block registers. */
10078 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10079 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10080 
10081 			val = tr32(TG3_PCIE_TLDLPL_PORT +
10082 				   TG3_PCIE_DL_LO_FTSMAX);
10083 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10084 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10085 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10086 
10087 			tw32(GRC_MODE, grc_mode);
10088 		}
10089 
10090 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10091 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10092 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10093 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10094 	}
10095 
10096 	/* This works around an issue with Athlon chipsets on
10097 	 * B3 tigon3 silicon.  This bit has no effect on any
10098 	 * other revision.  But do not set this on PCI Express
10099 	 * chips and don't even touch the clocks if the CPMU is present.
10100 	 */
10101 	if (!tg3_flag(tp, CPMU_PRESENT)) {
10102 		if (!tg3_flag(tp, PCI_EXPRESS))
10103 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10104 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10105 	}
10106 
10107 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10108 	    tg3_flag(tp, PCIX_MODE)) {
10109 		val = tr32(TG3PCI_PCISTATE);
10110 		val |= PCISTATE_RETRY_SAME_DMA;
10111 		tw32(TG3PCI_PCISTATE, val);
10112 	}
10113 
10114 	if (tg3_flag(tp, ENABLE_APE)) {
10115 		/* Allow reads and writes to the
10116 		 * APE register and memory space.
10117 		 */
10118 		val = tr32(TG3PCI_PCISTATE);
10119 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10120 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10121 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10122 		tw32(TG3PCI_PCISTATE, val);
10123 	}
10124 
10125 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10126 		/* Enable some hw fixes.  */
10127 		val = tr32(TG3PCI_MSI_DATA);
10128 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10129 		tw32(TG3PCI_MSI_DATA, val);
10130 	}
10131 
10132 	/* Descriptor ring init may make accesses to the
10133 	 * NIC SRAM area to setup the TX descriptors, so we
10134 	 * can only do this after the hardware has been
10135 	 * successfully reset.
10136 	 */
10137 	err = tg3_init_rings(tp);
10138 	if (err)
10139 		return err;
10140 
10141 	if (tg3_flag(tp, 57765_PLUS)) {
10142 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10143 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10144 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10145 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10146 		if (!tg3_flag(tp, 57765_CLASS) &&
10147 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10148 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10149 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10150 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10151 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10152 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10153 		/* This value is determined during the probe time DMA
10154 		 * engine test, tg3_test_dma.
10155 		 */
10156 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10157 	}
10158 
10159 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10160 			  GRC_MODE_4X_NIC_SEND_RINGS |
10161 			  GRC_MODE_NO_TX_PHDR_CSUM |
10162 			  GRC_MODE_NO_RX_PHDR_CSUM);
10163 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10164 
10165 	/* Pseudo-header checksum is done by hardware logic and not
10166 	 * the offload processers, so make the chip do the pseudo-
10167 	 * header checksums on receive.  For transmit it is more
10168 	 * convenient to do the pseudo-header checksum in software
10169 	 * as Linux does that on transmit for us in all cases.
10170 	 */
10171 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10172 
10173 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10174 	if (tp->rxptpctl)
10175 		tw32(TG3_RX_PTP_CTL,
10176 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10177 
10178 	if (tg3_flag(tp, PTP_CAPABLE))
10179 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10180 
10181 	tw32(GRC_MODE, tp->grc_mode | val);
10182 
10183 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10184 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10185 	 * to 2048 instead of default 4096.
10186 	 */
10187 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10188 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10189 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10190 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10191 	}
10192 
10193 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10194 	val = tr32(GRC_MISC_CFG);
10195 	val &= ~0xff;
10196 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10197 	tw32(GRC_MISC_CFG, val);
10198 
10199 	/* Initialize MBUF/DESC pool. */
10200 	if (tg3_flag(tp, 5750_PLUS)) {
10201 		/* Do nothing.  */
10202 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10203 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10204 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10205 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10206 		else
10207 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10208 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10209 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10210 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10211 		int fw_len;
10212 
10213 		fw_len = tp->fw_len;
10214 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10215 		tw32(BUFMGR_MB_POOL_ADDR,
10216 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10217 		tw32(BUFMGR_MB_POOL_SIZE,
10218 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10219 	}
10220 
10221 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10222 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10223 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10224 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10225 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10226 		tw32(BUFMGR_MB_HIGH_WATER,
10227 		     tp->bufmgr_config.mbuf_high_water);
10228 	} else {
10229 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10230 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10231 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10232 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10233 		tw32(BUFMGR_MB_HIGH_WATER,
10234 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10235 	}
10236 	tw32(BUFMGR_DMA_LOW_WATER,
10237 	     tp->bufmgr_config.dma_low_water);
10238 	tw32(BUFMGR_DMA_HIGH_WATER,
10239 	     tp->bufmgr_config.dma_high_water);
10240 
10241 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10242 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10243 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10244 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10245 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10246 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10247 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10248 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10249 	tw32(BUFMGR_MODE, val);
10250 	for (i = 0; i < 2000; i++) {
10251 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10252 			break;
10253 		udelay(10);
10254 	}
10255 	if (i >= 2000) {
10256 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10257 		return -ENODEV;
10258 	}
10259 
10260 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10261 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10262 
10263 	tg3_setup_rxbd_thresholds(tp);
10264 
10265 	/* Initialize TG3_BDINFO's at:
10266 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10267 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10268 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10269 	 *
10270 	 * like so:
10271 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10272 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10273 	 *                              ring attribute flags
10274 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10275 	 *
10276 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10277 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10278 	 *
10279 	 * The size of each ring is fixed in the firmware, but the location is
10280 	 * configurable.
10281 	 */
10282 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10283 	     ((u64) tpr->rx_std_mapping >> 32));
10284 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10285 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10286 	if (!tg3_flag(tp, 5717_PLUS))
10287 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10288 		     NIC_SRAM_RX_BUFFER_DESC);
10289 
10290 	/* Disable the mini ring */
10291 	if (!tg3_flag(tp, 5705_PLUS))
10292 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10293 		     BDINFO_FLAGS_DISABLED);
10294 
10295 	/* Program the jumbo buffer descriptor ring control
10296 	 * blocks on those devices that have them.
10297 	 */
10298 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10299 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10300 
10301 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10302 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10303 			     ((u64) tpr->rx_jmb_mapping >> 32));
10304 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10305 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10306 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10307 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10308 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10309 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10310 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10311 			    tg3_flag(tp, 57765_CLASS) ||
10312 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10313 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10314 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10315 		} else {
10316 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10317 			     BDINFO_FLAGS_DISABLED);
10318 		}
10319 
10320 		if (tg3_flag(tp, 57765_PLUS)) {
10321 			val = TG3_RX_STD_RING_SIZE(tp);
10322 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10323 			val |= (TG3_RX_STD_DMA_SZ << 2);
10324 		} else
10325 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10326 	} else
10327 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10328 
10329 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10330 
10331 	tpr->rx_std_prod_idx = tp->rx_pending;
10332 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10333 
10334 	tpr->rx_jmb_prod_idx =
10335 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10336 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10337 
10338 	tg3_rings_reset(tp);
10339 
10340 	/* Initialize MAC address and backoff seed. */
10341 	__tg3_set_mac_addr(tp, false);
10342 
10343 	/* MTU + ethernet header + FCS + optional VLAN tag */
10344 	tw32(MAC_RX_MTU_SIZE,
10345 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10346 
10347 	/* The slot time is changed by tg3_setup_phy if we
10348 	 * run at gigabit with half duplex.
10349 	 */
10350 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10351 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10352 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10353 
10354 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10355 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10356 		val |= tr32(MAC_TX_LENGTHS) &
10357 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10358 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10359 
10360 	tw32(MAC_TX_LENGTHS, val);
10361 
10362 	/* Receive rules. */
10363 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10364 	tw32(RCVLPC_CONFIG, 0x0181);
10365 
10366 	/* Calculate RDMAC_MODE setting early, we need it to determine
10367 	 * the RCVLPC_STATE_ENABLE mask.
10368 	 */
10369 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10370 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10371 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10372 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10373 		      RDMAC_MODE_LNGREAD_ENAB);
10374 
10375 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10376 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10377 
10378 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10379 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10380 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10381 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10382 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10383 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10384 
10385 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10386 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10387 		if (tg3_flag(tp, TSO_CAPABLE)) {
10388 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10389 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10390 			   !tg3_flag(tp, IS_5788)) {
10391 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10392 		}
10393 	}
10394 
10395 	if (tg3_flag(tp, PCI_EXPRESS))
10396 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10397 
10398 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10399 		tp->dma_limit = 0;
10400 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10401 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10402 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10403 		}
10404 	}
10405 
10406 	if (tg3_flag(tp, HW_TSO_1) ||
10407 	    tg3_flag(tp, HW_TSO_2) ||
10408 	    tg3_flag(tp, HW_TSO_3))
10409 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10410 
10411 	if (tg3_flag(tp, 57765_PLUS) ||
10412 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10413 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10414 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10415 
10416 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10417 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10418 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10419 
10420 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10421 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10422 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10423 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10424 	    tg3_flag(tp, 57765_PLUS)) {
10425 		u32 tgtreg;
10426 
10427 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10428 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10429 		else
10430 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10431 
10432 		val = tr32(tgtreg);
10433 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10434 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10435 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10436 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10437 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10438 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10439 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10440 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10441 		}
10442 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10443 	}
10444 
10445 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10446 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10447 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10448 		u32 tgtreg;
10449 
10450 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10451 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10452 		else
10453 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10454 
10455 		val = tr32(tgtreg);
10456 		tw32(tgtreg, val |
10457 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10458 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10459 	}
10460 
10461 	/* Receive/send statistics. */
10462 	if (tg3_flag(tp, 5750_PLUS)) {
10463 		val = tr32(RCVLPC_STATS_ENABLE);
10464 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10465 		tw32(RCVLPC_STATS_ENABLE, val);
10466 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10467 		   tg3_flag(tp, TSO_CAPABLE)) {
10468 		val = tr32(RCVLPC_STATS_ENABLE);
10469 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10470 		tw32(RCVLPC_STATS_ENABLE, val);
10471 	} else {
10472 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10473 	}
10474 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10475 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10476 	tw32(SNDDATAI_STATSCTRL,
10477 	     (SNDDATAI_SCTRL_ENABLE |
10478 	      SNDDATAI_SCTRL_FASTUPD));
10479 
10480 	/* Setup host coalescing engine. */
10481 	tw32(HOSTCC_MODE, 0);
10482 	for (i = 0; i < 2000; i++) {
10483 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10484 			break;
10485 		udelay(10);
10486 	}
10487 
10488 	__tg3_set_coalesce(tp, &tp->coal);
10489 
10490 	if (!tg3_flag(tp, 5705_PLUS)) {
10491 		/* Status/statistics block address.  See tg3_timer,
10492 		 * the tg3_periodic_fetch_stats call there, and
10493 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10494 		 */
10495 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10496 		     ((u64) tp->stats_mapping >> 32));
10497 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10498 		     ((u64) tp->stats_mapping & 0xffffffff));
10499 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10500 
10501 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10502 
10503 		/* Clear statistics and status block memory areas */
10504 		for (i = NIC_SRAM_STATS_BLK;
10505 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10506 		     i += sizeof(u32)) {
10507 			tg3_write_mem(tp, i, 0);
10508 			udelay(40);
10509 		}
10510 	}
10511 
10512 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10513 
10514 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10515 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10516 	if (!tg3_flag(tp, 5705_PLUS))
10517 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10518 
10519 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10520 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10521 		/* reset to prevent losing 1st rx packet intermittently */
10522 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10523 		udelay(10);
10524 	}
10525 
10526 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10527 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10528 			MAC_MODE_FHDE_ENABLE;
10529 	if (tg3_flag(tp, ENABLE_APE))
10530 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10531 	if (!tg3_flag(tp, 5705_PLUS) &&
10532 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10533 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10534 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10535 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10536 	udelay(40);
10537 
10538 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10539 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10540 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10541 	 * whether used as inputs or outputs, are set by boot code after
10542 	 * reset.
10543 	 */
10544 	if (!tg3_flag(tp, IS_NIC)) {
10545 		u32 gpio_mask;
10546 
10547 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10548 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10549 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10550 
10551 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10552 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10553 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10554 
10555 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10556 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10557 
10558 		tp->grc_local_ctrl &= ~gpio_mask;
10559 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10560 
10561 		/* GPIO1 must be driven high for eeprom write protect */
10562 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10563 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10564 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10565 	}
10566 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10567 	udelay(100);
10568 
10569 	if (tg3_flag(tp, USING_MSIX)) {
10570 		val = tr32(MSGINT_MODE);
10571 		val |= MSGINT_MODE_ENABLE;
10572 		if (tp->irq_cnt > 1)
10573 			val |= MSGINT_MODE_MULTIVEC_EN;
10574 		if (!tg3_flag(tp, 1SHOT_MSI))
10575 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10576 		tw32(MSGINT_MODE, val);
10577 	}
10578 
10579 	if (!tg3_flag(tp, 5705_PLUS)) {
10580 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10581 		udelay(40);
10582 	}
10583 
10584 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10585 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10586 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10587 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10588 	       WDMAC_MODE_LNGREAD_ENAB);
10589 
10590 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10591 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10592 		if (tg3_flag(tp, TSO_CAPABLE) &&
10593 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10594 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10595 			/* nothing */
10596 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10597 			   !tg3_flag(tp, IS_5788)) {
10598 			val |= WDMAC_MODE_RX_ACCEL;
10599 		}
10600 	}
10601 
10602 	/* Enable host coalescing bug fix */
10603 	if (tg3_flag(tp, 5755_PLUS))
10604 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10605 
10606 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10607 		val |= WDMAC_MODE_BURST_ALL_DATA;
10608 
10609 	tw32_f(WDMAC_MODE, val);
10610 	udelay(40);
10611 
10612 	if (tg3_flag(tp, PCIX_MODE)) {
10613 		u16 pcix_cmd;
10614 
10615 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10616 				     &pcix_cmd);
10617 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10618 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10619 			pcix_cmd |= PCI_X_CMD_READ_2K;
10620 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10621 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10622 			pcix_cmd |= PCI_X_CMD_READ_2K;
10623 		}
10624 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10625 				      pcix_cmd);
10626 	}
10627 
10628 	tw32_f(RDMAC_MODE, rdmac_mode);
10629 	udelay(40);
10630 
10631 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10632 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10633 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10634 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10635 				break;
10636 		}
10637 		if (i < TG3_NUM_RDMA_CHANNELS) {
10638 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10639 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10640 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10641 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10642 		}
10643 	}
10644 
10645 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10646 	if (!tg3_flag(tp, 5705_PLUS))
10647 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10648 
10649 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10650 		tw32(SNDDATAC_MODE,
10651 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10652 	else
10653 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10654 
10655 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10656 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10657 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10658 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10659 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10660 	tw32(RCVDBDI_MODE, val);
10661 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10662 	if (tg3_flag(tp, HW_TSO_1) ||
10663 	    tg3_flag(tp, HW_TSO_2) ||
10664 	    tg3_flag(tp, HW_TSO_3))
10665 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10666 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10667 	if (tg3_flag(tp, ENABLE_TSS))
10668 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10669 	tw32(SNDBDI_MODE, val);
10670 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10671 
10672 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10673 		err = tg3_load_5701_a0_firmware_fix(tp);
10674 		if (err)
10675 			return err;
10676 	}
10677 
10678 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10679 		/* Ignore any errors for the firmware download. If download
10680 		 * fails, the device will operate with EEE disabled
10681 		 */
10682 		tg3_load_57766_firmware(tp);
10683 	}
10684 
10685 	if (tg3_flag(tp, TSO_CAPABLE)) {
10686 		err = tg3_load_tso_firmware(tp);
10687 		if (err)
10688 			return err;
10689 	}
10690 
10691 	tp->tx_mode = TX_MODE_ENABLE;
10692 
10693 	if (tg3_flag(tp, 5755_PLUS) ||
10694 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10695 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10696 
10697 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10698 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10699 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10700 		tp->tx_mode &= ~val;
10701 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10702 	}
10703 
10704 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10705 	udelay(100);
10706 
10707 	if (tg3_flag(tp, ENABLE_RSS)) {
10708 		u32 rss_key[10];
10709 
10710 		tg3_rss_write_indir_tbl(tp);
10711 
10712 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10713 
10714 		for (i = 0; i < 10 ; i++)
10715 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10716 	}
10717 
10718 	tp->rx_mode = RX_MODE_ENABLE;
10719 	if (tg3_flag(tp, 5755_PLUS))
10720 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10721 
10722 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10723 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10724 
10725 	if (tg3_flag(tp, ENABLE_RSS))
10726 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10727 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10728 			       RX_MODE_RSS_IPV6_HASH_EN |
10729 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10730 			       RX_MODE_RSS_IPV4_HASH_EN |
10731 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10732 
10733 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10734 	udelay(10);
10735 
10736 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10737 
10738 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10739 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10740 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10741 		udelay(10);
10742 	}
10743 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10744 	udelay(10);
10745 
10746 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10747 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10748 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10749 			/* Set drive transmission level to 1.2V  */
10750 			/* only if the signal pre-emphasis bit is not set  */
10751 			val = tr32(MAC_SERDES_CFG);
10752 			val &= 0xfffff000;
10753 			val |= 0x880;
10754 			tw32(MAC_SERDES_CFG, val);
10755 		}
10756 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10757 			tw32(MAC_SERDES_CFG, 0x616000);
10758 	}
10759 
10760 	/* Prevent chip from dropping frames when flow control
10761 	 * is enabled.
10762 	 */
10763 	if (tg3_flag(tp, 57765_CLASS))
10764 		val = 1;
10765 	else
10766 		val = 2;
10767 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10768 
10769 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10770 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10771 		/* Use hardware link auto-negotiation */
10772 		tg3_flag_set(tp, HW_AUTONEG);
10773 	}
10774 
10775 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10776 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10777 		u32 tmp;
10778 
10779 		tmp = tr32(SERDES_RX_CTRL);
10780 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10781 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10782 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10783 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10784 	}
10785 
10786 	if (!tg3_flag(tp, USE_PHYLIB)) {
10787 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10788 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10789 
10790 		err = tg3_setup_phy(tp, false);
10791 		if (err)
10792 			return err;
10793 
10794 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10795 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10796 			u32 tmp;
10797 
10798 			/* Clear CRC stats. */
10799 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10800 				tg3_writephy(tp, MII_TG3_TEST1,
10801 					     tmp | MII_TG3_TEST1_CRC_EN);
10802 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10803 			}
10804 		}
10805 	}
10806 
10807 	__tg3_set_rx_mode(tp->dev);
10808 
10809 	/* Initialize receive rules. */
10810 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10811 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10812 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10813 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10814 
10815 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10816 		limit = 8;
10817 	else
10818 		limit = 16;
10819 	if (tg3_flag(tp, ENABLE_ASF))
10820 		limit -= 4;
10821 	switch (limit) {
10822 	case 16:
10823 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10824 		fallthrough;
10825 	case 15:
10826 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10827 		fallthrough;
10828 	case 14:
10829 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10830 		fallthrough;
10831 	case 13:
10832 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10833 		fallthrough;
10834 	case 12:
10835 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10836 		fallthrough;
10837 	case 11:
10838 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10839 		fallthrough;
10840 	case 10:
10841 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10842 		fallthrough;
10843 	case 9:
10844 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10845 		fallthrough;
10846 	case 8:
10847 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10848 		fallthrough;
10849 	case 7:
10850 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10851 		fallthrough;
10852 	case 6:
10853 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10854 		fallthrough;
10855 	case 5:
10856 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10857 		fallthrough;
10858 	case 4:
10859 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10860 	case 3:
10861 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10862 	case 2:
10863 	case 1:
10864 
10865 	default:
10866 		break;
10867 	}
10868 
10869 	if (tg3_flag(tp, ENABLE_APE))
10870 		/* Write our heartbeat update interval to APE. */
10871 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10872 				APE_HOST_HEARTBEAT_INT_5SEC);
10873 
10874 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10875 
10876 	return 0;
10877 }
10878 
10879 /* Called at device open time to get the chip ready for
10880  * packet processing.  Invoked with tp->lock held.
10881  */
10882 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10883 {
10884 	/* Chip may have been just powered on. If so, the boot code may still
10885 	 * be running initialization. Wait for it to finish to avoid races in
10886 	 * accessing the hardware.
10887 	 */
10888 	tg3_enable_register_access(tp);
10889 	tg3_poll_fw(tp);
10890 
10891 	tg3_switch_clocks(tp);
10892 
10893 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10894 
10895 	return tg3_reset_hw(tp, reset_phy);
10896 }
10897 
10898 #ifdef CONFIG_TIGON3_HWMON
10899 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10900 {
10901 	u32 off, len = TG3_OCIR_LEN;
10902 	int i;
10903 
10904 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10905 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10906 
10907 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10908 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10909 			memset(ocir, 0, len);
10910 	}
10911 }
10912 
10913 /* sysfs attributes for hwmon */
10914 static ssize_t tg3_show_temp(struct device *dev,
10915 			     struct device_attribute *devattr, char *buf)
10916 {
10917 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10918 	struct tg3 *tp = dev_get_drvdata(dev);
10919 	u32 temperature;
10920 
10921 	spin_lock_bh(&tp->lock);
10922 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10923 				sizeof(temperature));
10924 	spin_unlock_bh(&tp->lock);
10925 	return sprintf(buf, "%u\n", temperature * 1000);
10926 }
10927 
10928 
10929 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10930 			  TG3_TEMP_SENSOR_OFFSET);
10931 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10932 			  TG3_TEMP_CAUTION_OFFSET);
10933 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10934 			  TG3_TEMP_MAX_OFFSET);
10935 
10936 static struct attribute *tg3_attrs[] = {
10937 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10938 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10939 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10940 	NULL
10941 };
10942 ATTRIBUTE_GROUPS(tg3);
10943 
10944 static void tg3_hwmon_close(struct tg3 *tp)
10945 {
10946 	if (tp->hwmon_dev) {
10947 		hwmon_device_unregister(tp->hwmon_dev);
10948 		tp->hwmon_dev = NULL;
10949 	}
10950 }
10951 
10952 static void tg3_hwmon_open(struct tg3 *tp)
10953 {
10954 	int i;
10955 	u32 size = 0;
10956 	struct pci_dev *pdev = tp->pdev;
10957 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10958 
10959 	tg3_sd_scan_scratchpad(tp, ocirs);
10960 
10961 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10962 		if (!ocirs[i].src_data_length)
10963 			continue;
10964 
10965 		size += ocirs[i].src_hdr_length;
10966 		size += ocirs[i].src_data_length;
10967 	}
10968 
10969 	if (!size)
10970 		return;
10971 
10972 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10973 							  tp, tg3_groups);
10974 	if (IS_ERR(tp->hwmon_dev)) {
10975 		tp->hwmon_dev = NULL;
10976 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10977 	}
10978 }
10979 #else
10980 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10981 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10982 #endif /* CONFIG_TIGON3_HWMON */
10983 
10984 
10985 #define TG3_STAT_ADD32(PSTAT, REG) \
10986 do {	u32 __val = tr32(REG); \
10987 	(PSTAT)->low += __val; \
10988 	if ((PSTAT)->low < __val) \
10989 		(PSTAT)->high += 1; \
10990 } while (0)
10991 
10992 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10993 {
10994 	struct tg3_hw_stats *sp = tp->hw_stats;
10995 
10996 	if (!tp->link_up)
10997 		return;
10998 
10999 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
11000 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
11001 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
11002 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
11003 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
11004 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
11005 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
11006 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
11007 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
11008 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
11009 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
11010 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
11011 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
11012 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
11013 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
11014 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
11015 		u32 val;
11016 
11017 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
11018 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
11019 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
11020 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11021 	}
11022 
11023 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11024 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11025 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11026 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11027 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11028 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11029 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11030 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11031 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11032 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11033 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11034 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11035 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11036 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11037 
11038 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11039 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11040 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
11041 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11042 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11043 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11044 	} else {
11045 		u32 val = tr32(HOSTCC_FLOW_ATTN);
11046 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11047 		if (val) {
11048 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11049 			sp->rx_discards.low += val;
11050 			if (sp->rx_discards.low < val)
11051 				sp->rx_discards.high += 1;
11052 		}
11053 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11054 	}
11055 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11056 }
11057 
11058 static void tg3_chk_missed_msi(struct tg3 *tp)
11059 {
11060 	u32 i;
11061 
11062 	for (i = 0; i < tp->irq_cnt; i++) {
11063 		struct tg3_napi *tnapi = &tp->napi[i];
11064 
11065 		if (tg3_has_work(tnapi)) {
11066 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11067 			    tnapi->last_tx_cons == tnapi->tx_cons) {
11068 				if (tnapi->chk_msi_cnt < 1) {
11069 					tnapi->chk_msi_cnt++;
11070 					return;
11071 				}
11072 				tg3_msi(0, tnapi);
11073 			}
11074 		}
11075 		tnapi->chk_msi_cnt = 0;
11076 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11077 		tnapi->last_tx_cons = tnapi->tx_cons;
11078 	}
11079 }
11080 
11081 static void tg3_timer(struct timer_list *t)
11082 {
11083 	struct tg3 *tp = from_timer(tp, t, timer);
11084 
11085 	spin_lock(&tp->lock);
11086 
11087 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11088 		spin_unlock(&tp->lock);
11089 		goto restart_timer;
11090 	}
11091 
11092 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11093 	    tg3_flag(tp, 57765_CLASS))
11094 		tg3_chk_missed_msi(tp);
11095 
11096 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11097 		/* BCM4785: Flush posted writes from GbE to host memory. */
11098 		tr32(HOSTCC_MODE);
11099 	}
11100 
11101 	if (!tg3_flag(tp, TAGGED_STATUS)) {
11102 		/* All of this garbage is because when using non-tagged
11103 		 * IRQ status the mailbox/status_block protocol the chip
11104 		 * uses with the cpu is race prone.
11105 		 */
11106 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11107 			tw32(GRC_LOCAL_CTRL,
11108 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11109 		} else {
11110 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11111 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11112 		}
11113 
11114 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11115 			spin_unlock(&tp->lock);
11116 			tg3_reset_task_schedule(tp);
11117 			goto restart_timer;
11118 		}
11119 	}
11120 
11121 	/* This part only runs once per second. */
11122 	if (!--tp->timer_counter) {
11123 		if (tg3_flag(tp, 5705_PLUS))
11124 			tg3_periodic_fetch_stats(tp);
11125 
11126 		if (tp->setlpicnt && !--tp->setlpicnt)
11127 			tg3_phy_eee_enable(tp);
11128 
11129 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11130 			u32 mac_stat;
11131 			int phy_event;
11132 
11133 			mac_stat = tr32(MAC_STATUS);
11134 
11135 			phy_event = 0;
11136 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11137 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11138 					phy_event = 1;
11139 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11140 				phy_event = 1;
11141 
11142 			if (phy_event)
11143 				tg3_setup_phy(tp, false);
11144 		} else if (tg3_flag(tp, POLL_SERDES)) {
11145 			u32 mac_stat = tr32(MAC_STATUS);
11146 			int need_setup = 0;
11147 
11148 			if (tp->link_up &&
11149 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11150 				need_setup = 1;
11151 			}
11152 			if (!tp->link_up &&
11153 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11154 					 MAC_STATUS_SIGNAL_DET))) {
11155 				need_setup = 1;
11156 			}
11157 			if (need_setup) {
11158 				if (!tp->serdes_counter) {
11159 					tw32_f(MAC_MODE,
11160 					     (tp->mac_mode &
11161 					      ~MAC_MODE_PORT_MODE_MASK));
11162 					udelay(40);
11163 					tw32_f(MAC_MODE, tp->mac_mode);
11164 					udelay(40);
11165 				}
11166 				tg3_setup_phy(tp, false);
11167 			}
11168 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11169 			   tg3_flag(tp, 5780_CLASS)) {
11170 			tg3_serdes_parallel_detect(tp);
11171 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11172 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11173 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11174 					 TG3_CPMU_STATUS_LINK_MASK);
11175 
11176 			if (link_up != tp->link_up)
11177 				tg3_setup_phy(tp, false);
11178 		}
11179 
11180 		tp->timer_counter = tp->timer_multiplier;
11181 	}
11182 
11183 	/* Heartbeat is only sent once every 2 seconds.
11184 	 *
11185 	 * The heartbeat is to tell the ASF firmware that the host
11186 	 * driver is still alive.  In the event that the OS crashes,
11187 	 * ASF needs to reset the hardware to free up the FIFO space
11188 	 * that may be filled with rx packets destined for the host.
11189 	 * If the FIFO is full, ASF will no longer function properly.
11190 	 *
11191 	 * Unintended resets have been reported on real time kernels
11192 	 * where the timer doesn't run on time.  Netpoll will also have
11193 	 * same problem.
11194 	 *
11195 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11196 	 * to check the ring condition when the heartbeat is expiring
11197 	 * before doing the reset.  This will prevent most unintended
11198 	 * resets.
11199 	 */
11200 	if (!--tp->asf_counter) {
11201 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11202 			tg3_wait_for_event_ack(tp);
11203 
11204 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11205 				      FWCMD_NICDRV_ALIVE3);
11206 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11207 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11208 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11209 
11210 			tg3_generate_fw_event(tp);
11211 		}
11212 		tp->asf_counter = tp->asf_multiplier;
11213 	}
11214 
11215 	/* Update the APE heartbeat every 5 seconds.*/
11216 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11217 
11218 	spin_unlock(&tp->lock);
11219 
11220 restart_timer:
11221 	tp->timer.expires = jiffies + tp->timer_offset;
11222 	add_timer(&tp->timer);
11223 }
11224 
11225 static void tg3_timer_init(struct tg3 *tp)
11226 {
11227 	if (tg3_flag(tp, TAGGED_STATUS) &&
11228 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11229 	    !tg3_flag(tp, 57765_CLASS))
11230 		tp->timer_offset = HZ;
11231 	else
11232 		tp->timer_offset = HZ / 10;
11233 
11234 	BUG_ON(tp->timer_offset > HZ);
11235 
11236 	tp->timer_multiplier = (HZ / tp->timer_offset);
11237 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11238 			     TG3_FW_UPDATE_FREQ_SEC;
11239 
11240 	timer_setup(&tp->timer, tg3_timer, 0);
11241 }
11242 
11243 static void tg3_timer_start(struct tg3 *tp)
11244 {
11245 	tp->asf_counter   = tp->asf_multiplier;
11246 	tp->timer_counter = tp->timer_multiplier;
11247 
11248 	tp->timer.expires = jiffies + tp->timer_offset;
11249 	add_timer(&tp->timer);
11250 }
11251 
11252 static void tg3_timer_stop(struct tg3 *tp)
11253 {
11254 	del_timer_sync(&tp->timer);
11255 }
11256 
11257 /* Restart hardware after configuration changes, self-test, etc.
11258  * Invoked with tp->lock held.
11259  */
11260 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11261 	__releases(tp->lock)
11262 	__acquires(tp->lock)
11263 	__releases(tp->dev->lock)
11264 	__acquires(tp->dev->lock)
11265 {
11266 	int err;
11267 
11268 	err = tg3_init_hw(tp, reset_phy);
11269 	if (err) {
11270 		netdev_err(tp->dev,
11271 			   "Failed to re-initialize device, aborting\n");
11272 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11273 		tg3_full_unlock(tp);
11274 		tg3_timer_stop(tp);
11275 		tp->irq_sync = 0;
11276 		tg3_napi_enable(tp);
11277 		netdev_unlock(tp->dev);
11278 		dev_close(tp->dev);
11279 		netdev_lock(tp->dev);
11280 		tg3_full_lock(tp, 0);
11281 	}
11282 	return err;
11283 }
11284 
11285 static void tg3_reset_task(struct work_struct *work)
11286 {
11287 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11288 	int err;
11289 
11290 	rtnl_lock();
11291 	tg3_full_lock(tp, 0);
11292 
11293 	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11294 	    tp->pdev->error_state != pci_channel_io_normal) {
11295 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11296 		tg3_full_unlock(tp);
11297 		rtnl_unlock();
11298 		return;
11299 	}
11300 
11301 	tg3_full_unlock(tp);
11302 
11303 	tg3_phy_stop(tp);
11304 
11305 	tg3_netif_stop(tp);
11306 
11307 	netdev_lock(tp->dev);
11308 	tg3_full_lock(tp, 1);
11309 
11310 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11311 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11312 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11313 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11314 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11315 	}
11316 
11317 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11318 	err = tg3_init_hw(tp, true);
11319 	if (err) {
11320 		tg3_full_unlock(tp);
11321 		tp->irq_sync = 0;
11322 		tg3_napi_enable(tp);
11323 		/* Clear this flag so that tg3_reset_task_cancel() will not
11324 		 * call cancel_work_sync() and wait forever.
11325 		 */
11326 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11327 		netdev_unlock(tp->dev);
11328 		dev_close(tp->dev);
11329 		goto out;
11330 	}
11331 
11332 	tg3_netif_start(tp);
11333 	tg3_full_unlock(tp);
11334 	netdev_unlock(tp->dev);
11335 	tg3_phy_start(tp);
11336 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11337 out:
11338 	rtnl_unlock();
11339 }
11340 
11341 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11342 {
11343 	irq_handler_t fn;
11344 	unsigned long flags;
11345 	char *name;
11346 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11347 
11348 	if (tp->irq_cnt == 1)
11349 		name = tp->dev->name;
11350 	else {
11351 		name = &tnapi->irq_lbl[0];
11352 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11353 			snprintf(name, sizeof(tnapi->irq_lbl),
11354 				 "%s-txrx-%d", tp->dev->name, irq_num);
11355 		else if (tnapi->tx_buffers)
11356 			snprintf(name, sizeof(tnapi->irq_lbl),
11357 				 "%s-tx-%d", tp->dev->name, irq_num);
11358 		else if (tnapi->rx_rcb)
11359 			snprintf(name, sizeof(tnapi->irq_lbl),
11360 				 "%s-rx-%d", tp->dev->name, irq_num);
11361 		else
11362 			snprintf(name, sizeof(tnapi->irq_lbl),
11363 				 "%s-%d", tp->dev->name, irq_num);
11364 	}
11365 
11366 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11367 		fn = tg3_msi;
11368 		if (tg3_flag(tp, 1SHOT_MSI))
11369 			fn = tg3_msi_1shot;
11370 		flags = 0;
11371 	} else {
11372 		fn = tg3_interrupt;
11373 		if (tg3_flag(tp, TAGGED_STATUS))
11374 			fn = tg3_interrupt_tagged;
11375 		flags = IRQF_SHARED;
11376 	}
11377 
11378 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11379 }
11380 
11381 static int tg3_test_interrupt(struct tg3 *tp)
11382 {
11383 	struct tg3_napi *tnapi = &tp->napi[0];
11384 	struct net_device *dev = tp->dev;
11385 	int err, i, intr_ok = 0;
11386 	u32 val;
11387 
11388 	if (!netif_running(dev))
11389 		return -ENODEV;
11390 
11391 	tg3_disable_ints(tp);
11392 
11393 	free_irq(tnapi->irq_vec, tnapi);
11394 
11395 	/*
11396 	 * Turn off MSI one shot mode.  Otherwise this test has no
11397 	 * observable way to know whether the interrupt was delivered.
11398 	 */
11399 	if (tg3_flag(tp, 57765_PLUS)) {
11400 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11401 		tw32(MSGINT_MODE, val);
11402 	}
11403 
11404 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11405 			  IRQF_SHARED, dev->name, tnapi);
11406 	if (err)
11407 		return err;
11408 
11409 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11410 	tg3_enable_ints(tp);
11411 
11412 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11413 	       tnapi->coal_now);
11414 
11415 	for (i = 0; i < 5; i++) {
11416 		u32 int_mbox, misc_host_ctrl;
11417 
11418 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11419 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11420 
11421 		if ((int_mbox != 0) ||
11422 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11423 			intr_ok = 1;
11424 			break;
11425 		}
11426 
11427 		if (tg3_flag(tp, 57765_PLUS) &&
11428 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11429 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11430 
11431 		msleep(10);
11432 	}
11433 
11434 	tg3_disable_ints(tp);
11435 
11436 	free_irq(tnapi->irq_vec, tnapi);
11437 
11438 	err = tg3_request_irq(tp, 0);
11439 
11440 	if (err)
11441 		return err;
11442 
11443 	if (intr_ok) {
11444 		/* Reenable MSI one shot mode. */
11445 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11446 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11447 			tw32(MSGINT_MODE, val);
11448 		}
11449 		return 0;
11450 	}
11451 
11452 	return -EIO;
11453 }
11454 
11455 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11456  * successfully restored
11457  */
11458 static int tg3_test_msi(struct tg3 *tp)
11459 {
11460 	int err;
11461 	u16 pci_cmd;
11462 
11463 	if (!tg3_flag(tp, USING_MSI))
11464 		return 0;
11465 
11466 	/* Turn off SERR reporting in case MSI terminates with Master
11467 	 * Abort.
11468 	 */
11469 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11470 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11471 			      pci_cmd & ~PCI_COMMAND_SERR);
11472 
11473 	err = tg3_test_interrupt(tp);
11474 
11475 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11476 
11477 	if (!err)
11478 		return 0;
11479 
11480 	/* other failures */
11481 	if (err != -EIO)
11482 		return err;
11483 
11484 	/* MSI test failed, go back to INTx mode */
11485 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11486 		    "to INTx mode. Please report this failure to the PCI "
11487 		    "maintainer and include system chipset information\n");
11488 
11489 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11490 
11491 	pci_disable_msi(tp->pdev);
11492 
11493 	tg3_flag_clear(tp, USING_MSI);
11494 	tp->napi[0].irq_vec = tp->pdev->irq;
11495 
11496 	err = tg3_request_irq(tp, 0);
11497 	if (err)
11498 		return err;
11499 
11500 	/* Need to reset the chip because the MSI cycle may have terminated
11501 	 * with Master Abort.
11502 	 */
11503 	tg3_full_lock(tp, 1);
11504 
11505 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11506 	err = tg3_init_hw(tp, true);
11507 
11508 	tg3_full_unlock(tp);
11509 
11510 	if (err)
11511 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11512 
11513 	return err;
11514 }
11515 
11516 static int tg3_request_firmware(struct tg3 *tp)
11517 {
11518 	const struct tg3_firmware_hdr *fw_hdr;
11519 
11520 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11521 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11522 			   tp->fw_needed);
11523 		return -ENOENT;
11524 	}
11525 
11526 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11527 
11528 	/* Firmware blob starts with version numbers, followed by
11529 	 * start address and _full_ length including BSS sections
11530 	 * (which must be longer than the actual data, of course
11531 	 */
11532 
11533 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11534 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11535 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11536 			   tp->fw_len, tp->fw_needed);
11537 		release_firmware(tp->fw);
11538 		tp->fw = NULL;
11539 		return -EINVAL;
11540 	}
11541 
11542 	/* We no longer need firmware; we have it. */
11543 	tp->fw_needed = NULL;
11544 	return 0;
11545 }
11546 
11547 static u32 tg3_irq_count(struct tg3 *tp)
11548 {
11549 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11550 
11551 	if (irq_cnt > 1) {
11552 		/* We want as many rx rings enabled as there are cpus.
11553 		 * In multiqueue MSI-X mode, the first MSI-X vector
11554 		 * only deals with link interrupts, etc, so we add
11555 		 * one to the number of vectors we are requesting.
11556 		 */
11557 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11558 	}
11559 
11560 	return irq_cnt;
11561 }
11562 
11563 static bool tg3_enable_msix(struct tg3 *tp)
11564 {
11565 	int i, rc;
11566 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11567 
11568 	tp->txq_cnt = tp->txq_req;
11569 	tp->rxq_cnt = tp->rxq_req;
11570 	if (!tp->rxq_cnt)
11571 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11572 	if (tp->rxq_cnt > tp->rxq_max)
11573 		tp->rxq_cnt = tp->rxq_max;
11574 
11575 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11576 	 * scheduling of the TX rings can cause starvation of rings with
11577 	 * small packets when other rings have TSO or jumbo packets.
11578 	 */
11579 	if (!tp->txq_req)
11580 		tp->txq_cnt = 1;
11581 
11582 	tp->irq_cnt = tg3_irq_count(tp);
11583 
11584 	for (i = 0; i < tp->irq_max; i++) {
11585 		msix_ent[i].entry  = i;
11586 		msix_ent[i].vector = 0;
11587 	}
11588 
11589 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11590 	if (rc < 0) {
11591 		return false;
11592 	} else if (rc < tp->irq_cnt) {
11593 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11594 			      tp->irq_cnt, rc);
11595 		tp->irq_cnt = rc;
11596 		tp->rxq_cnt = max(rc - 1, 1);
11597 		if (tp->txq_cnt)
11598 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11599 	}
11600 
11601 	for (i = 0; i < tp->irq_max; i++)
11602 		tp->napi[i].irq_vec = msix_ent[i].vector;
11603 
11604 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11605 		pci_disable_msix(tp->pdev);
11606 		return false;
11607 	}
11608 
11609 	if (tp->irq_cnt == 1)
11610 		return true;
11611 
11612 	tg3_flag_set(tp, ENABLE_RSS);
11613 
11614 	if (tp->txq_cnt > 1)
11615 		tg3_flag_set(tp, ENABLE_TSS);
11616 
11617 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11618 
11619 	return true;
11620 }
11621 
11622 static void tg3_ints_init(struct tg3 *tp)
11623 {
11624 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11625 	    !tg3_flag(tp, TAGGED_STATUS)) {
11626 		/* All MSI supporting chips should support tagged
11627 		 * status.  Assert that this is the case.
11628 		 */
11629 		netdev_warn(tp->dev,
11630 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11631 		goto defcfg;
11632 	}
11633 
11634 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11635 		tg3_flag_set(tp, USING_MSIX);
11636 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11637 		tg3_flag_set(tp, USING_MSI);
11638 
11639 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11640 		u32 msi_mode = tr32(MSGINT_MODE);
11641 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11642 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11643 		if (!tg3_flag(tp, 1SHOT_MSI))
11644 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11645 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11646 	}
11647 defcfg:
11648 	if (!tg3_flag(tp, USING_MSIX)) {
11649 		tp->irq_cnt = 1;
11650 		tp->napi[0].irq_vec = tp->pdev->irq;
11651 	}
11652 
11653 	if (tp->irq_cnt == 1) {
11654 		tp->txq_cnt = 1;
11655 		tp->rxq_cnt = 1;
11656 		netif_set_real_num_tx_queues(tp->dev, 1);
11657 		netif_set_real_num_rx_queues(tp->dev, 1);
11658 	}
11659 }
11660 
11661 static void tg3_ints_fini(struct tg3 *tp)
11662 {
11663 	if (tg3_flag(tp, USING_MSIX))
11664 		pci_disable_msix(tp->pdev);
11665 	else if (tg3_flag(tp, USING_MSI))
11666 		pci_disable_msi(tp->pdev);
11667 	tg3_flag_clear(tp, USING_MSI);
11668 	tg3_flag_clear(tp, USING_MSIX);
11669 	tg3_flag_clear(tp, ENABLE_RSS);
11670 	tg3_flag_clear(tp, ENABLE_TSS);
11671 }
11672 
11673 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11674 		     bool init)
11675 {
11676 	struct net_device *dev = tp->dev;
11677 	int i, err;
11678 
11679 	/*
11680 	 * Setup interrupts first so we know how
11681 	 * many NAPI resources to allocate
11682 	 */
11683 	tg3_ints_init(tp);
11684 
11685 	tg3_rss_check_indir_tbl(tp);
11686 
11687 	/* The placement of this call is tied
11688 	 * to the setup and use of Host TX descriptors.
11689 	 */
11690 	err = tg3_alloc_consistent(tp);
11691 	if (err)
11692 		goto out_ints_fini;
11693 
11694 	netdev_lock(dev);
11695 	tg3_napi_init(tp);
11696 
11697 	tg3_napi_enable(tp);
11698 	netdev_unlock(dev);
11699 
11700 	for (i = 0; i < tp->irq_cnt; i++) {
11701 		err = tg3_request_irq(tp, i);
11702 		if (err) {
11703 			for (i--; i >= 0; i--) {
11704 				struct tg3_napi *tnapi = &tp->napi[i];
11705 
11706 				free_irq(tnapi->irq_vec, tnapi);
11707 			}
11708 			goto out_napi_fini;
11709 		}
11710 	}
11711 
11712 	tg3_full_lock(tp, 0);
11713 
11714 	if (init)
11715 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11716 
11717 	err = tg3_init_hw(tp, reset_phy);
11718 	if (err) {
11719 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11720 		tg3_free_rings(tp);
11721 	}
11722 
11723 	tg3_full_unlock(tp);
11724 
11725 	if (err)
11726 		goto out_free_irq;
11727 
11728 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11729 		err = tg3_test_msi(tp);
11730 
11731 		if (err) {
11732 			tg3_full_lock(tp, 0);
11733 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11734 			tg3_free_rings(tp);
11735 			tg3_full_unlock(tp);
11736 
11737 			goto out_napi_fini;
11738 		}
11739 
11740 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11741 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11742 
11743 			tw32(PCIE_TRANSACTION_CFG,
11744 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11745 		}
11746 	}
11747 
11748 	tg3_phy_start(tp);
11749 
11750 	tg3_hwmon_open(tp);
11751 
11752 	tg3_full_lock(tp, 0);
11753 
11754 	tg3_timer_start(tp);
11755 	tg3_flag_set(tp, INIT_COMPLETE);
11756 	tg3_enable_ints(tp);
11757 
11758 	tg3_ptp_resume(tp);
11759 
11760 	tg3_full_unlock(tp);
11761 
11762 	netif_tx_start_all_queues(dev);
11763 
11764 	/*
11765 	 * Reset loopback feature if it was turned on while the device was down
11766 	 * make sure that it's installed properly now.
11767 	 */
11768 	if (dev->features & NETIF_F_LOOPBACK)
11769 		tg3_set_loopback(dev, dev->features);
11770 
11771 	return 0;
11772 
11773 out_free_irq:
11774 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11775 		struct tg3_napi *tnapi = &tp->napi[i];
11776 		free_irq(tnapi->irq_vec, tnapi);
11777 	}
11778 
11779 out_napi_fini:
11780 	tg3_napi_disable(tp);
11781 	tg3_napi_fini(tp);
11782 	tg3_free_consistent(tp);
11783 
11784 out_ints_fini:
11785 	tg3_ints_fini(tp);
11786 
11787 	return err;
11788 }
11789 
11790 static void tg3_stop(struct tg3 *tp)
11791 {
11792 	int i;
11793 
11794 	tg3_reset_task_cancel(tp);
11795 	tg3_netif_stop(tp);
11796 
11797 	tg3_timer_stop(tp);
11798 
11799 	tg3_hwmon_close(tp);
11800 
11801 	tg3_phy_stop(tp);
11802 
11803 	tg3_full_lock(tp, 1);
11804 
11805 	tg3_disable_ints(tp);
11806 
11807 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11808 	tg3_free_rings(tp);
11809 	tg3_flag_clear(tp, INIT_COMPLETE);
11810 
11811 	tg3_full_unlock(tp);
11812 
11813 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11814 		struct tg3_napi *tnapi = &tp->napi[i];
11815 		free_irq(tnapi->irq_vec, tnapi);
11816 	}
11817 
11818 	tg3_ints_fini(tp);
11819 
11820 	tg3_napi_fini(tp);
11821 
11822 	tg3_free_consistent(tp);
11823 }
11824 
11825 static int tg3_open(struct net_device *dev)
11826 {
11827 	struct tg3 *tp = netdev_priv(dev);
11828 	int err;
11829 
11830 	if (tp->pcierr_recovery) {
11831 		netdev_err(dev, "Failed to open device. PCI error recovery "
11832 			   "in progress\n");
11833 		return -EAGAIN;
11834 	}
11835 
11836 	if (tp->fw_needed) {
11837 		err = tg3_request_firmware(tp);
11838 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11839 			if (err) {
11840 				netdev_warn(tp->dev, "EEE capability disabled\n");
11841 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11842 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11843 				netdev_warn(tp->dev, "EEE capability restored\n");
11844 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11845 			}
11846 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11847 			if (err)
11848 				return err;
11849 		} else if (err) {
11850 			netdev_warn(tp->dev, "TSO capability disabled\n");
11851 			tg3_flag_clear(tp, TSO_CAPABLE);
11852 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11853 			netdev_notice(tp->dev, "TSO capability restored\n");
11854 			tg3_flag_set(tp, TSO_CAPABLE);
11855 		}
11856 	}
11857 
11858 	tg3_carrier_off(tp);
11859 
11860 	err = tg3_power_up(tp);
11861 	if (err)
11862 		return err;
11863 
11864 	tg3_full_lock(tp, 0);
11865 
11866 	tg3_disable_ints(tp);
11867 	tg3_flag_clear(tp, INIT_COMPLETE);
11868 
11869 	tg3_full_unlock(tp);
11870 
11871 	err = tg3_start(tp,
11872 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11873 			true, true);
11874 	if (err) {
11875 		tg3_frob_aux_power(tp, false);
11876 		pci_set_power_state(tp->pdev, PCI_D3hot);
11877 	}
11878 
11879 	return err;
11880 }
11881 
11882 static int tg3_close(struct net_device *dev)
11883 {
11884 	struct tg3 *tp = netdev_priv(dev);
11885 
11886 	if (tp->pcierr_recovery) {
11887 		netdev_err(dev, "Failed to close device. PCI error recovery "
11888 			   "in progress\n");
11889 		return -EAGAIN;
11890 	}
11891 
11892 	tg3_stop(tp);
11893 
11894 	if (pci_device_is_present(tp->pdev)) {
11895 		tg3_power_down_prepare(tp);
11896 
11897 		tg3_carrier_off(tp);
11898 	}
11899 	return 0;
11900 }
11901 
11902 static inline u64 get_stat64(tg3_stat64_t *val)
11903 {
11904        return ((u64)val->high << 32) | ((u64)val->low);
11905 }
11906 
11907 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11908 {
11909 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11910 
11911 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11912 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11913 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11914 		u32 val;
11915 
11916 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11917 			tg3_writephy(tp, MII_TG3_TEST1,
11918 				     val | MII_TG3_TEST1_CRC_EN);
11919 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11920 		} else
11921 			val = 0;
11922 
11923 		tp->phy_crc_errors += val;
11924 
11925 		return tp->phy_crc_errors;
11926 	}
11927 
11928 	return get_stat64(&hw_stats->rx_fcs_errors);
11929 }
11930 
11931 #define ESTAT_ADD(member) \
11932 	estats->member =	old_estats->member + \
11933 				get_stat64(&hw_stats->member)
11934 
11935 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11936 {
11937 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11938 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11939 
11940 	ESTAT_ADD(rx_octets);
11941 	ESTAT_ADD(rx_fragments);
11942 	ESTAT_ADD(rx_ucast_packets);
11943 	ESTAT_ADD(rx_mcast_packets);
11944 	ESTAT_ADD(rx_bcast_packets);
11945 	ESTAT_ADD(rx_fcs_errors);
11946 	ESTAT_ADD(rx_align_errors);
11947 	ESTAT_ADD(rx_xon_pause_rcvd);
11948 	ESTAT_ADD(rx_xoff_pause_rcvd);
11949 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11950 	ESTAT_ADD(rx_xoff_entered);
11951 	ESTAT_ADD(rx_frame_too_long_errors);
11952 	ESTAT_ADD(rx_jabbers);
11953 	ESTAT_ADD(rx_undersize_packets);
11954 	ESTAT_ADD(rx_in_length_errors);
11955 	ESTAT_ADD(rx_out_length_errors);
11956 	ESTAT_ADD(rx_64_or_less_octet_packets);
11957 	ESTAT_ADD(rx_65_to_127_octet_packets);
11958 	ESTAT_ADD(rx_128_to_255_octet_packets);
11959 	ESTAT_ADD(rx_256_to_511_octet_packets);
11960 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11961 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11962 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11963 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11964 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11965 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11966 
11967 	ESTAT_ADD(tx_octets);
11968 	ESTAT_ADD(tx_collisions);
11969 	ESTAT_ADD(tx_xon_sent);
11970 	ESTAT_ADD(tx_xoff_sent);
11971 	ESTAT_ADD(tx_flow_control);
11972 	ESTAT_ADD(tx_mac_errors);
11973 	ESTAT_ADD(tx_single_collisions);
11974 	ESTAT_ADD(tx_mult_collisions);
11975 	ESTAT_ADD(tx_deferred);
11976 	ESTAT_ADD(tx_excessive_collisions);
11977 	ESTAT_ADD(tx_late_collisions);
11978 	ESTAT_ADD(tx_collide_2times);
11979 	ESTAT_ADD(tx_collide_3times);
11980 	ESTAT_ADD(tx_collide_4times);
11981 	ESTAT_ADD(tx_collide_5times);
11982 	ESTAT_ADD(tx_collide_6times);
11983 	ESTAT_ADD(tx_collide_7times);
11984 	ESTAT_ADD(tx_collide_8times);
11985 	ESTAT_ADD(tx_collide_9times);
11986 	ESTAT_ADD(tx_collide_10times);
11987 	ESTAT_ADD(tx_collide_11times);
11988 	ESTAT_ADD(tx_collide_12times);
11989 	ESTAT_ADD(tx_collide_13times);
11990 	ESTAT_ADD(tx_collide_14times);
11991 	ESTAT_ADD(tx_collide_15times);
11992 	ESTAT_ADD(tx_ucast_packets);
11993 	ESTAT_ADD(tx_mcast_packets);
11994 	ESTAT_ADD(tx_bcast_packets);
11995 	ESTAT_ADD(tx_carrier_sense_errors);
11996 	ESTAT_ADD(tx_discards);
11997 	ESTAT_ADD(tx_errors);
11998 
11999 	ESTAT_ADD(dma_writeq_full);
12000 	ESTAT_ADD(dma_write_prioq_full);
12001 	ESTAT_ADD(rxbds_empty);
12002 	ESTAT_ADD(rx_discards);
12003 	ESTAT_ADD(rx_errors);
12004 	ESTAT_ADD(rx_threshold_hit);
12005 
12006 	ESTAT_ADD(dma_readq_full);
12007 	ESTAT_ADD(dma_read_prioq_full);
12008 	ESTAT_ADD(tx_comp_queue_full);
12009 
12010 	ESTAT_ADD(ring_set_send_prod_index);
12011 	ESTAT_ADD(ring_status_update);
12012 	ESTAT_ADD(nic_irqs);
12013 	ESTAT_ADD(nic_avoided_irqs);
12014 	ESTAT_ADD(nic_tx_threshold_hit);
12015 
12016 	ESTAT_ADD(mbuf_lwm_thresh_hit);
12017 }
12018 
12019 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
12020 {
12021 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12022 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
12023 	unsigned long rx_dropped;
12024 	unsigned long tx_dropped;
12025 	int i;
12026 
12027 	stats->rx_packets = old_stats->rx_packets +
12028 		get_stat64(&hw_stats->rx_ucast_packets) +
12029 		get_stat64(&hw_stats->rx_mcast_packets) +
12030 		get_stat64(&hw_stats->rx_bcast_packets);
12031 
12032 	stats->tx_packets = old_stats->tx_packets +
12033 		get_stat64(&hw_stats->tx_ucast_packets) +
12034 		get_stat64(&hw_stats->tx_mcast_packets) +
12035 		get_stat64(&hw_stats->tx_bcast_packets);
12036 
12037 	stats->rx_bytes = old_stats->rx_bytes +
12038 		get_stat64(&hw_stats->rx_octets);
12039 	stats->tx_bytes = old_stats->tx_bytes +
12040 		get_stat64(&hw_stats->tx_octets);
12041 
12042 	stats->rx_errors = old_stats->rx_errors +
12043 		get_stat64(&hw_stats->rx_errors);
12044 	stats->tx_errors = old_stats->tx_errors +
12045 		get_stat64(&hw_stats->tx_errors) +
12046 		get_stat64(&hw_stats->tx_mac_errors) +
12047 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
12048 		get_stat64(&hw_stats->tx_discards);
12049 
12050 	stats->multicast = old_stats->multicast +
12051 		get_stat64(&hw_stats->rx_mcast_packets);
12052 	stats->collisions = old_stats->collisions +
12053 		get_stat64(&hw_stats->tx_collisions);
12054 
12055 	stats->rx_length_errors = old_stats->rx_length_errors +
12056 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
12057 		get_stat64(&hw_stats->rx_undersize_packets);
12058 
12059 	stats->rx_frame_errors = old_stats->rx_frame_errors +
12060 		get_stat64(&hw_stats->rx_align_errors);
12061 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12062 		get_stat64(&hw_stats->tx_discards);
12063 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12064 		get_stat64(&hw_stats->tx_carrier_sense_errors);
12065 
12066 	stats->rx_crc_errors = old_stats->rx_crc_errors +
12067 		tg3_calc_crc_errors(tp);
12068 
12069 	stats->rx_missed_errors = old_stats->rx_missed_errors +
12070 		get_stat64(&hw_stats->rx_discards);
12071 
12072 	/* Aggregate per-queue counters. The per-queue counters are updated
12073 	 * by a single writer, race-free. The result computed by this loop
12074 	 * might not be 100% accurate (counters can be updated in the middle of
12075 	 * the loop) but the next tg3_get_nstats() will recompute the current
12076 	 * value so it is acceptable.
12077 	 *
12078 	 * Note that these counters wrap around at 4G on 32bit machines.
12079 	 */
12080 	rx_dropped = (unsigned long)(old_stats->rx_dropped);
12081 	tx_dropped = (unsigned long)(old_stats->tx_dropped);
12082 
12083 	for (i = 0; i < tp->irq_cnt; i++) {
12084 		struct tg3_napi *tnapi = &tp->napi[i];
12085 
12086 		rx_dropped += tnapi->rx_dropped;
12087 		tx_dropped += tnapi->tx_dropped;
12088 	}
12089 
12090 	stats->rx_dropped = rx_dropped;
12091 	stats->tx_dropped = tx_dropped;
12092 }
12093 
12094 static int tg3_get_regs_len(struct net_device *dev)
12095 {
12096 	return TG3_REG_BLK_SIZE;
12097 }
12098 
12099 static void tg3_get_regs(struct net_device *dev,
12100 		struct ethtool_regs *regs, void *_p)
12101 {
12102 	struct tg3 *tp = netdev_priv(dev);
12103 
12104 	regs->version = 0;
12105 
12106 	memset(_p, 0, TG3_REG_BLK_SIZE);
12107 
12108 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12109 		return;
12110 
12111 	tg3_full_lock(tp, 0);
12112 
12113 	tg3_dump_legacy_regs(tp, (u32 *)_p);
12114 
12115 	tg3_full_unlock(tp);
12116 }
12117 
12118 static int tg3_get_eeprom_len(struct net_device *dev)
12119 {
12120 	struct tg3 *tp = netdev_priv(dev);
12121 
12122 	return tp->nvram_size;
12123 }
12124 
12125 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12126 {
12127 	struct tg3 *tp = netdev_priv(dev);
12128 	int ret, cpmu_restore = 0;
12129 	u8  *pd;
12130 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12131 	__be32 val;
12132 
12133 	if (tg3_flag(tp, NO_NVRAM))
12134 		return -EINVAL;
12135 
12136 	offset = eeprom->offset;
12137 	len = eeprom->len;
12138 	eeprom->len = 0;
12139 
12140 	eeprom->magic = TG3_EEPROM_MAGIC;
12141 
12142 	/* Override clock, link aware and link idle modes */
12143 	if (tg3_flag(tp, CPMU_PRESENT)) {
12144 		cpmu_val = tr32(TG3_CPMU_CTRL);
12145 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12146 				CPMU_CTRL_LINK_IDLE_MODE)) {
12147 			tw32(TG3_CPMU_CTRL, cpmu_val &
12148 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12149 					     CPMU_CTRL_LINK_IDLE_MODE));
12150 			cpmu_restore = 1;
12151 		}
12152 	}
12153 	tg3_override_clk(tp);
12154 
12155 	if (offset & 3) {
12156 		/* adjustments to start on required 4 byte boundary */
12157 		b_offset = offset & 3;
12158 		b_count = 4 - b_offset;
12159 		if (b_count > len) {
12160 			/* i.e. offset=1 len=2 */
12161 			b_count = len;
12162 		}
12163 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12164 		if (ret)
12165 			goto eeprom_done;
12166 		memcpy(data, ((char *)&val) + b_offset, b_count);
12167 		len -= b_count;
12168 		offset += b_count;
12169 		eeprom->len += b_count;
12170 	}
12171 
12172 	/* read bytes up to the last 4 byte boundary */
12173 	pd = &data[eeprom->len];
12174 	for (i = 0; i < (len - (len & 3)); i += 4) {
12175 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12176 		if (ret) {
12177 			if (i)
12178 				i -= 4;
12179 			eeprom->len += i;
12180 			goto eeprom_done;
12181 		}
12182 		memcpy(pd + i, &val, 4);
12183 		if (need_resched()) {
12184 			if (signal_pending(current)) {
12185 				eeprom->len += i;
12186 				ret = -EINTR;
12187 				goto eeprom_done;
12188 			}
12189 			cond_resched();
12190 		}
12191 	}
12192 	eeprom->len += i;
12193 
12194 	if (len & 3) {
12195 		/* read last bytes not ending on 4 byte boundary */
12196 		pd = &data[eeprom->len];
12197 		b_count = len & 3;
12198 		b_offset = offset + len - b_count;
12199 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12200 		if (ret)
12201 			goto eeprom_done;
12202 		memcpy(pd, &val, b_count);
12203 		eeprom->len += b_count;
12204 	}
12205 	ret = 0;
12206 
12207 eeprom_done:
12208 	/* Restore clock, link aware and link idle modes */
12209 	tg3_restore_clk(tp);
12210 	if (cpmu_restore)
12211 		tw32(TG3_CPMU_CTRL, cpmu_val);
12212 
12213 	return ret;
12214 }
12215 
12216 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12217 {
12218 	struct tg3 *tp = netdev_priv(dev);
12219 	int ret;
12220 	u32 offset, len, b_offset, odd_len;
12221 	u8 *buf;
12222 	__be32 start = 0, end;
12223 
12224 	if (tg3_flag(tp, NO_NVRAM) ||
12225 	    eeprom->magic != TG3_EEPROM_MAGIC)
12226 		return -EINVAL;
12227 
12228 	offset = eeprom->offset;
12229 	len = eeprom->len;
12230 
12231 	if ((b_offset = (offset & 3))) {
12232 		/* adjustments to start on required 4 byte boundary */
12233 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12234 		if (ret)
12235 			return ret;
12236 		len += b_offset;
12237 		offset &= ~3;
12238 		if (len < 4)
12239 			len = 4;
12240 	}
12241 
12242 	odd_len = 0;
12243 	if (len & 3) {
12244 		/* adjustments to end on required 4 byte boundary */
12245 		odd_len = 1;
12246 		len = (len + 3) & ~3;
12247 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12248 		if (ret)
12249 			return ret;
12250 	}
12251 
12252 	buf = data;
12253 	if (b_offset || odd_len) {
12254 		buf = kmalloc(len, GFP_KERNEL);
12255 		if (!buf)
12256 			return -ENOMEM;
12257 		if (b_offset)
12258 			memcpy(buf, &start, 4);
12259 		if (odd_len)
12260 			memcpy(buf+len-4, &end, 4);
12261 		memcpy(buf + b_offset, data, eeprom->len);
12262 	}
12263 
12264 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12265 
12266 	if (buf != data)
12267 		kfree(buf);
12268 
12269 	return ret;
12270 }
12271 
12272 static int tg3_get_link_ksettings(struct net_device *dev,
12273 				  struct ethtool_link_ksettings *cmd)
12274 {
12275 	struct tg3 *tp = netdev_priv(dev);
12276 	u32 supported, advertising;
12277 
12278 	if (tg3_flag(tp, USE_PHYLIB)) {
12279 		struct phy_device *phydev;
12280 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12281 			return -EAGAIN;
12282 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12283 		phy_ethtool_ksettings_get(phydev, cmd);
12284 
12285 		return 0;
12286 	}
12287 
12288 	supported = (SUPPORTED_Autoneg);
12289 
12290 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12291 		supported |= (SUPPORTED_1000baseT_Half |
12292 			      SUPPORTED_1000baseT_Full);
12293 
12294 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12295 		supported |= (SUPPORTED_100baseT_Half |
12296 			      SUPPORTED_100baseT_Full |
12297 			      SUPPORTED_10baseT_Half |
12298 			      SUPPORTED_10baseT_Full |
12299 			      SUPPORTED_TP);
12300 		cmd->base.port = PORT_TP;
12301 	} else {
12302 		supported |= SUPPORTED_FIBRE;
12303 		cmd->base.port = PORT_FIBRE;
12304 	}
12305 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12306 						supported);
12307 
12308 	advertising = tp->link_config.advertising;
12309 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12310 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12311 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12312 				advertising |= ADVERTISED_Pause;
12313 			} else {
12314 				advertising |= ADVERTISED_Pause |
12315 					ADVERTISED_Asym_Pause;
12316 			}
12317 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12318 			advertising |= ADVERTISED_Asym_Pause;
12319 		}
12320 	}
12321 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12322 						advertising);
12323 
12324 	if (netif_running(dev) && tp->link_up) {
12325 		cmd->base.speed = tp->link_config.active_speed;
12326 		cmd->base.duplex = tp->link_config.active_duplex;
12327 		ethtool_convert_legacy_u32_to_link_mode(
12328 			cmd->link_modes.lp_advertising,
12329 			tp->link_config.rmt_adv);
12330 
12331 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12332 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12333 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12334 			else
12335 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12336 		}
12337 	} else {
12338 		cmd->base.speed = SPEED_UNKNOWN;
12339 		cmd->base.duplex = DUPLEX_UNKNOWN;
12340 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12341 	}
12342 	cmd->base.phy_address = tp->phy_addr;
12343 	cmd->base.autoneg = tp->link_config.autoneg;
12344 	return 0;
12345 }
12346 
12347 static int tg3_set_link_ksettings(struct net_device *dev,
12348 				  const struct ethtool_link_ksettings *cmd)
12349 {
12350 	struct tg3 *tp = netdev_priv(dev);
12351 	u32 speed = cmd->base.speed;
12352 	u32 advertising;
12353 
12354 	if (tg3_flag(tp, USE_PHYLIB)) {
12355 		struct phy_device *phydev;
12356 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12357 			return -EAGAIN;
12358 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12359 		return phy_ethtool_ksettings_set(phydev, cmd);
12360 	}
12361 
12362 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12363 	    cmd->base.autoneg != AUTONEG_DISABLE)
12364 		return -EINVAL;
12365 
12366 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12367 	    cmd->base.duplex != DUPLEX_FULL &&
12368 	    cmd->base.duplex != DUPLEX_HALF)
12369 		return -EINVAL;
12370 
12371 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12372 						cmd->link_modes.advertising);
12373 
12374 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12375 		u32 mask = ADVERTISED_Autoneg |
12376 			   ADVERTISED_Pause |
12377 			   ADVERTISED_Asym_Pause;
12378 
12379 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12380 			mask |= ADVERTISED_1000baseT_Half |
12381 				ADVERTISED_1000baseT_Full;
12382 
12383 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12384 			mask |= ADVERTISED_100baseT_Half |
12385 				ADVERTISED_100baseT_Full |
12386 				ADVERTISED_10baseT_Half |
12387 				ADVERTISED_10baseT_Full |
12388 				ADVERTISED_TP;
12389 		else
12390 			mask |= ADVERTISED_FIBRE;
12391 
12392 		if (advertising & ~mask)
12393 			return -EINVAL;
12394 
12395 		mask &= (ADVERTISED_1000baseT_Half |
12396 			 ADVERTISED_1000baseT_Full |
12397 			 ADVERTISED_100baseT_Half |
12398 			 ADVERTISED_100baseT_Full |
12399 			 ADVERTISED_10baseT_Half |
12400 			 ADVERTISED_10baseT_Full);
12401 
12402 		advertising &= mask;
12403 	} else {
12404 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12405 			if (speed != SPEED_1000)
12406 				return -EINVAL;
12407 
12408 			if (cmd->base.duplex != DUPLEX_FULL)
12409 				return -EINVAL;
12410 		} else {
12411 			if (speed != SPEED_100 &&
12412 			    speed != SPEED_10)
12413 				return -EINVAL;
12414 		}
12415 	}
12416 
12417 	tg3_full_lock(tp, 0);
12418 
12419 	tp->link_config.autoneg = cmd->base.autoneg;
12420 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12421 		tp->link_config.advertising = (advertising |
12422 					      ADVERTISED_Autoneg);
12423 		tp->link_config.speed = SPEED_UNKNOWN;
12424 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12425 	} else {
12426 		tp->link_config.advertising = 0;
12427 		tp->link_config.speed = speed;
12428 		tp->link_config.duplex = cmd->base.duplex;
12429 	}
12430 
12431 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12432 
12433 	tg3_warn_mgmt_link_flap(tp);
12434 
12435 	if (netif_running(dev))
12436 		tg3_setup_phy(tp, true);
12437 
12438 	tg3_full_unlock(tp);
12439 
12440 	return 0;
12441 }
12442 
12443 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12444 {
12445 	struct tg3 *tp = netdev_priv(dev);
12446 
12447 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12448 	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12449 	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12450 }
12451 
12452 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12453 {
12454 	struct tg3 *tp = netdev_priv(dev);
12455 
12456 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12457 		wol->supported = WAKE_MAGIC;
12458 	else
12459 		wol->supported = 0;
12460 	wol->wolopts = 0;
12461 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12462 		wol->wolopts = WAKE_MAGIC;
12463 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12464 }
12465 
12466 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12467 {
12468 	struct tg3 *tp = netdev_priv(dev);
12469 	struct device *dp = &tp->pdev->dev;
12470 
12471 	if (wol->wolopts & ~WAKE_MAGIC)
12472 		return -EINVAL;
12473 	if ((wol->wolopts & WAKE_MAGIC) &&
12474 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12475 		return -EINVAL;
12476 
12477 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12478 
12479 	if (device_may_wakeup(dp))
12480 		tg3_flag_set(tp, WOL_ENABLE);
12481 	else
12482 		tg3_flag_clear(tp, WOL_ENABLE);
12483 
12484 	return 0;
12485 }
12486 
12487 static u32 tg3_get_msglevel(struct net_device *dev)
12488 {
12489 	struct tg3 *tp = netdev_priv(dev);
12490 	return tp->msg_enable;
12491 }
12492 
12493 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12494 {
12495 	struct tg3 *tp = netdev_priv(dev);
12496 	tp->msg_enable = value;
12497 }
12498 
12499 static int tg3_nway_reset(struct net_device *dev)
12500 {
12501 	struct tg3 *tp = netdev_priv(dev);
12502 	int r;
12503 
12504 	if (!netif_running(dev))
12505 		return -EAGAIN;
12506 
12507 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12508 		return -EINVAL;
12509 
12510 	tg3_warn_mgmt_link_flap(tp);
12511 
12512 	if (tg3_flag(tp, USE_PHYLIB)) {
12513 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12514 			return -EAGAIN;
12515 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12516 	} else {
12517 		u32 bmcr;
12518 
12519 		spin_lock_bh(&tp->lock);
12520 		r = -EINVAL;
12521 		tg3_readphy(tp, MII_BMCR, &bmcr);
12522 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12523 		    ((bmcr & BMCR_ANENABLE) ||
12524 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12525 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12526 						   BMCR_ANENABLE);
12527 			r = 0;
12528 		}
12529 		spin_unlock_bh(&tp->lock);
12530 	}
12531 
12532 	return r;
12533 }
12534 
12535 static void tg3_get_ringparam(struct net_device *dev,
12536 			      struct ethtool_ringparam *ering,
12537 			      struct kernel_ethtool_ringparam *kernel_ering,
12538 			      struct netlink_ext_ack *extack)
12539 {
12540 	struct tg3 *tp = netdev_priv(dev);
12541 
12542 	ering->rx_max_pending = tp->rx_std_ring_mask;
12543 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12544 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12545 	else
12546 		ering->rx_jumbo_max_pending = 0;
12547 
12548 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12549 
12550 	ering->rx_pending = tp->rx_pending;
12551 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12552 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12553 	else
12554 		ering->rx_jumbo_pending = 0;
12555 
12556 	ering->tx_pending = tp->napi[0].tx_pending;
12557 }
12558 
12559 static int tg3_set_ringparam(struct net_device *dev,
12560 			     struct ethtool_ringparam *ering,
12561 			     struct kernel_ethtool_ringparam *kernel_ering,
12562 			     struct netlink_ext_ack *extack)
12563 {
12564 	struct tg3 *tp = netdev_priv(dev);
12565 	int i, irq_sync = 0, err = 0;
12566 	bool reset_phy = false;
12567 
12568 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12569 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12570 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12571 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12572 	    (tg3_flag(tp, TSO_BUG) &&
12573 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12574 		return -EINVAL;
12575 
12576 	if (netif_running(dev)) {
12577 		tg3_phy_stop(tp);
12578 		tg3_netif_stop(tp);
12579 		irq_sync = 1;
12580 	}
12581 
12582 	netdev_lock(dev);
12583 	tg3_full_lock(tp, irq_sync);
12584 
12585 	tp->rx_pending = ering->rx_pending;
12586 
12587 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12588 	    tp->rx_pending > 63)
12589 		tp->rx_pending = 63;
12590 
12591 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12592 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12593 
12594 	for (i = 0; i < tp->irq_max; i++)
12595 		tp->napi[i].tx_pending = ering->tx_pending;
12596 
12597 	if (netif_running(dev)) {
12598 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12599 		/* Reset PHY to avoid PHY lock up */
12600 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12601 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12602 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12603 			reset_phy = true;
12604 
12605 		err = tg3_restart_hw(tp, reset_phy);
12606 		if (!err)
12607 			tg3_netif_start(tp);
12608 	}
12609 
12610 	tg3_full_unlock(tp);
12611 	netdev_unlock(dev);
12612 
12613 	if (irq_sync && !err)
12614 		tg3_phy_start(tp);
12615 
12616 	return err;
12617 }
12618 
12619 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12620 {
12621 	struct tg3 *tp = netdev_priv(dev);
12622 
12623 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12624 
12625 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12626 		epause->rx_pause = 1;
12627 	else
12628 		epause->rx_pause = 0;
12629 
12630 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12631 		epause->tx_pause = 1;
12632 	else
12633 		epause->tx_pause = 0;
12634 }
12635 
12636 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12637 {
12638 	struct tg3 *tp = netdev_priv(dev);
12639 	int err = 0;
12640 	bool reset_phy = false;
12641 
12642 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12643 		tg3_warn_mgmt_link_flap(tp);
12644 
12645 	if (tg3_flag(tp, USE_PHYLIB)) {
12646 		struct phy_device *phydev;
12647 
12648 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12649 
12650 		if (!phy_validate_pause(phydev, epause))
12651 			return -EINVAL;
12652 
12653 		tp->link_config.flowctrl = 0;
12654 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12655 		if (epause->rx_pause) {
12656 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12657 
12658 			if (epause->tx_pause) {
12659 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12660 			}
12661 		} else if (epause->tx_pause) {
12662 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12663 		}
12664 
12665 		if (epause->autoneg)
12666 			tg3_flag_set(tp, PAUSE_AUTONEG);
12667 		else
12668 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12669 
12670 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12671 			if (phydev->autoneg) {
12672 				/* phy_set_asym_pause() will
12673 				 * renegotiate the link to inform our
12674 				 * link partner of our flow control
12675 				 * settings, even if the flow control
12676 				 * is forced.  Let tg3_adjust_link()
12677 				 * do the final flow control setup.
12678 				 */
12679 				return 0;
12680 			}
12681 
12682 			if (!epause->autoneg)
12683 				tg3_setup_flow_control(tp, 0, 0);
12684 		}
12685 	} else {
12686 		int irq_sync = 0;
12687 
12688 		if (netif_running(dev)) {
12689 			tg3_netif_stop(tp);
12690 			irq_sync = 1;
12691 		}
12692 
12693 		netdev_lock(dev);
12694 		tg3_full_lock(tp, irq_sync);
12695 
12696 		if (epause->autoneg)
12697 			tg3_flag_set(tp, PAUSE_AUTONEG);
12698 		else
12699 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12700 		if (epause->rx_pause)
12701 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12702 		else
12703 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12704 		if (epause->tx_pause)
12705 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12706 		else
12707 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12708 
12709 		if (netif_running(dev)) {
12710 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12711 			/* Reset PHY to avoid PHY lock up */
12712 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12713 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12714 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12715 				reset_phy = true;
12716 
12717 			err = tg3_restart_hw(tp, reset_phy);
12718 			if (!err)
12719 				tg3_netif_start(tp);
12720 		}
12721 
12722 		tg3_full_unlock(tp);
12723 		netdev_unlock(dev);
12724 	}
12725 
12726 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12727 
12728 	return err;
12729 }
12730 
12731 static int tg3_get_sset_count(struct net_device *dev, int sset)
12732 {
12733 	switch (sset) {
12734 	case ETH_SS_TEST:
12735 		return TG3_NUM_TEST;
12736 	case ETH_SS_STATS:
12737 		return TG3_NUM_STATS;
12738 	default:
12739 		return -EOPNOTSUPP;
12740 	}
12741 }
12742 
12743 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12744 			 u32 *rules __always_unused)
12745 {
12746 	struct tg3 *tp = netdev_priv(dev);
12747 
12748 	if (!tg3_flag(tp, SUPPORT_MSIX))
12749 		return -EOPNOTSUPP;
12750 
12751 	switch (info->cmd) {
12752 	case ETHTOOL_GRXRINGS:
12753 		if (netif_running(tp->dev))
12754 			info->data = tp->rxq_cnt;
12755 		else {
12756 			info->data = num_online_cpus();
12757 			if (info->data > TG3_RSS_MAX_NUM_QS)
12758 				info->data = TG3_RSS_MAX_NUM_QS;
12759 		}
12760 
12761 		return 0;
12762 
12763 	default:
12764 		return -EOPNOTSUPP;
12765 	}
12766 }
12767 
12768 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12769 {
12770 	u32 size = 0;
12771 	struct tg3 *tp = netdev_priv(dev);
12772 
12773 	if (tg3_flag(tp, SUPPORT_MSIX))
12774 		size = TG3_RSS_INDIR_TBL_SIZE;
12775 
12776 	return size;
12777 }
12778 
12779 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12780 {
12781 	struct tg3 *tp = netdev_priv(dev);
12782 	int i;
12783 
12784 	rxfh->hfunc = ETH_RSS_HASH_TOP;
12785 	if (!rxfh->indir)
12786 		return 0;
12787 
12788 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12789 		rxfh->indir[i] = tp->rss_ind_tbl[i];
12790 
12791 	return 0;
12792 }
12793 
12794 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12795 			struct netlink_ext_ack *extack)
12796 {
12797 	struct tg3 *tp = netdev_priv(dev);
12798 	size_t i;
12799 
12800 	/* We require at least one supported parameter to be changed and no
12801 	 * change in any of the unsupported parameters
12802 	 */
12803 	if (rxfh->key ||
12804 	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12805 	     rxfh->hfunc != ETH_RSS_HASH_TOP))
12806 		return -EOPNOTSUPP;
12807 
12808 	if (!rxfh->indir)
12809 		return 0;
12810 
12811 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12812 		tp->rss_ind_tbl[i] = rxfh->indir[i];
12813 
12814 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12815 		return 0;
12816 
12817 	/* It is legal to write the indirection
12818 	 * table while the device is running.
12819 	 */
12820 	tg3_full_lock(tp, 0);
12821 	tg3_rss_write_indir_tbl(tp);
12822 	tg3_full_unlock(tp);
12823 
12824 	return 0;
12825 }
12826 
12827 static void tg3_get_channels(struct net_device *dev,
12828 			     struct ethtool_channels *channel)
12829 {
12830 	struct tg3 *tp = netdev_priv(dev);
12831 	u32 deflt_qs = netif_get_num_default_rss_queues();
12832 
12833 	channel->max_rx = tp->rxq_max;
12834 	channel->max_tx = tp->txq_max;
12835 
12836 	if (netif_running(dev)) {
12837 		channel->rx_count = tp->rxq_cnt;
12838 		channel->tx_count = tp->txq_cnt;
12839 	} else {
12840 		if (tp->rxq_req)
12841 			channel->rx_count = tp->rxq_req;
12842 		else
12843 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12844 
12845 		if (tp->txq_req)
12846 			channel->tx_count = tp->txq_req;
12847 		else
12848 			channel->tx_count = min(deflt_qs, tp->txq_max);
12849 	}
12850 }
12851 
12852 static int tg3_set_channels(struct net_device *dev,
12853 			    struct ethtool_channels *channel)
12854 {
12855 	struct tg3 *tp = netdev_priv(dev);
12856 
12857 	if (!tg3_flag(tp, SUPPORT_MSIX))
12858 		return -EOPNOTSUPP;
12859 
12860 	if (channel->rx_count > tp->rxq_max ||
12861 	    channel->tx_count > tp->txq_max)
12862 		return -EINVAL;
12863 
12864 	tp->rxq_req = channel->rx_count;
12865 	tp->txq_req = channel->tx_count;
12866 
12867 	if (!netif_running(dev))
12868 		return 0;
12869 
12870 	tg3_stop(tp);
12871 
12872 	tg3_carrier_off(tp);
12873 
12874 	tg3_start(tp, true, false, false);
12875 
12876 	return 0;
12877 }
12878 
12879 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12880 {
12881 	switch (stringset) {
12882 	case ETH_SS_STATS:
12883 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12884 		break;
12885 	case ETH_SS_TEST:
12886 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12887 		break;
12888 	default:
12889 		WARN_ON(1);	/* we need a WARN() */
12890 		break;
12891 	}
12892 }
12893 
12894 static int tg3_set_phys_id(struct net_device *dev,
12895 			    enum ethtool_phys_id_state state)
12896 {
12897 	struct tg3 *tp = netdev_priv(dev);
12898 
12899 	switch (state) {
12900 	case ETHTOOL_ID_ACTIVE:
12901 		return 1;	/* cycle on/off once per second */
12902 
12903 	case ETHTOOL_ID_ON:
12904 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12905 		     LED_CTRL_1000MBPS_ON |
12906 		     LED_CTRL_100MBPS_ON |
12907 		     LED_CTRL_10MBPS_ON |
12908 		     LED_CTRL_TRAFFIC_OVERRIDE |
12909 		     LED_CTRL_TRAFFIC_BLINK |
12910 		     LED_CTRL_TRAFFIC_LED);
12911 		break;
12912 
12913 	case ETHTOOL_ID_OFF:
12914 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12915 		     LED_CTRL_TRAFFIC_OVERRIDE);
12916 		break;
12917 
12918 	case ETHTOOL_ID_INACTIVE:
12919 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12920 		break;
12921 	}
12922 
12923 	return 0;
12924 }
12925 
12926 static void tg3_get_ethtool_stats(struct net_device *dev,
12927 				   struct ethtool_stats *estats, u64 *tmp_stats)
12928 {
12929 	struct tg3 *tp = netdev_priv(dev);
12930 
12931 	if (tp->hw_stats)
12932 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12933 	else
12934 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12935 }
12936 
12937 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12938 {
12939 	int i;
12940 	__be32 *buf;
12941 	u32 offset = 0, len = 0;
12942 	u32 magic, val;
12943 
12944 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12945 		return NULL;
12946 
12947 	if (magic == TG3_EEPROM_MAGIC) {
12948 		for (offset = TG3_NVM_DIR_START;
12949 		     offset < TG3_NVM_DIR_END;
12950 		     offset += TG3_NVM_DIRENT_SIZE) {
12951 			if (tg3_nvram_read(tp, offset, &val))
12952 				return NULL;
12953 
12954 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12955 			    TG3_NVM_DIRTYPE_EXTVPD)
12956 				break;
12957 		}
12958 
12959 		if (offset != TG3_NVM_DIR_END) {
12960 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12961 			if (tg3_nvram_read(tp, offset + 4, &offset))
12962 				return NULL;
12963 
12964 			offset = tg3_nvram_logical_addr(tp, offset);
12965 		}
12966 
12967 		if (!offset || !len) {
12968 			offset = TG3_NVM_VPD_OFF;
12969 			len = TG3_NVM_VPD_LEN;
12970 		}
12971 
12972 		buf = kmalloc(len, GFP_KERNEL);
12973 		if (!buf)
12974 			return NULL;
12975 
12976 		for (i = 0; i < len; i += 4) {
12977 			/* The data is in little-endian format in NVRAM.
12978 			 * Use the big-endian read routines to preserve
12979 			 * the byte order as it exists in NVRAM.
12980 			 */
12981 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12982 				goto error;
12983 		}
12984 		*vpdlen = len;
12985 	} else {
12986 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12987 		if (IS_ERR(buf))
12988 			return NULL;
12989 	}
12990 
12991 	return buf;
12992 
12993 error:
12994 	kfree(buf);
12995 	return NULL;
12996 }
12997 
12998 #define NVRAM_TEST_SIZE 0x100
12999 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
13000 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
13001 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
13002 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
13003 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
13004 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
13005 #define NVRAM_SELFBOOT_HW_SIZE 0x20
13006 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
13007 
13008 static int tg3_test_nvram(struct tg3 *tp)
13009 {
13010 	u32 csum, magic;
13011 	__be32 *buf;
13012 	int i, j, k, err = 0, size;
13013 	unsigned int len;
13014 
13015 	if (tg3_flag(tp, NO_NVRAM))
13016 		return 0;
13017 
13018 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13019 		return -EIO;
13020 
13021 	if (magic == TG3_EEPROM_MAGIC)
13022 		size = NVRAM_TEST_SIZE;
13023 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
13024 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
13025 		    TG3_EEPROM_SB_FORMAT_1) {
13026 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
13027 			case TG3_EEPROM_SB_REVISION_0:
13028 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
13029 				break;
13030 			case TG3_EEPROM_SB_REVISION_2:
13031 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13032 				break;
13033 			case TG3_EEPROM_SB_REVISION_3:
13034 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13035 				break;
13036 			case TG3_EEPROM_SB_REVISION_4:
13037 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13038 				break;
13039 			case TG3_EEPROM_SB_REVISION_5:
13040 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13041 				break;
13042 			case TG3_EEPROM_SB_REVISION_6:
13043 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13044 				break;
13045 			default:
13046 				return -EIO;
13047 			}
13048 		} else
13049 			return 0;
13050 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13051 		size = NVRAM_SELFBOOT_HW_SIZE;
13052 	else
13053 		return -EIO;
13054 
13055 	buf = kmalloc(size, GFP_KERNEL);
13056 	if (buf == NULL)
13057 		return -ENOMEM;
13058 
13059 	err = -EIO;
13060 	for (i = 0, j = 0; i < size; i += 4, j++) {
13061 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
13062 		if (err)
13063 			break;
13064 	}
13065 	if (i < size)
13066 		goto out;
13067 
13068 	/* Selfboot format */
13069 	magic = be32_to_cpu(buf[0]);
13070 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13071 	    TG3_EEPROM_MAGIC_FW) {
13072 		u8 *buf8 = (u8 *) buf, csum8 = 0;
13073 
13074 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13075 		    TG3_EEPROM_SB_REVISION_2) {
13076 			/* For rev 2, the csum doesn't include the MBA. */
13077 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13078 				csum8 += buf8[i];
13079 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13080 				csum8 += buf8[i];
13081 		} else {
13082 			for (i = 0; i < size; i++)
13083 				csum8 += buf8[i];
13084 		}
13085 
13086 		if (csum8 == 0) {
13087 			err = 0;
13088 			goto out;
13089 		}
13090 
13091 		err = -EIO;
13092 		goto out;
13093 	}
13094 
13095 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13096 	    TG3_EEPROM_MAGIC_HW) {
13097 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13098 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13099 		u8 *buf8 = (u8 *) buf;
13100 
13101 		/* Separate the parity bits and the data bytes.  */
13102 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13103 			if ((i == 0) || (i == 8)) {
13104 				int l;
13105 				u8 msk;
13106 
13107 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13108 					parity[k++] = buf8[i] & msk;
13109 				i++;
13110 			} else if (i == 16) {
13111 				int l;
13112 				u8 msk;
13113 
13114 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13115 					parity[k++] = buf8[i] & msk;
13116 				i++;
13117 
13118 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13119 					parity[k++] = buf8[i] & msk;
13120 				i++;
13121 			}
13122 			data[j++] = buf8[i];
13123 		}
13124 
13125 		err = -EIO;
13126 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13127 			u8 hw8 = hweight8(data[i]);
13128 
13129 			if ((hw8 & 0x1) && parity[i])
13130 				goto out;
13131 			else if (!(hw8 & 0x1) && !parity[i])
13132 				goto out;
13133 		}
13134 		err = 0;
13135 		goto out;
13136 	}
13137 
13138 	err = -EIO;
13139 
13140 	/* Bootstrap checksum at offset 0x10 */
13141 	csum = calc_crc((unsigned char *) buf, 0x10);
13142 
13143 	/* The type of buf is __be32 *, but this value is __le32 */
13144 	if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13145 		goto out;
13146 
13147 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13148 	csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13149 
13150 	/* The type of buf is __be32 *, but this value is __le32 */
13151 	if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13152 		goto out;
13153 
13154 	kfree(buf);
13155 
13156 	buf = tg3_vpd_readblock(tp, &len);
13157 	if (!buf)
13158 		return -ENOMEM;
13159 
13160 	err = pci_vpd_check_csum(buf, len);
13161 	/* go on if no checksum found */
13162 	if (err == 1)
13163 		err = 0;
13164 out:
13165 	kfree(buf);
13166 	return err;
13167 }
13168 
13169 #define TG3_SERDES_TIMEOUT_SEC	2
13170 #define TG3_COPPER_TIMEOUT_SEC	6
13171 
13172 static int tg3_test_link(struct tg3 *tp)
13173 {
13174 	int i, max;
13175 
13176 	if (!netif_running(tp->dev))
13177 		return -ENODEV;
13178 
13179 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13180 		max = TG3_SERDES_TIMEOUT_SEC;
13181 	else
13182 		max = TG3_COPPER_TIMEOUT_SEC;
13183 
13184 	for (i = 0; i < max; i++) {
13185 		if (tp->link_up)
13186 			return 0;
13187 
13188 		if (msleep_interruptible(1000))
13189 			break;
13190 	}
13191 
13192 	return -EIO;
13193 }
13194 
13195 /* Only test the commonly used registers */
13196 static int tg3_test_registers(struct tg3 *tp)
13197 {
13198 	int i, is_5705, is_5750;
13199 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13200 	static struct {
13201 		u16 offset;
13202 		u16 flags;
13203 #define TG3_FL_5705	0x1
13204 #define TG3_FL_NOT_5705	0x2
13205 #define TG3_FL_NOT_5788	0x4
13206 #define TG3_FL_NOT_5750	0x8
13207 		u32 read_mask;
13208 		u32 write_mask;
13209 	} reg_tbl[] = {
13210 		/* MAC Control Registers */
13211 		{ MAC_MODE, TG3_FL_NOT_5705,
13212 			0x00000000, 0x00ef6f8c },
13213 		{ MAC_MODE, TG3_FL_5705,
13214 			0x00000000, 0x01ef6b8c },
13215 		{ MAC_STATUS, TG3_FL_NOT_5705,
13216 			0x03800107, 0x00000000 },
13217 		{ MAC_STATUS, TG3_FL_5705,
13218 			0x03800100, 0x00000000 },
13219 		{ MAC_ADDR_0_HIGH, 0x0000,
13220 			0x00000000, 0x0000ffff },
13221 		{ MAC_ADDR_0_LOW, 0x0000,
13222 			0x00000000, 0xffffffff },
13223 		{ MAC_RX_MTU_SIZE, 0x0000,
13224 			0x00000000, 0x0000ffff },
13225 		{ MAC_TX_MODE, 0x0000,
13226 			0x00000000, 0x00000070 },
13227 		{ MAC_TX_LENGTHS, 0x0000,
13228 			0x00000000, 0x00003fff },
13229 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13230 			0x00000000, 0x000007fc },
13231 		{ MAC_RX_MODE, TG3_FL_5705,
13232 			0x00000000, 0x000007dc },
13233 		{ MAC_HASH_REG_0, 0x0000,
13234 			0x00000000, 0xffffffff },
13235 		{ MAC_HASH_REG_1, 0x0000,
13236 			0x00000000, 0xffffffff },
13237 		{ MAC_HASH_REG_2, 0x0000,
13238 			0x00000000, 0xffffffff },
13239 		{ MAC_HASH_REG_3, 0x0000,
13240 			0x00000000, 0xffffffff },
13241 
13242 		/* Receive Data and Receive BD Initiator Control Registers. */
13243 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13244 			0x00000000, 0xffffffff },
13245 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13246 			0x00000000, 0xffffffff },
13247 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13248 			0x00000000, 0x00000003 },
13249 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13250 			0x00000000, 0xffffffff },
13251 		{ RCVDBDI_STD_BD+0, 0x0000,
13252 			0x00000000, 0xffffffff },
13253 		{ RCVDBDI_STD_BD+4, 0x0000,
13254 			0x00000000, 0xffffffff },
13255 		{ RCVDBDI_STD_BD+8, 0x0000,
13256 			0x00000000, 0xffff0002 },
13257 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13258 			0x00000000, 0xffffffff },
13259 
13260 		/* Receive BD Initiator Control Registers. */
13261 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13262 			0x00000000, 0xffffffff },
13263 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13264 			0x00000000, 0x000003ff },
13265 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13266 			0x00000000, 0xffffffff },
13267 
13268 		/* Host Coalescing Control Registers. */
13269 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13270 			0x00000000, 0x00000004 },
13271 		{ HOSTCC_MODE, TG3_FL_5705,
13272 			0x00000000, 0x000000f6 },
13273 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13274 			0x00000000, 0xffffffff },
13275 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13276 			0x00000000, 0x000003ff },
13277 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13278 			0x00000000, 0xffffffff },
13279 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13280 			0x00000000, 0x000003ff },
13281 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13282 			0x00000000, 0xffffffff },
13283 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13284 			0x00000000, 0x000000ff },
13285 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13286 			0x00000000, 0xffffffff },
13287 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13288 			0x00000000, 0x000000ff },
13289 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13290 			0x00000000, 0xffffffff },
13291 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13292 			0x00000000, 0xffffffff },
13293 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13294 			0x00000000, 0xffffffff },
13295 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13296 			0x00000000, 0x000000ff },
13297 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13298 			0x00000000, 0xffffffff },
13299 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13300 			0x00000000, 0x000000ff },
13301 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13302 			0x00000000, 0xffffffff },
13303 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13304 			0x00000000, 0xffffffff },
13305 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13306 			0x00000000, 0xffffffff },
13307 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13308 			0x00000000, 0xffffffff },
13309 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13310 			0x00000000, 0xffffffff },
13311 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13312 			0xffffffff, 0x00000000 },
13313 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13314 			0xffffffff, 0x00000000 },
13315 
13316 		/* Buffer Manager Control Registers. */
13317 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13318 			0x00000000, 0x007fff80 },
13319 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13320 			0x00000000, 0x007fffff },
13321 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13322 			0x00000000, 0x0000003f },
13323 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13324 			0x00000000, 0x000001ff },
13325 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13326 			0x00000000, 0x000001ff },
13327 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13328 			0xffffffff, 0x00000000 },
13329 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13330 			0xffffffff, 0x00000000 },
13331 
13332 		/* Mailbox Registers */
13333 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13334 			0x00000000, 0x000001ff },
13335 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13336 			0x00000000, 0x000001ff },
13337 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13338 			0x00000000, 0x000007ff },
13339 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13340 			0x00000000, 0x000001ff },
13341 
13342 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13343 	};
13344 
13345 	is_5705 = is_5750 = 0;
13346 	if (tg3_flag(tp, 5705_PLUS)) {
13347 		is_5705 = 1;
13348 		if (tg3_flag(tp, 5750_PLUS))
13349 			is_5750 = 1;
13350 	}
13351 
13352 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13353 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13354 			continue;
13355 
13356 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13357 			continue;
13358 
13359 		if (tg3_flag(tp, IS_5788) &&
13360 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13361 			continue;
13362 
13363 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13364 			continue;
13365 
13366 		offset = (u32) reg_tbl[i].offset;
13367 		read_mask = reg_tbl[i].read_mask;
13368 		write_mask = reg_tbl[i].write_mask;
13369 
13370 		/* Save the original register content */
13371 		save_val = tr32(offset);
13372 
13373 		/* Determine the read-only value. */
13374 		read_val = save_val & read_mask;
13375 
13376 		/* Write zero to the register, then make sure the read-only bits
13377 		 * are not changed and the read/write bits are all zeros.
13378 		 */
13379 		tw32(offset, 0);
13380 
13381 		val = tr32(offset);
13382 
13383 		/* Test the read-only and read/write bits. */
13384 		if (((val & read_mask) != read_val) || (val & write_mask))
13385 			goto out;
13386 
13387 		/* Write ones to all the bits defined by RdMask and WrMask, then
13388 		 * make sure the read-only bits are not changed and the
13389 		 * read/write bits are all ones.
13390 		 */
13391 		tw32(offset, read_mask | write_mask);
13392 
13393 		val = tr32(offset);
13394 
13395 		/* Test the read-only bits. */
13396 		if ((val & read_mask) != read_val)
13397 			goto out;
13398 
13399 		/* Test the read/write bits. */
13400 		if ((val & write_mask) != write_mask)
13401 			goto out;
13402 
13403 		tw32(offset, save_val);
13404 	}
13405 
13406 	return 0;
13407 
13408 out:
13409 	if (netif_msg_hw(tp))
13410 		netdev_err(tp->dev,
13411 			   "Register test failed at offset %x\n", offset);
13412 	tw32(offset, save_val);
13413 	return -EIO;
13414 }
13415 
13416 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13417 {
13418 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13419 	int i;
13420 	u32 j;
13421 
13422 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13423 		for (j = 0; j < len; j += 4) {
13424 			u32 val;
13425 
13426 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13427 			tg3_read_mem(tp, offset + j, &val);
13428 			if (val != test_pattern[i])
13429 				return -EIO;
13430 		}
13431 	}
13432 	return 0;
13433 }
13434 
13435 static int tg3_test_memory(struct tg3 *tp)
13436 {
13437 	static struct mem_entry {
13438 		u32 offset;
13439 		u32 len;
13440 	} mem_tbl_570x[] = {
13441 		{ 0x00000000, 0x00b50},
13442 		{ 0x00002000, 0x1c000},
13443 		{ 0xffffffff, 0x00000}
13444 	}, mem_tbl_5705[] = {
13445 		{ 0x00000100, 0x0000c},
13446 		{ 0x00000200, 0x00008},
13447 		{ 0x00004000, 0x00800},
13448 		{ 0x00006000, 0x01000},
13449 		{ 0x00008000, 0x02000},
13450 		{ 0x00010000, 0x0e000},
13451 		{ 0xffffffff, 0x00000}
13452 	}, mem_tbl_5755[] = {
13453 		{ 0x00000200, 0x00008},
13454 		{ 0x00004000, 0x00800},
13455 		{ 0x00006000, 0x00800},
13456 		{ 0x00008000, 0x02000},
13457 		{ 0x00010000, 0x0c000},
13458 		{ 0xffffffff, 0x00000}
13459 	}, mem_tbl_5906[] = {
13460 		{ 0x00000200, 0x00008},
13461 		{ 0x00004000, 0x00400},
13462 		{ 0x00006000, 0x00400},
13463 		{ 0x00008000, 0x01000},
13464 		{ 0x00010000, 0x01000},
13465 		{ 0xffffffff, 0x00000}
13466 	}, mem_tbl_5717[] = {
13467 		{ 0x00000200, 0x00008},
13468 		{ 0x00010000, 0x0a000},
13469 		{ 0x00020000, 0x13c00},
13470 		{ 0xffffffff, 0x00000}
13471 	}, mem_tbl_57765[] = {
13472 		{ 0x00000200, 0x00008},
13473 		{ 0x00004000, 0x00800},
13474 		{ 0x00006000, 0x09800},
13475 		{ 0x00010000, 0x0a000},
13476 		{ 0xffffffff, 0x00000}
13477 	};
13478 	struct mem_entry *mem_tbl;
13479 	int err = 0;
13480 	int i;
13481 
13482 	if (tg3_flag(tp, 5717_PLUS))
13483 		mem_tbl = mem_tbl_5717;
13484 	else if (tg3_flag(tp, 57765_CLASS) ||
13485 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13486 		mem_tbl = mem_tbl_57765;
13487 	else if (tg3_flag(tp, 5755_PLUS))
13488 		mem_tbl = mem_tbl_5755;
13489 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13490 		mem_tbl = mem_tbl_5906;
13491 	else if (tg3_flag(tp, 5705_PLUS))
13492 		mem_tbl = mem_tbl_5705;
13493 	else
13494 		mem_tbl = mem_tbl_570x;
13495 
13496 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13497 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13498 		if (err)
13499 			break;
13500 	}
13501 
13502 	return err;
13503 }
13504 
13505 #define TG3_TSO_MSS		500
13506 
13507 #define TG3_TSO_IP_HDR_LEN	20
13508 #define TG3_TSO_TCP_HDR_LEN	20
13509 #define TG3_TSO_TCP_OPT_LEN	12
13510 
13511 static const u8 tg3_tso_header[] = {
13512 0x08, 0x00,
13513 0x45, 0x00, 0x00, 0x00,
13514 0x00, 0x00, 0x40, 0x00,
13515 0x40, 0x06, 0x00, 0x00,
13516 0x0a, 0x00, 0x00, 0x01,
13517 0x0a, 0x00, 0x00, 0x02,
13518 0x0d, 0x00, 0xe0, 0x00,
13519 0x00, 0x00, 0x01, 0x00,
13520 0x00, 0x00, 0x02, 0x00,
13521 0x80, 0x10, 0x10, 0x00,
13522 0x14, 0x09, 0x00, 0x00,
13523 0x01, 0x01, 0x08, 0x0a,
13524 0x11, 0x11, 0x11, 0x11,
13525 0x11, 0x11, 0x11, 0x11,
13526 };
13527 
13528 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13529 {
13530 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13531 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13532 	u32 budget;
13533 	struct sk_buff *skb;
13534 	u8 *tx_data, *rx_data;
13535 	dma_addr_t map;
13536 	int num_pkts, tx_len, rx_len, i, err;
13537 	struct tg3_rx_buffer_desc *desc;
13538 	struct tg3_napi *tnapi, *rnapi;
13539 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13540 
13541 	tnapi = &tp->napi[0];
13542 	rnapi = &tp->napi[0];
13543 	if (tp->irq_cnt > 1) {
13544 		if (tg3_flag(tp, ENABLE_RSS))
13545 			rnapi = &tp->napi[1];
13546 		if (tg3_flag(tp, ENABLE_TSS))
13547 			tnapi = &tp->napi[1];
13548 	}
13549 	coal_now = tnapi->coal_now | rnapi->coal_now;
13550 
13551 	err = -EIO;
13552 
13553 	tx_len = pktsz;
13554 	skb = netdev_alloc_skb(tp->dev, tx_len);
13555 	if (!skb)
13556 		return -ENOMEM;
13557 
13558 	tx_data = skb_put(skb, tx_len);
13559 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13560 	memset(tx_data + ETH_ALEN, 0x0, 8);
13561 
13562 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13563 
13564 	if (tso_loopback) {
13565 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13566 
13567 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13568 			      TG3_TSO_TCP_OPT_LEN;
13569 
13570 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13571 		       sizeof(tg3_tso_header));
13572 		mss = TG3_TSO_MSS;
13573 
13574 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13575 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13576 
13577 		/* Set the total length field in the IP header */
13578 		iph->tot_len = htons((u16)(mss + hdr_len));
13579 
13580 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13581 			      TXD_FLAG_CPU_POST_DMA);
13582 
13583 		if (tg3_flag(tp, HW_TSO_1) ||
13584 		    tg3_flag(tp, HW_TSO_2) ||
13585 		    tg3_flag(tp, HW_TSO_3)) {
13586 			struct tcphdr *th;
13587 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13588 			th = (struct tcphdr *)&tx_data[val];
13589 			th->check = 0;
13590 		} else
13591 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13592 
13593 		if (tg3_flag(tp, HW_TSO_3)) {
13594 			mss |= (hdr_len & 0xc) << 12;
13595 			if (hdr_len & 0x10)
13596 				base_flags |= 0x00000010;
13597 			base_flags |= (hdr_len & 0x3e0) << 5;
13598 		} else if (tg3_flag(tp, HW_TSO_2))
13599 			mss |= hdr_len << 9;
13600 		else if (tg3_flag(tp, HW_TSO_1) ||
13601 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13602 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13603 		} else {
13604 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13605 		}
13606 
13607 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13608 	} else {
13609 		num_pkts = 1;
13610 		data_off = ETH_HLEN;
13611 
13612 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13613 		    tx_len > VLAN_ETH_FRAME_LEN)
13614 			base_flags |= TXD_FLAG_JMB_PKT;
13615 	}
13616 
13617 	for (i = data_off; i < tx_len; i++)
13618 		tx_data[i] = (u8) (i & 0xff);
13619 
13620 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13621 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13622 		dev_kfree_skb(skb);
13623 		return -EIO;
13624 	}
13625 
13626 	val = tnapi->tx_prod;
13627 	tnapi->tx_buffers[val].skb = skb;
13628 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13629 
13630 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13631 	       rnapi->coal_now);
13632 
13633 	udelay(10);
13634 
13635 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13636 
13637 	budget = tg3_tx_avail(tnapi);
13638 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13639 			    base_flags | TXD_FLAG_END, mss, 0)) {
13640 		tnapi->tx_buffers[val].skb = NULL;
13641 		dev_kfree_skb(skb);
13642 		return -EIO;
13643 	}
13644 
13645 	tnapi->tx_prod++;
13646 
13647 	/* Sync BD data before updating mailbox */
13648 	wmb();
13649 
13650 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13651 	tr32_mailbox(tnapi->prodmbox);
13652 
13653 	udelay(10);
13654 
13655 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13656 	for (i = 0; i < 35; i++) {
13657 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13658 		       coal_now);
13659 
13660 		udelay(10);
13661 
13662 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13663 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13664 		if ((tx_idx == tnapi->tx_prod) &&
13665 		    (rx_idx == (rx_start_idx + num_pkts)))
13666 			break;
13667 	}
13668 
13669 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13670 	dev_kfree_skb(skb);
13671 
13672 	if (tx_idx != tnapi->tx_prod)
13673 		goto out;
13674 
13675 	if (rx_idx != rx_start_idx + num_pkts)
13676 		goto out;
13677 
13678 	val = data_off;
13679 	while (rx_idx != rx_start_idx) {
13680 		desc = &rnapi->rx_rcb[rx_start_idx++];
13681 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13682 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13683 
13684 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13685 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13686 			goto out;
13687 
13688 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13689 			 - ETH_FCS_LEN;
13690 
13691 		if (!tso_loopback) {
13692 			if (rx_len != tx_len)
13693 				goto out;
13694 
13695 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13696 				if (opaque_key != RXD_OPAQUE_RING_STD)
13697 					goto out;
13698 			} else {
13699 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13700 					goto out;
13701 			}
13702 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13703 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13704 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13705 			goto out;
13706 		}
13707 
13708 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13709 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13710 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13711 					     mapping);
13712 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13713 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13714 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13715 					     mapping);
13716 		} else
13717 			goto out;
13718 
13719 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13720 					DMA_FROM_DEVICE);
13721 
13722 		rx_data += TG3_RX_OFFSET(tp);
13723 		for (i = data_off; i < rx_len; i++, val++) {
13724 			if (*(rx_data + i) != (u8) (val & 0xff))
13725 				goto out;
13726 		}
13727 	}
13728 
13729 	err = 0;
13730 
13731 	/* tg3_free_rings will unmap and free the rx_data */
13732 out:
13733 	return err;
13734 }
13735 
13736 #define TG3_STD_LOOPBACK_FAILED		1
13737 #define TG3_JMB_LOOPBACK_FAILED		2
13738 #define TG3_TSO_LOOPBACK_FAILED		4
13739 #define TG3_LOOPBACK_FAILED \
13740 	(TG3_STD_LOOPBACK_FAILED | \
13741 	 TG3_JMB_LOOPBACK_FAILED | \
13742 	 TG3_TSO_LOOPBACK_FAILED)
13743 
13744 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13745 {
13746 	int err = -EIO;
13747 	u32 eee_cap;
13748 	u32 jmb_pkt_sz = 9000;
13749 
13750 	if (tp->dma_limit)
13751 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13752 
13753 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13754 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13755 
13756 	if (!netif_running(tp->dev)) {
13757 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13758 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13759 		if (do_extlpbk)
13760 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13761 		goto done;
13762 	}
13763 
13764 	err = tg3_reset_hw(tp, true);
13765 	if (err) {
13766 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13767 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13768 		if (do_extlpbk)
13769 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13770 		goto done;
13771 	}
13772 
13773 	if (tg3_flag(tp, ENABLE_RSS)) {
13774 		int i;
13775 
13776 		/* Reroute all rx packets to the 1st queue */
13777 		for (i = MAC_RSS_INDIR_TBL_0;
13778 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13779 			tw32(i, 0x0);
13780 	}
13781 
13782 	/* HW errata - mac loopback fails in some cases on 5780.
13783 	 * Normal traffic and PHY loopback are not affected by
13784 	 * errata.  Also, the MAC loopback test is deprecated for
13785 	 * all newer ASIC revisions.
13786 	 */
13787 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13788 	    !tg3_flag(tp, CPMU_PRESENT)) {
13789 		tg3_mac_loopback(tp, true);
13790 
13791 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13792 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13793 
13794 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13795 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13796 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13797 
13798 		tg3_mac_loopback(tp, false);
13799 	}
13800 
13801 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13802 	    !tg3_flag(tp, USE_PHYLIB)) {
13803 		int i;
13804 
13805 		tg3_phy_lpbk_set(tp, 0, false);
13806 
13807 		/* Wait for link */
13808 		for (i = 0; i < 100; i++) {
13809 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13810 				break;
13811 			mdelay(1);
13812 		}
13813 
13814 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13815 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13816 		if (tg3_flag(tp, TSO_CAPABLE) &&
13817 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13818 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13819 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13820 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13821 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13822 
13823 		if (do_extlpbk) {
13824 			tg3_phy_lpbk_set(tp, 0, true);
13825 
13826 			/* All link indications report up, but the hardware
13827 			 * isn't really ready for about 20 msec.  Double it
13828 			 * to be sure.
13829 			 */
13830 			mdelay(40);
13831 
13832 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13833 				data[TG3_EXT_LOOPB_TEST] |=
13834 							TG3_STD_LOOPBACK_FAILED;
13835 			if (tg3_flag(tp, TSO_CAPABLE) &&
13836 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13837 				data[TG3_EXT_LOOPB_TEST] |=
13838 							TG3_TSO_LOOPBACK_FAILED;
13839 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13840 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13841 				data[TG3_EXT_LOOPB_TEST] |=
13842 							TG3_JMB_LOOPBACK_FAILED;
13843 		}
13844 
13845 		/* Re-enable gphy autopowerdown. */
13846 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13847 			tg3_phy_toggle_apd(tp, true);
13848 	}
13849 
13850 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13851 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13852 
13853 done:
13854 	tp->phy_flags |= eee_cap;
13855 
13856 	return err;
13857 }
13858 
13859 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13860 			  u64 *data)
13861 {
13862 	struct tg3 *tp = netdev_priv(dev);
13863 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13864 
13865 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13866 		if (tg3_power_up(tp)) {
13867 			etest->flags |= ETH_TEST_FL_FAILED;
13868 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13869 			return;
13870 		}
13871 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13872 	}
13873 
13874 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13875 
13876 	if (tg3_test_nvram(tp) != 0) {
13877 		etest->flags |= ETH_TEST_FL_FAILED;
13878 		data[TG3_NVRAM_TEST] = 1;
13879 	}
13880 	if (!doextlpbk && tg3_test_link(tp)) {
13881 		etest->flags |= ETH_TEST_FL_FAILED;
13882 		data[TG3_LINK_TEST] = 1;
13883 	}
13884 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13885 		int err, err2 = 0, irq_sync = 0;
13886 
13887 		if (netif_running(dev)) {
13888 			tg3_phy_stop(tp);
13889 			tg3_netif_stop(tp);
13890 			irq_sync = 1;
13891 		}
13892 
13893 		tg3_full_lock(tp, irq_sync);
13894 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13895 		err = tg3_nvram_lock(tp);
13896 		tg3_halt_cpu(tp, RX_CPU_BASE);
13897 		if (!tg3_flag(tp, 5705_PLUS))
13898 			tg3_halt_cpu(tp, TX_CPU_BASE);
13899 		if (!err)
13900 			tg3_nvram_unlock(tp);
13901 
13902 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13903 			tg3_phy_reset(tp);
13904 
13905 		if (tg3_test_registers(tp) != 0) {
13906 			etest->flags |= ETH_TEST_FL_FAILED;
13907 			data[TG3_REGISTER_TEST] = 1;
13908 		}
13909 
13910 		if (tg3_test_memory(tp) != 0) {
13911 			etest->flags |= ETH_TEST_FL_FAILED;
13912 			data[TG3_MEMORY_TEST] = 1;
13913 		}
13914 
13915 		if (doextlpbk)
13916 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13917 
13918 		if (tg3_test_loopback(tp, data, doextlpbk))
13919 			etest->flags |= ETH_TEST_FL_FAILED;
13920 
13921 		tg3_full_unlock(tp);
13922 
13923 		if (tg3_test_interrupt(tp) != 0) {
13924 			etest->flags |= ETH_TEST_FL_FAILED;
13925 			data[TG3_INTERRUPT_TEST] = 1;
13926 		}
13927 
13928 		netdev_lock(dev);
13929 		tg3_full_lock(tp, 0);
13930 
13931 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13932 		if (netif_running(dev)) {
13933 			tg3_flag_set(tp, INIT_COMPLETE);
13934 			err2 = tg3_restart_hw(tp, true);
13935 			if (!err2)
13936 				tg3_netif_start(tp);
13937 		}
13938 
13939 		tg3_full_unlock(tp);
13940 		netdev_unlock(dev);
13941 
13942 		if (irq_sync && !err2)
13943 			tg3_phy_start(tp);
13944 	}
13945 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13946 		tg3_power_down_prepare(tp);
13947 
13948 }
13949 
13950 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13951 {
13952 	struct tg3 *tp = netdev_priv(dev);
13953 	struct hwtstamp_config stmpconf;
13954 
13955 	if (!tg3_flag(tp, PTP_CAPABLE))
13956 		return -EOPNOTSUPP;
13957 
13958 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13959 		return -EFAULT;
13960 
13961 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13962 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13963 		return -ERANGE;
13964 
13965 	switch (stmpconf.rx_filter) {
13966 	case HWTSTAMP_FILTER_NONE:
13967 		tp->rxptpctl = 0;
13968 		break;
13969 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13970 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13971 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13972 		break;
13973 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13974 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13975 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13976 		break;
13977 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13978 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13979 			       TG3_RX_PTP_CTL_DELAY_REQ;
13980 		break;
13981 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13982 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13983 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13984 		break;
13985 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13986 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13987 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13988 		break;
13989 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13990 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13991 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13992 		break;
13993 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13994 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13995 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13996 		break;
13997 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13998 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13999 			       TG3_RX_PTP_CTL_SYNC_EVNT;
14000 		break;
14001 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
14002 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14003 			       TG3_RX_PTP_CTL_SYNC_EVNT;
14004 		break;
14005 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
14006 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
14007 			       TG3_RX_PTP_CTL_DELAY_REQ;
14008 		break;
14009 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
14010 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
14011 			       TG3_RX_PTP_CTL_DELAY_REQ;
14012 		break;
14013 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
14014 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14015 			       TG3_RX_PTP_CTL_DELAY_REQ;
14016 		break;
14017 	default:
14018 		return -ERANGE;
14019 	}
14020 
14021 	if (netif_running(dev) && tp->rxptpctl)
14022 		tw32(TG3_RX_PTP_CTL,
14023 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14024 
14025 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
14026 		tg3_flag_set(tp, TX_TSTAMP_EN);
14027 	else
14028 		tg3_flag_clear(tp, TX_TSTAMP_EN);
14029 
14030 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14031 		-EFAULT : 0;
14032 }
14033 
14034 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
14035 {
14036 	struct tg3 *tp = netdev_priv(dev);
14037 	struct hwtstamp_config stmpconf;
14038 
14039 	if (!tg3_flag(tp, PTP_CAPABLE))
14040 		return -EOPNOTSUPP;
14041 
14042 	stmpconf.flags = 0;
14043 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14044 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
14045 
14046 	switch (tp->rxptpctl) {
14047 	case 0:
14048 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14049 		break;
14050 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14051 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14052 		break;
14053 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14054 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14055 		break;
14056 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14057 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14058 		break;
14059 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14060 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14061 		break;
14062 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14063 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14064 		break;
14065 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14066 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14067 		break;
14068 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14069 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14070 		break;
14071 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14072 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14073 		break;
14074 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14075 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14076 		break;
14077 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14078 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14079 		break;
14080 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14081 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14082 		break;
14083 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14084 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14085 		break;
14086 	default:
14087 		WARN_ON_ONCE(1);
14088 		return -ERANGE;
14089 	}
14090 
14091 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14092 		-EFAULT : 0;
14093 }
14094 
14095 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14096 {
14097 	struct mii_ioctl_data *data = if_mii(ifr);
14098 	struct tg3 *tp = netdev_priv(dev);
14099 	int err;
14100 
14101 	if (tg3_flag(tp, USE_PHYLIB)) {
14102 		struct phy_device *phydev;
14103 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14104 			return -EAGAIN;
14105 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14106 		return phy_mii_ioctl(phydev, ifr, cmd);
14107 	}
14108 
14109 	switch (cmd) {
14110 	case SIOCGMIIPHY:
14111 		data->phy_id = tp->phy_addr;
14112 
14113 		fallthrough;
14114 	case SIOCGMIIREG: {
14115 		u32 mii_regval;
14116 
14117 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14118 			break;			/* We have no PHY */
14119 
14120 		if (!netif_running(dev))
14121 			return -EAGAIN;
14122 
14123 		spin_lock_bh(&tp->lock);
14124 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14125 				    data->reg_num & 0x1f, &mii_regval);
14126 		spin_unlock_bh(&tp->lock);
14127 
14128 		data->val_out = mii_regval;
14129 
14130 		return err;
14131 	}
14132 
14133 	case SIOCSMIIREG:
14134 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14135 			break;			/* We have no PHY */
14136 
14137 		if (!netif_running(dev))
14138 			return -EAGAIN;
14139 
14140 		spin_lock_bh(&tp->lock);
14141 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14142 				     data->reg_num & 0x1f, data->val_in);
14143 		spin_unlock_bh(&tp->lock);
14144 
14145 		return err;
14146 
14147 	case SIOCSHWTSTAMP:
14148 		return tg3_hwtstamp_set(dev, ifr);
14149 
14150 	case SIOCGHWTSTAMP:
14151 		return tg3_hwtstamp_get(dev, ifr);
14152 
14153 	default:
14154 		/* do nothing */
14155 		break;
14156 	}
14157 	return -EOPNOTSUPP;
14158 }
14159 
14160 static int tg3_get_coalesce(struct net_device *dev,
14161 			    struct ethtool_coalesce *ec,
14162 			    struct kernel_ethtool_coalesce *kernel_coal,
14163 			    struct netlink_ext_ack *extack)
14164 {
14165 	struct tg3 *tp = netdev_priv(dev);
14166 
14167 	memcpy(ec, &tp->coal, sizeof(*ec));
14168 	return 0;
14169 }
14170 
14171 static int tg3_set_coalesce(struct net_device *dev,
14172 			    struct ethtool_coalesce *ec,
14173 			    struct kernel_ethtool_coalesce *kernel_coal,
14174 			    struct netlink_ext_ack *extack)
14175 {
14176 	struct tg3 *tp = netdev_priv(dev);
14177 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14178 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14179 
14180 	if (!tg3_flag(tp, 5705_PLUS)) {
14181 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14182 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14183 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14184 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14185 	}
14186 
14187 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14188 	    (!ec->rx_coalesce_usecs) ||
14189 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14190 	    (!ec->tx_coalesce_usecs) ||
14191 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14192 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14193 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14194 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14195 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14196 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14197 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14198 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14199 		return -EINVAL;
14200 
14201 	/* Only copy relevant parameters, ignore all others. */
14202 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14203 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14204 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14205 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14206 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14207 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14208 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14209 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14210 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14211 
14212 	if (netif_running(dev)) {
14213 		tg3_full_lock(tp, 0);
14214 		__tg3_set_coalesce(tp, &tp->coal);
14215 		tg3_full_unlock(tp);
14216 	}
14217 	return 0;
14218 }
14219 
14220 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14221 {
14222 	struct tg3 *tp = netdev_priv(dev);
14223 
14224 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14225 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14226 		return -EOPNOTSUPP;
14227 	}
14228 
14229 	if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14230 		netdev_warn(tp->dev,
14231 			    "Direct manipulation of EEE advertisement is not supported\n");
14232 		return -EINVAL;
14233 	}
14234 
14235 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14236 		netdev_warn(tp->dev,
14237 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14238 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14239 		return -EINVAL;
14240 	}
14241 
14242 	tp->eee.eee_enabled = edata->eee_enabled;
14243 	tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14244 	tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14245 
14246 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14247 	tg3_warn_mgmt_link_flap(tp);
14248 
14249 	if (netif_running(tp->dev)) {
14250 		tg3_full_lock(tp, 0);
14251 		tg3_setup_eee(tp);
14252 		tg3_phy_reset(tp);
14253 		tg3_full_unlock(tp);
14254 	}
14255 
14256 	return 0;
14257 }
14258 
14259 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14260 {
14261 	struct tg3 *tp = netdev_priv(dev);
14262 
14263 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14264 		netdev_warn(tp->dev,
14265 			    "Board does not support EEE!\n");
14266 		return -EOPNOTSUPP;
14267 	}
14268 
14269 	*edata = tp->eee;
14270 	return 0;
14271 }
14272 
14273 static const struct ethtool_ops tg3_ethtool_ops = {
14274 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14275 				     ETHTOOL_COALESCE_MAX_FRAMES |
14276 				     ETHTOOL_COALESCE_USECS_IRQ |
14277 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14278 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14279 	.get_drvinfo		= tg3_get_drvinfo,
14280 	.get_regs_len		= tg3_get_regs_len,
14281 	.get_regs		= tg3_get_regs,
14282 	.get_wol		= tg3_get_wol,
14283 	.set_wol		= tg3_set_wol,
14284 	.get_msglevel		= tg3_get_msglevel,
14285 	.set_msglevel		= tg3_set_msglevel,
14286 	.nway_reset		= tg3_nway_reset,
14287 	.get_link		= ethtool_op_get_link,
14288 	.get_eeprom_len		= tg3_get_eeprom_len,
14289 	.get_eeprom		= tg3_get_eeprom,
14290 	.set_eeprom		= tg3_set_eeprom,
14291 	.get_ringparam		= tg3_get_ringparam,
14292 	.set_ringparam		= tg3_set_ringparam,
14293 	.get_pauseparam		= tg3_get_pauseparam,
14294 	.set_pauseparam		= tg3_set_pauseparam,
14295 	.self_test		= tg3_self_test,
14296 	.get_strings		= tg3_get_strings,
14297 	.set_phys_id		= tg3_set_phys_id,
14298 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14299 	.get_coalesce		= tg3_get_coalesce,
14300 	.set_coalesce		= tg3_set_coalesce,
14301 	.get_sset_count		= tg3_get_sset_count,
14302 	.get_rxnfc		= tg3_get_rxnfc,
14303 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14304 	.get_rxfh		= tg3_get_rxfh,
14305 	.set_rxfh		= tg3_set_rxfh,
14306 	.get_channels		= tg3_get_channels,
14307 	.set_channels		= tg3_set_channels,
14308 	.get_ts_info		= tg3_get_ts_info,
14309 	.get_eee		= tg3_get_eee,
14310 	.set_eee		= tg3_set_eee,
14311 	.get_link_ksettings	= tg3_get_link_ksettings,
14312 	.set_link_ksettings	= tg3_set_link_ksettings,
14313 };
14314 
14315 static void tg3_get_stats64(struct net_device *dev,
14316 			    struct rtnl_link_stats64 *stats)
14317 {
14318 	struct tg3 *tp = netdev_priv(dev);
14319 
14320 	spin_lock_bh(&tp->lock);
14321 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14322 		*stats = tp->net_stats_prev;
14323 		spin_unlock_bh(&tp->lock);
14324 		return;
14325 	}
14326 
14327 	tg3_get_nstats(tp, stats);
14328 	spin_unlock_bh(&tp->lock);
14329 }
14330 
14331 static void tg3_set_rx_mode(struct net_device *dev)
14332 {
14333 	struct tg3 *tp = netdev_priv(dev);
14334 
14335 	if (!netif_running(dev))
14336 		return;
14337 
14338 	tg3_full_lock(tp, 0);
14339 	__tg3_set_rx_mode(dev);
14340 	tg3_full_unlock(tp);
14341 }
14342 
14343 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14344 			       int new_mtu)
14345 {
14346 	WRITE_ONCE(dev->mtu, new_mtu);
14347 
14348 	if (new_mtu > ETH_DATA_LEN) {
14349 		if (tg3_flag(tp, 5780_CLASS)) {
14350 			netdev_update_features(dev);
14351 			tg3_flag_clear(tp, TSO_CAPABLE);
14352 		} else {
14353 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14354 		}
14355 	} else {
14356 		if (tg3_flag(tp, 5780_CLASS)) {
14357 			tg3_flag_set(tp, TSO_CAPABLE);
14358 			netdev_update_features(dev);
14359 		}
14360 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14361 	}
14362 }
14363 
14364 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14365 {
14366 	struct tg3 *tp = netdev_priv(dev);
14367 	int err;
14368 	bool reset_phy = false;
14369 
14370 	if (!netif_running(dev)) {
14371 		/* We'll just catch it later when the
14372 		 * device is up'd.
14373 		 */
14374 		tg3_set_mtu(dev, tp, new_mtu);
14375 		return 0;
14376 	}
14377 
14378 	tg3_phy_stop(tp);
14379 
14380 	tg3_netif_stop(tp);
14381 
14382 	tg3_set_mtu(dev, tp, new_mtu);
14383 
14384 	netdev_lock(dev);
14385 	tg3_full_lock(tp, 1);
14386 
14387 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14388 
14389 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14390 	 * breaks all requests to 256 bytes.
14391 	 */
14392 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14393 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14394 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14395 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14396 		reset_phy = true;
14397 
14398 	err = tg3_restart_hw(tp, reset_phy);
14399 
14400 	if (!err)
14401 		tg3_netif_start(tp);
14402 
14403 	tg3_full_unlock(tp);
14404 	netdev_unlock(dev);
14405 
14406 	if (!err)
14407 		tg3_phy_start(tp);
14408 
14409 	return err;
14410 }
14411 
14412 static const struct net_device_ops tg3_netdev_ops = {
14413 	.ndo_open		= tg3_open,
14414 	.ndo_stop		= tg3_close,
14415 	.ndo_start_xmit		= tg3_start_xmit,
14416 	.ndo_get_stats64	= tg3_get_stats64,
14417 	.ndo_validate_addr	= eth_validate_addr,
14418 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14419 	.ndo_set_mac_address	= tg3_set_mac_addr,
14420 	.ndo_eth_ioctl		= tg3_ioctl,
14421 	.ndo_tx_timeout		= tg3_tx_timeout,
14422 	.ndo_change_mtu		= tg3_change_mtu,
14423 	.ndo_fix_features	= tg3_fix_features,
14424 	.ndo_set_features	= tg3_set_features,
14425 #ifdef CONFIG_NET_POLL_CONTROLLER
14426 	.ndo_poll_controller	= tg3_poll_controller,
14427 #endif
14428 };
14429 
14430 static void tg3_get_eeprom_size(struct tg3 *tp)
14431 {
14432 	u32 cursize, val, magic;
14433 
14434 	tp->nvram_size = EEPROM_CHIP_SIZE;
14435 
14436 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14437 		return;
14438 
14439 	if ((magic != TG3_EEPROM_MAGIC) &&
14440 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14441 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14442 		return;
14443 
14444 	/*
14445 	 * Size the chip by reading offsets at increasing powers of two.
14446 	 * When we encounter our validation signature, we know the addressing
14447 	 * has wrapped around, and thus have our chip size.
14448 	 */
14449 	cursize = 0x10;
14450 
14451 	while (cursize < tp->nvram_size) {
14452 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14453 			return;
14454 
14455 		if (val == magic)
14456 			break;
14457 
14458 		cursize <<= 1;
14459 	}
14460 
14461 	tp->nvram_size = cursize;
14462 }
14463 
14464 static void tg3_get_nvram_size(struct tg3 *tp)
14465 {
14466 	u32 val;
14467 
14468 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14469 		return;
14470 
14471 	/* Selfboot format */
14472 	if (val != TG3_EEPROM_MAGIC) {
14473 		tg3_get_eeprom_size(tp);
14474 		return;
14475 	}
14476 
14477 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14478 		if (val != 0) {
14479 			/* This is confusing.  We want to operate on the
14480 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14481 			 * call will read from NVRAM and byteswap the data
14482 			 * according to the byteswapping settings for all
14483 			 * other register accesses.  This ensures the data we
14484 			 * want will always reside in the lower 16-bits.
14485 			 * However, the data in NVRAM is in LE format, which
14486 			 * means the data from the NVRAM read will always be
14487 			 * opposite the endianness of the CPU.  The 16-bit
14488 			 * byteswap then brings the data to CPU endianness.
14489 			 */
14490 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14491 			return;
14492 		}
14493 	}
14494 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14495 }
14496 
14497 static void tg3_get_nvram_info(struct tg3 *tp)
14498 {
14499 	u32 nvcfg1;
14500 
14501 	nvcfg1 = tr32(NVRAM_CFG1);
14502 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14503 		tg3_flag_set(tp, FLASH);
14504 	} else {
14505 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14506 		tw32(NVRAM_CFG1, nvcfg1);
14507 	}
14508 
14509 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14510 	    tg3_flag(tp, 5780_CLASS)) {
14511 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14512 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14513 			tp->nvram_jedecnum = JEDEC_ATMEL;
14514 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14515 			tg3_flag_set(tp, NVRAM_BUFFERED);
14516 			break;
14517 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14518 			tp->nvram_jedecnum = JEDEC_ATMEL;
14519 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14520 			break;
14521 		case FLASH_VENDOR_ATMEL_EEPROM:
14522 			tp->nvram_jedecnum = JEDEC_ATMEL;
14523 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14524 			tg3_flag_set(tp, NVRAM_BUFFERED);
14525 			break;
14526 		case FLASH_VENDOR_ST:
14527 			tp->nvram_jedecnum = JEDEC_ST;
14528 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14529 			tg3_flag_set(tp, NVRAM_BUFFERED);
14530 			break;
14531 		case FLASH_VENDOR_SAIFUN:
14532 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14533 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14534 			break;
14535 		case FLASH_VENDOR_SST_SMALL:
14536 		case FLASH_VENDOR_SST_LARGE:
14537 			tp->nvram_jedecnum = JEDEC_SST;
14538 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14539 			break;
14540 		}
14541 	} else {
14542 		tp->nvram_jedecnum = JEDEC_ATMEL;
14543 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14544 		tg3_flag_set(tp, NVRAM_BUFFERED);
14545 	}
14546 }
14547 
14548 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14549 {
14550 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14551 	case FLASH_5752PAGE_SIZE_256:
14552 		tp->nvram_pagesize = 256;
14553 		break;
14554 	case FLASH_5752PAGE_SIZE_512:
14555 		tp->nvram_pagesize = 512;
14556 		break;
14557 	case FLASH_5752PAGE_SIZE_1K:
14558 		tp->nvram_pagesize = 1024;
14559 		break;
14560 	case FLASH_5752PAGE_SIZE_2K:
14561 		tp->nvram_pagesize = 2048;
14562 		break;
14563 	case FLASH_5752PAGE_SIZE_4K:
14564 		tp->nvram_pagesize = 4096;
14565 		break;
14566 	case FLASH_5752PAGE_SIZE_264:
14567 		tp->nvram_pagesize = 264;
14568 		break;
14569 	case FLASH_5752PAGE_SIZE_528:
14570 		tp->nvram_pagesize = 528;
14571 		break;
14572 	}
14573 }
14574 
14575 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14576 {
14577 	u32 nvcfg1;
14578 
14579 	nvcfg1 = tr32(NVRAM_CFG1);
14580 
14581 	/* NVRAM protection for TPM */
14582 	if (nvcfg1 & (1 << 27))
14583 		tg3_flag_set(tp, PROTECTED_NVRAM);
14584 
14585 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14586 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14587 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14588 		tp->nvram_jedecnum = JEDEC_ATMEL;
14589 		tg3_flag_set(tp, NVRAM_BUFFERED);
14590 		break;
14591 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14592 		tp->nvram_jedecnum = JEDEC_ATMEL;
14593 		tg3_flag_set(tp, NVRAM_BUFFERED);
14594 		tg3_flag_set(tp, FLASH);
14595 		break;
14596 	case FLASH_5752VENDOR_ST_M45PE10:
14597 	case FLASH_5752VENDOR_ST_M45PE20:
14598 	case FLASH_5752VENDOR_ST_M45PE40:
14599 		tp->nvram_jedecnum = JEDEC_ST;
14600 		tg3_flag_set(tp, NVRAM_BUFFERED);
14601 		tg3_flag_set(tp, FLASH);
14602 		break;
14603 	}
14604 
14605 	if (tg3_flag(tp, FLASH)) {
14606 		tg3_nvram_get_pagesize(tp, nvcfg1);
14607 	} else {
14608 		/* For eeprom, set pagesize to maximum eeprom size */
14609 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14610 
14611 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14612 		tw32(NVRAM_CFG1, nvcfg1);
14613 	}
14614 }
14615 
14616 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14617 {
14618 	u32 nvcfg1, protect = 0;
14619 
14620 	nvcfg1 = tr32(NVRAM_CFG1);
14621 
14622 	/* NVRAM protection for TPM */
14623 	if (nvcfg1 & (1 << 27)) {
14624 		tg3_flag_set(tp, PROTECTED_NVRAM);
14625 		protect = 1;
14626 	}
14627 
14628 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14629 	switch (nvcfg1) {
14630 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14631 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14632 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14633 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14634 		tp->nvram_jedecnum = JEDEC_ATMEL;
14635 		tg3_flag_set(tp, NVRAM_BUFFERED);
14636 		tg3_flag_set(tp, FLASH);
14637 		tp->nvram_pagesize = 264;
14638 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14639 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14640 			tp->nvram_size = (protect ? 0x3e200 :
14641 					  TG3_NVRAM_SIZE_512KB);
14642 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14643 			tp->nvram_size = (protect ? 0x1f200 :
14644 					  TG3_NVRAM_SIZE_256KB);
14645 		else
14646 			tp->nvram_size = (protect ? 0x1f200 :
14647 					  TG3_NVRAM_SIZE_128KB);
14648 		break;
14649 	case FLASH_5752VENDOR_ST_M45PE10:
14650 	case FLASH_5752VENDOR_ST_M45PE20:
14651 	case FLASH_5752VENDOR_ST_M45PE40:
14652 		tp->nvram_jedecnum = JEDEC_ST;
14653 		tg3_flag_set(tp, NVRAM_BUFFERED);
14654 		tg3_flag_set(tp, FLASH);
14655 		tp->nvram_pagesize = 256;
14656 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14657 			tp->nvram_size = (protect ?
14658 					  TG3_NVRAM_SIZE_64KB :
14659 					  TG3_NVRAM_SIZE_128KB);
14660 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14661 			tp->nvram_size = (protect ?
14662 					  TG3_NVRAM_SIZE_64KB :
14663 					  TG3_NVRAM_SIZE_256KB);
14664 		else
14665 			tp->nvram_size = (protect ?
14666 					  TG3_NVRAM_SIZE_128KB :
14667 					  TG3_NVRAM_SIZE_512KB);
14668 		break;
14669 	}
14670 }
14671 
14672 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14673 {
14674 	u32 nvcfg1;
14675 
14676 	nvcfg1 = tr32(NVRAM_CFG1);
14677 
14678 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14679 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14680 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14681 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14682 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14683 		tp->nvram_jedecnum = JEDEC_ATMEL;
14684 		tg3_flag_set(tp, NVRAM_BUFFERED);
14685 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14686 
14687 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14688 		tw32(NVRAM_CFG1, nvcfg1);
14689 		break;
14690 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14691 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14692 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14693 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14694 		tp->nvram_jedecnum = JEDEC_ATMEL;
14695 		tg3_flag_set(tp, NVRAM_BUFFERED);
14696 		tg3_flag_set(tp, FLASH);
14697 		tp->nvram_pagesize = 264;
14698 		break;
14699 	case FLASH_5752VENDOR_ST_M45PE10:
14700 	case FLASH_5752VENDOR_ST_M45PE20:
14701 	case FLASH_5752VENDOR_ST_M45PE40:
14702 		tp->nvram_jedecnum = JEDEC_ST;
14703 		tg3_flag_set(tp, NVRAM_BUFFERED);
14704 		tg3_flag_set(tp, FLASH);
14705 		tp->nvram_pagesize = 256;
14706 		break;
14707 	}
14708 }
14709 
14710 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14711 {
14712 	u32 nvcfg1, protect = 0;
14713 
14714 	nvcfg1 = tr32(NVRAM_CFG1);
14715 
14716 	/* NVRAM protection for TPM */
14717 	if (nvcfg1 & (1 << 27)) {
14718 		tg3_flag_set(tp, PROTECTED_NVRAM);
14719 		protect = 1;
14720 	}
14721 
14722 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14723 	switch (nvcfg1) {
14724 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14725 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14726 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14727 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14728 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14729 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14730 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14731 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14732 		tp->nvram_jedecnum = JEDEC_ATMEL;
14733 		tg3_flag_set(tp, NVRAM_BUFFERED);
14734 		tg3_flag_set(tp, FLASH);
14735 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14736 		tp->nvram_pagesize = 256;
14737 		break;
14738 	case FLASH_5761VENDOR_ST_A_M45PE20:
14739 	case FLASH_5761VENDOR_ST_A_M45PE40:
14740 	case FLASH_5761VENDOR_ST_A_M45PE80:
14741 	case FLASH_5761VENDOR_ST_A_M45PE16:
14742 	case FLASH_5761VENDOR_ST_M_M45PE20:
14743 	case FLASH_5761VENDOR_ST_M_M45PE40:
14744 	case FLASH_5761VENDOR_ST_M_M45PE80:
14745 	case FLASH_5761VENDOR_ST_M_M45PE16:
14746 		tp->nvram_jedecnum = JEDEC_ST;
14747 		tg3_flag_set(tp, NVRAM_BUFFERED);
14748 		tg3_flag_set(tp, FLASH);
14749 		tp->nvram_pagesize = 256;
14750 		break;
14751 	}
14752 
14753 	if (protect) {
14754 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14755 	} else {
14756 		switch (nvcfg1) {
14757 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14758 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14759 		case FLASH_5761VENDOR_ST_A_M45PE16:
14760 		case FLASH_5761VENDOR_ST_M_M45PE16:
14761 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14762 			break;
14763 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14764 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14765 		case FLASH_5761VENDOR_ST_A_M45PE80:
14766 		case FLASH_5761VENDOR_ST_M_M45PE80:
14767 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14768 			break;
14769 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14770 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14771 		case FLASH_5761VENDOR_ST_A_M45PE40:
14772 		case FLASH_5761VENDOR_ST_M_M45PE40:
14773 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14774 			break;
14775 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14776 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14777 		case FLASH_5761VENDOR_ST_A_M45PE20:
14778 		case FLASH_5761VENDOR_ST_M_M45PE20:
14779 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14780 			break;
14781 		}
14782 	}
14783 }
14784 
14785 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14786 {
14787 	tp->nvram_jedecnum = JEDEC_ATMEL;
14788 	tg3_flag_set(tp, NVRAM_BUFFERED);
14789 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14790 }
14791 
14792 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14793 {
14794 	u32 nvcfg1;
14795 
14796 	nvcfg1 = tr32(NVRAM_CFG1);
14797 
14798 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14799 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14800 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14801 		tp->nvram_jedecnum = JEDEC_ATMEL;
14802 		tg3_flag_set(tp, NVRAM_BUFFERED);
14803 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14804 
14805 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14806 		tw32(NVRAM_CFG1, nvcfg1);
14807 		return;
14808 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14809 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14810 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14811 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14812 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14813 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14814 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14815 		tp->nvram_jedecnum = JEDEC_ATMEL;
14816 		tg3_flag_set(tp, NVRAM_BUFFERED);
14817 		tg3_flag_set(tp, FLASH);
14818 
14819 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14820 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14821 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14822 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14823 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14824 			break;
14825 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14826 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14827 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14828 			break;
14829 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14830 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14831 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14832 			break;
14833 		}
14834 		break;
14835 	case FLASH_5752VENDOR_ST_M45PE10:
14836 	case FLASH_5752VENDOR_ST_M45PE20:
14837 	case FLASH_5752VENDOR_ST_M45PE40:
14838 		tp->nvram_jedecnum = JEDEC_ST;
14839 		tg3_flag_set(tp, NVRAM_BUFFERED);
14840 		tg3_flag_set(tp, FLASH);
14841 
14842 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14843 		case FLASH_5752VENDOR_ST_M45PE10:
14844 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14845 			break;
14846 		case FLASH_5752VENDOR_ST_M45PE20:
14847 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14848 			break;
14849 		case FLASH_5752VENDOR_ST_M45PE40:
14850 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14851 			break;
14852 		}
14853 		break;
14854 	default:
14855 		tg3_flag_set(tp, NO_NVRAM);
14856 		return;
14857 	}
14858 
14859 	tg3_nvram_get_pagesize(tp, nvcfg1);
14860 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14861 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14862 }
14863 
14864 
14865 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14866 {
14867 	u32 nvcfg1;
14868 
14869 	nvcfg1 = tr32(NVRAM_CFG1);
14870 
14871 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14872 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14873 	case FLASH_5717VENDOR_MICRO_EEPROM:
14874 		tp->nvram_jedecnum = JEDEC_ATMEL;
14875 		tg3_flag_set(tp, NVRAM_BUFFERED);
14876 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14877 
14878 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14879 		tw32(NVRAM_CFG1, nvcfg1);
14880 		return;
14881 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14882 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14883 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14884 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14885 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14886 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14887 	case FLASH_5717VENDOR_ATMEL_45USPT:
14888 		tp->nvram_jedecnum = JEDEC_ATMEL;
14889 		tg3_flag_set(tp, NVRAM_BUFFERED);
14890 		tg3_flag_set(tp, FLASH);
14891 
14892 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14893 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14894 			/* Detect size with tg3_nvram_get_size() */
14895 			break;
14896 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14897 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14898 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14899 			break;
14900 		default:
14901 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14902 			break;
14903 		}
14904 		break;
14905 	case FLASH_5717VENDOR_ST_M_M25PE10:
14906 	case FLASH_5717VENDOR_ST_A_M25PE10:
14907 	case FLASH_5717VENDOR_ST_M_M45PE10:
14908 	case FLASH_5717VENDOR_ST_A_M45PE10:
14909 	case FLASH_5717VENDOR_ST_M_M25PE20:
14910 	case FLASH_5717VENDOR_ST_A_M25PE20:
14911 	case FLASH_5717VENDOR_ST_M_M45PE20:
14912 	case FLASH_5717VENDOR_ST_A_M45PE20:
14913 	case FLASH_5717VENDOR_ST_25USPT:
14914 	case FLASH_5717VENDOR_ST_45USPT:
14915 		tp->nvram_jedecnum = JEDEC_ST;
14916 		tg3_flag_set(tp, NVRAM_BUFFERED);
14917 		tg3_flag_set(tp, FLASH);
14918 
14919 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14920 		case FLASH_5717VENDOR_ST_M_M25PE20:
14921 		case FLASH_5717VENDOR_ST_M_M45PE20:
14922 			/* Detect size with tg3_nvram_get_size() */
14923 			break;
14924 		case FLASH_5717VENDOR_ST_A_M25PE20:
14925 		case FLASH_5717VENDOR_ST_A_M45PE20:
14926 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14927 			break;
14928 		default:
14929 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14930 			break;
14931 		}
14932 		break;
14933 	default:
14934 		tg3_flag_set(tp, NO_NVRAM);
14935 		return;
14936 	}
14937 
14938 	tg3_nvram_get_pagesize(tp, nvcfg1);
14939 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14940 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14941 }
14942 
14943 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14944 {
14945 	u32 nvcfg1, nvmpinstrp, nv_status;
14946 
14947 	nvcfg1 = tr32(NVRAM_CFG1);
14948 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14949 
14950 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14951 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14952 			tg3_flag_set(tp, NO_NVRAM);
14953 			return;
14954 		}
14955 
14956 		switch (nvmpinstrp) {
14957 		case FLASH_5762_MX25L_100:
14958 		case FLASH_5762_MX25L_200:
14959 		case FLASH_5762_MX25L_400:
14960 		case FLASH_5762_MX25L_800:
14961 		case FLASH_5762_MX25L_160_320:
14962 			tp->nvram_pagesize = 4096;
14963 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14964 			tg3_flag_set(tp, NVRAM_BUFFERED);
14965 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14966 			tg3_flag_set(tp, FLASH);
14967 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14968 			tp->nvram_size =
14969 				(1 << (nv_status >> AUTOSENSE_DEVID &
14970 						AUTOSENSE_DEVID_MASK)
14971 					<< AUTOSENSE_SIZE_IN_MB);
14972 			return;
14973 
14974 		case FLASH_5762_EEPROM_HD:
14975 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14976 			break;
14977 		case FLASH_5762_EEPROM_LD:
14978 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14979 			break;
14980 		case FLASH_5720VENDOR_M_ST_M45PE20:
14981 			/* This pinstrap supports multiple sizes, so force it
14982 			 * to read the actual size from location 0xf0.
14983 			 */
14984 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14985 			break;
14986 		}
14987 	}
14988 
14989 	switch (nvmpinstrp) {
14990 	case FLASH_5720_EEPROM_HD:
14991 	case FLASH_5720_EEPROM_LD:
14992 		tp->nvram_jedecnum = JEDEC_ATMEL;
14993 		tg3_flag_set(tp, NVRAM_BUFFERED);
14994 
14995 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14996 		tw32(NVRAM_CFG1, nvcfg1);
14997 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14998 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14999 		else
15000 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
15001 		return;
15002 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
15003 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
15004 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
15005 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
15006 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
15007 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
15008 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
15009 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
15010 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
15011 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
15012 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
15013 	case FLASH_5720VENDOR_ATMEL_45USPT:
15014 		tp->nvram_jedecnum = JEDEC_ATMEL;
15015 		tg3_flag_set(tp, NVRAM_BUFFERED);
15016 		tg3_flag_set(tp, FLASH);
15017 
15018 		switch (nvmpinstrp) {
15019 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
15020 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
15021 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
15022 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15023 			break;
15024 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
15025 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
15026 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
15027 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15028 			break;
15029 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
15030 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
15031 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15032 			break;
15033 		default:
15034 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15035 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15036 			break;
15037 		}
15038 		break;
15039 	case FLASH_5720VENDOR_M_ST_M25PE10:
15040 	case FLASH_5720VENDOR_M_ST_M45PE10:
15041 	case FLASH_5720VENDOR_A_ST_M25PE10:
15042 	case FLASH_5720VENDOR_A_ST_M45PE10:
15043 	case FLASH_5720VENDOR_M_ST_M25PE20:
15044 	case FLASH_5720VENDOR_M_ST_M45PE20:
15045 	case FLASH_5720VENDOR_A_ST_M25PE20:
15046 	case FLASH_5720VENDOR_A_ST_M45PE20:
15047 	case FLASH_5720VENDOR_M_ST_M25PE40:
15048 	case FLASH_5720VENDOR_M_ST_M45PE40:
15049 	case FLASH_5720VENDOR_A_ST_M25PE40:
15050 	case FLASH_5720VENDOR_A_ST_M45PE40:
15051 	case FLASH_5720VENDOR_M_ST_M25PE80:
15052 	case FLASH_5720VENDOR_M_ST_M45PE80:
15053 	case FLASH_5720VENDOR_A_ST_M25PE80:
15054 	case FLASH_5720VENDOR_A_ST_M45PE80:
15055 	case FLASH_5720VENDOR_ST_25USPT:
15056 	case FLASH_5720VENDOR_ST_45USPT:
15057 		tp->nvram_jedecnum = JEDEC_ST;
15058 		tg3_flag_set(tp, NVRAM_BUFFERED);
15059 		tg3_flag_set(tp, FLASH);
15060 
15061 		switch (nvmpinstrp) {
15062 		case FLASH_5720VENDOR_M_ST_M25PE20:
15063 		case FLASH_5720VENDOR_M_ST_M45PE20:
15064 		case FLASH_5720VENDOR_A_ST_M25PE20:
15065 		case FLASH_5720VENDOR_A_ST_M45PE20:
15066 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15067 			break;
15068 		case FLASH_5720VENDOR_M_ST_M25PE40:
15069 		case FLASH_5720VENDOR_M_ST_M45PE40:
15070 		case FLASH_5720VENDOR_A_ST_M25PE40:
15071 		case FLASH_5720VENDOR_A_ST_M45PE40:
15072 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15073 			break;
15074 		case FLASH_5720VENDOR_M_ST_M25PE80:
15075 		case FLASH_5720VENDOR_M_ST_M45PE80:
15076 		case FLASH_5720VENDOR_A_ST_M25PE80:
15077 		case FLASH_5720VENDOR_A_ST_M45PE80:
15078 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15079 			break;
15080 		default:
15081 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15082 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15083 			break;
15084 		}
15085 		break;
15086 	default:
15087 		tg3_flag_set(tp, NO_NVRAM);
15088 		return;
15089 	}
15090 
15091 	tg3_nvram_get_pagesize(tp, nvcfg1);
15092 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15093 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15094 
15095 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15096 		u32 val;
15097 
15098 		if (tg3_nvram_read(tp, 0, &val))
15099 			return;
15100 
15101 		if (val != TG3_EEPROM_MAGIC &&
15102 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15103 			tg3_flag_set(tp, NO_NVRAM);
15104 	}
15105 }
15106 
15107 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15108 static void tg3_nvram_init(struct tg3 *tp)
15109 {
15110 	if (tg3_flag(tp, IS_SSB_CORE)) {
15111 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15112 		tg3_flag_clear(tp, NVRAM);
15113 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15114 		tg3_flag_set(tp, NO_NVRAM);
15115 		return;
15116 	}
15117 
15118 	tw32_f(GRC_EEPROM_ADDR,
15119 	     (EEPROM_ADDR_FSM_RESET |
15120 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
15121 	       EEPROM_ADDR_CLKPERD_SHIFT)));
15122 
15123 	msleep(1);
15124 
15125 	/* Enable seeprom accesses. */
15126 	tw32_f(GRC_LOCAL_CTRL,
15127 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15128 	udelay(100);
15129 
15130 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15131 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15132 		tg3_flag_set(tp, NVRAM);
15133 
15134 		if (tg3_nvram_lock(tp)) {
15135 			netdev_warn(tp->dev,
15136 				    "Cannot get nvram lock, %s failed\n",
15137 				    __func__);
15138 			return;
15139 		}
15140 		tg3_enable_nvram_access(tp);
15141 
15142 		tp->nvram_size = 0;
15143 
15144 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15145 			tg3_get_5752_nvram_info(tp);
15146 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15147 			tg3_get_5755_nvram_info(tp);
15148 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15149 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15150 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15151 			tg3_get_5787_nvram_info(tp);
15152 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15153 			tg3_get_5761_nvram_info(tp);
15154 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15155 			tg3_get_5906_nvram_info(tp);
15156 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15157 			 tg3_flag(tp, 57765_CLASS))
15158 			tg3_get_57780_nvram_info(tp);
15159 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15160 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15161 			tg3_get_5717_nvram_info(tp);
15162 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15163 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15164 			tg3_get_5720_nvram_info(tp);
15165 		else
15166 			tg3_get_nvram_info(tp);
15167 
15168 		if (tp->nvram_size == 0)
15169 			tg3_get_nvram_size(tp);
15170 
15171 		tg3_disable_nvram_access(tp);
15172 		tg3_nvram_unlock(tp);
15173 
15174 	} else {
15175 		tg3_flag_clear(tp, NVRAM);
15176 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15177 
15178 		tg3_get_eeprom_size(tp);
15179 	}
15180 }
15181 
15182 struct subsys_tbl_ent {
15183 	u16 subsys_vendor, subsys_devid;
15184 	u32 phy_id;
15185 };
15186 
15187 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15188 	/* Broadcom boards. */
15189 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15190 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15191 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15192 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15193 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15194 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15195 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15196 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15197 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15198 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15199 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15200 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15201 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15202 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15203 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15204 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15205 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15206 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15207 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15208 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15209 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15210 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15211 
15212 	/* 3com boards. */
15213 	{ TG3PCI_SUBVENDOR_ID_3COM,
15214 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15215 	{ TG3PCI_SUBVENDOR_ID_3COM,
15216 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15217 	{ TG3PCI_SUBVENDOR_ID_3COM,
15218 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15219 	{ TG3PCI_SUBVENDOR_ID_3COM,
15220 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15221 	{ TG3PCI_SUBVENDOR_ID_3COM,
15222 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15223 
15224 	/* DELL boards. */
15225 	{ TG3PCI_SUBVENDOR_ID_DELL,
15226 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15227 	{ TG3PCI_SUBVENDOR_ID_DELL,
15228 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15229 	{ TG3PCI_SUBVENDOR_ID_DELL,
15230 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15231 	{ TG3PCI_SUBVENDOR_ID_DELL,
15232 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15233 
15234 	/* Compaq boards. */
15235 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15236 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15237 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15238 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15239 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15240 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15241 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15242 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15243 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15244 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15245 
15246 	/* IBM boards. */
15247 	{ TG3PCI_SUBVENDOR_ID_IBM,
15248 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15249 };
15250 
15251 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15252 {
15253 	int i;
15254 
15255 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15256 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15257 		     tp->pdev->subsystem_vendor) &&
15258 		    (subsys_id_to_phy_id[i].subsys_devid ==
15259 		     tp->pdev->subsystem_device))
15260 			return &subsys_id_to_phy_id[i];
15261 	}
15262 	return NULL;
15263 }
15264 
15265 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15266 {
15267 	u32 val;
15268 
15269 	tp->phy_id = TG3_PHY_ID_INVALID;
15270 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15271 
15272 	/* Assume an onboard device and WOL capable by default.  */
15273 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15274 	tg3_flag_set(tp, WOL_CAP);
15275 
15276 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15277 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15278 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15279 			tg3_flag_set(tp, IS_NIC);
15280 		}
15281 		val = tr32(VCPU_CFGSHDW);
15282 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15283 			tg3_flag_set(tp, ASPM_WORKAROUND);
15284 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15285 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15286 			tg3_flag_set(tp, WOL_ENABLE);
15287 			device_set_wakeup_enable(&tp->pdev->dev, true);
15288 		}
15289 		goto done;
15290 	}
15291 
15292 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15293 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15294 		u32 nic_cfg, led_cfg;
15295 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15296 		u32 nic_phy_id, ver, eeprom_phy_id;
15297 		int eeprom_phy_serdes = 0;
15298 
15299 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15300 		tp->nic_sram_data_cfg = nic_cfg;
15301 
15302 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15303 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15304 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15305 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15306 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15307 		    (ver > 0) && (ver < 0x100))
15308 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15309 
15310 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15311 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15312 
15313 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15314 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15315 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15316 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15317 
15318 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15319 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15320 			eeprom_phy_serdes = 1;
15321 
15322 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15323 		if (nic_phy_id != 0) {
15324 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15325 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15326 
15327 			eeprom_phy_id  = (id1 >> 16) << 10;
15328 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15329 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15330 		} else
15331 			eeprom_phy_id = 0;
15332 
15333 		tp->phy_id = eeprom_phy_id;
15334 		if (eeprom_phy_serdes) {
15335 			if (!tg3_flag(tp, 5705_PLUS))
15336 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15337 			else
15338 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15339 		}
15340 
15341 		if (tg3_flag(tp, 5750_PLUS))
15342 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15343 				    SHASTA_EXT_LED_MODE_MASK);
15344 		else
15345 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15346 
15347 		switch (led_cfg) {
15348 		default:
15349 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15350 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15351 			break;
15352 
15353 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15354 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15355 			break;
15356 
15357 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15358 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15359 
15360 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15361 			 * read on some older 5700/5701 bootcode.
15362 			 */
15363 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15364 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15365 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15366 
15367 			break;
15368 
15369 		case SHASTA_EXT_LED_SHARED:
15370 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15371 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15372 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15373 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15374 						 LED_CTRL_MODE_PHY_2);
15375 
15376 			if (tg3_flag(tp, 5717_PLUS) ||
15377 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15378 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15379 						LED_CTRL_BLINK_RATE_MASK;
15380 
15381 			break;
15382 
15383 		case SHASTA_EXT_LED_MAC:
15384 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15385 			break;
15386 
15387 		case SHASTA_EXT_LED_COMBO:
15388 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15389 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15390 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15391 						 LED_CTRL_MODE_PHY_2);
15392 			break;
15393 
15394 		}
15395 
15396 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15397 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15398 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15399 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15400 
15401 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15402 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15403 
15404 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15405 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15406 			if ((tp->pdev->subsystem_vendor ==
15407 			     PCI_VENDOR_ID_ARIMA) &&
15408 			    (tp->pdev->subsystem_device == 0x205a ||
15409 			     tp->pdev->subsystem_device == 0x2063))
15410 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15411 		} else {
15412 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15413 			tg3_flag_set(tp, IS_NIC);
15414 		}
15415 
15416 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15417 			tg3_flag_set(tp, ENABLE_ASF);
15418 			if (tg3_flag(tp, 5750_PLUS))
15419 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15420 		}
15421 
15422 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15423 		    tg3_flag(tp, 5750_PLUS))
15424 			tg3_flag_set(tp, ENABLE_APE);
15425 
15426 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15427 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15428 			tg3_flag_clear(tp, WOL_CAP);
15429 
15430 		if (tg3_flag(tp, WOL_CAP) &&
15431 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15432 			tg3_flag_set(tp, WOL_ENABLE);
15433 			device_set_wakeup_enable(&tp->pdev->dev, true);
15434 		}
15435 
15436 		if (cfg2 & (1 << 17))
15437 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15438 
15439 		/* serdes signal pre-emphasis in register 0x590 set by */
15440 		/* bootcode if bit 18 is set */
15441 		if (cfg2 & (1 << 18))
15442 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15443 
15444 		if ((tg3_flag(tp, 57765_PLUS) ||
15445 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15446 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15447 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15448 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15449 
15450 		if (tg3_flag(tp, PCI_EXPRESS)) {
15451 			u32 cfg3;
15452 
15453 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15454 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15455 			    !tg3_flag(tp, 57765_PLUS) &&
15456 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15457 				tg3_flag_set(tp, ASPM_WORKAROUND);
15458 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15459 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15460 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15461 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15462 		}
15463 
15464 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15465 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15466 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15467 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15468 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15469 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15470 
15471 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15472 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15473 	}
15474 done:
15475 	if (tg3_flag(tp, WOL_CAP))
15476 		device_set_wakeup_enable(&tp->pdev->dev,
15477 					 tg3_flag(tp, WOL_ENABLE));
15478 	else
15479 		device_set_wakeup_capable(&tp->pdev->dev, false);
15480 }
15481 
15482 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15483 {
15484 	int i, err;
15485 	u32 val2, off = offset * 8;
15486 
15487 	err = tg3_nvram_lock(tp);
15488 	if (err)
15489 		return err;
15490 
15491 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15492 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15493 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15494 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15495 	udelay(10);
15496 
15497 	for (i = 0; i < 100; i++) {
15498 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15499 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15500 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15501 			break;
15502 		}
15503 		udelay(10);
15504 	}
15505 
15506 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15507 
15508 	tg3_nvram_unlock(tp);
15509 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15510 		return 0;
15511 
15512 	return -EBUSY;
15513 }
15514 
15515 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15516 {
15517 	int i;
15518 	u32 val;
15519 
15520 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15521 	tw32(OTP_CTRL, cmd);
15522 
15523 	/* Wait for up to 1 ms for command to execute. */
15524 	for (i = 0; i < 100; i++) {
15525 		val = tr32(OTP_STATUS);
15526 		if (val & OTP_STATUS_CMD_DONE)
15527 			break;
15528 		udelay(10);
15529 	}
15530 
15531 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15532 }
15533 
15534 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15535  * configuration is a 32-bit value that straddles the alignment boundary.
15536  * We do two 32-bit reads and then shift and merge the results.
15537  */
15538 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15539 {
15540 	u32 bhalf_otp, thalf_otp;
15541 
15542 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15543 
15544 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15545 		return 0;
15546 
15547 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15548 
15549 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15550 		return 0;
15551 
15552 	thalf_otp = tr32(OTP_READ_DATA);
15553 
15554 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15555 
15556 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15557 		return 0;
15558 
15559 	bhalf_otp = tr32(OTP_READ_DATA);
15560 
15561 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15562 }
15563 
15564 static void tg3_phy_init_link_config(struct tg3 *tp)
15565 {
15566 	u32 adv = ADVERTISED_Autoneg;
15567 
15568 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15569 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15570 			adv |= ADVERTISED_1000baseT_Half;
15571 		adv |= ADVERTISED_1000baseT_Full;
15572 	}
15573 
15574 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15575 		adv |= ADVERTISED_100baseT_Half |
15576 		       ADVERTISED_100baseT_Full |
15577 		       ADVERTISED_10baseT_Half |
15578 		       ADVERTISED_10baseT_Full |
15579 		       ADVERTISED_TP;
15580 	else
15581 		adv |= ADVERTISED_FIBRE;
15582 
15583 	tp->link_config.advertising = adv;
15584 	tp->link_config.speed = SPEED_UNKNOWN;
15585 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15586 	tp->link_config.autoneg = AUTONEG_ENABLE;
15587 	tp->link_config.active_speed = SPEED_UNKNOWN;
15588 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15589 
15590 	tp->old_link = -1;
15591 }
15592 
15593 static int tg3_phy_probe(struct tg3 *tp)
15594 {
15595 	u32 hw_phy_id_1, hw_phy_id_2;
15596 	u32 hw_phy_id, hw_phy_id_masked;
15597 	int err;
15598 
15599 	/* flow control autonegotiation is default behavior */
15600 	tg3_flag_set(tp, PAUSE_AUTONEG);
15601 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15602 
15603 	if (tg3_flag(tp, ENABLE_APE)) {
15604 		switch (tp->pci_fn) {
15605 		case 0:
15606 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15607 			break;
15608 		case 1:
15609 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15610 			break;
15611 		case 2:
15612 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15613 			break;
15614 		case 3:
15615 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15616 			break;
15617 		}
15618 	}
15619 
15620 	if (!tg3_flag(tp, ENABLE_ASF) &&
15621 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15622 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15623 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15624 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15625 
15626 	if (tg3_flag(tp, USE_PHYLIB))
15627 		return tg3_phy_init(tp);
15628 
15629 	/* Reading the PHY ID register can conflict with ASF
15630 	 * firmware access to the PHY hardware.
15631 	 */
15632 	err = 0;
15633 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15634 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15635 	} else {
15636 		/* Now read the physical PHY_ID from the chip and verify
15637 		 * that it is sane.  If it doesn't look good, we fall back
15638 		 * to either the hard-coded table based PHY_ID and failing
15639 		 * that the value found in the eeprom area.
15640 		 */
15641 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15642 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15643 
15644 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15645 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15646 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15647 
15648 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15649 	}
15650 
15651 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15652 		tp->phy_id = hw_phy_id;
15653 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15654 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15655 		else
15656 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15657 	} else {
15658 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15659 			/* Do nothing, phy ID already set up in
15660 			 * tg3_get_eeprom_hw_cfg().
15661 			 */
15662 		} else {
15663 			struct subsys_tbl_ent *p;
15664 
15665 			/* No eeprom signature?  Try the hardcoded
15666 			 * subsys device table.
15667 			 */
15668 			p = tg3_lookup_by_subsys(tp);
15669 			if (p) {
15670 				tp->phy_id = p->phy_id;
15671 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15672 				/* For now we saw the IDs 0xbc050cd0,
15673 				 * 0xbc050f80 and 0xbc050c30 on devices
15674 				 * connected to an BCM4785 and there are
15675 				 * probably more. Just assume that the phy is
15676 				 * supported when it is connected to a SSB core
15677 				 * for now.
15678 				 */
15679 				return -ENODEV;
15680 			}
15681 
15682 			if (!tp->phy_id ||
15683 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15684 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15685 		}
15686 	}
15687 
15688 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15689 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15690 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15691 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15692 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15693 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15694 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15695 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15696 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15697 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15698 
15699 		linkmode_zero(tp->eee.supported);
15700 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15701 				 tp->eee.supported);
15702 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15703 				 tp->eee.supported);
15704 		linkmode_copy(tp->eee.advertised, tp->eee.supported);
15705 
15706 		tp->eee.eee_enabled = 1;
15707 		tp->eee.tx_lpi_enabled = 1;
15708 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15709 	}
15710 
15711 	tg3_phy_init_link_config(tp);
15712 
15713 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15714 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15715 	    !tg3_flag(tp, ENABLE_APE) &&
15716 	    !tg3_flag(tp, ENABLE_ASF)) {
15717 		u32 bmsr, dummy;
15718 
15719 		tg3_readphy(tp, MII_BMSR, &bmsr);
15720 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15721 		    (bmsr & BMSR_LSTATUS))
15722 			goto skip_phy_reset;
15723 
15724 		err = tg3_phy_reset(tp);
15725 		if (err)
15726 			return err;
15727 
15728 		tg3_phy_set_wirespeed(tp);
15729 
15730 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15731 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15732 					    tp->link_config.flowctrl);
15733 
15734 			tg3_writephy(tp, MII_BMCR,
15735 				     BMCR_ANENABLE | BMCR_ANRESTART);
15736 		}
15737 	}
15738 
15739 skip_phy_reset:
15740 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15741 		err = tg3_init_5401phy_dsp(tp);
15742 		if (err)
15743 			return err;
15744 
15745 		err = tg3_init_5401phy_dsp(tp);
15746 	}
15747 
15748 	return err;
15749 }
15750 
15751 static void tg3_read_vpd(struct tg3 *tp)
15752 {
15753 	u8 *vpd_data;
15754 	unsigned int len, vpdlen;
15755 	int i;
15756 
15757 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15758 	if (!vpd_data)
15759 		goto out_no_vpd;
15760 
15761 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15762 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15763 	if (i < 0)
15764 		goto partno;
15765 
15766 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15767 		goto partno;
15768 
15769 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15770 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15771 	if (i < 0)
15772 		goto partno;
15773 
15774 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15775 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15776 
15777 partno:
15778 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15779 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15780 	if (i < 0)
15781 		goto out_not_found;
15782 
15783 	if (len > TG3_BPN_SIZE)
15784 		goto out_not_found;
15785 
15786 	memcpy(tp->board_part_number, &vpd_data[i], len);
15787 
15788 out_not_found:
15789 	kfree(vpd_data);
15790 	if (tp->board_part_number[0])
15791 		return;
15792 
15793 out_no_vpd:
15794 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15795 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15796 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15797 			strcpy(tp->board_part_number, "BCM5717");
15798 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15799 			strcpy(tp->board_part_number, "BCM5718");
15800 		else
15801 			goto nomatch;
15802 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15803 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15804 			strcpy(tp->board_part_number, "BCM57780");
15805 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15806 			strcpy(tp->board_part_number, "BCM57760");
15807 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15808 			strcpy(tp->board_part_number, "BCM57790");
15809 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15810 			strcpy(tp->board_part_number, "BCM57788");
15811 		else
15812 			goto nomatch;
15813 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15814 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15815 			strcpy(tp->board_part_number, "BCM57761");
15816 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15817 			strcpy(tp->board_part_number, "BCM57765");
15818 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15819 			strcpy(tp->board_part_number, "BCM57781");
15820 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15821 			strcpy(tp->board_part_number, "BCM57785");
15822 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15823 			strcpy(tp->board_part_number, "BCM57791");
15824 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15825 			strcpy(tp->board_part_number, "BCM57795");
15826 		else
15827 			goto nomatch;
15828 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15829 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15830 			strcpy(tp->board_part_number, "BCM57762");
15831 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15832 			strcpy(tp->board_part_number, "BCM57766");
15833 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15834 			strcpy(tp->board_part_number, "BCM57782");
15835 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15836 			strcpy(tp->board_part_number, "BCM57786");
15837 		else
15838 			goto nomatch;
15839 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15840 		strcpy(tp->board_part_number, "BCM95906");
15841 	} else {
15842 nomatch:
15843 		strcpy(tp->board_part_number, "none");
15844 	}
15845 }
15846 
15847 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15848 {
15849 	u32 val;
15850 
15851 	if (tg3_nvram_read(tp, offset, &val) ||
15852 	    (val & 0xfc000000) != 0x0c000000 ||
15853 	    tg3_nvram_read(tp, offset + 4, &val) ||
15854 	    val != 0)
15855 		return 0;
15856 
15857 	return 1;
15858 }
15859 
15860 static void tg3_read_bc_ver(struct tg3 *tp)
15861 {
15862 	u32 val, offset, start, ver_offset;
15863 	int i, dst_off;
15864 	bool newver = false;
15865 
15866 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15867 	    tg3_nvram_read(tp, 0x4, &start))
15868 		return;
15869 
15870 	offset = tg3_nvram_logical_addr(tp, offset);
15871 
15872 	if (tg3_nvram_read(tp, offset, &val))
15873 		return;
15874 
15875 	if ((val & 0xfc000000) == 0x0c000000) {
15876 		if (tg3_nvram_read(tp, offset + 4, &val))
15877 			return;
15878 
15879 		if (val == 0)
15880 			newver = true;
15881 	}
15882 
15883 	dst_off = strlen(tp->fw_ver);
15884 
15885 	if (newver) {
15886 		if (TG3_VER_SIZE - dst_off < 16 ||
15887 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15888 			return;
15889 
15890 		offset = offset + ver_offset - start;
15891 		for (i = 0; i < 16; i += 4) {
15892 			__be32 v;
15893 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15894 				return;
15895 
15896 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15897 		}
15898 	} else {
15899 		u32 major, minor;
15900 
15901 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15902 			return;
15903 
15904 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15905 			TG3_NVM_BCVER_MAJSFT;
15906 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15907 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15908 			 "v%d.%02d", major, minor);
15909 	}
15910 }
15911 
15912 static void tg3_read_hwsb_ver(struct tg3 *tp)
15913 {
15914 	u32 val, major, minor;
15915 
15916 	/* Use native endian representation */
15917 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15918 		return;
15919 
15920 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15921 		TG3_NVM_HWSB_CFG1_MAJSFT;
15922 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15923 		TG3_NVM_HWSB_CFG1_MINSFT;
15924 
15925 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15926 }
15927 
15928 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15929 {
15930 	u32 offset, major, minor, build;
15931 
15932 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15933 
15934 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15935 		return;
15936 
15937 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15938 	case TG3_EEPROM_SB_REVISION_0:
15939 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15940 		break;
15941 	case TG3_EEPROM_SB_REVISION_2:
15942 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15943 		break;
15944 	case TG3_EEPROM_SB_REVISION_3:
15945 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15946 		break;
15947 	case TG3_EEPROM_SB_REVISION_4:
15948 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15949 		break;
15950 	case TG3_EEPROM_SB_REVISION_5:
15951 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15952 		break;
15953 	case TG3_EEPROM_SB_REVISION_6:
15954 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15955 		break;
15956 	default:
15957 		return;
15958 	}
15959 
15960 	if (tg3_nvram_read(tp, offset, &val))
15961 		return;
15962 
15963 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15964 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15965 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15966 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15967 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15968 
15969 	if (minor > 99 || build > 26)
15970 		return;
15971 
15972 	offset = strlen(tp->fw_ver);
15973 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15974 		 " v%d.%02d", major, minor);
15975 
15976 	if (build > 0) {
15977 		offset = strlen(tp->fw_ver);
15978 		if (offset < TG3_VER_SIZE - 1)
15979 			tp->fw_ver[offset] = 'a' + build - 1;
15980 	}
15981 }
15982 
15983 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15984 {
15985 	u32 val, offset, start;
15986 	int i, vlen;
15987 
15988 	for (offset = TG3_NVM_DIR_START;
15989 	     offset < TG3_NVM_DIR_END;
15990 	     offset += TG3_NVM_DIRENT_SIZE) {
15991 		if (tg3_nvram_read(tp, offset, &val))
15992 			return;
15993 
15994 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15995 			break;
15996 	}
15997 
15998 	if (offset == TG3_NVM_DIR_END)
15999 		return;
16000 
16001 	if (!tg3_flag(tp, 5705_PLUS))
16002 		start = 0x08000000;
16003 	else if (tg3_nvram_read(tp, offset - 4, &start))
16004 		return;
16005 
16006 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
16007 	    !tg3_fw_img_is_valid(tp, offset) ||
16008 	    tg3_nvram_read(tp, offset + 8, &val))
16009 		return;
16010 
16011 	offset += val - start;
16012 
16013 	vlen = strlen(tp->fw_ver);
16014 
16015 	tp->fw_ver[vlen++] = ',';
16016 	tp->fw_ver[vlen++] = ' ';
16017 
16018 	for (i = 0; i < 4; i++) {
16019 		__be32 v;
16020 		if (tg3_nvram_read_be32(tp, offset, &v))
16021 			return;
16022 
16023 		offset += sizeof(v);
16024 
16025 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
16026 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
16027 			break;
16028 		}
16029 
16030 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
16031 		vlen += sizeof(v);
16032 	}
16033 }
16034 
16035 static void tg3_probe_ncsi(struct tg3 *tp)
16036 {
16037 	u32 apedata;
16038 
16039 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
16040 	if (apedata != APE_SEG_SIG_MAGIC)
16041 		return;
16042 
16043 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
16044 	if (!(apedata & APE_FW_STATUS_READY))
16045 		return;
16046 
16047 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16048 		tg3_flag_set(tp, APE_HAS_NCSI);
16049 }
16050 
16051 static void tg3_read_dash_ver(struct tg3 *tp)
16052 {
16053 	int vlen;
16054 	u32 apedata;
16055 	char *fwtype;
16056 
16057 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16058 
16059 	if (tg3_flag(tp, APE_HAS_NCSI))
16060 		fwtype = "NCSI";
16061 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16062 		fwtype = "SMASH";
16063 	else
16064 		fwtype = "DASH";
16065 
16066 	vlen = strlen(tp->fw_ver);
16067 
16068 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16069 		 fwtype,
16070 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16071 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16072 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16073 		 (apedata & APE_FW_VERSION_BLDMSK));
16074 }
16075 
16076 static void tg3_read_otp_ver(struct tg3 *tp)
16077 {
16078 	u32 val, val2;
16079 
16080 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
16081 		return;
16082 
16083 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16084 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16085 	    TG3_OTP_MAGIC0_VALID(val)) {
16086 		u64 val64 = (u64) val << 32 | val2;
16087 		u32 ver = 0;
16088 		int i, vlen;
16089 
16090 		for (i = 0; i < 7; i++) {
16091 			if ((val64 & 0xff) == 0)
16092 				break;
16093 			ver = val64 & 0xff;
16094 			val64 >>= 8;
16095 		}
16096 		vlen = strlen(tp->fw_ver);
16097 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16098 	}
16099 }
16100 
16101 static void tg3_read_fw_ver(struct tg3 *tp)
16102 {
16103 	u32 val;
16104 	bool vpd_vers = false;
16105 
16106 	if (tp->fw_ver[0] != 0)
16107 		vpd_vers = true;
16108 
16109 	if (tg3_flag(tp, NO_NVRAM)) {
16110 		strcat(tp->fw_ver, "sb");
16111 		tg3_read_otp_ver(tp);
16112 		return;
16113 	}
16114 
16115 	if (tg3_nvram_read(tp, 0, &val))
16116 		return;
16117 
16118 	if (val == TG3_EEPROM_MAGIC)
16119 		tg3_read_bc_ver(tp);
16120 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16121 		tg3_read_sb_ver(tp, val);
16122 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16123 		tg3_read_hwsb_ver(tp);
16124 
16125 	if (tg3_flag(tp, ENABLE_ASF)) {
16126 		if (tg3_flag(tp, ENABLE_APE)) {
16127 			tg3_probe_ncsi(tp);
16128 			if (!vpd_vers)
16129 				tg3_read_dash_ver(tp);
16130 		} else if (!vpd_vers) {
16131 			tg3_read_mgmtfw_ver(tp);
16132 		}
16133 	}
16134 
16135 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16136 }
16137 
16138 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16139 {
16140 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16141 		return TG3_RX_RET_MAX_SIZE_5717;
16142 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16143 		return TG3_RX_RET_MAX_SIZE_5700;
16144 	else
16145 		return TG3_RX_RET_MAX_SIZE_5705;
16146 }
16147 
16148 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16149 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16150 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16151 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16152 	{ },
16153 };
16154 
16155 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16156 {
16157 	struct pci_dev *peer;
16158 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16159 
16160 	for (func = 0; func < 8; func++) {
16161 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16162 		if (peer && peer != tp->pdev)
16163 			break;
16164 		pci_dev_put(peer);
16165 	}
16166 	/* 5704 can be configured in single-port mode, set peer to
16167 	 * tp->pdev in that case.
16168 	 */
16169 	if (!peer) {
16170 		peer = tp->pdev;
16171 		return peer;
16172 	}
16173 
16174 	/*
16175 	 * We don't need to keep the refcount elevated; there's no way
16176 	 * to remove one half of this device without removing the other
16177 	 */
16178 	pci_dev_put(peer);
16179 
16180 	return peer;
16181 }
16182 
16183 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16184 {
16185 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16186 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16187 		u32 reg;
16188 
16189 		/* All devices that use the alternate
16190 		 * ASIC REV location have a CPMU.
16191 		 */
16192 		tg3_flag_set(tp, CPMU_PRESENT);
16193 
16194 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16195 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16196 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16197 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16198 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16199 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16200 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16201 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16202 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16203 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16204 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16205 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16206 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16207 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16208 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16209 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16210 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16211 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16212 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16213 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16214 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16215 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16216 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16217 		else
16218 			reg = TG3PCI_PRODID_ASICREV;
16219 
16220 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16221 	}
16222 
16223 	/* Wrong chip ID in 5752 A0. This code can be removed later
16224 	 * as A0 is not in production.
16225 	 */
16226 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16227 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16228 
16229 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16230 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16231 
16232 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16233 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16234 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16235 		tg3_flag_set(tp, 5717_PLUS);
16236 
16237 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16238 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16239 		tg3_flag_set(tp, 57765_CLASS);
16240 
16241 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16242 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16243 		tg3_flag_set(tp, 57765_PLUS);
16244 
16245 	/* Intentionally exclude ASIC_REV_5906 */
16246 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16247 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16248 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16249 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16250 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16251 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16252 	    tg3_flag(tp, 57765_PLUS))
16253 		tg3_flag_set(tp, 5755_PLUS);
16254 
16255 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16256 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16257 		tg3_flag_set(tp, 5780_CLASS);
16258 
16259 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16260 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16261 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16262 	    tg3_flag(tp, 5755_PLUS) ||
16263 	    tg3_flag(tp, 5780_CLASS))
16264 		tg3_flag_set(tp, 5750_PLUS);
16265 
16266 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16267 	    tg3_flag(tp, 5750_PLUS))
16268 		tg3_flag_set(tp, 5705_PLUS);
16269 }
16270 
16271 static bool tg3_10_100_only_device(struct tg3 *tp,
16272 				   const struct pci_device_id *ent)
16273 {
16274 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16275 
16276 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16277 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16278 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16279 		return true;
16280 
16281 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16282 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16283 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16284 				return true;
16285 		} else {
16286 			return true;
16287 		}
16288 	}
16289 
16290 	return false;
16291 }
16292 
16293 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16294 {
16295 	u32 misc_ctrl_reg;
16296 	u32 pci_state_reg, grc_misc_cfg;
16297 	u32 val;
16298 	u16 pci_cmd;
16299 	int err;
16300 
16301 	/* Force memory write invalidate off.  If we leave it on,
16302 	 * then on 5700_BX chips we have to enable a workaround.
16303 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16304 	 * to match the cacheline size.  The Broadcom driver have this
16305 	 * workaround but turns MWI off all the times so never uses
16306 	 * it.  This seems to suggest that the workaround is insufficient.
16307 	 */
16308 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16309 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16310 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16311 
16312 	/* Important! -- Make sure register accesses are byteswapped
16313 	 * correctly.  Also, for those chips that require it, make
16314 	 * sure that indirect register accesses are enabled before
16315 	 * the first operation.
16316 	 */
16317 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16318 			      &misc_ctrl_reg);
16319 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16320 			       MISC_HOST_CTRL_CHIPREV);
16321 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16322 			       tp->misc_host_ctrl);
16323 
16324 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16325 
16326 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16327 	 * we need to disable memory and use config. cycles
16328 	 * only to access all registers. The 5702/03 chips
16329 	 * can mistakenly decode the special cycles from the
16330 	 * ICH chipsets as memory write cycles, causing corruption
16331 	 * of register and memory space. Only certain ICH bridges
16332 	 * will drive special cycles with non-zero data during the
16333 	 * address phase which can fall within the 5703's address
16334 	 * range. This is not an ICH bug as the PCI spec allows
16335 	 * non-zero address during special cycles. However, only
16336 	 * these ICH bridges are known to drive non-zero addresses
16337 	 * during special cycles.
16338 	 *
16339 	 * Since special cycles do not cross PCI bridges, we only
16340 	 * enable this workaround if the 5703 is on the secondary
16341 	 * bus of these ICH bridges.
16342 	 */
16343 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16344 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16345 		static struct tg3_dev_id {
16346 			u32	vendor;
16347 			u32	device;
16348 			u32	rev;
16349 		} ich_chipsets[] = {
16350 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16351 			  PCI_ANY_ID },
16352 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16353 			  PCI_ANY_ID },
16354 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16355 			  0xa },
16356 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16357 			  PCI_ANY_ID },
16358 			{ },
16359 		};
16360 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16361 		struct pci_dev *bridge = NULL;
16362 
16363 		while (pci_id->vendor != 0) {
16364 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16365 						bridge);
16366 			if (!bridge) {
16367 				pci_id++;
16368 				continue;
16369 			}
16370 			if (pci_id->rev != PCI_ANY_ID) {
16371 				if (bridge->revision > pci_id->rev)
16372 					continue;
16373 			}
16374 			if (bridge->subordinate &&
16375 			    (bridge->subordinate->number ==
16376 			     tp->pdev->bus->number)) {
16377 				tg3_flag_set(tp, ICH_WORKAROUND);
16378 				pci_dev_put(bridge);
16379 				break;
16380 			}
16381 		}
16382 	}
16383 
16384 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16385 		static struct tg3_dev_id {
16386 			u32	vendor;
16387 			u32	device;
16388 		} bridge_chipsets[] = {
16389 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16390 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16391 			{ },
16392 		};
16393 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16394 		struct pci_dev *bridge = NULL;
16395 
16396 		while (pci_id->vendor != 0) {
16397 			bridge = pci_get_device(pci_id->vendor,
16398 						pci_id->device,
16399 						bridge);
16400 			if (!bridge) {
16401 				pci_id++;
16402 				continue;
16403 			}
16404 			if (bridge->subordinate &&
16405 			    (bridge->subordinate->number <=
16406 			     tp->pdev->bus->number) &&
16407 			    (bridge->subordinate->busn_res.end >=
16408 			     tp->pdev->bus->number)) {
16409 				tg3_flag_set(tp, 5701_DMA_BUG);
16410 				pci_dev_put(bridge);
16411 				break;
16412 			}
16413 		}
16414 	}
16415 
16416 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16417 	 * DMA addresses > 40-bit. This bridge may have other additional
16418 	 * 57xx devices behind it in some 4-port NIC designs for example.
16419 	 * Any tg3 device found behind the bridge will also need the 40-bit
16420 	 * DMA workaround.
16421 	 */
16422 	if (tg3_flag(tp, 5780_CLASS)) {
16423 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16424 		tp->msi_cap = tp->pdev->msi_cap;
16425 	} else {
16426 		struct pci_dev *bridge = NULL;
16427 
16428 		do {
16429 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16430 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16431 						bridge);
16432 			if (bridge && bridge->subordinate &&
16433 			    (bridge->subordinate->number <=
16434 			     tp->pdev->bus->number) &&
16435 			    (bridge->subordinate->busn_res.end >=
16436 			     tp->pdev->bus->number)) {
16437 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16438 				pci_dev_put(bridge);
16439 				break;
16440 			}
16441 		} while (bridge);
16442 	}
16443 
16444 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16445 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16446 		tp->pdev_peer = tg3_find_peer(tp);
16447 
16448 	/* Determine TSO capabilities */
16449 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16450 		; /* Do nothing. HW bug. */
16451 	else if (tg3_flag(tp, 57765_PLUS))
16452 		tg3_flag_set(tp, HW_TSO_3);
16453 	else if (tg3_flag(tp, 5755_PLUS) ||
16454 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16455 		tg3_flag_set(tp, HW_TSO_2);
16456 	else if (tg3_flag(tp, 5750_PLUS)) {
16457 		tg3_flag_set(tp, HW_TSO_1);
16458 		tg3_flag_set(tp, TSO_BUG);
16459 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16460 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16461 			tg3_flag_clear(tp, TSO_BUG);
16462 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16463 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16464 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16465 		tg3_flag_set(tp, FW_TSO);
16466 		tg3_flag_set(tp, TSO_BUG);
16467 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16468 			tp->fw_needed = FIRMWARE_TG3TSO5;
16469 		else
16470 			tp->fw_needed = FIRMWARE_TG3TSO;
16471 	}
16472 
16473 	/* Selectively allow TSO based on operating conditions */
16474 	if (tg3_flag(tp, HW_TSO_1) ||
16475 	    tg3_flag(tp, HW_TSO_2) ||
16476 	    tg3_flag(tp, HW_TSO_3) ||
16477 	    tg3_flag(tp, FW_TSO)) {
16478 		/* For firmware TSO, assume ASF is disabled.
16479 		 * We'll disable TSO later if we discover ASF
16480 		 * is enabled in tg3_get_eeprom_hw_cfg().
16481 		 */
16482 		tg3_flag_set(tp, TSO_CAPABLE);
16483 	} else {
16484 		tg3_flag_clear(tp, TSO_CAPABLE);
16485 		tg3_flag_clear(tp, TSO_BUG);
16486 		tp->fw_needed = NULL;
16487 	}
16488 
16489 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16490 		tp->fw_needed = FIRMWARE_TG3;
16491 
16492 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16493 		tp->fw_needed = FIRMWARE_TG357766;
16494 
16495 	tp->irq_max = 1;
16496 
16497 	if (tg3_flag(tp, 5750_PLUS)) {
16498 		tg3_flag_set(tp, SUPPORT_MSI);
16499 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16500 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16501 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16502 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16503 		     tp->pdev_peer == tp->pdev))
16504 			tg3_flag_clear(tp, SUPPORT_MSI);
16505 
16506 		if (tg3_flag(tp, 5755_PLUS) ||
16507 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16508 			tg3_flag_set(tp, 1SHOT_MSI);
16509 		}
16510 
16511 		if (tg3_flag(tp, 57765_PLUS)) {
16512 			tg3_flag_set(tp, SUPPORT_MSIX);
16513 			tp->irq_max = TG3_IRQ_MAX_VECS;
16514 		}
16515 	}
16516 
16517 	tp->txq_max = 1;
16518 	tp->rxq_max = 1;
16519 	if (tp->irq_max > 1) {
16520 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16521 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16522 
16523 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16524 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16525 			tp->txq_max = tp->irq_max - 1;
16526 	}
16527 
16528 	if (tg3_flag(tp, 5755_PLUS) ||
16529 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16530 		tg3_flag_set(tp, SHORT_DMA_BUG);
16531 
16532 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16533 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16534 
16535 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16536 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16537 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16538 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16539 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16540 
16541 	if (tg3_flag(tp, 57765_PLUS) &&
16542 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16543 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16544 
16545 	if (!tg3_flag(tp, 5705_PLUS) ||
16546 	    tg3_flag(tp, 5780_CLASS) ||
16547 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16548 		tg3_flag_set(tp, JUMBO_CAPABLE);
16549 
16550 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16551 			      &pci_state_reg);
16552 
16553 	if (pci_is_pcie(tp->pdev)) {
16554 		u16 lnkctl;
16555 
16556 		tg3_flag_set(tp, PCI_EXPRESS);
16557 
16558 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16559 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16560 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16561 				tg3_flag_clear(tp, HW_TSO_2);
16562 				tg3_flag_clear(tp, TSO_CAPABLE);
16563 			}
16564 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16565 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16566 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16567 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16568 				tg3_flag_set(tp, CLKREQ_BUG);
16569 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16570 			tg3_flag_set(tp, L1PLLPD_EN);
16571 		}
16572 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16573 		/* BCM5785 devices are effectively PCIe devices, and should
16574 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16575 		 * section.
16576 		 */
16577 		tg3_flag_set(tp, PCI_EXPRESS);
16578 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16579 		   tg3_flag(tp, 5780_CLASS)) {
16580 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16581 		if (!tp->pcix_cap) {
16582 			dev_err(&tp->pdev->dev,
16583 				"Cannot find PCI-X capability, aborting\n");
16584 			return -EIO;
16585 		}
16586 
16587 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16588 			tg3_flag_set(tp, PCIX_MODE);
16589 	}
16590 
16591 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16592 	 * reordering to the mailbox registers done by the host
16593 	 * controller can cause major troubles.  We read back from
16594 	 * every mailbox register write to force the writes to be
16595 	 * posted to the chip in order.
16596 	 */
16597 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16598 	    !tg3_flag(tp, PCI_EXPRESS))
16599 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16600 
16601 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16602 			     &tp->pci_cacheline_sz);
16603 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16604 			     &tp->pci_lat_timer);
16605 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16606 	    tp->pci_lat_timer < 64) {
16607 		tp->pci_lat_timer = 64;
16608 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16609 				      tp->pci_lat_timer);
16610 	}
16611 
16612 	/* Important! -- It is critical that the PCI-X hw workaround
16613 	 * situation is decided before the first MMIO register access.
16614 	 */
16615 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16616 		/* 5700 BX chips need to have their TX producer index
16617 		 * mailboxes written twice to workaround a bug.
16618 		 */
16619 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16620 
16621 		/* If we are in PCI-X mode, enable register write workaround.
16622 		 *
16623 		 * The workaround is to use indirect register accesses
16624 		 * for all chip writes not to mailbox registers.
16625 		 */
16626 		if (tg3_flag(tp, PCIX_MODE)) {
16627 			u32 pm_reg;
16628 
16629 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16630 
16631 			/* The chip can have it's power management PCI config
16632 			 * space registers clobbered due to this bug.
16633 			 * So explicitly force the chip into D0 here.
16634 			 */
16635 			pci_read_config_dword(tp->pdev,
16636 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16637 					      &pm_reg);
16638 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16639 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16640 			pci_write_config_dword(tp->pdev,
16641 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16642 					       pm_reg);
16643 
16644 			/* Also, force SERR#/PERR# in PCI command. */
16645 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16646 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16647 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16648 		}
16649 	}
16650 
16651 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16652 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16653 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16654 		tg3_flag_set(tp, PCI_32BIT);
16655 
16656 	/* Chip-specific fixup from Broadcom driver */
16657 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16658 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16659 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16660 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16661 	}
16662 
16663 	/* Default fast path register access methods */
16664 	tp->read32 = tg3_read32;
16665 	tp->write32 = tg3_write32;
16666 	tp->read32_mbox = tg3_read32;
16667 	tp->write32_mbox = tg3_write32;
16668 	tp->write32_tx_mbox = tg3_write32;
16669 	tp->write32_rx_mbox = tg3_write32;
16670 
16671 	/* Various workaround register access methods */
16672 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16673 		tp->write32 = tg3_write_indirect_reg32;
16674 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16675 		 (tg3_flag(tp, PCI_EXPRESS) &&
16676 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16677 		/*
16678 		 * Back to back register writes can cause problems on these
16679 		 * chips, the workaround is to read back all reg writes
16680 		 * except those to mailbox regs.
16681 		 *
16682 		 * See tg3_write_indirect_reg32().
16683 		 */
16684 		tp->write32 = tg3_write_flush_reg32;
16685 	}
16686 
16687 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16688 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16689 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16690 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16691 	}
16692 
16693 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16694 		tp->read32 = tg3_read_indirect_reg32;
16695 		tp->write32 = tg3_write_indirect_reg32;
16696 		tp->read32_mbox = tg3_read_indirect_mbox;
16697 		tp->write32_mbox = tg3_write_indirect_mbox;
16698 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16699 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16700 
16701 		iounmap(tp->regs);
16702 		tp->regs = NULL;
16703 
16704 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16705 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16706 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16707 	}
16708 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16709 		tp->read32_mbox = tg3_read32_mbox_5906;
16710 		tp->write32_mbox = tg3_write32_mbox_5906;
16711 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16712 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16713 	}
16714 
16715 	if (tp->write32 == tg3_write_indirect_reg32 ||
16716 	    (tg3_flag(tp, PCIX_MODE) &&
16717 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16718 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16719 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16720 
16721 	/* The memory arbiter has to be enabled in order for SRAM accesses
16722 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16723 	 * sure it is enabled, but other entities such as system netboot
16724 	 * code might disable it.
16725 	 */
16726 	val = tr32(MEMARB_MODE);
16727 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16728 
16729 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16730 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16731 	    tg3_flag(tp, 5780_CLASS)) {
16732 		if (tg3_flag(tp, PCIX_MODE)) {
16733 			pci_read_config_dword(tp->pdev,
16734 					      tp->pcix_cap + PCI_X_STATUS,
16735 					      &val);
16736 			tp->pci_fn = val & 0x7;
16737 		}
16738 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16739 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16740 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16741 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16742 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16743 			val = tr32(TG3_CPMU_STATUS);
16744 
16745 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16746 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16747 		else
16748 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16749 				     TG3_CPMU_STATUS_FSHFT_5719;
16750 	}
16751 
16752 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16753 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16754 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16755 	}
16756 
16757 	/* Get eeprom hw config before calling tg3_set_power_state().
16758 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16759 	 * determined before calling tg3_set_power_state() so that
16760 	 * we know whether or not to switch out of Vaux power.
16761 	 * When the flag is set, it means that GPIO1 is used for eeprom
16762 	 * write protect and also implies that it is a LOM where GPIOs
16763 	 * are not used to switch power.
16764 	 */
16765 	tg3_get_eeprom_hw_cfg(tp);
16766 
16767 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16768 		tg3_flag_clear(tp, TSO_CAPABLE);
16769 		tg3_flag_clear(tp, TSO_BUG);
16770 		tp->fw_needed = NULL;
16771 	}
16772 
16773 	if (tg3_flag(tp, ENABLE_APE)) {
16774 		/* Allow reads and writes to the
16775 		 * APE register and memory space.
16776 		 */
16777 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16778 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16779 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16780 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16781 				       pci_state_reg);
16782 
16783 		tg3_ape_lock_init(tp);
16784 		tp->ape_hb_interval =
16785 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16786 	}
16787 
16788 	/* Set up tp->grc_local_ctrl before calling
16789 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16790 	 * will bring 5700's external PHY out of reset.
16791 	 * It is also used as eeprom write protect on LOMs.
16792 	 */
16793 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16794 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16795 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16796 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16797 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16798 	/* Unused GPIO3 must be driven as output on 5752 because there
16799 	 * are no pull-up resistors on unused GPIO pins.
16800 	 */
16801 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16802 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16803 
16804 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16805 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16806 	    tg3_flag(tp, 57765_CLASS))
16807 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16808 
16809 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16810 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16811 		/* Turn off the debug UART. */
16812 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16813 		if (tg3_flag(tp, IS_NIC))
16814 			/* Keep VMain power. */
16815 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16816 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16817 	}
16818 
16819 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16820 		tp->grc_local_ctrl |=
16821 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16822 
16823 	/* Switch out of Vaux if it is a NIC */
16824 	tg3_pwrsrc_switch_to_vmain(tp);
16825 
16826 	/* Derive initial jumbo mode from MTU assigned in
16827 	 * ether_setup() via the alloc_etherdev() call
16828 	 */
16829 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16830 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16831 
16832 	/* Determine WakeOnLan speed to use. */
16833 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16834 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16835 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16836 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16837 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16838 	} else {
16839 		tg3_flag_set(tp, WOL_SPEED_100MB);
16840 	}
16841 
16842 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16843 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16844 
16845 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16846 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16847 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16848 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16849 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16850 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16851 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16852 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16853 
16854 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16855 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16856 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16857 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16858 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16859 
16860 	if (tg3_flag(tp, 5705_PLUS) &&
16861 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16862 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16863 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16864 	    !tg3_flag(tp, 57765_PLUS)) {
16865 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16866 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16867 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16868 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16869 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16870 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16871 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16872 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16873 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16874 		} else
16875 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16876 	}
16877 
16878 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16879 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16880 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16881 		if (tp->phy_otp == 0)
16882 			tp->phy_otp = TG3_OTP_DEFAULT;
16883 	}
16884 
16885 	if (tg3_flag(tp, CPMU_PRESENT))
16886 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16887 	else
16888 		tp->mi_mode = MAC_MI_MODE_BASE;
16889 
16890 	tp->coalesce_mode = 0;
16891 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16892 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16893 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16894 
16895 	/* Set these bits to enable statistics workaround. */
16896 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16897 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16898 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16899 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16900 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16901 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16902 	}
16903 
16904 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16905 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16906 		tg3_flag_set(tp, USE_PHYLIB);
16907 
16908 	err = tg3_mdio_init(tp);
16909 	if (err)
16910 		return err;
16911 
16912 	/* Initialize data/descriptor byte/word swapping. */
16913 	val = tr32(GRC_MODE);
16914 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16915 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16916 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16917 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16918 			GRC_MODE_B2HRX_ENABLE |
16919 			GRC_MODE_HTX2B_ENABLE |
16920 			GRC_MODE_HOST_STACKUP);
16921 	else
16922 		val &= GRC_MODE_HOST_STACKUP;
16923 
16924 	tw32(GRC_MODE, val | tp->grc_mode);
16925 
16926 	tg3_switch_clocks(tp);
16927 
16928 	/* Clear this out for sanity. */
16929 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16930 
16931 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16932 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16933 
16934 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16935 			      &pci_state_reg);
16936 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16937 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16938 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16939 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16940 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16941 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16942 			void __iomem *sram_base;
16943 
16944 			/* Write some dummy words into the SRAM status block
16945 			 * area, see if it reads back correctly.  If the return
16946 			 * value is bad, force enable the PCIX workaround.
16947 			 */
16948 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16949 
16950 			writel(0x00000000, sram_base);
16951 			writel(0x00000000, sram_base + 4);
16952 			writel(0xffffffff, sram_base + 4);
16953 			if (readl(sram_base) != 0x00000000)
16954 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16955 		}
16956 	}
16957 
16958 	udelay(50);
16959 	tg3_nvram_init(tp);
16960 
16961 	/* If the device has an NVRAM, no need to load patch firmware */
16962 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16963 	    !tg3_flag(tp, NO_NVRAM))
16964 		tp->fw_needed = NULL;
16965 
16966 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16967 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16968 
16969 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16970 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16971 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16972 		tg3_flag_set(tp, IS_5788);
16973 
16974 	if (!tg3_flag(tp, IS_5788) &&
16975 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16976 		tg3_flag_set(tp, TAGGED_STATUS);
16977 	if (tg3_flag(tp, TAGGED_STATUS)) {
16978 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16979 				      HOSTCC_MODE_CLRTICK_TXBD);
16980 
16981 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16982 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16983 				       tp->misc_host_ctrl);
16984 	}
16985 
16986 	/* Preserve the APE MAC_MODE bits */
16987 	if (tg3_flag(tp, ENABLE_APE))
16988 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16989 	else
16990 		tp->mac_mode = 0;
16991 
16992 	if (tg3_10_100_only_device(tp, ent))
16993 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16994 
16995 	err = tg3_phy_probe(tp);
16996 	if (err) {
16997 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16998 		/* ... but do not return immediately ... */
16999 		tg3_mdio_fini(tp);
17000 	}
17001 
17002 	tg3_read_vpd(tp);
17003 	tg3_read_fw_ver(tp);
17004 
17005 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
17006 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
17007 	} else {
17008 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
17009 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17010 		else
17011 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
17012 	}
17013 
17014 	/* 5700 {AX,BX} chips have a broken status block link
17015 	 * change bit implementation, so we must use the
17016 	 * status register in those cases.
17017 	 */
17018 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
17019 		tg3_flag_set(tp, USE_LINKCHG_REG);
17020 	else
17021 		tg3_flag_clear(tp, USE_LINKCHG_REG);
17022 
17023 	/* The led_ctrl is set during tg3_phy_probe, here we might
17024 	 * have to force the link status polling mechanism based
17025 	 * upon subsystem IDs.
17026 	 */
17027 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
17028 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
17029 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
17030 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17031 		tg3_flag_set(tp, USE_LINKCHG_REG);
17032 	}
17033 
17034 	/* For all SERDES we poll the MAC status register. */
17035 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
17036 		tg3_flag_set(tp, POLL_SERDES);
17037 	else
17038 		tg3_flag_clear(tp, POLL_SERDES);
17039 
17040 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17041 		tg3_flag_set(tp, POLL_CPMU_LINK);
17042 
17043 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17044 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17045 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17046 	    tg3_flag(tp, PCIX_MODE)) {
17047 		tp->rx_offset = NET_SKB_PAD;
17048 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17049 		tp->rx_copy_thresh = ~(u16)0;
17050 #endif
17051 	}
17052 
17053 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17054 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17055 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17056 
17057 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17058 
17059 	/* Increment the rx prod index on the rx std ring by at most
17060 	 * 8 for these chips to workaround hw errata.
17061 	 */
17062 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17063 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
17064 	    tg3_asic_rev(tp) == ASIC_REV_5755)
17065 		tp->rx_std_max_post = 8;
17066 
17067 	if (tg3_flag(tp, ASPM_WORKAROUND))
17068 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17069 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
17070 
17071 	return err;
17072 }
17073 
17074 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17075 {
17076 	u32 hi, lo, mac_offset;
17077 	int addr_ok = 0;
17078 	int err;
17079 
17080 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17081 		return 0;
17082 
17083 	if (tg3_flag(tp, IS_SSB_CORE)) {
17084 		err = ssb_gige_get_macaddr(tp->pdev, addr);
17085 		if (!err && is_valid_ether_addr(addr))
17086 			return 0;
17087 	}
17088 
17089 	mac_offset = 0x7c;
17090 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17091 	    tg3_flag(tp, 5780_CLASS)) {
17092 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17093 			mac_offset = 0xcc;
17094 		if (tg3_nvram_lock(tp))
17095 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17096 		else
17097 			tg3_nvram_unlock(tp);
17098 	} else if (tg3_flag(tp, 5717_PLUS)) {
17099 		if (tp->pci_fn & 1)
17100 			mac_offset = 0xcc;
17101 		if (tp->pci_fn > 1)
17102 			mac_offset += 0x18c;
17103 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17104 		mac_offset = 0x10;
17105 
17106 	/* First try to get it from MAC address mailbox. */
17107 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17108 	if ((hi >> 16) == 0x484b) {
17109 		addr[0] = (hi >>  8) & 0xff;
17110 		addr[1] = (hi >>  0) & 0xff;
17111 
17112 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17113 		addr[2] = (lo >> 24) & 0xff;
17114 		addr[3] = (lo >> 16) & 0xff;
17115 		addr[4] = (lo >>  8) & 0xff;
17116 		addr[5] = (lo >>  0) & 0xff;
17117 
17118 		/* Some old bootcode may report a 0 MAC address in SRAM */
17119 		addr_ok = is_valid_ether_addr(addr);
17120 	}
17121 	if (!addr_ok) {
17122 		__be32 be_hi, be_lo;
17123 
17124 		/* Next, try NVRAM. */
17125 		if (!tg3_flag(tp, NO_NVRAM) &&
17126 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17127 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17128 			memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17129 			memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17130 		}
17131 		/* Finally just fetch it out of the MAC control regs. */
17132 		else {
17133 			hi = tr32(MAC_ADDR_0_HIGH);
17134 			lo = tr32(MAC_ADDR_0_LOW);
17135 
17136 			addr[5] = lo & 0xff;
17137 			addr[4] = (lo >> 8) & 0xff;
17138 			addr[3] = (lo >> 16) & 0xff;
17139 			addr[2] = (lo >> 24) & 0xff;
17140 			addr[1] = hi & 0xff;
17141 			addr[0] = (hi >> 8) & 0xff;
17142 		}
17143 	}
17144 
17145 	if (!is_valid_ether_addr(addr))
17146 		return -EINVAL;
17147 	return 0;
17148 }
17149 
17150 #define BOUNDARY_SINGLE_CACHELINE	1
17151 #define BOUNDARY_MULTI_CACHELINE	2
17152 
17153 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17154 {
17155 	int cacheline_size;
17156 	u8 byte;
17157 	int goal;
17158 
17159 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17160 	if (byte == 0)
17161 		cacheline_size = 1024;
17162 	else
17163 		cacheline_size = (int) byte * 4;
17164 
17165 	/* On 5703 and later chips, the boundary bits have no
17166 	 * effect.
17167 	 */
17168 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17169 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17170 	    !tg3_flag(tp, PCI_EXPRESS))
17171 		goto out;
17172 
17173 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17174 	goal = BOUNDARY_MULTI_CACHELINE;
17175 #else
17176 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17177 	goal = BOUNDARY_SINGLE_CACHELINE;
17178 #else
17179 	goal = 0;
17180 #endif
17181 #endif
17182 
17183 	if (tg3_flag(tp, 57765_PLUS)) {
17184 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17185 		goto out;
17186 	}
17187 
17188 	if (!goal)
17189 		goto out;
17190 
17191 	/* PCI controllers on most RISC systems tend to disconnect
17192 	 * when a device tries to burst across a cache-line boundary.
17193 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17194 	 *
17195 	 * Unfortunately, for PCI-E there are only limited
17196 	 * write-side controls for this, and thus for reads
17197 	 * we will still get the disconnects.  We'll also waste
17198 	 * these PCI cycles for both read and write for chips
17199 	 * other than 5700 and 5701 which do not implement the
17200 	 * boundary bits.
17201 	 */
17202 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17203 		switch (cacheline_size) {
17204 		case 16:
17205 		case 32:
17206 		case 64:
17207 		case 128:
17208 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17209 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17210 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17211 			} else {
17212 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17213 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17214 			}
17215 			break;
17216 
17217 		case 256:
17218 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17219 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17220 			break;
17221 
17222 		default:
17223 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17224 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17225 			break;
17226 		}
17227 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17228 		switch (cacheline_size) {
17229 		case 16:
17230 		case 32:
17231 		case 64:
17232 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17233 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17234 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17235 				break;
17236 			}
17237 			fallthrough;
17238 		case 128:
17239 		default:
17240 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17241 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17242 			break;
17243 		}
17244 	} else {
17245 		switch (cacheline_size) {
17246 		case 16:
17247 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17248 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17249 					DMA_RWCTRL_WRITE_BNDRY_16);
17250 				break;
17251 			}
17252 			fallthrough;
17253 		case 32:
17254 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17255 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17256 					DMA_RWCTRL_WRITE_BNDRY_32);
17257 				break;
17258 			}
17259 			fallthrough;
17260 		case 64:
17261 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17262 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17263 					DMA_RWCTRL_WRITE_BNDRY_64);
17264 				break;
17265 			}
17266 			fallthrough;
17267 		case 128:
17268 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17269 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17270 					DMA_RWCTRL_WRITE_BNDRY_128);
17271 				break;
17272 			}
17273 			fallthrough;
17274 		case 256:
17275 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17276 				DMA_RWCTRL_WRITE_BNDRY_256);
17277 			break;
17278 		case 512:
17279 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17280 				DMA_RWCTRL_WRITE_BNDRY_512);
17281 			break;
17282 		case 1024:
17283 		default:
17284 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17285 				DMA_RWCTRL_WRITE_BNDRY_1024);
17286 			break;
17287 		}
17288 	}
17289 
17290 out:
17291 	return val;
17292 }
17293 
17294 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17295 			   int size, bool to_device)
17296 {
17297 	struct tg3_internal_buffer_desc test_desc;
17298 	u32 sram_dma_descs;
17299 	int i, ret;
17300 
17301 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17302 
17303 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17304 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17305 	tw32(RDMAC_STATUS, 0);
17306 	tw32(WDMAC_STATUS, 0);
17307 
17308 	tw32(BUFMGR_MODE, 0);
17309 	tw32(FTQ_RESET, 0);
17310 
17311 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17312 	test_desc.addr_lo = buf_dma & 0xffffffff;
17313 	test_desc.nic_mbuf = 0x00002100;
17314 	test_desc.len = size;
17315 
17316 	/*
17317 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17318 	 * the *second* time the tg3 driver was getting loaded after an
17319 	 * initial scan.
17320 	 *
17321 	 * Broadcom tells me:
17322 	 *   ...the DMA engine is connected to the GRC block and a DMA
17323 	 *   reset may affect the GRC block in some unpredictable way...
17324 	 *   The behavior of resets to individual blocks has not been tested.
17325 	 *
17326 	 * Broadcom noted the GRC reset will also reset all sub-components.
17327 	 */
17328 	if (to_device) {
17329 		test_desc.cqid_sqid = (13 << 8) | 2;
17330 
17331 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17332 		udelay(40);
17333 	} else {
17334 		test_desc.cqid_sqid = (16 << 8) | 7;
17335 
17336 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17337 		udelay(40);
17338 	}
17339 	test_desc.flags = 0x00000005;
17340 
17341 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17342 		u32 val;
17343 
17344 		val = *(((u32 *)&test_desc) + i);
17345 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17346 				       sram_dma_descs + (i * sizeof(u32)));
17347 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17348 	}
17349 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17350 
17351 	if (to_device)
17352 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17353 	else
17354 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17355 
17356 	ret = -ENODEV;
17357 	for (i = 0; i < 40; i++) {
17358 		u32 val;
17359 
17360 		if (to_device)
17361 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17362 		else
17363 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17364 		if ((val & 0xffff) == sram_dma_descs) {
17365 			ret = 0;
17366 			break;
17367 		}
17368 
17369 		udelay(100);
17370 	}
17371 
17372 	return ret;
17373 }
17374 
17375 #define TEST_BUFFER_SIZE	0x2000
17376 
17377 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17378 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17379 	{ },
17380 };
17381 
17382 static int tg3_test_dma(struct tg3 *tp)
17383 {
17384 	dma_addr_t buf_dma;
17385 	u32 *buf, saved_dma_rwctrl;
17386 	int ret = 0;
17387 
17388 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17389 				 &buf_dma, GFP_KERNEL);
17390 	if (!buf) {
17391 		ret = -ENOMEM;
17392 		goto out_nofree;
17393 	}
17394 
17395 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17396 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17397 
17398 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17399 
17400 	if (tg3_flag(tp, 57765_PLUS))
17401 		goto out;
17402 
17403 	if (tg3_flag(tp, PCI_EXPRESS)) {
17404 		/* DMA read watermark not used on PCIE */
17405 		tp->dma_rwctrl |= 0x00180000;
17406 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17407 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17408 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17409 			tp->dma_rwctrl |= 0x003f0000;
17410 		else
17411 			tp->dma_rwctrl |= 0x003f000f;
17412 	} else {
17413 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17414 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17415 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17416 			u32 read_water = 0x7;
17417 
17418 			/* If the 5704 is behind the EPB bridge, we can
17419 			 * do the less restrictive ONE_DMA workaround for
17420 			 * better performance.
17421 			 */
17422 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17423 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17424 				tp->dma_rwctrl |= 0x8000;
17425 			else if (ccval == 0x6 || ccval == 0x7)
17426 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17427 
17428 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17429 				read_water = 4;
17430 			/* Set bit 23 to enable PCIX hw bug fix */
17431 			tp->dma_rwctrl |=
17432 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17433 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17434 				(1 << 23);
17435 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17436 			/* 5780 always in PCIX mode */
17437 			tp->dma_rwctrl |= 0x00144000;
17438 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17439 			/* 5714 always in PCIX mode */
17440 			tp->dma_rwctrl |= 0x00148000;
17441 		} else {
17442 			tp->dma_rwctrl |= 0x001b000f;
17443 		}
17444 	}
17445 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17446 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17447 
17448 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17449 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17450 		tp->dma_rwctrl &= 0xfffffff0;
17451 
17452 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17453 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17454 		/* Remove this if it causes problems for some boards. */
17455 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17456 
17457 		/* On 5700/5701 chips, we need to set this bit.
17458 		 * Otherwise the chip will issue cacheline transactions
17459 		 * to streamable DMA memory with not all the byte
17460 		 * enables turned on.  This is an error on several
17461 		 * RISC PCI controllers, in particular sparc64.
17462 		 *
17463 		 * On 5703/5704 chips, this bit has been reassigned
17464 		 * a different meaning.  In particular, it is used
17465 		 * on those chips to enable a PCI-X workaround.
17466 		 */
17467 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17468 	}
17469 
17470 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17471 
17472 
17473 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17474 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17475 		goto out;
17476 
17477 	/* It is best to perform DMA test with maximum write burst size
17478 	 * to expose the 5700/5701 write DMA bug.
17479 	 */
17480 	saved_dma_rwctrl = tp->dma_rwctrl;
17481 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17482 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17483 
17484 	while (1) {
17485 		u32 *p = buf, i;
17486 
17487 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17488 			p[i] = i;
17489 
17490 		/* Send the buffer to the chip. */
17491 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17492 		if (ret) {
17493 			dev_err(&tp->pdev->dev,
17494 				"%s: Buffer write failed. err = %d\n",
17495 				__func__, ret);
17496 			break;
17497 		}
17498 
17499 		/* Now read it back. */
17500 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17501 		if (ret) {
17502 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17503 				"err = %d\n", __func__, ret);
17504 			break;
17505 		}
17506 
17507 		/* Verify it. */
17508 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17509 			if (p[i] == i)
17510 				continue;
17511 
17512 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17513 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17514 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17515 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17516 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17517 				break;
17518 			} else {
17519 				dev_err(&tp->pdev->dev,
17520 					"%s: Buffer corrupted on read back! "
17521 					"(%d != %d)\n", __func__, p[i], i);
17522 				ret = -ENODEV;
17523 				goto out;
17524 			}
17525 		}
17526 
17527 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17528 			/* Success. */
17529 			ret = 0;
17530 			break;
17531 		}
17532 	}
17533 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17534 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17535 		/* DMA test passed without adjusting DMA boundary,
17536 		 * now look for chipsets that are known to expose the
17537 		 * DMA bug without failing the test.
17538 		 */
17539 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17540 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17541 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17542 		} else {
17543 			/* Safe to use the calculated DMA boundary. */
17544 			tp->dma_rwctrl = saved_dma_rwctrl;
17545 		}
17546 
17547 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17548 	}
17549 
17550 out:
17551 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17552 out_nofree:
17553 	return ret;
17554 }
17555 
17556 static void tg3_init_bufmgr_config(struct tg3 *tp)
17557 {
17558 	if (tg3_flag(tp, 57765_PLUS)) {
17559 		tp->bufmgr_config.mbuf_read_dma_low_water =
17560 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17561 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17562 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17563 		tp->bufmgr_config.mbuf_high_water =
17564 			DEFAULT_MB_HIGH_WATER_57765;
17565 
17566 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17567 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17568 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17569 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17570 		tp->bufmgr_config.mbuf_high_water_jumbo =
17571 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17572 	} else if (tg3_flag(tp, 5705_PLUS)) {
17573 		tp->bufmgr_config.mbuf_read_dma_low_water =
17574 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17575 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17576 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17577 		tp->bufmgr_config.mbuf_high_water =
17578 			DEFAULT_MB_HIGH_WATER_5705;
17579 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17580 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17581 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17582 			tp->bufmgr_config.mbuf_high_water =
17583 				DEFAULT_MB_HIGH_WATER_5906;
17584 		}
17585 
17586 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17587 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17588 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17589 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17590 		tp->bufmgr_config.mbuf_high_water_jumbo =
17591 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17592 	} else {
17593 		tp->bufmgr_config.mbuf_read_dma_low_water =
17594 			DEFAULT_MB_RDMA_LOW_WATER;
17595 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17596 			DEFAULT_MB_MACRX_LOW_WATER;
17597 		tp->bufmgr_config.mbuf_high_water =
17598 			DEFAULT_MB_HIGH_WATER;
17599 
17600 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17601 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17602 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17603 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17604 		tp->bufmgr_config.mbuf_high_water_jumbo =
17605 			DEFAULT_MB_HIGH_WATER_JUMBO;
17606 	}
17607 
17608 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17609 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17610 }
17611 
17612 static char *tg3_phy_string(struct tg3 *tp)
17613 {
17614 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17615 	case TG3_PHY_ID_BCM5400:	return "5400";
17616 	case TG3_PHY_ID_BCM5401:	return "5401";
17617 	case TG3_PHY_ID_BCM5411:	return "5411";
17618 	case TG3_PHY_ID_BCM5701:	return "5701";
17619 	case TG3_PHY_ID_BCM5703:	return "5703";
17620 	case TG3_PHY_ID_BCM5704:	return "5704";
17621 	case TG3_PHY_ID_BCM5705:	return "5705";
17622 	case TG3_PHY_ID_BCM5750:	return "5750";
17623 	case TG3_PHY_ID_BCM5752:	return "5752";
17624 	case TG3_PHY_ID_BCM5714:	return "5714";
17625 	case TG3_PHY_ID_BCM5780:	return "5780";
17626 	case TG3_PHY_ID_BCM5755:	return "5755";
17627 	case TG3_PHY_ID_BCM5787:	return "5787";
17628 	case TG3_PHY_ID_BCM5784:	return "5784";
17629 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17630 	case TG3_PHY_ID_BCM5906:	return "5906";
17631 	case TG3_PHY_ID_BCM5761:	return "5761";
17632 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17633 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17634 	case TG3_PHY_ID_BCM57765:	return "57765";
17635 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17636 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17637 	case TG3_PHY_ID_BCM5762:	return "5762C";
17638 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17639 	case 0:			return "serdes";
17640 	default:		return "unknown";
17641 	}
17642 }
17643 
17644 static char *tg3_bus_string(struct tg3 *tp, char *str)
17645 {
17646 	if (tg3_flag(tp, PCI_EXPRESS)) {
17647 		strcpy(str, "PCI Express");
17648 		return str;
17649 	} else if (tg3_flag(tp, PCIX_MODE)) {
17650 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17651 
17652 		strcpy(str, "PCIX:");
17653 
17654 		if ((clock_ctrl == 7) ||
17655 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17656 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17657 			strcat(str, "133MHz");
17658 		else if (clock_ctrl == 0)
17659 			strcat(str, "33MHz");
17660 		else if (clock_ctrl == 2)
17661 			strcat(str, "50MHz");
17662 		else if (clock_ctrl == 4)
17663 			strcat(str, "66MHz");
17664 		else if (clock_ctrl == 6)
17665 			strcat(str, "100MHz");
17666 	} else {
17667 		strcpy(str, "PCI:");
17668 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17669 			strcat(str, "66MHz");
17670 		else
17671 			strcat(str, "33MHz");
17672 	}
17673 	if (tg3_flag(tp, PCI_32BIT))
17674 		strcat(str, ":32-bit");
17675 	else
17676 		strcat(str, ":64-bit");
17677 	return str;
17678 }
17679 
17680 static void tg3_init_coal(struct tg3 *tp)
17681 {
17682 	struct ethtool_coalesce *ec = &tp->coal;
17683 
17684 	memset(ec, 0, sizeof(*ec));
17685 	ec->cmd = ETHTOOL_GCOALESCE;
17686 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17687 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17688 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17689 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17690 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17691 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17692 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17693 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17694 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17695 
17696 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17697 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17698 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17699 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17700 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17701 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17702 	}
17703 
17704 	if (tg3_flag(tp, 5705_PLUS)) {
17705 		ec->rx_coalesce_usecs_irq = 0;
17706 		ec->tx_coalesce_usecs_irq = 0;
17707 		ec->stats_block_coalesce_usecs = 0;
17708 	}
17709 }
17710 
17711 static int tg3_init_one(struct pci_dev *pdev,
17712 				  const struct pci_device_id *ent)
17713 {
17714 	struct net_device *dev;
17715 	struct tg3 *tp;
17716 	int i, err;
17717 	u32 sndmbx, rcvmbx, intmbx;
17718 	char str[40];
17719 	u64 dma_mask, persist_dma_mask;
17720 	netdev_features_t features = 0;
17721 	u8 addr[ETH_ALEN] __aligned(2);
17722 
17723 	err = pci_enable_device(pdev);
17724 	if (err) {
17725 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17726 		return err;
17727 	}
17728 
17729 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17730 	if (err) {
17731 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17732 		goto err_out_disable_pdev;
17733 	}
17734 
17735 	pci_set_master(pdev);
17736 
17737 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17738 	if (!dev) {
17739 		err = -ENOMEM;
17740 		goto err_out_free_res;
17741 	}
17742 
17743 	SET_NETDEV_DEV(dev, &pdev->dev);
17744 
17745 	tp = netdev_priv(dev);
17746 	tp->pdev = pdev;
17747 	tp->dev = dev;
17748 	tp->rx_mode = TG3_DEF_RX_MODE;
17749 	tp->tx_mode = TG3_DEF_TX_MODE;
17750 	tp->irq_sync = 1;
17751 	tp->pcierr_recovery = false;
17752 
17753 	if (tg3_debug > 0)
17754 		tp->msg_enable = tg3_debug;
17755 	else
17756 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17757 
17758 	if (pdev_is_ssb_gige_core(pdev)) {
17759 		tg3_flag_set(tp, IS_SSB_CORE);
17760 		if (ssb_gige_must_flush_posted_writes(pdev))
17761 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17762 		if (ssb_gige_one_dma_at_once(pdev))
17763 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17764 		if (ssb_gige_have_roboswitch(pdev)) {
17765 			tg3_flag_set(tp, USE_PHYLIB);
17766 			tg3_flag_set(tp, ROBOSWITCH);
17767 		}
17768 		if (ssb_gige_is_rgmii(pdev))
17769 			tg3_flag_set(tp, RGMII_MODE);
17770 	}
17771 
17772 	/* The word/byte swap controls here control register access byte
17773 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17774 	 * setting below.
17775 	 */
17776 	tp->misc_host_ctrl =
17777 		MISC_HOST_CTRL_MASK_PCI_INT |
17778 		MISC_HOST_CTRL_WORD_SWAP |
17779 		MISC_HOST_CTRL_INDIR_ACCESS |
17780 		MISC_HOST_CTRL_PCISTATE_RW;
17781 
17782 	/* The NONFRM (non-frame) byte/word swap controls take effect
17783 	 * on descriptor entries, anything which isn't packet data.
17784 	 *
17785 	 * The StrongARM chips on the board (one for tx, one for rx)
17786 	 * are running in big-endian mode.
17787 	 */
17788 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17789 			GRC_MODE_WSWAP_NONFRM_DATA);
17790 #ifdef __BIG_ENDIAN
17791 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17792 #endif
17793 	spin_lock_init(&tp->lock);
17794 	spin_lock_init(&tp->indirect_lock);
17795 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17796 
17797 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17798 	if (!tp->regs) {
17799 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17800 		err = -ENOMEM;
17801 		goto err_out_free_dev;
17802 	}
17803 
17804 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17805 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17806 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17807 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17808 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17809 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17810 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17811 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17812 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17813 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17814 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17815 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17816 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17817 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17818 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17819 		tg3_flag_set(tp, ENABLE_APE);
17820 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17821 		if (!tp->aperegs) {
17822 			dev_err(&pdev->dev,
17823 				"Cannot map APE registers, aborting\n");
17824 			err = -ENOMEM;
17825 			goto err_out_iounmap;
17826 		}
17827 	}
17828 
17829 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17830 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17831 
17832 	dev->ethtool_ops = &tg3_ethtool_ops;
17833 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17834 	dev->netdev_ops = &tg3_netdev_ops;
17835 	dev->irq = pdev->irq;
17836 
17837 	err = tg3_get_invariants(tp, ent);
17838 	if (err) {
17839 		dev_err(&pdev->dev,
17840 			"Problem fetching invariants of chip, aborting\n");
17841 		goto err_out_apeunmap;
17842 	}
17843 
17844 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17845 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17846 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17847 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17848 	 * do DMA address check in __tg3_start_xmit().
17849 	 */
17850 	if (tg3_flag(tp, IS_5788))
17851 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17852 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17853 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17854 #ifdef CONFIG_HIGHMEM
17855 		dma_mask = DMA_BIT_MASK(64);
17856 #endif
17857 	} else
17858 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17859 
17860 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
17861 		persist_dma_mask = DMA_BIT_MASK(31);
17862 
17863 	/* Configure DMA attributes. */
17864 	if (dma_mask > DMA_BIT_MASK(32)) {
17865 		err = dma_set_mask(&pdev->dev, dma_mask);
17866 		if (!err) {
17867 			features |= NETIF_F_HIGHDMA;
17868 			err = dma_set_coherent_mask(&pdev->dev,
17869 						    persist_dma_mask);
17870 			if (err < 0) {
17871 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17872 					"DMA for consistent allocations\n");
17873 				goto err_out_apeunmap;
17874 			}
17875 		}
17876 	}
17877 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17878 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17879 		if (err) {
17880 			dev_err(&pdev->dev,
17881 				"No usable DMA configuration, aborting\n");
17882 			goto err_out_apeunmap;
17883 		}
17884 	}
17885 
17886 	tg3_init_bufmgr_config(tp);
17887 
17888 	/* 5700 B0 chips do not support checksumming correctly due
17889 	 * to hardware bugs.
17890 	 */
17891 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17892 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17893 
17894 		if (tg3_flag(tp, 5755_PLUS))
17895 			features |= NETIF_F_IPV6_CSUM;
17896 	}
17897 
17898 	/* TSO is on by default on chips that support hardware TSO.
17899 	 * Firmware TSO on older chips gives lower performance, so it
17900 	 * is off by default, but can be enabled using ethtool.
17901 	 */
17902 	if ((tg3_flag(tp, HW_TSO_1) ||
17903 	     tg3_flag(tp, HW_TSO_2) ||
17904 	     tg3_flag(tp, HW_TSO_3)) &&
17905 	    (features & NETIF_F_IP_CSUM))
17906 		features |= NETIF_F_TSO;
17907 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17908 		if (features & NETIF_F_IPV6_CSUM)
17909 			features |= NETIF_F_TSO6;
17910 		if (tg3_flag(tp, HW_TSO_3) ||
17911 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17912 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17913 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17914 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17915 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17916 			features |= NETIF_F_TSO_ECN;
17917 	}
17918 
17919 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17920 			 NETIF_F_HW_VLAN_CTAG_RX;
17921 	dev->vlan_features |= features;
17922 
17923 	/*
17924 	 * Add loopback capability only for a subset of devices that support
17925 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17926 	 * loopback for the remaining devices.
17927 	 */
17928 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17929 	    !tg3_flag(tp, CPMU_PRESENT))
17930 		/* Add the loopback capability */
17931 		features |= NETIF_F_LOOPBACK;
17932 
17933 	dev->hw_features |= features;
17934 	dev->priv_flags |= IFF_UNICAST_FLT;
17935 
17936 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17937 	dev->min_mtu = TG3_MIN_MTU;
17938 	dev->max_mtu = TG3_MAX_MTU(tp);
17939 
17940 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17941 	    !tg3_flag(tp, TSO_CAPABLE) &&
17942 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17943 		tg3_flag_set(tp, MAX_RXPEND_64);
17944 		tp->rx_pending = 63;
17945 	}
17946 
17947 	err = tg3_get_device_address(tp, addr);
17948 	if (err) {
17949 		dev_err(&pdev->dev,
17950 			"Could not obtain valid ethernet address, aborting\n");
17951 		goto err_out_apeunmap;
17952 	}
17953 	eth_hw_addr_set(dev, addr);
17954 
17955 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17956 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17957 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17958 	for (i = 0; i < tp->irq_max; i++) {
17959 		struct tg3_napi *tnapi = &tp->napi[i];
17960 
17961 		tnapi->tp = tp;
17962 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17963 
17964 		tnapi->int_mbox = intmbx;
17965 		intmbx += 0x8;
17966 
17967 		tnapi->consmbox = rcvmbx;
17968 		tnapi->prodmbox = sndmbx;
17969 
17970 		if (i)
17971 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17972 		else
17973 			tnapi->coal_now = HOSTCC_MODE_NOW;
17974 
17975 		if (!tg3_flag(tp, SUPPORT_MSIX))
17976 			break;
17977 
17978 		/*
17979 		 * If we support MSIX, we'll be using RSS.  If we're using
17980 		 * RSS, the first vector only handles link interrupts and the
17981 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17982 		 * mailbox values for the next iteration.  The values we setup
17983 		 * above are still useful for the single vectored mode.
17984 		 */
17985 		if (!i)
17986 			continue;
17987 
17988 		rcvmbx += 0x8;
17989 
17990 		if (sndmbx & 0x4)
17991 			sndmbx -= 0x4;
17992 		else
17993 			sndmbx += 0xc;
17994 	}
17995 
17996 	/*
17997 	 * Reset chip in case UNDI or EFI driver did not shutdown
17998 	 * DMA self test will enable WDMAC and we'll see (spurious)
17999 	 * pending DMA on the PCI bus at that point.
18000 	 */
18001 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
18002 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
18003 		tg3_full_lock(tp, 0);
18004 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
18005 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18006 		tg3_full_unlock(tp);
18007 	}
18008 
18009 	err = tg3_test_dma(tp);
18010 	if (err) {
18011 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
18012 		goto err_out_apeunmap;
18013 	}
18014 
18015 	tg3_init_coal(tp);
18016 
18017 	pci_set_drvdata(pdev, dev);
18018 
18019 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
18020 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
18021 	    tg3_asic_rev(tp) == ASIC_REV_5762)
18022 		tg3_flag_set(tp, PTP_CAPABLE);
18023 
18024 	tg3_timer_init(tp);
18025 
18026 	tg3_carrier_off(tp);
18027 
18028 	err = register_netdev(dev);
18029 	if (err) {
18030 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
18031 		goto err_out_apeunmap;
18032 	}
18033 
18034 	if (tg3_flag(tp, PTP_CAPABLE)) {
18035 		tg3_ptp_init(tp);
18036 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
18037 						   &tp->pdev->dev);
18038 		if (IS_ERR(tp->ptp_clock))
18039 			tp->ptp_clock = NULL;
18040 	}
18041 
18042 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18043 		    tp->board_part_number,
18044 		    tg3_chip_rev_id(tp),
18045 		    tg3_bus_string(tp, str),
18046 		    dev->dev_addr);
18047 
18048 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18049 		char *ethtype;
18050 
18051 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18052 			ethtype = "10/100Base-TX";
18053 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18054 			ethtype = "1000Base-SX";
18055 		else
18056 			ethtype = "10/100/1000Base-T";
18057 
18058 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18059 			    "(WireSpeed[%d], EEE[%d])\n",
18060 			    tg3_phy_string(tp), ethtype,
18061 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18062 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18063 	}
18064 
18065 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18066 		    (dev->features & NETIF_F_RXCSUM) != 0,
18067 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
18068 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18069 		    tg3_flag(tp, ENABLE_ASF) != 0,
18070 		    tg3_flag(tp, TSO_CAPABLE) != 0);
18071 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18072 		    tp->dma_rwctrl,
18073 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18074 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18075 
18076 	pci_save_state(pdev);
18077 
18078 	return 0;
18079 
18080 err_out_apeunmap:
18081 	if (tp->aperegs) {
18082 		iounmap(tp->aperegs);
18083 		tp->aperegs = NULL;
18084 	}
18085 
18086 err_out_iounmap:
18087 	if (tp->regs) {
18088 		iounmap(tp->regs);
18089 		tp->regs = NULL;
18090 	}
18091 
18092 err_out_free_dev:
18093 	free_netdev(dev);
18094 
18095 err_out_free_res:
18096 	pci_release_regions(pdev);
18097 
18098 err_out_disable_pdev:
18099 	if (pci_is_enabled(pdev))
18100 		pci_disable_device(pdev);
18101 	return err;
18102 }
18103 
18104 static void tg3_remove_one(struct pci_dev *pdev)
18105 {
18106 	struct net_device *dev = pci_get_drvdata(pdev);
18107 
18108 	if (dev) {
18109 		struct tg3 *tp = netdev_priv(dev);
18110 
18111 		tg3_ptp_fini(tp);
18112 
18113 		release_firmware(tp->fw);
18114 
18115 		tg3_reset_task_cancel(tp);
18116 
18117 		if (tg3_flag(tp, USE_PHYLIB)) {
18118 			tg3_phy_fini(tp);
18119 			tg3_mdio_fini(tp);
18120 		}
18121 
18122 		unregister_netdev(dev);
18123 		if (tp->aperegs) {
18124 			iounmap(tp->aperegs);
18125 			tp->aperegs = NULL;
18126 		}
18127 		if (tp->regs) {
18128 			iounmap(tp->regs);
18129 			tp->regs = NULL;
18130 		}
18131 		free_netdev(dev);
18132 		pci_release_regions(pdev);
18133 		pci_disable_device(pdev);
18134 	}
18135 }
18136 
18137 #ifdef CONFIG_PM_SLEEP
18138 static int tg3_suspend(struct device *device)
18139 {
18140 	struct net_device *dev = dev_get_drvdata(device);
18141 	struct tg3 *tp = netdev_priv(dev);
18142 
18143 	rtnl_lock();
18144 
18145 	if (!netif_running(dev))
18146 		goto unlock;
18147 
18148 	tg3_reset_task_cancel(tp);
18149 	tg3_phy_stop(tp);
18150 	tg3_netif_stop(tp);
18151 
18152 	tg3_timer_stop(tp);
18153 
18154 	tg3_full_lock(tp, 1);
18155 	tg3_disable_ints(tp);
18156 	tg3_full_unlock(tp);
18157 
18158 	netif_device_detach(dev);
18159 
18160 	tg3_full_lock(tp, 0);
18161 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18162 	tg3_flag_clear(tp, INIT_COMPLETE);
18163 	tg3_full_unlock(tp);
18164 
18165 	tg3_power_down_prepare(tp);
18166 
18167 unlock:
18168 	rtnl_unlock();
18169 	return 0;
18170 }
18171 
18172 static int tg3_resume(struct device *device)
18173 {
18174 	struct net_device *dev = dev_get_drvdata(device);
18175 	struct tg3 *tp = netdev_priv(dev);
18176 	int err = 0;
18177 
18178 	rtnl_lock();
18179 
18180 	if (!netif_running(dev))
18181 		goto unlock;
18182 
18183 	netif_device_attach(dev);
18184 
18185 	netdev_lock(dev);
18186 	tg3_full_lock(tp, 0);
18187 
18188 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18189 
18190 	tg3_flag_set(tp, INIT_COMPLETE);
18191 	err = tg3_restart_hw(tp,
18192 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18193 	if (err)
18194 		goto out;
18195 
18196 	tg3_timer_start(tp);
18197 
18198 	tg3_netif_start(tp);
18199 
18200 out:
18201 	tg3_full_unlock(tp);
18202 	netdev_unlock(dev);
18203 
18204 	if (!err)
18205 		tg3_phy_start(tp);
18206 
18207 unlock:
18208 	rtnl_unlock();
18209 	return err;
18210 }
18211 #endif /* CONFIG_PM_SLEEP */
18212 
18213 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18214 
18215 static void tg3_shutdown(struct pci_dev *pdev)
18216 {
18217 	struct net_device *dev = pci_get_drvdata(pdev);
18218 	struct tg3 *tp = netdev_priv(dev);
18219 
18220 	tg3_reset_task_cancel(tp);
18221 
18222 	rtnl_lock();
18223 
18224 	netif_device_detach(dev);
18225 
18226 	if (netif_running(dev))
18227 		dev_close(dev);
18228 
18229 	if (system_state == SYSTEM_POWER_OFF)
18230 		tg3_power_down(tp);
18231 
18232 	rtnl_unlock();
18233 
18234 	pci_disable_device(pdev);
18235 }
18236 
18237 /**
18238  * tg3_io_error_detected - called when PCI error is detected
18239  * @pdev: Pointer to PCI device
18240  * @state: The current pci connection state
18241  *
18242  * This function is called after a PCI bus error affecting
18243  * this device has been detected.
18244  */
18245 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18246 					      pci_channel_state_t state)
18247 {
18248 	struct net_device *netdev = pci_get_drvdata(pdev);
18249 	struct tg3 *tp = netdev_priv(netdev);
18250 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18251 
18252 	netdev_info(netdev, "PCI I/O error detected\n");
18253 
18254 	/* Want to make sure that the reset task doesn't run */
18255 	tg3_reset_task_cancel(tp);
18256 
18257 	rtnl_lock();
18258 
18259 	/* Could be second call or maybe we don't have netdev yet */
18260 	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18261 		goto done;
18262 
18263 	/* We needn't recover from permanent error */
18264 	if (state == pci_channel_io_frozen)
18265 		tp->pcierr_recovery = true;
18266 
18267 	tg3_phy_stop(tp);
18268 
18269 	tg3_netif_stop(tp);
18270 
18271 	tg3_timer_stop(tp);
18272 
18273 	netif_device_detach(netdev);
18274 
18275 	/* Clean up software state, even if MMIO is blocked */
18276 	tg3_full_lock(tp, 0);
18277 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18278 	tg3_full_unlock(tp);
18279 
18280 done:
18281 	if (state == pci_channel_io_perm_failure) {
18282 		if (netdev) {
18283 			netdev_lock(netdev);
18284 			tg3_napi_enable(tp);
18285 			netdev_unlock(netdev);
18286 			dev_close(netdev);
18287 		}
18288 		err = PCI_ERS_RESULT_DISCONNECT;
18289 	} else {
18290 		pci_disable_device(pdev);
18291 	}
18292 
18293 	rtnl_unlock();
18294 
18295 	return err;
18296 }
18297 
18298 /**
18299  * tg3_io_slot_reset - called after the pci bus has been reset.
18300  * @pdev: Pointer to PCI device
18301  *
18302  * Restart the card from scratch, as if from a cold-boot.
18303  * At this point, the card has experienced a hard reset,
18304  * followed by fixups by BIOS, and has its config space
18305  * set up identically to what it was at cold boot.
18306  */
18307 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18308 {
18309 	struct net_device *netdev = pci_get_drvdata(pdev);
18310 	struct tg3 *tp = netdev_priv(netdev);
18311 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18312 	int err;
18313 
18314 	rtnl_lock();
18315 
18316 	if (pci_enable_device(pdev)) {
18317 		dev_err(&pdev->dev,
18318 			"Cannot re-enable PCI device after reset.\n");
18319 		goto done;
18320 	}
18321 
18322 	pci_set_master(pdev);
18323 	pci_restore_state(pdev);
18324 	pci_save_state(pdev);
18325 
18326 	if (!netdev || !netif_running(netdev)) {
18327 		rc = PCI_ERS_RESULT_RECOVERED;
18328 		goto done;
18329 	}
18330 
18331 	err = tg3_power_up(tp);
18332 	if (err)
18333 		goto done;
18334 
18335 	rc = PCI_ERS_RESULT_RECOVERED;
18336 
18337 done:
18338 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18339 		netdev_lock(netdev);
18340 		tg3_napi_enable(tp);
18341 		netdev_unlock(netdev);
18342 		dev_close(netdev);
18343 	}
18344 	rtnl_unlock();
18345 
18346 	return rc;
18347 }
18348 
18349 /**
18350  * tg3_io_resume - called when traffic can start flowing again.
18351  * @pdev: Pointer to PCI device
18352  *
18353  * This callback is called when the error recovery driver tells
18354  * us that its OK to resume normal operation.
18355  */
18356 static void tg3_io_resume(struct pci_dev *pdev)
18357 {
18358 	struct net_device *netdev = pci_get_drvdata(pdev);
18359 	struct tg3 *tp = netdev_priv(netdev);
18360 	int err;
18361 
18362 	rtnl_lock();
18363 
18364 	if (!netdev || !netif_running(netdev))
18365 		goto done;
18366 
18367 	netdev_lock(netdev);
18368 	tg3_full_lock(tp, 0);
18369 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18370 	tg3_flag_set(tp, INIT_COMPLETE);
18371 	err = tg3_restart_hw(tp, true);
18372 	if (err) {
18373 		tg3_full_unlock(tp);
18374 		netdev_unlock(netdev);
18375 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18376 		goto done;
18377 	}
18378 
18379 	netif_device_attach(netdev);
18380 
18381 	tg3_timer_start(tp);
18382 
18383 	tg3_netif_start(tp);
18384 
18385 	tg3_full_unlock(tp);
18386 	netdev_unlock(netdev);
18387 
18388 	tg3_phy_start(tp);
18389 
18390 done:
18391 	tp->pcierr_recovery = false;
18392 	rtnl_unlock();
18393 }
18394 
18395 static const struct pci_error_handlers tg3_err_handler = {
18396 	.error_detected	= tg3_io_error_detected,
18397 	.slot_reset	= tg3_io_slot_reset,
18398 	.resume		= tg3_io_resume
18399 };
18400 
18401 static struct pci_driver tg3_driver = {
18402 	.name		= DRV_MODULE_NAME,
18403 	.id_table	= tg3_pci_tbl,
18404 	.probe		= tg3_init_one,
18405 	.remove		= tg3_remove_one,
18406 	.err_handler	= &tg3_err_handler,
18407 	.driver.pm	= &tg3_pm_ops,
18408 	.shutdown	= tg3_shutdown,
18409 };
18410 
18411 module_pci_driver(tg3_driver);
18412