xref: /linux/drivers/net/ethernet/broadcom/tg3.c (revision 3cf0a98fea776adb09087e521fe150c295a4b031)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2016 Broadcom Corporation.
8  * Copyright (C) 2016-2017 Broadcom Limited.
9  * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10  * refers to Broadcom Inc. and/or its subsidiaries.
11  *
12  * Firmware is:
13  *	Derived from proprietary unpublished source code,
14  *	Copyright (C) 2000-2016 Broadcom Corporation.
15  *	Copyright (C) 2016-2017 Broadcom Ltd.
16  *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17  *	refers to Broadcom Inc. and/or its subsidiaries.
18  *
19  *	Permission is hereby granted for the distribution of this firmware
20  *	data in hexadecimal or equivalent format, provided this copyright
21  *	notice is accompanying it.
22  */
23 
24 
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58 #include <linux/dmi.h>
59 
60 #include <net/checksum.h>
61 #include <net/gso.h>
62 #include <net/ip.h>
63 
64 #include <linux/io.h>
65 #include <asm/byteorder.h>
66 #include <linux/uaccess.h>
67 
68 #include <uapi/linux/net_tstamp.h>
69 #include <linux/ptp_clock_kernel.h>
70 
71 #define BAR_0	0
72 #define BAR_2	2
73 
74 #include "tg3.h"
75 
76 /* Functions & macros to verify TG3_FLAGS types */
77 
_tg3_flag(enum TG3_FLAGS flag,unsigned long * bits)78 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80 	return test_bit(flag, bits);
81 }
82 
_tg3_flag_set(enum TG3_FLAGS flag,unsigned long * bits)83 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
84 {
85 	set_bit(flag, bits);
86 }
87 
_tg3_flag_clear(enum TG3_FLAGS flag,unsigned long * bits)88 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
89 {
90 	clear_bit(flag, bits);
91 }
92 
93 #define tg3_flag(tp, flag)				\
94 	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define tg3_flag_set(tp, flag)				\
96 	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
97 #define tg3_flag_clear(tp, flag)			\
98 	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
99 
100 #define DRV_MODULE_NAME		"tg3"
101 /* DO NOT UPDATE TG3_*_NUM defines */
102 #define TG3_MAJ_NUM			3
103 #define TG3_MIN_NUM			137
104 
105 #define RESET_KIND_SHUTDOWN	0
106 #define RESET_KIND_INIT		1
107 #define RESET_KIND_SUSPEND	2
108 
109 #define TG3_DEF_RX_MODE		0
110 #define TG3_DEF_TX_MODE		0
111 #define TG3_DEF_MSG_ENABLE	  \
112 	(NETIF_MSG_DRV		| \
113 	 NETIF_MSG_PROBE	| \
114 	 NETIF_MSG_LINK		| \
115 	 NETIF_MSG_TIMER	| \
116 	 NETIF_MSG_IFDOWN	| \
117 	 NETIF_MSG_IFUP		| \
118 	 NETIF_MSG_RX_ERR	| \
119 	 NETIF_MSG_TX_ERR)
120 
121 #define TG3_GRC_LCLCTL_PWRSW_DELAY	100
122 
123 /* length of time before we decide the hardware is borked,
124  * and dev->tx_timeout() should be called to fix the problem
125  */
126 
127 #define TG3_TX_TIMEOUT			(5 * HZ)
128 
129 /* hardware minimum and maximum for a single frame's data payload */
130 #define TG3_MIN_MTU			ETH_ZLEN
131 #define TG3_MAX_MTU(tp)	\
132 	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
133 
134 /* These numbers seem to be hard coded in the NIC firmware somehow.
135  * You can't change the ring sizes, but you can change where you place
136  * them in the NIC onboard memory.
137  */
138 #define TG3_RX_STD_RING_SIZE(tp) \
139 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
140 	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
141 #define TG3_DEF_RX_RING_PENDING		200
142 #define TG3_RX_JMB_RING_SIZE(tp) \
143 	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
144 	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
145 #define TG3_DEF_RX_JUMBO_RING_PENDING	100
146 
147 /* Do not place this n-ring entries value into the tp struct itself,
148  * we really want to expose these constants to GCC so that modulo et
149  * al.  operations are done with shifts and masks instead of with
150  * hw multiply/modulo instructions.  Another solution would be to
151  * replace things like '% foo' with '& (foo - 1)'.
152  */
153 
154 #define TG3_TX_RING_SIZE		512
155 #define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
156 
157 #define TG3_RX_STD_RING_BYTES(tp) \
158 	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
159 #define TG3_RX_JMB_RING_BYTES(tp) \
160 	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
161 #define TG3_RX_RCB_RING_BYTES(tp) \
162 	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
163 #define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
164 				 TG3_TX_RING_SIZE)
165 #define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
166 
167 #define TG3_DMA_BYTE_ENAB		64
168 
169 #define TG3_RX_STD_DMA_SZ		1536
170 #define TG3_RX_JMB_DMA_SZ		9046
171 
172 #define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
173 
174 #define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
175 #define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
176 
177 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
178 	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
179 
180 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
181 	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
182 
183 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
184  * that are at least dword aligned when used in PCIX mode.  The driver
185  * works around this bug by double copying the packet.  This workaround
186  * is built into the normal double copy length check for efficiency.
187  *
188  * However, the double copy is only necessary on those architectures
189  * where unaligned memory accesses are inefficient.  For those architectures
190  * where unaligned memory accesses incur little penalty, we can reintegrate
191  * the 5701 in the normal rx path.  Doing so saves a device structure
192  * dereference by hardcoding the double copy threshold in place.
193  */
194 #define TG3_RX_COPY_THRESHOLD		256
195 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
196 	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
197 #else
198 	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
199 #endif
200 
201 #if (NET_IP_ALIGN != 0)
202 #define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
203 #else
204 #define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
205 #endif
206 
207 /* minimum number of free TX descriptors required to wake up TX process */
208 #define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
209 #define TG3_TX_BD_DMA_MAX_2K		2048
210 #define TG3_TX_BD_DMA_MAX_4K		4096
211 
212 #define TG3_RAW_IP_ALIGN 2
213 
214 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
215 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
216 
217 #define TG3_FW_UPDATE_TIMEOUT_SEC	5
218 #define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
219 
220 #define FIRMWARE_TG3		"tigon/tg3.bin"
221 #define FIRMWARE_TG357766	"tigon/tg357766.bin"
222 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
223 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
224 
225 MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
226 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
227 MODULE_LICENSE("GPL");
228 MODULE_FIRMWARE(FIRMWARE_TG3);
229 MODULE_FIRMWARE(FIRMWARE_TG357766);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
231 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
232 
233 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
234 module_param(tg3_debug, int, 0);
235 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
236 
237 #define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
238 #define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
239 
240 static const struct pci_device_id tg3_pci_tbl[] = {
241 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
242 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
243 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
244 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
245 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
246 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
247 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
248 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
249 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
250 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
251 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
252 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
253 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
254 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
255 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
256 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
257 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
258 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
259 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
260 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 			TG3_DRV_DATA_FLAG_5705_10_100},
262 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
263 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264 			TG3_DRV_DATA_FLAG_5705_10_100},
265 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
266 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
267 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268 			TG3_DRV_DATA_FLAG_5705_10_100},
269 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
270 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
271 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
272 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
273 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
274 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
275 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
277 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
278 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
279 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
280 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
281 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
283 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
284 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
285 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
286 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
287 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
288 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
289 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
290 			PCI_VENDOR_ID_LENOVO,
291 			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
292 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
294 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
295 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
297 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
298 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
299 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
300 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
301 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
302 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
303 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
304 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
305 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
306 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
307 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
308 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
309 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
310 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
311 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
312 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
313 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
314 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
316 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318 			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
319 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
321 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
322 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
323 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
325 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
326 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
327 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
328 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
329 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
330 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
331 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
332 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
333 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
335 	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
336 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
337 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
338 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
339 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
340 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
341 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
342 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
343 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
344 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
345 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
346 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
347 	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
348 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
349 	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
350 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
351 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
352 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
353 	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
354 	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
355 	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
356 	{}
357 };
358 
359 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
360 
361 static const struct {
362 	const char string[ETH_GSTRING_LEN];
363 } ethtool_stats_keys[] = {
364 	{ "rx_octets" },
365 	{ "rx_fragments" },
366 	{ "rx_ucast_packets" },
367 	{ "rx_mcast_packets" },
368 	{ "rx_bcast_packets" },
369 	{ "rx_fcs_errors" },
370 	{ "rx_align_errors" },
371 	{ "rx_xon_pause_rcvd" },
372 	{ "rx_xoff_pause_rcvd" },
373 	{ "rx_mac_ctrl_rcvd" },
374 	{ "rx_xoff_entered" },
375 	{ "rx_frame_too_long_errors" },
376 	{ "rx_jabbers" },
377 	{ "rx_undersize_packets" },
378 	{ "rx_in_length_errors" },
379 	{ "rx_out_length_errors" },
380 	{ "rx_64_or_less_octet_packets" },
381 	{ "rx_65_to_127_octet_packets" },
382 	{ "rx_128_to_255_octet_packets" },
383 	{ "rx_256_to_511_octet_packets" },
384 	{ "rx_512_to_1023_octet_packets" },
385 	{ "rx_1024_to_1522_octet_packets" },
386 	{ "rx_1523_to_2047_octet_packets" },
387 	{ "rx_2048_to_4095_octet_packets" },
388 	{ "rx_4096_to_8191_octet_packets" },
389 	{ "rx_8192_to_9022_octet_packets" },
390 
391 	{ "tx_octets" },
392 	{ "tx_collisions" },
393 
394 	{ "tx_xon_sent" },
395 	{ "tx_xoff_sent" },
396 	{ "tx_flow_control" },
397 	{ "tx_mac_errors" },
398 	{ "tx_single_collisions" },
399 	{ "tx_mult_collisions" },
400 	{ "tx_deferred" },
401 	{ "tx_excessive_collisions" },
402 	{ "tx_late_collisions" },
403 	{ "tx_collide_2times" },
404 	{ "tx_collide_3times" },
405 	{ "tx_collide_4times" },
406 	{ "tx_collide_5times" },
407 	{ "tx_collide_6times" },
408 	{ "tx_collide_7times" },
409 	{ "tx_collide_8times" },
410 	{ "tx_collide_9times" },
411 	{ "tx_collide_10times" },
412 	{ "tx_collide_11times" },
413 	{ "tx_collide_12times" },
414 	{ "tx_collide_13times" },
415 	{ "tx_collide_14times" },
416 	{ "tx_collide_15times" },
417 	{ "tx_ucast_packets" },
418 	{ "tx_mcast_packets" },
419 	{ "tx_bcast_packets" },
420 	{ "tx_carrier_sense_errors" },
421 	{ "tx_discards" },
422 	{ "tx_errors" },
423 
424 	{ "dma_writeq_full" },
425 	{ "dma_write_prioq_full" },
426 	{ "rxbds_empty" },
427 	{ "rx_discards" },
428 	{ "rx_errors" },
429 	{ "rx_threshold_hit" },
430 
431 	{ "dma_readq_full" },
432 	{ "dma_read_prioq_full" },
433 	{ "tx_comp_queue_full" },
434 
435 	{ "ring_set_send_prod_index" },
436 	{ "ring_status_update" },
437 	{ "nic_irqs" },
438 	{ "nic_avoided_irqs" },
439 	{ "nic_tx_threshold_hit" },
440 
441 	{ "mbuf_lwm_thresh_hit" },
442 };
443 
444 #define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
445 #define TG3_NVRAM_TEST		0
446 #define TG3_LINK_TEST		1
447 #define TG3_REGISTER_TEST	2
448 #define TG3_MEMORY_TEST		3
449 #define TG3_MAC_LOOPB_TEST	4
450 #define TG3_PHY_LOOPB_TEST	5
451 #define TG3_EXT_LOOPB_TEST	6
452 #define TG3_INTERRUPT_TEST	7
453 
454 
455 static const struct {
456 	const char string[ETH_GSTRING_LEN];
457 } ethtool_test_keys[] = {
458 	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
459 	[TG3_LINK_TEST]		= { "link test         (online) " },
460 	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
461 	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
462 	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
463 	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
464 	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
465 	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
466 };
467 
468 #define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
469 
470 
tg3_write32(struct tg3 * tp,u32 off,u32 val)471 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
472 {
473 	writel(val, tp->regs + off);
474 }
475 
tg3_read32(struct tg3 * tp,u32 off)476 static u32 tg3_read32(struct tg3 *tp, u32 off)
477 {
478 	return readl(tp->regs + off);
479 }
480 
tg3_ape_write32(struct tg3 * tp,u32 off,u32 val)481 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
482 {
483 	writel(val, tp->aperegs + off);
484 }
485 
tg3_ape_read32(struct tg3 * tp,u32 off)486 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
487 {
488 	return readl(tp->aperegs + off);
489 }
490 
tg3_write_indirect_reg32(struct tg3 * tp,u32 off,u32 val)491 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
492 {
493 	unsigned long flags;
494 
495 	spin_lock_irqsave(&tp->indirect_lock, flags);
496 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
497 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
498 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
499 }
500 
tg3_write_flush_reg32(struct tg3 * tp,u32 off,u32 val)501 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
502 {
503 	writel(val, tp->regs + off);
504 	readl(tp->regs + off);
505 }
506 
tg3_read_indirect_reg32(struct tg3 * tp,u32 off)507 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
508 {
509 	unsigned long flags;
510 	u32 val;
511 
512 	spin_lock_irqsave(&tp->indirect_lock, flags);
513 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
514 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
515 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
516 	return val;
517 }
518 
tg3_write_indirect_mbox(struct tg3 * tp,u32 off,u32 val)519 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
520 {
521 	unsigned long flags;
522 
523 	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
524 		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
525 				       TG3_64BIT_REG_LOW, val);
526 		return;
527 	}
528 	if (off == TG3_RX_STD_PROD_IDX_REG) {
529 		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
530 				       TG3_64BIT_REG_LOW, val);
531 		return;
532 	}
533 
534 	spin_lock_irqsave(&tp->indirect_lock, flags);
535 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
536 	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
537 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
538 
539 	/* In indirect mode when disabling interrupts, we also need
540 	 * to clear the interrupt bit in the GRC local ctrl register.
541 	 */
542 	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
543 	    (val == 0x1)) {
544 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
545 				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
546 	}
547 }
548 
tg3_read_indirect_mbox(struct tg3 * tp,u32 off)549 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
550 {
551 	unsigned long flags;
552 	u32 val;
553 
554 	spin_lock_irqsave(&tp->indirect_lock, flags);
555 	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
556 	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
557 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
558 	return val;
559 }
560 
561 /* usec_wait specifies the wait time in usec when writing to certain registers
562  * where it is unsafe to read back the register without some delay.
563  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
564  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
565  */
_tw32_flush(struct tg3 * tp,u32 off,u32 val,u32 usec_wait)566 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
567 {
568 	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
569 		/* Non-posted methods */
570 		tp->write32(tp, off, val);
571 	else {
572 		/* Posted method */
573 		tg3_write32(tp, off, val);
574 		if (usec_wait)
575 			udelay(usec_wait);
576 		tp->read32(tp, off);
577 	}
578 	/* Wait again after the read for the posted method to guarantee that
579 	 * the wait time is met.
580 	 */
581 	if (usec_wait)
582 		udelay(usec_wait);
583 }
584 
tw32_mailbox_flush(struct tg3 * tp,u32 off,u32 val)585 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
586 {
587 	tp->write32_mbox(tp, off, val);
588 	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
589 	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
590 	     !tg3_flag(tp, ICH_WORKAROUND)))
591 		tp->read32_mbox(tp, off);
592 }
593 
tg3_write32_tx_mbox(struct tg3 * tp,u32 off,u32 val)594 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
595 {
596 	void __iomem *mbox = tp->regs + off;
597 	writel(val, mbox);
598 	if (tg3_flag(tp, TXD_MBOX_HWBUG))
599 		writel(val, mbox);
600 	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
601 	    tg3_flag(tp, FLUSH_POSTED_WRITES))
602 		readl(mbox);
603 }
604 
tg3_read32_mbox_5906(struct tg3 * tp,u32 off)605 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
606 {
607 	return readl(tp->regs + off + GRCMBOX_BASE);
608 }
609 
tg3_write32_mbox_5906(struct tg3 * tp,u32 off,u32 val)610 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
611 {
612 	writel(val, tp->regs + off + GRCMBOX_BASE);
613 }
614 
615 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
616 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
617 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
618 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
619 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
620 
621 #define tw32(reg, val)			tp->write32(tp, reg, val)
622 #define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
623 #define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
624 #define tr32(reg)			tp->read32(tp, reg)
625 
tg3_write_mem(struct tg3 * tp,u32 off,u32 val)626 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
627 {
628 	unsigned long flags;
629 
630 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
631 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
632 		return;
633 
634 	spin_lock_irqsave(&tp->indirect_lock, flags);
635 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
636 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
637 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
638 
639 		/* Always leave this as zero. */
640 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
641 	} else {
642 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
643 		tw32_f(TG3PCI_MEM_WIN_DATA, val);
644 
645 		/* Always leave this as zero. */
646 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
647 	}
648 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
649 }
650 
tg3_read_mem(struct tg3 * tp,u32 off,u32 * val)651 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
652 {
653 	unsigned long flags;
654 
655 	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
656 	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
657 		*val = 0;
658 		return;
659 	}
660 
661 	spin_lock_irqsave(&tp->indirect_lock, flags);
662 	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
663 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
664 		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
665 
666 		/* Always leave this as zero. */
667 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
668 	} else {
669 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
670 		*val = tr32(TG3PCI_MEM_WIN_DATA);
671 
672 		/* Always leave this as zero. */
673 		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
674 	}
675 	spin_unlock_irqrestore(&tp->indirect_lock, flags);
676 }
677 
tg3_ape_lock_init(struct tg3 * tp)678 static void tg3_ape_lock_init(struct tg3 *tp)
679 {
680 	int i;
681 	u32 regbase, bit;
682 
683 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
684 		regbase = TG3_APE_LOCK_GRANT;
685 	else
686 		regbase = TG3_APE_PER_LOCK_GRANT;
687 
688 	/* Make sure the driver hasn't any stale locks. */
689 	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
690 		switch (i) {
691 		case TG3_APE_LOCK_PHY0:
692 		case TG3_APE_LOCK_PHY1:
693 		case TG3_APE_LOCK_PHY2:
694 		case TG3_APE_LOCK_PHY3:
695 			bit = APE_LOCK_GRANT_DRIVER;
696 			break;
697 		default:
698 			if (!tp->pci_fn)
699 				bit = APE_LOCK_GRANT_DRIVER;
700 			else
701 				bit = 1 << tp->pci_fn;
702 		}
703 		tg3_ape_write32(tp, regbase + 4 * i, bit);
704 	}
705 
706 }
707 
tg3_ape_lock(struct tg3 * tp,int locknum)708 static int tg3_ape_lock(struct tg3 *tp, int locknum)
709 {
710 	int i, off;
711 	int ret = 0;
712 	u32 status, req, gnt, bit;
713 
714 	if (!tg3_flag(tp, ENABLE_APE))
715 		return 0;
716 
717 	switch (locknum) {
718 	case TG3_APE_LOCK_GPIO:
719 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
720 			return 0;
721 		fallthrough;
722 	case TG3_APE_LOCK_GRC:
723 	case TG3_APE_LOCK_MEM:
724 		if (!tp->pci_fn)
725 			bit = APE_LOCK_REQ_DRIVER;
726 		else
727 			bit = 1 << tp->pci_fn;
728 		break;
729 	case TG3_APE_LOCK_PHY0:
730 	case TG3_APE_LOCK_PHY1:
731 	case TG3_APE_LOCK_PHY2:
732 	case TG3_APE_LOCK_PHY3:
733 		bit = APE_LOCK_REQ_DRIVER;
734 		break;
735 	default:
736 		return -EINVAL;
737 	}
738 
739 	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
740 		req = TG3_APE_LOCK_REQ;
741 		gnt = TG3_APE_LOCK_GRANT;
742 	} else {
743 		req = TG3_APE_PER_LOCK_REQ;
744 		gnt = TG3_APE_PER_LOCK_GRANT;
745 	}
746 
747 	off = 4 * locknum;
748 
749 	tg3_ape_write32(tp, req + off, bit);
750 
751 	/* Wait for up to 1 millisecond to acquire lock. */
752 	for (i = 0; i < 100; i++) {
753 		status = tg3_ape_read32(tp, gnt + off);
754 		if (status == bit)
755 			break;
756 		if (pci_channel_offline(tp->pdev))
757 			break;
758 
759 		udelay(10);
760 	}
761 
762 	if (status != bit) {
763 		/* Revoke the lock request. */
764 		tg3_ape_write32(tp, gnt + off, bit);
765 		ret = -EBUSY;
766 	}
767 
768 	return ret;
769 }
770 
tg3_ape_unlock(struct tg3 * tp,int locknum)771 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
772 {
773 	u32 gnt, bit;
774 
775 	if (!tg3_flag(tp, ENABLE_APE))
776 		return;
777 
778 	switch (locknum) {
779 	case TG3_APE_LOCK_GPIO:
780 		if (tg3_asic_rev(tp) == ASIC_REV_5761)
781 			return;
782 		fallthrough;
783 	case TG3_APE_LOCK_GRC:
784 	case TG3_APE_LOCK_MEM:
785 		if (!tp->pci_fn)
786 			bit = APE_LOCK_GRANT_DRIVER;
787 		else
788 			bit = 1 << tp->pci_fn;
789 		break;
790 	case TG3_APE_LOCK_PHY0:
791 	case TG3_APE_LOCK_PHY1:
792 	case TG3_APE_LOCK_PHY2:
793 	case TG3_APE_LOCK_PHY3:
794 		bit = APE_LOCK_GRANT_DRIVER;
795 		break;
796 	default:
797 		return;
798 	}
799 
800 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
801 		gnt = TG3_APE_LOCK_GRANT;
802 	else
803 		gnt = TG3_APE_PER_LOCK_GRANT;
804 
805 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
806 }
807 
tg3_ape_event_lock(struct tg3 * tp,u32 timeout_us)808 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
809 {
810 	u32 apedata;
811 
812 	while (timeout_us) {
813 		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
814 			return -EBUSY;
815 
816 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
817 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
818 			break;
819 
820 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
821 
822 		udelay(10);
823 		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
824 	}
825 
826 	return timeout_us ? 0 : -EBUSY;
827 }
828 
829 #ifdef CONFIG_TIGON3_HWMON
tg3_ape_wait_for_event(struct tg3 * tp,u32 timeout_us)830 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
831 {
832 	u32 i, apedata;
833 
834 	for (i = 0; i < timeout_us / 10; i++) {
835 		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
836 
837 		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
838 			break;
839 
840 		udelay(10);
841 	}
842 
843 	return i == timeout_us / 10;
844 }
845 
tg3_ape_scratchpad_read(struct tg3 * tp,u32 * data,u32 base_off,u32 len)846 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
847 				   u32 len)
848 {
849 	int err;
850 	u32 i, bufoff, msgoff, maxlen, apedata;
851 
852 	if (!tg3_flag(tp, APE_HAS_NCSI))
853 		return 0;
854 
855 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
856 	if (apedata != APE_SEG_SIG_MAGIC)
857 		return -ENODEV;
858 
859 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
860 	if (!(apedata & APE_FW_STATUS_READY))
861 		return -EAGAIN;
862 
863 	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
864 		 TG3_APE_SHMEM_BASE;
865 	msgoff = bufoff + 2 * sizeof(u32);
866 	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
867 
868 	while (len) {
869 		u32 length;
870 
871 		/* Cap xfer sizes to scratchpad limits. */
872 		length = (len > maxlen) ? maxlen : len;
873 		len -= length;
874 
875 		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
876 		if (!(apedata & APE_FW_STATUS_READY))
877 			return -EAGAIN;
878 
879 		/* Wait for up to 1 msec for APE to service previous event. */
880 		err = tg3_ape_event_lock(tp, 1000);
881 		if (err)
882 			return err;
883 
884 		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
885 			  APE_EVENT_STATUS_SCRTCHPD_READ |
886 			  APE_EVENT_STATUS_EVENT_PENDING;
887 		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
888 
889 		tg3_ape_write32(tp, bufoff, base_off);
890 		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
891 
892 		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
893 		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
894 
895 		base_off += length;
896 
897 		if (tg3_ape_wait_for_event(tp, 30000))
898 			return -EAGAIN;
899 
900 		for (i = 0; length; i += 4, length -= 4) {
901 			u32 val = tg3_ape_read32(tp, msgoff + i);
902 			memcpy(data, &val, sizeof(u32));
903 			data++;
904 		}
905 	}
906 
907 	return 0;
908 }
909 #endif
910 
tg3_ape_send_event(struct tg3 * tp,u32 event)911 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
912 {
913 	int err;
914 	u32 apedata;
915 
916 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
917 	if (apedata != APE_SEG_SIG_MAGIC)
918 		return -EAGAIN;
919 
920 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
921 	if (!(apedata & APE_FW_STATUS_READY))
922 		return -EAGAIN;
923 
924 	/* Wait for up to 20 millisecond for APE to service previous event. */
925 	err = tg3_ape_event_lock(tp, 20000);
926 	if (err)
927 		return err;
928 
929 	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
930 			event | APE_EVENT_STATUS_EVENT_PENDING);
931 
932 	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
933 	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
934 
935 	return 0;
936 }
937 
tg3_ape_driver_state_change(struct tg3 * tp,int kind)938 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
939 {
940 	u32 event;
941 	u32 apedata;
942 
943 	if (!tg3_flag(tp, ENABLE_APE))
944 		return;
945 
946 	switch (kind) {
947 	case RESET_KIND_INIT:
948 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
949 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
950 				APE_HOST_SEG_SIG_MAGIC);
951 		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
952 				APE_HOST_SEG_LEN_MAGIC);
953 		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
954 		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
955 		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
956 			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
957 		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
958 				APE_HOST_BEHAV_NO_PHYLOCK);
959 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
960 				    TG3_APE_HOST_DRVR_STATE_START);
961 
962 		event = APE_EVENT_STATUS_STATE_START;
963 		break;
964 	case RESET_KIND_SHUTDOWN:
965 		if (device_may_wakeup(&tp->pdev->dev) &&
966 		    tg3_flag(tp, WOL_ENABLE)) {
967 			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
968 					    TG3_APE_HOST_WOL_SPEED_AUTO);
969 			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
970 		} else
971 			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
972 
973 		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
974 
975 		event = APE_EVENT_STATUS_STATE_UNLOAD;
976 		break;
977 	default:
978 		return;
979 	}
980 
981 	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
982 
983 	tg3_ape_send_event(tp, event);
984 }
985 
tg3_send_ape_heartbeat(struct tg3 * tp,unsigned long interval)986 static void tg3_send_ape_heartbeat(struct tg3 *tp,
987 				   unsigned long interval)
988 {
989 	/* Check if hb interval has exceeded */
990 	if (!tg3_flag(tp, ENABLE_APE) ||
991 	    time_before(jiffies, tp->ape_hb_jiffies + interval))
992 		return;
993 
994 	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
995 	tp->ape_hb_jiffies = jiffies;
996 }
997 
tg3_disable_ints(struct tg3 * tp)998 static void tg3_disable_ints(struct tg3 *tp)
999 {
1000 	int i;
1001 
1002 	tw32(TG3PCI_MISC_HOST_CTRL,
1003 	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1004 	for (i = 0; i < tp->irq_max; i++)
1005 		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1006 }
1007 
tg3_enable_ints(struct tg3 * tp)1008 static void tg3_enable_ints(struct tg3 *tp)
1009 {
1010 	int i;
1011 
1012 	tp->irq_sync = 0;
1013 	wmb();
1014 
1015 	tw32(TG3PCI_MISC_HOST_CTRL,
1016 	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1017 
1018 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1019 	for (i = 0; i < tp->irq_cnt; i++) {
1020 		struct tg3_napi *tnapi = &tp->napi[i];
1021 
1022 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1023 		if (tg3_flag(tp, 1SHOT_MSI))
1024 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1025 
1026 		tp->coal_now |= tnapi->coal_now;
1027 	}
1028 
1029 	/* Force an initial interrupt */
1030 	if (!tg3_flag(tp, TAGGED_STATUS) &&
1031 	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1032 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1033 	else
1034 		tw32(HOSTCC_MODE, tp->coal_now);
1035 
1036 	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1037 }
1038 
tg3_has_work(struct tg3_napi * tnapi)1039 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1040 {
1041 	struct tg3 *tp = tnapi->tp;
1042 	struct tg3_hw_status *sblk = tnapi->hw_status;
1043 	unsigned int work_exists = 0;
1044 
1045 	/* check for phy events */
1046 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1047 		if (sblk->status & SD_STATUS_LINK_CHG)
1048 			work_exists = 1;
1049 	}
1050 
1051 	/* check for TX work to do */
1052 	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1053 		work_exists = 1;
1054 
1055 	/* check for RX work to do */
1056 	if (tnapi->rx_rcb_prod_idx &&
1057 	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1058 		work_exists = 1;
1059 
1060 	return work_exists;
1061 }
1062 
1063 /* tg3_int_reenable
1064  *  similar to tg3_enable_ints, but it accurately determines whether there
1065  *  is new work pending and can return without flushing the PIO write
1066  *  which reenables interrupts
1067  */
tg3_int_reenable(struct tg3_napi * tnapi)1068 static void tg3_int_reenable(struct tg3_napi *tnapi)
1069 {
1070 	struct tg3 *tp = tnapi->tp;
1071 
1072 	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1073 
1074 	/* When doing tagged status, this work check is unnecessary.
1075 	 * The last_tag we write above tells the chip which piece of
1076 	 * work we've completed.
1077 	 */
1078 	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1079 		tw32(HOSTCC_MODE, tp->coalesce_mode |
1080 		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1081 }
1082 
tg3_switch_clocks(struct tg3 * tp)1083 static void tg3_switch_clocks(struct tg3 *tp)
1084 {
1085 	u32 clock_ctrl;
1086 	u32 orig_clock_ctrl;
1087 
1088 	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1089 		return;
1090 
1091 	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1092 
1093 	orig_clock_ctrl = clock_ctrl;
1094 	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1095 		       CLOCK_CTRL_CLKRUN_OENABLE |
1096 		       0x1f);
1097 	tp->pci_clock_ctrl = clock_ctrl;
1098 
1099 	if (tg3_flag(tp, 5705_PLUS)) {
1100 		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1101 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1102 				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1103 		}
1104 	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1105 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1106 			    clock_ctrl |
1107 			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1108 			    40);
1109 		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1110 			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1111 			    40);
1112 	}
1113 	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1114 }
1115 
1116 #define PHY_BUSY_LOOPS	5000
1117 
__tg3_readphy(struct tg3 * tp,unsigned int phy_addr,int reg,u32 * val)1118 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1119 			 u32 *val)
1120 {
1121 	u32 frame_val;
1122 	unsigned int loops;
1123 	int ret;
1124 
1125 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1126 		tw32_f(MAC_MI_MODE,
1127 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1128 		udelay(80);
1129 	}
1130 
1131 	tg3_ape_lock(tp, tp->phy_ape_lock);
1132 
1133 	*val = 0x0;
1134 
1135 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1136 		      MI_COM_PHY_ADDR_MASK);
1137 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1138 		      MI_COM_REG_ADDR_MASK);
1139 	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1140 
1141 	tw32_f(MAC_MI_COM, frame_val);
1142 
1143 	loops = PHY_BUSY_LOOPS;
1144 	while (loops != 0) {
1145 		udelay(10);
1146 		frame_val = tr32(MAC_MI_COM);
1147 
1148 		if ((frame_val & MI_COM_BUSY) == 0) {
1149 			udelay(5);
1150 			frame_val = tr32(MAC_MI_COM);
1151 			break;
1152 		}
1153 		loops -= 1;
1154 	}
1155 
1156 	ret = -EBUSY;
1157 	if (loops != 0) {
1158 		*val = frame_val & MI_COM_DATA_MASK;
1159 		ret = 0;
1160 	}
1161 
1162 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1163 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1164 		udelay(80);
1165 	}
1166 
1167 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1168 
1169 	return ret;
1170 }
1171 
tg3_readphy(struct tg3 * tp,int reg,u32 * val)1172 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1173 {
1174 	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1175 }
1176 
__tg3_writephy(struct tg3 * tp,unsigned int phy_addr,int reg,u32 val)1177 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1178 			  u32 val)
1179 {
1180 	u32 frame_val;
1181 	unsigned int loops;
1182 	int ret;
1183 
1184 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1185 	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1186 		return 0;
1187 
1188 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1189 		tw32_f(MAC_MI_MODE,
1190 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1191 		udelay(80);
1192 	}
1193 
1194 	tg3_ape_lock(tp, tp->phy_ape_lock);
1195 
1196 	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1197 		      MI_COM_PHY_ADDR_MASK);
1198 	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1199 		      MI_COM_REG_ADDR_MASK);
1200 	frame_val |= (val & MI_COM_DATA_MASK);
1201 	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1202 
1203 	tw32_f(MAC_MI_COM, frame_val);
1204 
1205 	loops = PHY_BUSY_LOOPS;
1206 	while (loops != 0) {
1207 		udelay(10);
1208 		frame_val = tr32(MAC_MI_COM);
1209 		if ((frame_val & MI_COM_BUSY) == 0) {
1210 			udelay(5);
1211 			frame_val = tr32(MAC_MI_COM);
1212 			break;
1213 		}
1214 		loops -= 1;
1215 	}
1216 
1217 	ret = -EBUSY;
1218 	if (loops != 0)
1219 		ret = 0;
1220 
1221 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1222 		tw32_f(MAC_MI_MODE, tp->mi_mode);
1223 		udelay(80);
1224 	}
1225 
1226 	tg3_ape_unlock(tp, tp->phy_ape_lock);
1227 
1228 	return ret;
1229 }
1230 
tg3_writephy(struct tg3 * tp,int reg,u32 val)1231 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1232 {
1233 	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1234 }
1235 
tg3_phy_cl45_write(struct tg3 * tp,u32 devad,u32 addr,u32 val)1236 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1237 {
1238 	int err;
1239 
1240 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1241 	if (err)
1242 		goto done;
1243 
1244 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1245 	if (err)
1246 		goto done;
1247 
1248 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1249 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1250 	if (err)
1251 		goto done;
1252 
1253 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1254 
1255 done:
1256 	return err;
1257 }
1258 
tg3_phy_cl45_read(struct tg3 * tp,u32 devad,u32 addr,u32 * val)1259 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1260 {
1261 	int err;
1262 
1263 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1264 	if (err)
1265 		goto done;
1266 
1267 	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1268 	if (err)
1269 		goto done;
1270 
1271 	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1272 			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1273 	if (err)
1274 		goto done;
1275 
1276 	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1277 
1278 done:
1279 	return err;
1280 }
1281 
tg3_phydsp_read(struct tg3 * tp,u32 reg,u32 * val)1282 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1283 {
1284 	int err;
1285 
1286 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1287 	if (!err)
1288 		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1289 
1290 	return err;
1291 }
1292 
tg3_phydsp_write(struct tg3 * tp,u32 reg,u32 val)1293 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1294 {
1295 	int err;
1296 
1297 	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1298 	if (!err)
1299 		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1300 
1301 	return err;
1302 }
1303 
tg3_phy_auxctl_read(struct tg3 * tp,int reg,u32 * val)1304 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1305 {
1306 	int err;
1307 
1308 	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1309 			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1310 			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1311 	if (!err)
1312 		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1313 
1314 	return err;
1315 }
1316 
tg3_phy_auxctl_write(struct tg3 * tp,int reg,u32 set)1317 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1318 {
1319 	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1320 		set |= MII_TG3_AUXCTL_MISC_WREN;
1321 
1322 	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1323 }
1324 
tg3_phy_toggle_auxctl_smdsp(struct tg3 * tp,bool enable)1325 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1326 {
1327 	u32 val;
1328 	int err;
1329 
1330 	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1331 
1332 	if (err)
1333 		return err;
1334 
1335 	if (enable)
1336 		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1337 	else
1338 		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1339 
1340 	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1341 				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1342 
1343 	return err;
1344 }
1345 
tg3_phy_shdw_write(struct tg3 * tp,int reg,u32 val)1346 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1347 {
1348 	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1349 			    reg | val | MII_TG3_MISC_SHDW_WREN);
1350 }
1351 
tg3_bmcr_reset(struct tg3 * tp)1352 static int tg3_bmcr_reset(struct tg3 *tp)
1353 {
1354 	u32 phy_control;
1355 	int limit, err;
1356 
1357 	/* OK, reset it, and poll the BMCR_RESET bit until it
1358 	 * clears or we time out.
1359 	 */
1360 	phy_control = BMCR_RESET;
1361 	err = tg3_writephy(tp, MII_BMCR, phy_control);
1362 	if (err != 0)
1363 		return -EBUSY;
1364 
1365 	limit = 5000;
1366 	while (limit--) {
1367 		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1368 		if (err != 0)
1369 			return -EBUSY;
1370 
1371 		if ((phy_control & BMCR_RESET) == 0) {
1372 			udelay(40);
1373 			break;
1374 		}
1375 		udelay(10);
1376 	}
1377 	if (limit < 0)
1378 		return -EBUSY;
1379 
1380 	return 0;
1381 }
1382 
tg3_mdio_read(struct mii_bus * bp,int mii_id,int reg)1383 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1384 {
1385 	struct tg3 *tp = bp->priv;
1386 	u32 val;
1387 
1388 	spin_lock_bh(&tp->lock);
1389 
1390 	if (__tg3_readphy(tp, mii_id, reg, &val))
1391 		val = -EIO;
1392 
1393 	spin_unlock_bh(&tp->lock);
1394 
1395 	return val;
1396 }
1397 
tg3_mdio_write(struct mii_bus * bp,int mii_id,int reg,u16 val)1398 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1399 {
1400 	struct tg3 *tp = bp->priv;
1401 	u32 ret = 0;
1402 
1403 	spin_lock_bh(&tp->lock);
1404 
1405 	if (__tg3_writephy(tp, mii_id, reg, val))
1406 		ret = -EIO;
1407 
1408 	spin_unlock_bh(&tp->lock);
1409 
1410 	return ret;
1411 }
1412 
tg3_mdio_config_5785(struct tg3 * tp)1413 static void tg3_mdio_config_5785(struct tg3 *tp)
1414 {
1415 	u32 val;
1416 	struct phy_device *phydev;
1417 
1418 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1419 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1420 	case PHY_ID_BCM50610:
1421 	case PHY_ID_BCM50610M:
1422 		val = MAC_PHYCFG2_50610_LED_MODES;
1423 		break;
1424 	case PHY_ID_BCMAC131:
1425 		val = MAC_PHYCFG2_AC131_LED_MODES;
1426 		break;
1427 	case PHY_ID_RTL8211C:
1428 		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1429 		break;
1430 	case PHY_ID_RTL8201E:
1431 		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1432 		break;
1433 	default:
1434 		return;
1435 	}
1436 
1437 	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1438 		tw32(MAC_PHYCFG2, val);
1439 
1440 		val = tr32(MAC_PHYCFG1);
1441 		val &= ~(MAC_PHYCFG1_RGMII_INT |
1442 			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1443 		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1444 		tw32(MAC_PHYCFG1, val);
1445 
1446 		return;
1447 	}
1448 
1449 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1450 		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1451 		       MAC_PHYCFG2_FMODE_MASK_MASK |
1452 		       MAC_PHYCFG2_GMODE_MASK_MASK |
1453 		       MAC_PHYCFG2_ACT_MASK_MASK   |
1454 		       MAC_PHYCFG2_QUAL_MASK_MASK |
1455 		       MAC_PHYCFG2_INBAND_ENABLE;
1456 
1457 	tw32(MAC_PHYCFG2, val);
1458 
1459 	val = tr32(MAC_PHYCFG1);
1460 	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1461 		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1462 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1463 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1464 			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1465 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1466 			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1467 	}
1468 	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1469 	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1470 	tw32(MAC_PHYCFG1, val);
1471 
1472 	val = tr32(MAC_EXT_RGMII_MODE);
1473 	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1474 		 MAC_RGMII_MODE_RX_QUALITY |
1475 		 MAC_RGMII_MODE_RX_ACTIVITY |
1476 		 MAC_RGMII_MODE_RX_ENG_DET |
1477 		 MAC_RGMII_MODE_TX_ENABLE |
1478 		 MAC_RGMII_MODE_TX_LOWPWR |
1479 		 MAC_RGMII_MODE_TX_RESET);
1480 	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1481 		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1482 			val |= MAC_RGMII_MODE_RX_INT_B |
1483 			       MAC_RGMII_MODE_RX_QUALITY |
1484 			       MAC_RGMII_MODE_RX_ACTIVITY |
1485 			       MAC_RGMII_MODE_RX_ENG_DET;
1486 		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1487 			val |= MAC_RGMII_MODE_TX_ENABLE |
1488 			       MAC_RGMII_MODE_TX_LOWPWR |
1489 			       MAC_RGMII_MODE_TX_RESET;
1490 	}
1491 	tw32(MAC_EXT_RGMII_MODE, val);
1492 }
1493 
tg3_mdio_start(struct tg3 * tp)1494 static void tg3_mdio_start(struct tg3 *tp)
1495 {
1496 	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1497 	tw32_f(MAC_MI_MODE, tp->mi_mode);
1498 	udelay(80);
1499 
1500 	if (tg3_flag(tp, MDIOBUS_INITED) &&
1501 	    tg3_asic_rev(tp) == ASIC_REV_5785)
1502 		tg3_mdio_config_5785(tp);
1503 }
1504 
tg3_mdio_init(struct tg3 * tp)1505 static int tg3_mdio_init(struct tg3 *tp)
1506 {
1507 	int i;
1508 	u32 reg;
1509 	struct phy_device *phydev;
1510 
1511 	if (tg3_flag(tp, 5717_PLUS)) {
1512 		u32 is_serdes;
1513 
1514 		tp->phy_addr = tp->pci_fn + 1;
1515 
1516 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1517 			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1518 		else
1519 			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1520 				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1521 		if (is_serdes)
1522 			tp->phy_addr += 7;
1523 	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1524 		int addr;
1525 
1526 		addr = ssb_gige_get_phyaddr(tp->pdev);
1527 		if (addr < 0)
1528 			return addr;
1529 		tp->phy_addr = addr;
1530 	} else
1531 		tp->phy_addr = TG3_PHY_MII_ADDR;
1532 
1533 	tg3_mdio_start(tp);
1534 
1535 	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1536 		return 0;
1537 
1538 	tp->mdio_bus = mdiobus_alloc();
1539 	if (tp->mdio_bus == NULL)
1540 		return -ENOMEM;
1541 
1542 	tp->mdio_bus->name     = "tg3 mdio bus";
1543 	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1544 	tp->mdio_bus->priv     = tp;
1545 	tp->mdio_bus->parent   = &tp->pdev->dev;
1546 	tp->mdio_bus->read     = &tg3_mdio_read;
1547 	tp->mdio_bus->write    = &tg3_mdio_write;
1548 	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1549 
1550 	/* The bus registration will look for all the PHYs on the mdio bus.
1551 	 * Unfortunately, it does not ensure the PHY is powered up before
1552 	 * accessing the PHY ID registers.  A chip reset is the
1553 	 * quickest way to bring the device back to an operational state..
1554 	 */
1555 	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1556 		tg3_bmcr_reset(tp);
1557 
1558 	i = mdiobus_register(tp->mdio_bus);
1559 	if (i) {
1560 		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1561 		mdiobus_free(tp->mdio_bus);
1562 		return i;
1563 	}
1564 
1565 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1566 
1567 	if (!phydev || !phydev->drv) {
1568 		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1569 		mdiobus_unregister(tp->mdio_bus);
1570 		mdiobus_free(tp->mdio_bus);
1571 		return -ENODEV;
1572 	}
1573 
1574 	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1575 	case PHY_ID_BCM57780:
1576 		phydev->interface = PHY_INTERFACE_MODE_GMII;
1577 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1578 		break;
1579 	case PHY_ID_BCM50610:
1580 	case PHY_ID_BCM50610M:
1581 		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1582 				     PHY_BRCM_RX_REFCLK_UNUSED |
1583 				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1584 				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1585 		fallthrough;
1586 	case PHY_ID_RTL8211C:
1587 		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1588 		break;
1589 	case PHY_ID_RTL8201E:
1590 	case PHY_ID_BCMAC131:
1591 		phydev->interface = PHY_INTERFACE_MODE_MII;
1592 		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1593 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1594 		break;
1595 	}
1596 
1597 	tg3_flag_set(tp, MDIOBUS_INITED);
1598 
1599 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1600 		tg3_mdio_config_5785(tp);
1601 
1602 	return 0;
1603 }
1604 
tg3_mdio_fini(struct tg3 * tp)1605 static void tg3_mdio_fini(struct tg3 *tp)
1606 {
1607 	if (tg3_flag(tp, MDIOBUS_INITED)) {
1608 		tg3_flag_clear(tp, MDIOBUS_INITED);
1609 		mdiobus_unregister(tp->mdio_bus);
1610 		mdiobus_free(tp->mdio_bus);
1611 	}
1612 }
1613 
1614 /* tp->lock is held. */
tg3_generate_fw_event(struct tg3 * tp)1615 static inline void tg3_generate_fw_event(struct tg3 *tp)
1616 {
1617 	u32 val;
1618 
1619 	val = tr32(GRC_RX_CPU_EVENT);
1620 	val |= GRC_RX_CPU_DRIVER_EVENT;
1621 	tw32_f(GRC_RX_CPU_EVENT, val);
1622 
1623 	tp->last_event_jiffies = jiffies;
1624 }
1625 
1626 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1627 
1628 /* tp->lock is held. */
tg3_wait_for_event_ack(struct tg3 * tp)1629 static void tg3_wait_for_event_ack(struct tg3 *tp)
1630 {
1631 	int i;
1632 	unsigned int delay_cnt;
1633 	long time_remain;
1634 
1635 	/* If enough time has passed, no wait is necessary. */
1636 	time_remain = (long)(tp->last_event_jiffies + 1 +
1637 		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1638 		      (long)jiffies;
1639 	if (time_remain < 0)
1640 		return;
1641 
1642 	/* Check if we can shorten the wait time. */
1643 	delay_cnt = jiffies_to_usecs(time_remain);
1644 	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1645 		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1646 	delay_cnt = (delay_cnt >> 3) + 1;
1647 
1648 	for (i = 0; i < delay_cnt; i++) {
1649 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1650 			break;
1651 		if (pci_channel_offline(tp->pdev))
1652 			break;
1653 
1654 		udelay(8);
1655 	}
1656 }
1657 
1658 /* tp->lock is held. */
tg3_phy_gather_ump_data(struct tg3 * tp,u32 * data)1659 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1660 {
1661 	u32 reg, val;
1662 
1663 	val = 0;
1664 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1665 		val = reg << 16;
1666 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1667 		val |= (reg & 0xffff);
1668 	*data++ = val;
1669 
1670 	val = 0;
1671 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1672 		val = reg << 16;
1673 	if (!tg3_readphy(tp, MII_LPA, &reg))
1674 		val |= (reg & 0xffff);
1675 	*data++ = val;
1676 
1677 	val = 0;
1678 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1679 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1680 			val = reg << 16;
1681 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1682 			val |= (reg & 0xffff);
1683 	}
1684 	*data++ = val;
1685 
1686 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1687 		val = reg << 16;
1688 	else
1689 		val = 0;
1690 	*data++ = val;
1691 }
1692 
1693 /* tp->lock is held. */
tg3_ump_link_report(struct tg3 * tp)1694 static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696 	u32 data[4];
1697 
1698 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 		return;
1700 
1701 	tg3_phy_gather_ump_data(tp, data);
1702 
1703 	tg3_wait_for_event_ack(tp);
1704 
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711 
1712 	tg3_generate_fw_event(tp);
1713 }
1714 
1715 /* tp->lock is held. */
tg3_stop_fw(struct tg3 * tp)1716 static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719 		/* Wait for RX cpu to ACK the previous event. */
1720 		tg3_wait_for_event_ack(tp);
1721 
1722 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723 
1724 		tg3_generate_fw_event(tp);
1725 
1726 		/* Wait for RX cpu to ACK this event. */
1727 		tg3_wait_for_event_ack(tp);
1728 	}
1729 }
1730 
1731 /* tp->lock is held. */
tg3_write_sig_pre_reset(struct tg3 * tp,int kind)1732 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736 
1737 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738 		switch (kind) {
1739 		case RESET_KIND_INIT:
1740 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 				      DRV_STATE_START);
1742 			break;
1743 
1744 		case RESET_KIND_SHUTDOWN:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_UNLOAD);
1747 			break;
1748 
1749 		case RESET_KIND_SUSPEND:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_SUSPEND);
1752 			break;
1753 
1754 		default:
1755 			break;
1756 		}
1757 	}
1758 }
1759 
1760 /* tp->lock is held. */
tg3_write_sig_post_reset(struct tg3 * tp,int kind)1761 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764 		switch (kind) {
1765 		case RESET_KIND_INIT:
1766 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 				      DRV_STATE_START_DONE);
1768 			break;
1769 
1770 		case RESET_KIND_SHUTDOWN:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_UNLOAD_DONE);
1773 			break;
1774 
1775 		default:
1776 			break;
1777 		}
1778 	}
1779 }
1780 
1781 /* tp->lock is held. */
tg3_write_sig_legacy(struct tg3 * tp,int kind)1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784 	if (tg3_flag(tp, ENABLE_ASF)) {
1785 		switch (kind) {
1786 		case RESET_KIND_INIT:
1787 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 				      DRV_STATE_START);
1789 			break;
1790 
1791 		case RESET_KIND_SHUTDOWN:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_UNLOAD);
1794 			break;
1795 
1796 		case RESET_KIND_SUSPEND:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_SUSPEND);
1799 			break;
1800 
1801 		default:
1802 			break;
1803 		}
1804 	}
1805 }
1806 
tg3_poll_fw(struct tg3 * tp)1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809 	int i;
1810 	u32 val;
1811 
1812 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 		return 0;
1814 
1815 	if (tg3_flag(tp, IS_SSB_CORE)) {
1816 		/* We don't use firmware. */
1817 		return 0;
1818 	}
1819 
1820 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821 		/* Wait up to 20ms for init done. */
1822 		for (i = 0; i < 200; i++) {
1823 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824 				return 0;
1825 			if (pci_channel_offline(tp->pdev))
1826 				return -ENODEV;
1827 
1828 			udelay(100);
1829 		}
1830 		return -ENODEV;
1831 	}
1832 
1833 	/* Wait for firmware initialization to complete. */
1834 	for (i = 0; i < 100000; i++) {
1835 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837 			break;
1838 		if (pci_channel_offline(tp->pdev)) {
1839 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1841 				netdev_info(tp->dev, "No firmware running\n");
1842 			}
1843 
1844 			break;
1845 		}
1846 
1847 		udelay(10);
1848 	}
1849 
1850 	/* Chip might not be fitted with firmware.  Some Sun onboard
1851 	 * parts are configured like that.  So don't signal the timeout
1852 	 * of the above loop as an error, but do report the lack of
1853 	 * running firmware once.
1854 	 */
1855 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 
1858 		netdev_info(tp->dev, "No firmware running\n");
1859 	}
1860 
1861 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862 		/* The 57765 A0 needs a little more
1863 		 * time to do some important work.
1864 		 */
1865 		mdelay(10);
1866 	}
1867 
1868 	return 0;
1869 }
1870 
tg3_link_report(struct tg3 * tp)1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873 	if (!netif_carrier_ok(tp->dev)) {
1874 		netif_info(tp, link, tp->dev, "Link is down\n");
1875 		tg3_ump_link_report(tp);
1876 	} else if (netif_msg_link(tp)) {
1877 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878 			    (tp->link_config.active_speed == SPEED_1000 ?
1879 			     1000 :
1880 			     (tp->link_config.active_speed == SPEED_100 ?
1881 			      100 : 10)),
1882 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 			     "full" : "half"));
1884 
1885 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887 			    "on" : "off",
1888 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 			    "on" : "off");
1890 
1891 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892 			netdev_info(tp->dev, "EEE is %s\n",
1893 				    tp->setlpicnt ? "enabled" : "disabled");
1894 
1895 		tg3_ump_link_report(tp);
1896 	}
1897 
1898 	tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900 
tg3_decode_flowctrl_1000T(u32 adv)1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903 	u32 flowctrl = 0;
1904 
1905 	if (adv & ADVERTISE_PAUSE_CAP) {
1906 		flowctrl |= FLOW_CTRL_RX;
1907 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1908 			flowctrl |= FLOW_CTRL_TX;
1909 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1910 		flowctrl |= FLOW_CTRL_TX;
1911 
1912 	return flowctrl;
1913 }
1914 
tg3_advert_flowctrl_1000X(u8 flow_ctrl)1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917 	u16 miireg;
1918 
1919 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920 		miireg = ADVERTISE_1000XPAUSE;
1921 	else if (flow_ctrl & FLOW_CTRL_TX)
1922 		miireg = ADVERTISE_1000XPSE_ASYM;
1923 	else if (flow_ctrl & FLOW_CTRL_RX)
1924 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925 	else
1926 		miireg = 0;
1927 
1928 	return miireg;
1929 }
1930 
tg3_decode_flowctrl_1000X(u32 adv)1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933 	u32 flowctrl = 0;
1934 
1935 	if (adv & ADVERTISE_1000XPAUSE) {
1936 		flowctrl |= FLOW_CTRL_RX;
1937 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938 			flowctrl |= FLOW_CTRL_TX;
1939 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1940 		flowctrl |= FLOW_CTRL_TX;
1941 
1942 	return flowctrl;
1943 }
1944 
tg3_resolve_flowctrl_1000X(u16 lcladv,u16 rmtadv)1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947 	u8 cap = 0;
1948 
1949 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952 		if (lcladv & ADVERTISE_1000XPAUSE)
1953 			cap = FLOW_CTRL_RX;
1954 		if (rmtadv & ADVERTISE_1000XPAUSE)
1955 			cap = FLOW_CTRL_TX;
1956 	}
1957 
1958 	return cap;
1959 }
1960 
tg3_setup_flow_control(struct tg3 * tp,u32 lcladv,u32 rmtadv)1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963 	u8 autoneg;
1964 	u8 flowctrl = 0;
1965 	u32 old_rx_mode = tp->rx_mode;
1966 	u32 old_tx_mode = tp->tx_mode;
1967 
1968 	if (tg3_flag(tp, USE_PHYLIB))
1969 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970 	else
1971 		autoneg = tp->link_config.autoneg;
1972 
1973 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976 		else
1977 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978 	} else
1979 		flowctrl = tp->link_config.flowctrl;
1980 
1981 	tp->link_config.active_flowctrl = flowctrl;
1982 
1983 	if (flowctrl & FLOW_CTRL_RX)
1984 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985 	else
1986 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987 
1988 	if (old_rx_mode != tp->rx_mode)
1989 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1990 
1991 	if (flowctrl & FLOW_CTRL_TX)
1992 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993 	else
1994 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995 
1996 	if (old_tx_mode != tp->tx_mode)
1997 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999 
tg3_adjust_link(struct net_device * dev)2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002 	u8 oldflowctrl, linkmesg = 0;
2003 	u32 mac_mode, lcl_adv, rmt_adv;
2004 	struct tg3 *tp = netdev_priv(dev);
2005 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006 
2007 	spin_lock_bh(&tp->lock);
2008 
2009 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010 				    MAC_MODE_HALF_DUPLEX);
2011 
2012 	oldflowctrl = tp->link_config.active_flowctrl;
2013 
2014 	if (phydev->link) {
2015 		lcl_adv = 0;
2016 		rmt_adv = 0;
2017 
2018 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2020 		else if (phydev->speed == SPEED_1000 ||
2021 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2022 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023 		else
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 
2026 		if (phydev->duplex == DUPLEX_HALF)
2027 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2028 		else {
2029 			lcl_adv = mii_advertise_flowctrl(
2030 				  tp->link_config.flowctrl);
2031 
2032 			if (phydev->pause)
2033 				rmt_adv = LPA_PAUSE_CAP;
2034 			if (phydev->asym_pause)
2035 				rmt_adv |= LPA_PAUSE_ASYM;
2036 		}
2037 
2038 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039 	} else
2040 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041 
2042 	if (mac_mode != tp->mac_mode) {
2043 		tp->mac_mode = mac_mode;
2044 		tw32_f(MAC_MODE, tp->mac_mode);
2045 		udelay(40);
2046 	}
2047 
2048 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049 		if (phydev->speed == SPEED_10)
2050 			tw32(MAC_MI_STAT,
2051 			     MAC_MI_STAT_10MBPS_MODE |
2052 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 		else
2054 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 	}
2056 
2057 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058 		tw32(MAC_TX_LENGTHS,
2059 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2061 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062 	else
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 
2068 	if (phydev->link != tp->old_link ||
2069 	    phydev->speed != tp->link_config.active_speed ||
2070 	    phydev->duplex != tp->link_config.active_duplex ||
2071 	    oldflowctrl != tp->link_config.active_flowctrl)
2072 		linkmesg = 1;
2073 
2074 	tp->old_link = phydev->link;
2075 	tp->link_config.active_speed = phydev->speed;
2076 	tp->link_config.active_duplex = phydev->duplex;
2077 
2078 	spin_unlock_bh(&tp->lock);
2079 
2080 	if (linkmesg)
2081 		tg3_link_report(tp);
2082 }
2083 
tg3_phy_init(struct tg3 * tp)2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086 	struct phy_device *phydev;
2087 
2088 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 		return 0;
2090 
2091 	/* Bring the PHY back to a known state. */
2092 	tg3_bmcr_reset(tp);
2093 
2094 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095 
2096 	/* Attach the MAC to the PHY. */
2097 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2098 			     tg3_adjust_link, phydev->interface);
2099 	if (IS_ERR(phydev)) {
2100 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101 		return PTR_ERR(phydev);
2102 	}
2103 
2104 	/* Mask with MAC supported features. */
2105 	switch (phydev->interface) {
2106 	case PHY_INTERFACE_MODE_GMII:
2107 	case PHY_INTERFACE_MODE_RGMII:
2108 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109 			phy_set_max_speed(phydev, SPEED_1000);
2110 			phy_support_asym_pause(phydev);
2111 			break;
2112 		}
2113 		fallthrough;
2114 	case PHY_INTERFACE_MODE_MII:
2115 		phy_set_max_speed(phydev, SPEED_100);
2116 		phy_support_asym_pause(phydev);
2117 		break;
2118 	default:
2119 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 		return -EINVAL;
2121 	}
2122 
2123 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124 
2125 	phy_attached_info(phydev);
2126 
2127 	return 0;
2128 }
2129 
tg3_phy_start(struct tg3 * tp)2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132 	struct phy_device *phydev;
2133 
2134 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 		return;
2136 
2137 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138 
2139 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141 		phydev->speed = tp->link_config.speed;
2142 		phydev->duplex = tp->link_config.duplex;
2143 		phydev->autoneg = tp->link_config.autoneg;
2144 		ethtool_convert_legacy_u32_to_link_mode(
2145 			phydev->advertising, tp->link_config.advertising);
2146 	}
2147 
2148 	phy_start(phydev);
2149 
2150 	phy_start_aneg(phydev);
2151 }
2152 
tg3_phy_stop(struct tg3 * tp)2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 		return;
2157 
2158 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160 
tg3_phy_fini(struct tg3 * tp)2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166 	}
2167 }
2168 
tg3_phy_set_extloopbk(struct tg3 * tp)2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171 	int err;
2172 	u32 val;
2173 
2174 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 		return 0;
2176 
2177 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178 		/* Cannot do read-modify-write on 5401 */
2179 		err = tg3_phy_auxctl_write(tp,
2180 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182 					   0x4c20);
2183 		goto done;
2184 	}
2185 
2186 	err = tg3_phy_auxctl_read(tp,
2187 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188 	if (err)
2189 		return err;
2190 
2191 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192 	err = tg3_phy_auxctl_write(tp,
2193 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194 
2195 done:
2196 	return err;
2197 }
2198 
tg3_phy_fet_toggle_apd(struct tg3 * tp,bool enable)2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201 	u32 phytest;
2202 
2203 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 		u32 phy;
2205 
2206 		tg3_writephy(tp, MII_TG3_FET_TEST,
2207 			     phytest | MII_TG3_FET_SHADOW_EN);
2208 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209 			if (enable)
2210 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 			else
2212 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214 		}
2215 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216 	}
2217 }
2218 
tg3_phy_toggle_apd(struct tg3 * tp,bool enable)2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221 	u32 reg;
2222 
2223 	if (!tg3_flag(tp, 5705_PLUS) ||
2224 	    (tg3_flag(tp, 5717_PLUS) &&
2225 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 		return;
2227 
2228 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229 		tg3_phy_fet_toggle_apd(tp, enable);
2230 		return;
2231 	}
2232 
2233 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239 
2240 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241 
2242 
2243 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244 	if (enable)
2245 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246 
2247 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249 
tg3_phy_toggle_automdix(struct tg3 * tp,bool enable)2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252 	u32 phy;
2253 
2254 	if (!tg3_flag(tp, 5705_PLUS) ||
2255 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 		return;
2257 
2258 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 		u32 ephy;
2260 
2261 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263 
2264 			tg3_writephy(tp, MII_TG3_FET_TEST,
2265 				     ephy | MII_TG3_FET_SHADOW_EN);
2266 			if (!tg3_readphy(tp, reg, &phy)) {
2267 				if (enable)
2268 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 				else
2270 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 				tg3_writephy(tp, reg, phy);
2272 			}
2273 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274 		}
2275 	} else {
2276 		int ret;
2277 
2278 		ret = tg3_phy_auxctl_read(tp,
2279 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280 		if (!ret) {
2281 			if (enable)
2282 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 			else
2284 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 			tg3_phy_auxctl_write(tp,
2286 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287 		}
2288 	}
2289 }
2290 
tg3_phy_set_wirespeed(struct tg3 * tp)2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293 	int ret;
2294 	u32 val;
2295 
2296 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 		return;
2298 
2299 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 	if (!ret)
2301 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304 
tg3_phy_apply_otp(struct tg3 * tp)2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307 	u32 otp, phy;
2308 
2309 	if (!tp->phy_otp)
2310 		return;
2311 
2312 	otp = tp->phy_otp;
2313 
2314 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 		return;
2316 
2317 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320 
2321 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324 
2325 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328 
2329 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331 
2332 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334 
2335 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338 
2339 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
2341 
tg3_eee_pull_config(struct tg3 * tp,struct ethtool_keee * eee)2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2343 {
2344 	u32 val;
2345 	struct ethtool_keee *dest = &tp->eee;
2346 
2347 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 		return;
2349 
2350 	if (eee)
2351 		dest = eee;
2352 
2353 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 		return;
2355 
2356 	/* Pull eee_active */
2357 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359 		dest->eee_active = 1;
2360 	} else
2361 		dest->eee_active = 0;
2362 
2363 	/* Pull lp advertised settings */
2364 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 		return;
2366 	mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2367 
2368 	/* Pull advertised and eee_enabled settings */
2369 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 		return;
2371 	dest->eee_enabled = !!val;
2372 	mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2373 
2374 	/* Pull tx_lpi_enabled */
2375 	val = tr32(TG3_CPMU_EEE_MODE);
2376 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377 
2378 	/* Pull lpi timer value */
2379 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381 
tg3_phy_eee_adjust(struct tg3 * tp,bool current_link_up)2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384 	u32 val;
2385 
2386 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387 		return;
2388 
2389 	tp->setlpicnt = 0;
2390 
2391 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 	    current_link_up &&
2393 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2394 	    (tp->link_config.active_speed == SPEED_100 ||
2395 	     tp->link_config.active_speed == SPEED_1000)) {
2396 		u32 eeectl;
2397 
2398 		if (tp->link_config.active_speed == SPEED_1000)
2399 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 		else
2401 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402 
2403 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404 
2405 		tg3_eee_pull_config(tp, NULL);
2406 		if (tp->eee.eee_active)
2407 			tp->setlpicnt = 2;
2408 	}
2409 
2410 	if (!tp->setlpicnt) {
2411 		if (current_link_up &&
2412 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 		}
2416 
2417 		val = tr32(TG3_CPMU_EEE_MODE);
2418 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419 	}
2420 }
2421 
tg3_phy_eee_enable(struct tg3 * tp)2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424 	u32 val;
2425 
2426 	if (tp->link_config.active_speed == SPEED_1000 &&
2427 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 	     tg3_flag(tp, 57765_CLASS)) &&
2430 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 		val = MII_TG3_DSP_TAP26_ALNOKO |
2432 		      MII_TG3_DSP_TAP26_RMRXSTO;
2433 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 	}
2436 
2437 	val = tr32(TG3_CPMU_EEE_MODE);
2438 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440 
tg3_wait_macro_done(struct tg3 * tp)2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443 	int limit = 100;
2444 
2445 	while (limit--) {
2446 		u32 tmp32;
2447 
2448 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 			if ((tmp32 & 0x1000) == 0)
2450 				break;
2451 		}
2452 	}
2453 	if (limit < 0)
2454 		return -EBUSY;
2455 
2456 	return 0;
2457 }
2458 
tg3_phy_write_and_check_testpat(struct tg3 * tp,int * resetp)2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461 	static const u32 test_pat[4][6] = {
2462 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466 	};
2467 	int chan;
2468 
2469 	for (chan = 0; chan < 4; chan++) {
2470 		int i;
2471 
2472 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 			     (chan * 0x2000) | 0x0200);
2474 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475 
2476 		for (i = 0; i < 6; i++)
2477 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 				     test_pat[chan][i]);
2479 
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 		if (tg3_wait_macro_done(tp)) {
2482 			*resetp = 1;
2483 			return -EBUSY;
2484 		}
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 			     (chan * 0x2000) | 0x0200);
2488 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 		if (tg3_wait_macro_done(tp)) {
2490 			*resetp = 1;
2491 			return -EBUSY;
2492 		}
2493 
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		for (i = 0; i < 6; i += 2) {
2501 			u32 low, high;
2502 
2503 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 			    tg3_wait_macro_done(tp)) {
2506 				*resetp = 1;
2507 				return -EBUSY;
2508 			}
2509 			low &= 0x7fff;
2510 			high &= 0x000f;
2511 			if (low != test_pat[chan][i] ||
2512 			    high != test_pat[chan][i+1]) {
2513 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516 
2517 				return -EBUSY;
2518 			}
2519 		}
2520 	}
2521 
2522 	return 0;
2523 }
2524 
tg3_phy_reset_chanpat(struct tg3 * tp)2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527 	int chan;
2528 
2529 	for (chan = 0; chan < 4; chan++) {
2530 		int i;
2531 
2532 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 			     (chan * 0x2000) | 0x0200);
2534 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 		for (i = 0; i < 6; i++)
2536 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 		if (tg3_wait_macro_done(tp))
2539 			return -EBUSY;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
tg3_phy_reset_5703_4_5(struct tg3 * tp)2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547 	u32 reg32, phy9_orig;
2548 	int retries, do_phy_reset, err;
2549 
2550 	retries = 10;
2551 	do_phy_reset = 1;
2552 	do {
2553 		if (do_phy_reset) {
2554 			err = tg3_bmcr_reset(tp);
2555 			if (err)
2556 				return err;
2557 			do_phy_reset = 0;
2558 		}
2559 
2560 		/* Disable transmitter and interrupt.  */
2561 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562 			continue;
2563 
2564 		reg32 |= 0x3000;
2565 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566 
2567 		/* Set full-duplex, 1000 mbps.  */
2568 		tg3_writephy(tp, MII_BMCR,
2569 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2570 
2571 		/* Set to master mode.  */
2572 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 			continue;
2574 
2575 		tg3_writephy(tp, MII_CTRL1000,
2576 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577 
2578 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579 		if (err)
2580 			return err;
2581 
2582 		/* Block the PHY control access.  */
2583 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2584 
2585 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 		if (!err)
2587 			break;
2588 	} while (--retries);
2589 
2590 	err = tg3_phy_reset_chanpat(tp);
2591 	if (err)
2592 		return err;
2593 
2594 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2595 
2596 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598 
2599 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2600 
2601 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602 
2603 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604 	if (err)
2605 		return err;
2606 
2607 	reg32 &= ~0x3000;
2608 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609 
2610 	return 0;
2611 }
2612 
tg3_carrier_off(struct tg3 * tp)2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615 	netif_carrier_off(tp->dev);
2616 	tp->link_up = false;
2617 }
2618 
tg3_warn_mgmt_link_flap(struct tg3 * tp)2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621 	if (tg3_flag(tp, ENABLE_ASF))
2622 		netdev_warn(tp->dev,
2623 			    "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625 
2626 /* This will reset the tigon3 PHY if there is no valid
2627  * link unless the FORCE argument is non-zero.
2628  */
tg3_phy_reset(struct tg3 * tp)2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631 	u32 val, cpmuctrl;
2632 	int err;
2633 
2634 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 		val = tr32(GRC_MISC_CFG);
2636 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 		udelay(40);
2638 	}
2639 	err  = tg3_readphy(tp, MII_BMSR, &val);
2640 	err |= tg3_readphy(tp, MII_BMSR, &val);
2641 	if (err != 0)
2642 		return -EBUSY;
2643 
2644 	if (netif_running(tp->dev) && tp->link_up) {
2645 		netif_carrier_off(tp->dev);
2646 		tg3_link_report(tp);
2647 	}
2648 
2649 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 		err = tg3_phy_reset_5703_4_5(tp);
2653 		if (err)
2654 			return err;
2655 		goto out;
2656 	}
2657 
2658 	cpmuctrl = 0;
2659 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 			tw32(TG3_CPMU_CTRL,
2664 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 	}
2666 
2667 	err = tg3_bmcr_reset(tp);
2668 	if (err)
2669 		return err;
2670 
2671 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674 
2675 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 	}
2677 
2678 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 			udelay(40);
2685 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686 		}
2687 	}
2688 
2689 	if (tg3_flag(tp, 5717_PLUS) &&
2690 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 		return 0;
2692 
2693 	tg3_phy_apply_otp(tp);
2694 
2695 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 		tg3_phy_toggle_apd(tp, true);
2697 	else
2698 		tg3_phy_toggle_apd(tp, false);
2699 
2700 out:
2701 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 	}
2707 
2708 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 		}
2720 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 				tg3_writephy(tp, MII_TG3_TEST1,
2726 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2727 			} else
2728 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729 
2730 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2731 		}
2732 	}
2733 
2734 	/* Set Extended packet length bit (bit 14) on all chips that */
2735 	/* support jumbo frames */
2736 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 		/* Cannot do read-modify-write on 5401 */
2738 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 		/* Set bit 14 with read-modify-write to preserve other bits */
2741 		err = tg3_phy_auxctl_read(tp,
2742 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 		if (!err)
2744 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 	}
2747 
2748 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2749 	 * jumbo frames transmission.
2750 	 */
2751 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 	}
2756 
2757 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 		/* adjust output voltage */
2759 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 	}
2761 
2762 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2764 
2765 	tg3_phy_toggle_automdix(tp, true);
2766 	tg3_phy_set_wirespeed(tp);
2767 	return 0;
2768 }
2769 
2770 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2772 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2773 					  TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779 
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2785 
tg3_set_function_status(struct tg3 * tp,u32 newstat)2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788 	u32 status, shift;
2789 
2790 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2792 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 	else
2794 		status = tr32(TG3_CPMU_DRV_STATUS);
2795 
2796 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 	status |= (newstat << shift);
2799 
2800 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2802 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 	else
2804 		tw32(TG3_CPMU_DRV_STATUS, status);
2805 
2806 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
2808 
tg3_pwrsrc_switch_to_vmain(struct tg3 * tp)2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811 	if (!tg3_flag(tp, IS_NIC))
2812 		return 0;
2813 
2814 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 			return -EIO;
2819 
2820 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821 
2822 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2824 
2825 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 	} else {
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 	}
2830 
2831 	return 0;
2832 }
2833 
tg3_pwrsrc_die_with_vmain(struct tg3 * tp)2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836 	u32 grc_local_ctrl;
2837 
2838 	if (!tg3_flag(tp, IS_NIC) ||
2839 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2841 		return;
2842 
2843 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844 
2845 	tw32_wait_f(GRC_LOCAL_CTRL,
2846 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 
2849 	tw32_wait_f(GRC_LOCAL_CTRL,
2850 		    grc_local_ctrl,
2851 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 
2853 	tw32_wait_f(GRC_LOCAL_CTRL,
2854 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857 
tg3_pwrsrc_switch_to_vaux(struct tg3 * tp)2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860 	if (!tg3_flag(tp, IS_NIC))
2861 		return;
2862 
2863 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 			    (GRC_LCLCTRL_GPIO_OE0 |
2867 			     GRC_LCLCTRL_GPIO_OE1 |
2868 			     GRC_LCLCTRL_GPIO_OE2 |
2869 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2871 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 				     GRC_LCLCTRL_GPIO_OE1 |
2877 				     GRC_LCLCTRL_GPIO_OE2 |
2878 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 				     tp->grc_local_ctrl;
2881 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 
2884 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 
2888 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 	} else {
2892 		u32 no_gpio2;
2893 		u32 grc_local_ctrl = 0;
2894 
2895 		/* Workaround to prevent overdrawing Amps. */
2896 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 				    grc_local_ctrl,
2900 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 		}
2902 
2903 		/* On 5753 and variants, GPIO2 cannot be used. */
2904 		no_gpio2 = tp->nic_sram_data_cfg &
2905 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2906 
2907 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 				  GRC_LCLCTRL_GPIO_OE1 |
2909 				  GRC_LCLCTRL_GPIO_OE2 |
2910 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2912 		if (no_gpio2) {
2913 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2915 		}
2916 		tw32_wait_f(GRC_LOCAL_CTRL,
2917 			    tp->grc_local_ctrl | grc_local_ctrl,
2918 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 
2920 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921 
2922 		tw32_wait_f(GRC_LOCAL_CTRL,
2923 			    tp->grc_local_ctrl | grc_local_ctrl,
2924 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 
2926 		if (!no_gpio2) {
2927 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 			tw32_wait_f(GRC_LOCAL_CTRL,
2929 				    tp->grc_local_ctrl | grc_local_ctrl,
2930 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 		}
2932 	}
2933 }
2934 
tg3_frob_aux_power_5717(struct tg3 * tp,bool wol_enable)2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937 	u32 msg = 0;
2938 
2939 	/* Serialize power state transitions */
2940 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 		return;
2942 
2943 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 		msg = TG3_GPIO_MSG_NEED_VAUX;
2945 
2946 	msg = tg3_set_function_status(tp, msg);
2947 
2948 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 		goto done;
2950 
2951 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 		tg3_pwrsrc_switch_to_vaux(tp);
2953 	else
2954 		tg3_pwrsrc_die_with_vmain(tp);
2955 
2956 done:
2957 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959 
tg3_frob_aux_power(struct tg3 * tp,bool include_wol)2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962 	bool need_vaux = false;
2963 
2964 	/* The GPIOs do something completely different on 57765. */
2965 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 		return;
2967 
2968 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 		tg3_frob_aux_power_5717(tp, include_wol ?
2972 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973 		return;
2974 	}
2975 
2976 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 		struct net_device *dev_peer;
2978 
2979 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2980 
2981 		/* remove_one() may have been run on the peer. */
2982 		if (dev_peer) {
2983 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2984 
2985 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 				return;
2987 
2988 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 			    tg3_flag(tp_peer, ENABLE_ASF))
2990 				need_vaux = true;
2991 		}
2992 	}
2993 
2994 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 	    tg3_flag(tp, ENABLE_ASF))
2996 		need_vaux = true;
2997 
2998 	if (need_vaux)
2999 		tg3_pwrsrc_switch_to_vaux(tp);
3000 	else
3001 		tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003 
tg3_5700_link_polarity(struct tg3 * tp,u32 speed)3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 		return 1;
3008 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 		if (speed != SPEED_10)
3010 			return 1;
3011 	} else if (speed == SPEED_10)
3012 		return 1;
3013 
3014 	return 0;
3015 }
3016 
tg3_phy_power_bug(struct tg3 * tp)3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019 	switch (tg3_asic_rev(tp)) {
3020 	case ASIC_REV_5700:
3021 	case ASIC_REV_5704:
3022 		return true;
3023 	case ASIC_REV_5780:
3024 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025 			return true;
3026 		return false;
3027 	case ASIC_REV_5717:
3028 		if (!tp->pci_fn)
3029 			return true;
3030 		return false;
3031 	case ASIC_REV_5719:
3032 	case ASIC_REV_5720:
3033 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034 		    !tp->pci_fn)
3035 			return true;
3036 		return false;
3037 	}
3038 
3039 	return false;
3040 }
3041 
tg3_phy_led_bug(struct tg3 * tp)3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044 	switch (tg3_asic_rev(tp)) {
3045 	case ASIC_REV_5719:
3046 	case ASIC_REV_5720:
3047 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048 		    !tp->pci_fn)
3049 			return true;
3050 		return false;
3051 	}
3052 
3053 	return false;
3054 }
3055 
tg3_power_down_phy(struct tg3 * tp,bool do_low_power)3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058 	u32 val;
3059 
3060 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 		return;
3062 
3063 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067 
3068 			sg_dig_ctrl |=
3069 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072 		}
3073 		return;
3074 	}
3075 
3076 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 		tg3_bmcr_reset(tp);
3078 		val = tr32(GRC_MISC_CFG);
3079 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 		udelay(40);
3081 		return;
3082 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 		u32 phytest;
3084 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 			u32 phy;
3086 
3087 			tg3_writephy(tp, MII_ADVERTISE, 0);
3088 			tg3_writephy(tp, MII_BMCR,
3089 				     BMCR_ANENABLE | BMCR_ANRESTART);
3090 
3091 			tg3_writephy(tp, MII_TG3_FET_TEST,
3092 				     phytest | MII_TG3_FET_SHADOW_EN);
3093 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 				tg3_writephy(tp,
3096 					     MII_TG3_FET_SHDW_AUXMODE4,
3097 					     phy);
3098 			}
3099 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 		}
3101 		return;
3102 	} else if (do_low_power) {
3103 		if (!tg3_phy_led_bug(tp))
3104 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106 
3107 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 	}
3112 
3113 	/* The PHY should not be powered down on some chips because
3114 	 * of bugs.
3115 	 */
3116 	if (tg3_phy_power_bug(tp))
3117 		return;
3118 
3119 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 	}
3126 
3127 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129 
3130 /* tp->lock is held. */
tg3_nvram_lock(struct tg3 * tp)3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133 	if (tg3_flag(tp, NVRAM)) {
3134 		int i;
3135 
3136 		if (tp->nvram_lock_cnt == 0) {
3137 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 			for (i = 0; i < 8000; i++) {
3139 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140 					break;
3141 				udelay(20);
3142 			}
3143 			if (i == 8000) {
3144 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145 				return -ENODEV;
3146 			}
3147 		}
3148 		tp->nvram_lock_cnt++;
3149 	}
3150 	return 0;
3151 }
3152 
3153 /* tp->lock is held. */
tg3_nvram_unlock(struct tg3 * tp)3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156 	if (tg3_flag(tp, NVRAM)) {
3157 		if (tp->nvram_lock_cnt > 0)
3158 			tp->nvram_lock_cnt--;
3159 		if (tp->nvram_lock_cnt == 0)
3160 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 	}
3162 }
3163 
3164 /* tp->lock is held. */
tg3_enable_nvram_access(struct tg3 * tp)3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 		u32 nvaccess = tr32(NVRAM_ACCESS);
3169 
3170 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171 	}
3172 }
3173 
3174 /* tp->lock is held. */
tg3_disable_nvram_access(struct tg3 * tp)3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 		u32 nvaccess = tr32(NVRAM_ACCESS);
3179 
3180 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181 	}
3182 }
3183 
tg3_nvram_read_using_eeprom(struct tg3 * tp,u32 offset,u32 * val)3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 					u32 offset, u32 *val)
3186 {
3187 	u32 tmp;
3188 	int i;
3189 
3190 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 		return -EINVAL;
3192 
3193 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 					EEPROM_ADDR_DEVID_MASK |
3195 					EEPROM_ADDR_READ);
3196 	tw32(GRC_EEPROM_ADDR,
3197 	     tmp |
3198 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 	      EEPROM_ADDR_ADDR_MASK) |
3201 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202 
3203 	for (i = 0; i < 1000; i++) {
3204 		tmp = tr32(GRC_EEPROM_ADDR);
3205 
3206 		if (tmp & EEPROM_ADDR_COMPLETE)
3207 			break;
3208 		msleep(1);
3209 	}
3210 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 		return -EBUSY;
3212 
3213 	tmp = tr32(GRC_EEPROM_DATA);
3214 
3215 	/*
3216 	 * The data will always be opposite the native endian
3217 	 * format.  Perform a blind byteswap to compensate.
3218 	 */
3219 	*val = swab32(tmp);
3220 
3221 	return 0;
3222 }
3223 
3224 #define NVRAM_CMD_TIMEOUT 10000
3225 
tg3_nvram_exec_cmd(struct tg3 * tp,u32 nvram_cmd)3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228 	int i;
3229 
3230 	tw32(NVRAM_CMD, nvram_cmd);
3231 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 		usleep_range(10, 40);
3233 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 			udelay(10);
3235 			break;
3236 		}
3237 	}
3238 
3239 	if (i == NVRAM_CMD_TIMEOUT)
3240 		return -EBUSY;
3241 
3242 	return 0;
3243 }
3244 
tg3_nvram_phys_addr(struct tg3 * tp,u32 addr)3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247 	if (tg3_flag(tp, NVRAM) &&
3248 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3249 	    tg3_flag(tp, FLASH) &&
3250 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3252 
3253 		addr = ((addr / tp->nvram_pagesize) <<
3254 			ATMEL_AT45DB0X1B_PAGE_POS) +
3255 		       (addr % tp->nvram_pagesize);
3256 
3257 	return addr;
3258 }
3259 
tg3_nvram_logical_addr(struct tg3 * tp,u32 addr)3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262 	if (tg3_flag(tp, NVRAM) &&
3263 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3264 	    tg3_flag(tp, FLASH) &&
3265 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 
3268 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 			tp->nvram_pagesize) +
3270 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271 
3272 	return addr;
3273 }
3274 
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276  * the byteswapping settings for all other register accesses.
3277  * tg3 devices are BE devices, so on a BE machine, the data
3278  * returned will be exactly as it is seen in NVRAM.  On a LE
3279  * machine, the 32-bit value will be byteswapped.
3280  */
tg3_nvram_read(struct tg3 * tp,u32 offset,u32 * val)3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283 	int ret;
3284 
3285 	if (!tg3_flag(tp, NVRAM))
3286 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3287 
3288 	offset = tg3_nvram_phys_addr(tp, offset);
3289 
3290 	if (offset > NVRAM_ADDR_MSK)
3291 		return -EINVAL;
3292 
3293 	ret = tg3_nvram_lock(tp);
3294 	if (ret)
3295 		return ret;
3296 
3297 	tg3_enable_nvram_access(tp);
3298 
3299 	tw32(NVRAM_ADDR, offset);
3300 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 
3303 	if (ret == 0)
3304 		*val = tr32(NVRAM_RDDATA);
3305 
3306 	tg3_disable_nvram_access(tp);
3307 
3308 	tg3_nvram_unlock(tp);
3309 
3310 	return ret;
3311 }
3312 
3313 /* Ensures NVRAM data is in bytestream format. */
tg3_nvram_read_be32(struct tg3 * tp,u32 offset,__be32 * val)3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316 	u32 v;
3317 	int res = tg3_nvram_read(tp, offset, &v);
3318 	if (!res)
3319 		*val = cpu_to_be32(v);
3320 	return res;
3321 }
3322 
tg3_nvram_write_block_using_eeprom(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 				    u32 offset, u32 len, u8 *buf)
3325 {
3326 	int i, j, rc = 0;
3327 	u32 val;
3328 
3329 	for (i = 0; i < len; i += 4) {
3330 		u32 addr;
3331 		__be32 data;
3332 
3333 		addr = offset + i;
3334 
3335 		memcpy(&data, buf + i, 4);
3336 
3337 		/*
3338 		 * The SEEPROM interface expects the data to always be opposite
3339 		 * the native endian format.  We accomplish this by reversing
3340 		 * all the operations that would have been performed on the
3341 		 * data from a call to tg3_nvram_read_be32().
3342 		 */
3343 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344 
3345 		val = tr32(GRC_EEPROM_ADDR);
3346 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347 
3348 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 			EEPROM_ADDR_READ);
3350 		tw32(GRC_EEPROM_ADDR, val |
3351 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 			(addr & EEPROM_ADDR_ADDR_MASK) |
3353 			EEPROM_ADDR_START |
3354 			EEPROM_ADDR_WRITE);
3355 
3356 		for (j = 0; j < 1000; j++) {
3357 			val = tr32(GRC_EEPROM_ADDR);
3358 
3359 			if (val & EEPROM_ADDR_COMPLETE)
3360 				break;
3361 			msleep(1);
3362 		}
3363 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3364 			rc = -EBUSY;
3365 			break;
3366 		}
3367 	}
3368 
3369 	return rc;
3370 }
3371 
3372 /* offset and length are dword aligned */
tg3_nvram_write_block_unbuffered(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374 		u8 *buf)
3375 {
3376 	int ret = 0;
3377 	u32 pagesize = tp->nvram_pagesize;
3378 	u32 pagemask = pagesize - 1;
3379 	u32 nvram_cmd;
3380 	u8 *tmp;
3381 
3382 	tmp = kmalloc(pagesize, GFP_KERNEL);
3383 	if (tmp == NULL)
3384 		return -ENOMEM;
3385 
3386 	while (len) {
3387 		int j;
3388 		u32 phy_addr, page_off, size;
3389 
3390 		phy_addr = offset & ~pagemask;
3391 
3392 		for (j = 0; j < pagesize; j += 4) {
3393 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 						  (__be32 *) (tmp + j));
3395 			if (ret)
3396 				break;
3397 		}
3398 		if (ret)
3399 			break;
3400 
3401 		page_off = offset & pagemask;
3402 		size = pagesize;
3403 		if (len < size)
3404 			size = len;
3405 
3406 		len -= size;
3407 
3408 		memcpy(tmp + page_off, buf, size);
3409 
3410 		offset = offset + (pagesize - page_off);
3411 
3412 		tg3_enable_nvram_access(tp);
3413 
3414 		/*
3415 		 * Before we can erase the flash page, we need
3416 		 * to issue a special "write enable" command.
3417 		 */
3418 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 
3420 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 			break;
3422 
3423 		/* Erase the target page */
3424 		tw32(NVRAM_ADDR, phy_addr);
3425 
3426 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428 
3429 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 			break;
3431 
3432 		/* Issue another write enable to start the write. */
3433 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 
3435 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 			break;
3437 
3438 		for (j = 0; j < pagesize; j += 4) {
3439 			__be32 data;
3440 
3441 			data = *((__be32 *) (tmp + j));
3442 
3443 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444 
3445 			tw32(NVRAM_ADDR, phy_addr + j);
3446 
3447 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448 				NVRAM_CMD_WR;
3449 
3450 			if (j == 0)
3451 				nvram_cmd |= NVRAM_CMD_FIRST;
3452 			else if (j == (pagesize - 4))
3453 				nvram_cmd |= NVRAM_CMD_LAST;
3454 
3455 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 			if (ret)
3457 				break;
3458 		}
3459 		if (ret)
3460 			break;
3461 	}
3462 
3463 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3465 
3466 	kfree(tmp);
3467 
3468 	return ret;
3469 }
3470 
3471 /* offset and length are dword aligned */
tg3_nvram_write_block_buffered(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473 		u8 *buf)
3474 {
3475 	int i, ret = 0;
3476 
3477 	for (i = 0; i < len; i += 4, offset += 4) {
3478 		u32 page_off, phy_addr, nvram_cmd;
3479 		__be32 data;
3480 
3481 		memcpy(&data, buf + i, 4);
3482 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483 
3484 		page_off = offset % tp->nvram_pagesize;
3485 
3486 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3487 
3488 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489 
3490 		if (page_off == 0 || i == 0)
3491 			nvram_cmd |= NVRAM_CMD_FIRST;
3492 		if (page_off == (tp->nvram_pagesize - 4))
3493 			nvram_cmd |= NVRAM_CMD_LAST;
3494 
3495 		if (i == (len - 4))
3496 			nvram_cmd |= NVRAM_CMD_LAST;
3497 
3498 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 		    !tg3_flag(tp, FLASH) ||
3500 		    !tg3_flag(tp, 57765_PLUS))
3501 			tw32(NVRAM_ADDR, phy_addr);
3502 
3503 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 		    !tg3_flag(tp, 5755_PLUS) &&
3505 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3506 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 			u32 cmd;
3508 
3509 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 			ret = tg3_nvram_exec_cmd(tp, cmd);
3511 			if (ret)
3512 				break;
3513 		}
3514 		if (!tg3_flag(tp, FLASH)) {
3515 			/* We always do complete word writes to eeprom. */
3516 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 		}
3518 
3519 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520 		if (ret)
3521 			break;
3522 	}
3523 	return ret;
3524 }
3525 
3526 /* offset and length are dword aligned */
tg3_nvram_write_block(struct tg3 * tp,u32 offset,u32 len,u8 * buf)3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529 	int ret;
3530 
3531 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534 		udelay(40);
3535 	}
3536 
3537 	if (!tg3_flag(tp, NVRAM)) {
3538 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539 	} else {
3540 		u32 grc_mode;
3541 
3542 		ret = tg3_nvram_lock(tp);
3543 		if (ret)
3544 			return ret;
3545 
3546 		tg3_enable_nvram_access(tp);
3547 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 			tw32(NVRAM_WRITE1, 0x406);
3549 
3550 		grc_mode = tr32(GRC_MODE);
3551 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552 
3553 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 				buf);
3556 		} else {
3557 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558 				buf);
3559 		}
3560 
3561 		grc_mode = tr32(GRC_MODE);
3562 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563 
3564 		tg3_disable_nvram_access(tp);
3565 		tg3_nvram_unlock(tp);
3566 	}
3567 
3568 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570 		udelay(40);
3571 	}
3572 
3573 	return ret;
3574 }
3575 
3576 #define RX_CPU_SCRATCH_BASE	0x30000
3577 #define RX_CPU_SCRATCH_SIZE	0x04000
3578 #define TX_CPU_SCRATCH_BASE	0x34000
3579 #define TX_CPU_SCRATCH_SIZE	0x04000
3580 
3581 /* tp->lock is held. */
tg3_pause_cpu(struct tg3 * tp,u32 cpu_base)3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584 	int i;
3585 	const int iters = 10000;
3586 
3587 	for (i = 0; i < iters; i++) {
3588 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3590 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 			break;
3592 		if (pci_channel_offline(tp->pdev))
3593 			return -EBUSY;
3594 	}
3595 
3596 	return (i == iters) ? -EBUSY : 0;
3597 }
3598 
3599 /* tp->lock is held. */
tg3_rxcpu_pause(struct tg3 * tp)3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603 
3604 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3606 	udelay(10);
3607 
3608 	return rc;
3609 }
3610 
3611 /* tp->lock is held. */
tg3_txcpu_pause(struct tg3 * tp)3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616 
3617 /* tp->lock is held. */
tg3_resume_cpu(struct tg3 * tp,u32 cpu_base)3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3622 }
3623 
3624 /* tp->lock is held. */
tg3_rxcpu_resume(struct tg3 * tp)3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627 	tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629 
3630 /* tp->lock is held. */
tg3_halt_cpu(struct tg3 * tp,u32 cpu_base)3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 	int rc;
3634 
3635 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636 
3637 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639 
3640 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 		return 0;
3642 	}
3643 	if (cpu_base == RX_CPU_BASE) {
3644 		rc = tg3_rxcpu_pause(tp);
3645 	} else {
3646 		/*
3647 		 * There is only an Rx CPU for the 5750 derivative in the
3648 		 * BCM4785.
3649 		 */
3650 		if (tg3_flag(tp, IS_SSB_CORE))
3651 			return 0;
3652 
3653 		rc = tg3_txcpu_pause(tp);
3654 	}
3655 
3656 	if (rc) {
3657 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659 		return -ENODEV;
3660 	}
3661 
3662 	/* Clear firmware's nvram arbitration. */
3663 	if (tg3_flag(tp, NVRAM))
3664 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665 	return 0;
3666 }
3667 
tg3_fw_data_len(struct tg3 * tp,const struct tg3_firmware_hdr * fw_hdr)3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 			   const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671 	int fw_len;
3672 
3673 	/* Non fragmented firmware have one firmware header followed by a
3674 	 * contiguous chunk of data to be written. The length field in that
3675 	 * header is not the length of data to be written but the complete
3676 	 * length of the bss. The data length is determined based on
3677 	 * tp->fw->size minus headers.
3678 	 *
3679 	 * Fragmented firmware have a main header followed by multiple
3680 	 * fragments. Each fragment is identical to non fragmented firmware
3681 	 * with a firmware header followed by a contiguous chunk of data. In
3682 	 * the main header, the length field is unused and set to 0xffffffff.
3683 	 * In each fragment header the length is the entire size of that
3684 	 * fragment i.e. fragment data + header length. Data length is
3685 	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3686 	 */
3687 	if (tp->fw_len == 0xffffffff)
3688 		fw_len = be32_to_cpu(fw_hdr->len);
3689 	else
3690 		fw_len = tp->fw->size;
3691 
3692 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
3694 
3695 /* tp->lock is held. */
tg3_load_firmware_cpu(struct tg3 * tp,u32 cpu_base,u32 cpu_scratch_base,int cpu_scratch_size,const struct tg3_firmware_hdr * fw_hdr)3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 				 u32 cpu_scratch_base, int cpu_scratch_size,
3698 				 const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700 	int err, i;
3701 	void (*write_op)(struct tg3 *, u32, u32);
3702 	int total_len = tp->fw->size;
3703 
3704 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 		netdev_err(tp->dev,
3706 			   "%s: Trying to load TX cpu firmware which is 5705\n",
3707 			   __func__);
3708 		return -EINVAL;
3709 	}
3710 
3711 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 		write_op = tg3_write_mem;
3713 	else
3714 		write_op = tg3_write_indirect_reg32;
3715 
3716 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 		/* It is possible that bootcode is still loading at this point.
3718 		 * Get the nvram lock first before halting the cpu.
3719 		 */
3720 		int lock_err = tg3_nvram_lock(tp);
3721 		err = tg3_halt_cpu(tp, cpu_base);
3722 		if (!lock_err)
3723 			tg3_nvram_unlock(tp);
3724 		if (err)
3725 			goto out;
3726 
3727 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 			write_op(tp, cpu_scratch_base + i, 0);
3729 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 		tw32(cpu_base + CPU_MODE,
3731 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 	} else {
3733 		/* Subtract additional main header for fragmented firmware and
3734 		 * advance to the first fragment
3735 		 */
3736 		total_len -= TG3_FW_HDR_LEN;
3737 		fw_hdr++;
3738 	}
3739 
3740 	do {
3741 		__be32 *fw_data = (__be32 *)(fw_hdr + 1);
3742 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 			write_op(tp, cpu_scratch_base +
3744 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 				     (i * sizeof(u32)),
3746 				 be32_to_cpu(fw_data[i]));
3747 
3748 		total_len -= be32_to_cpu(fw_hdr->len);
3749 
3750 		/* Advance to next fragment */
3751 		fw_hdr = (struct tg3_firmware_hdr *)
3752 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 	} while (total_len > 0);
3754 
3755 	err = 0;
3756 
3757 out:
3758 	return err;
3759 }
3760 
3761 /* tp->lock is held. */
tg3_pause_cpu_and_set_pc(struct tg3 * tp,u32 cpu_base,u32 pc)3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764 	int i;
3765 	const int iters = 5;
3766 
3767 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 	tw32_f(cpu_base + CPU_PC, pc);
3769 
3770 	for (i = 0; i < iters; i++) {
3771 		if (tr32(cpu_base + CPU_PC) == pc)
3772 			break;
3773 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3775 		tw32_f(cpu_base + CPU_PC, pc);
3776 		udelay(1000);
3777 	}
3778 
3779 	return (i == iters) ? -EBUSY : 0;
3780 }
3781 
3782 /* tp->lock is held. */
tg3_load_5701_a0_firmware_fix(struct tg3 * tp)3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785 	const struct tg3_firmware_hdr *fw_hdr;
3786 	int err;
3787 
3788 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789 
3790 	/* Firmware blob starts with version numbers, followed by
3791 	   start address and length. We are setting complete length.
3792 	   length = end_address_of_bss - start_address_of_text.
3793 	   Remainder is the blob to be loaded contiguously
3794 	   from start address. */
3795 
3796 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798 				    fw_hdr);
3799 	if (err)
3800 		return err;
3801 
3802 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804 				    fw_hdr);
3805 	if (err)
3806 		return err;
3807 
3808 	/* Now startup only the RX cpu. */
3809 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 				       be32_to_cpu(fw_hdr->base_addr));
3811 	if (err) {
3812 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3813 			   "should be %08x\n", __func__,
3814 			   tr32(RX_CPU_BASE + CPU_PC),
3815 				be32_to_cpu(fw_hdr->base_addr));
3816 		return -ENODEV;
3817 	}
3818 
3819 	tg3_rxcpu_resume(tp);
3820 
3821 	return 0;
3822 }
3823 
tg3_validate_rxcpu_state(struct tg3 * tp)3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826 	const int iters = 1000;
3827 	int i;
3828 	u32 val;
3829 
3830 	/* Wait for boot code to complete initialization and enter service
3831 	 * loop. It is then safe to download service patches
3832 	 */
3833 	for (i = 0; i < iters; i++) {
3834 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835 			break;
3836 
3837 		udelay(10);
3838 	}
3839 
3840 	if (i == iters) {
3841 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842 		return -EBUSY;
3843 	}
3844 
3845 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 	if (val & 0xff) {
3847 		netdev_warn(tp->dev,
3848 			    "Other patches exist. Not downloading EEE patch\n");
3849 		return -EEXIST;
3850 	}
3851 
3852 	return 0;
3853 }
3854 
3855 /* tp->lock is held. */
tg3_load_57766_firmware(struct tg3 * tp)3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858 	struct tg3_firmware_hdr *fw_hdr;
3859 
3860 	if (!tg3_flag(tp, NO_NVRAM))
3861 		return;
3862 
3863 	if (tg3_validate_rxcpu_state(tp))
3864 		return;
3865 
3866 	if (!tp->fw)
3867 		return;
3868 
3869 	/* This firmware blob has a different format than older firmware
3870 	 * releases as given below. The main difference is we have fragmented
3871 	 * data to be written to non-contiguous locations.
3872 	 *
3873 	 * In the beginning we have a firmware header identical to other
3874 	 * firmware which consists of version, base addr and length. The length
3875 	 * here is unused and set to 0xffffffff.
3876 	 *
3877 	 * This is followed by a series of firmware fragments which are
3878 	 * individually identical to previous firmware. i.e. they have the
3879 	 * firmware header and followed by data for that fragment. The version
3880 	 * field of the individual fragment header is unused.
3881 	 */
3882 
3883 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 		return;
3886 
3887 	if (tg3_rxcpu_pause(tp))
3888 		return;
3889 
3890 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892 
3893 	tg3_rxcpu_resume(tp);
3894 }
3895 
3896 /* tp->lock is held. */
tg3_load_tso_firmware(struct tg3 * tp)3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899 	const struct tg3_firmware_hdr *fw_hdr;
3900 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 	int err;
3902 
3903 	if (!tg3_flag(tp, FW_TSO))
3904 		return 0;
3905 
3906 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907 
3908 	/* Firmware blob starts with version numbers, followed by
3909 	   start address and length. We are setting complete length.
3910 	   length = end_address_of_bss - start_address_of_text.
3911 	   Remainder is the blob to be loaded contiguously
3912 	   from start address. */
3913 
3914 	cpu_scratch_size = tp->fw_len;
3915 
3916 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 		cpu_base = RX_CPU_BASE;
3918 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 	} else {
3920 		cpu_base = TX_CPU_BASE;
3921 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 	}
3924 
3925 	err = tg3_load_firmware_cpu(tp, cpu_base,
3926 				    cpu_scratch_base, cpu_scratch_size,
3927 				    fw_hdr);
3928 	if (err)
3929 		return err;
3930 
3931 	/* Now startup the cpu. */
3932 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 				       be32_to_cpu(fw_hdr->base_addr));
3934 	if (err) {
3935 		netdev_err(tp->dev,
3936 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3937 			   __func__, tr32(cpu_base + CPU_PC),
3938 			   be32_to_cpu(fw_hdr->base_addr));
3939 		return -ENODEV;
3940 	}
3941 
3942 	tg3_resume_cpu(tp, cpu_base);
3943 	return 0;
3944 }
3945 
3946 /* tp->lock is held. */
__tg3_set_one_mac_addr(struct tg3 * tp,const u8 * mac_addr,int index)3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 				   int index)
3949 {
3950 	u32 addr_high, addr_low;
3951 
3952 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 		    (mac_addr[4] <<  8) | mac_addr[5]);
3955 
3956 	if (index < 4) {
3957 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 	} else {
3960 		index -= 4;
3961 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 	}
3964 }
3965 
3966 /* tp->lock is held. */
__tg3_set_mac_addr(struct tg3 * tp,bool skip_mac_1)3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 	u32 addr_high;
3970 	int i;
3971 
3972 	for (i = 0; i < 4; i++) {
3973 		if (i == 1 && skip_mac_1)
3974 			continue;
3975 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 	}
3977 
3978 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 		for (i = 4; i < 16; i++)
3981 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 	}
3983 
3984 	addr_high = (tp->dev->dev_addr[0] +
3985 		     tp->dev->dev_addr[1] +
3986 		     tp->dev->dev_addr[2] +
3987 		     tp->dev->dev_addr[3] +
3988 		     tp->dev->dev_addr[4] +
3989 		     tp->dev->dev_addr[5]) &
3990 		TX_BACKOFF_SEED_MASK;
3991 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
3993 
tg3_enable_register_access(struct tg3 * tp)3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 	/*
3997 	 * Make sure register accesses (indirect or otherwise) will function
3998 	 * correctly.
3999 	 */
4000 	pci_write_config_dword(tp->pdev,
4001 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003 
tg3_power_up(struct tg3 * tp)4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 	int err;
4007 
4008 	tg3_enable_register_access(tp);
4009 
4010 	err = pci_set_power_state(tp->pdev, PCI_D0);
4011 	if (!err) {
4012 		/* Switch out of Vaux if it is a NIC */
4013 		tg3_pwrsrc_switch_to_vmain(tp);
4014 	} else {
4015 		netdev_err(tp->dev, "Transition to D0 failed\n");
4016 	}
4017 
4018 	return err;
4019 }
4020 
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022 
tg3_power_down_prepare(struct tg3 * tp)4023 static void tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 	u32 misc_host_ctrl;
4026 	bool device_should_wake, do_low_power;
4027 
4028 	tg3_enable_register_access(tp);
4029 
4030 	/* Restore the CLKREQ setting. */
4031 	if (tg3_flag(tp, CLKREQ_BUG))
4032 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4034 
4035 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 	tw32(TG3PCI_MISC_HOST_CTRL,
4037 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038 
4039 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 			     tg3_flag(tp, WOL_ENABLE);
4041 
4042 	if (tg3_flag(tp, USE_PHYLIB)) {
4043 		do_low_power = false;
4044 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 			struct phy_device *phydev;
4048 			u32 phyid;
4049 
4050 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 
4052 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 
4054 			tp->link_config.speed = phydev->speed;
4055 			tp->link_config.duplex = phydev->duplex;
4056 			tp->link_config.autoneg = phydev->autoneg;
4057 			ethtool_convert_link_mode_to_legacy_u32(
4058 				&tp->link_config.advertising,
4059 				phydev->advertising);
4060 
4061 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 					 advertising);
4064 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 					 advertising);
4066 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 					 advertising);
4068 
4069 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 							 advertising);
4073 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 							 advertising);
4075 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 							 advertising);
4077 				} else {
4078 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 							 advertising);
4080 				}
4081 			}
4082 
4083 			linkmode_copy(phydev->advertising, advertising);
4084 			phy_start_aneg(phydev);
4085 
4086 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 			if (phyid != PHY_ID_BCMAC131) {
4088 				phyid &= PHY_BCM_OUI_MASK;
4089 				if (phyid == PHY_BCM_OUI_1 ||
4090 				    phyid == PHY_BCM_OUI_2 ||
4091 				    phyid == PHY_BCM_OUI_3)
4092 					do_low_power = true;
4093 			}
4094 		}
4095 	} else {
4096 		do_low_power = true;
4097 
4098 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100 
4101 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 			tg3_setup_phy(tp, false);
4103 	}
4104 
4105 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 		u32 val;
4107 
4108 		val = tr32(GRC_VCPU_EXT_CTRL);
4109 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 		int i;
4112 		u32 val;
4113 
4114 		for (i = 0; i < 200; i++) {
4115 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117 				break;
4118 			msleep(1);
4119 		}
4120 	}
4121 	if (tg3_flag(tp, WOL_CAP))
4122 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 						     WOL_DRV_STATE_SHUTDOWN |
4124 						     WOL_DRV_WOL |
4125 						     WOL_SET_MAGIC_PKT);
4126 
4127 	if (device_should_wake) {
4128 		u32 mac_mode;
4129 
4130 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 			if (do_low_power &&
4132 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 				tg3_phy_auxctl_write(tp,
4134 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 				udelay(40);
4139 			}
4140 
4141 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 			else if (tp->phy_flags &
4144 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 				if (tp->link_config.active_speed == SPEED_1000)
4146 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 				else
4148 					mac_mode = MAC_MODE_PORT_MODE_MII;
4149 			} else
4150 				mac_mode = MAC_MODE_PORT_MODE_MII;
4151 
4152 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 					     SPEED_100 : SPEED_10;
4156 				if (tg3_5700_link_polarity(tp, speed))
4157 					mac_mode |= MAC_MODE_LINK_POLARITY;
4158 				else
4159 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 			}
4161 		} else {
4162 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 		}
4164 
4165 		if (!tg3_flag(tp, 5750_PLUS))
4166 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4167 
4168 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172 
4173 		if (tg3_flag(tp, ENABLE_APE))
4174 			mac_mode |= MAC_MODE_APE_TX_EN |
4175 				    MAC_MODE_APE_RX_EN |
4176 				    MAC_MODE_TDE_ENABLE;
4177 
4178 		tw32_f(MAC_MODE, mac_mode);
4179 		udelay(100);
4180 
4181 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 		udelay(10);
4183 	}
4184 
4185 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 		u32 base_val;
4189 
4190 		base_val = tp->pci_clock_ctrl;
4191 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 			     CLOCK_CTRL_TXCLK_DISABLE);
4193 
4194 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 	} else if (tg3_flag(tp, 5780_CLASS) ||
4197 		   tg3_flag(tp, CPMU_PRESENT) ||
4198 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 		/* do nothing */
4200 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 		u32 newbits1, newbits2;
4202 
4203 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 				    CLOCK_CTRL_TXCLK_DISABLE |
4207 				    CLOCK_CTRL_ALTCLK);
4208 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 		} else if (tg3_flag(tp, 5705_PLUS)) {
4210 			newbits1 = CLOCK_CTRL_625_CORE;
4211 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 		} else {
4213 			newbits1 = CLOCK_CTRL_ALTCLK;
4214 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 		}
4216 
4217 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 			    40);
4219 
4220 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 			    40);
4222 
4223 		if (!tg3_flag(tp, 5705_PLUS)) {
4224 			u32 newbits3;
4225 
4226 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 					    CLOCK_CTRL_TXCLK_DISABLE |
4230 					    CLOCK_CTRL_44MHZ_CORE);
4231 			} else {
4232 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 			}
4234 
4235 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 				    tp->pci_clock_ctrl | newbits3, 40);
4237 		}
4238 	}
4239 
4240 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 		tg3_power_down_phy(tp, do_low_power);
4242 
4243 	tg3_frob_aux_power(tp, true);
4244 
4245 	/* Workaround for unstable PLL clock */
4246 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 		u32 val = tr32(0x7d00);
4250 
4251 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 		tw32(0x7d00, val);
4253 		if (!tg3_flag(tp, ENABLE_ASF)) {
4254 			int err;
4255 
4256 			err = tg3_nvram_lock(tp);
4257 			tg3_halt_cpu(tp, RX_CPU_BASE);
4258 			if (!err)
4259 				tg3_nvram_unlock(tp);
4260 		}
4261 	}
4262 
4263 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264 
4265 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266 
4267 	return;
4268 }
4269 
tg3_power_down(struct tg3 * tp)4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 	pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275 
tg3_aux_stat_to_speed_duplex(struct tg3 * tp,u32 val,u32 * speed,u8 * duplex)4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 	case MII_TG3_AUX_STAT_10HALF:
4280 		*speed = SPEED_10;
4281 		*duplex = DUPLEX_HALF;
4282 		break;
4283 
4284 	case MII_TG3_AUX_STAT_10FULL:
4285 		*speed = SPEED_10;
4286 		*duplex = DUPLEX_FULL;
4287 		break;
4288 
4289 	case MII_TG3_AUX_STAT_100HALF:
4290 		*speed = SPEED_100;
4291 		*duplex = DUPLEX_HALF;
4292 		break;
4293 
4294 	case MII_TG3_AUX_STAT_100FULL:
4295 		*speed = SPEED_100;
4296 		*duplex = DUPLEX_FULL;
4297 		break;
4298 
4299 	case MII_TG3_AUX_STAT_1000HALF:
4300 		*speed = SPEED_1000;
4301 		*duplex = DUPLEX_HALF;
4302 		break;
4303 
4304 	case MII_TG3_AUX_STAT_1000FULL:
4305 		*speed = SPEED_1000;
4306 		*duplex = DUPLEX_FULL;
4307 		break;
4308 
4309 	default:
4310 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 				 SPEED_10;
4313 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314 				  DUPLEX_HALF;
4315 			break;
4316 		}
4317 		*speed = SPEED_UNKNOWN;
4318 		*duplex = DUPLEX_UNKNOWN;
4319 		break;
4320 	}
4321 }
4322 
tg3_phy_autoneg_cfg(struct tg3 * tp,u32 advertise,u32 flowctrl)4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325 	int err = 0;
4326 	u32 val, new_adv;
4327 
4328 	new_adv = ADVERTISE_CSMA;
4329 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 	new_adv |= mii_advertise_flowctrl(flowctrl);
4331 
4332 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 	if (err)
4334 		goto done;
4335 
4336 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338 
4339 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342 
4343 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344 		if (err)
4345 			goto done;
4346 	}
4347 
4348 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 		goto done;
4350 
4351 	tw32(TG3_CPMU_EEE_MODE,
4352 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353 
4354 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355 	if (!err) {
4356 		u32 err2;
4357 
4358 		if (!tp->eee.eee_enabled)
4359 			val = 0;
4360 		else
4361 			val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4362 
4363 		mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4364 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4365 		if (err)
4366 			val = 0;
4367 
4368 		switch (tg3_asic_rev(tp)) {
4369 		case ASIC_REV_5717:
4370 		case ASIC_REV_57765:
4371 		case ASIC_REV_57766:
4372 		case ASIC_REV_5719:
4373 			/* If we advertised any eee advertisements above... */
4374 			if (val)
4375 				val = MII_TG3_DSP_TAP26_ALNOKO |
4376 				      MII_TG3_DSP_TAP26_RMRXSTO |
4377 				      MII_TG3_DSP_TAP26_OPCSINPT;
4378 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4379 			fallthrough;
4380 		case ASIC_REV_5720:
4381 		case ASIC_REV_5762:
4382 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4383 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4384 						 MII_TG3_DSP_CH34TP2_HIBW01);
4385 		}
4386 
4387 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4388 		if (!err)
4389 			err = err2;
4390 	}
4391 
4392 done:
4393 	return err;
4394 }
4395 
tg3_phy_copper_begin(struct tg3 * tp)4396 static void tg3_phy_copper_begin(struct tg3 *tp)
4397 {
4398 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4399 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4400 		u32 adv, fc;
4401 
4402 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4403 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4404 			adv = ADVERTISED_10baseT_Half |
4405 			      ADVERTISED_10baseT_Full;
4406 			if (tg3_flag(tp, WOL_SPEED_100MB))
4407 				adv |= ADVERTISED_100baseT_Half |
4408 				       ADVERTISED_100baseT_Full;
4409 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4410 				if (!(tp->phy_flags &
4411 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4412 					adv |= ADVERTISED_1000baseT_Half;
4413 				adv |= ADVERTISED_1000baseT_Full;
4414 			}
4415 
4416 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4417 		} else {
4418 			adv = tp->link_config.advertising;
4419 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4420 				adv &= ~(ADVERTISED_1000baseT_Half |
4421 					 ADVERTISED_1000baseT_Full);
4422 
4423 			fc = tp->link_config.flowctrl;
4424 		}
4425 
4426 		tg3_phy_autoneg_cfg(tp, adv, fc);
4427 
4428 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4429 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4430 			/* Normally during power down we want to autonegotiate
4431 			 * the lowest possible speed for WOL. However, to avoid
4432 			 * link flap, we leave it untouched.
4433 			 */
4434 			return;
4435 		}
4436 
4437 		tg3_writephy(tp, MII_BMCR,
4438 			     BMCR_ANENABLE | BMCR_ANRESTART);
4439 	} else {
4440 		int i;
4441 		u32 bmcr, orig_bmcr;
4442 
4443 		tp->link_config.active_speed = tp->link_config.speed;
4444 		tp->link_config.active_duplex = tp->link_config.duplex;
4445 
4446 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4447 			/* With autoneg disabled, 5715 only links up when the
4448 			 * advertisement register has the configured speed
4449 			 * enabled.
4450 			 */
4451 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4452 		}
4453 
4454 		bmcr = 0;
4455 		switch (tp->link_config.speed) {
4456 		default:
4457 		case SPEED_10:
4458 			break;
4459 
4460 		case SPEED_100:
4461 			bmcr |= BMCR_SPEED100;
4462 			break;
4463 
4464 		case SPEED_1000:
4465 			bmcr |= BMCR_SPEED1000;
4466 			break;
4467 		}
4468 
4469 		if (tp->link_config.duplex == DUPLEX_FULL)
4470 			bmcr |= BMCR_FULLDPLX;
4471 
4472 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4473 		    (bmcr != orig_bmcr)) {
4474 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4475 			for (i = 0; i < 1500; i++) {
4476 				u32 tmp;
4477 
4478 				udelay(10);
4479 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4480 				    tg3_readphy(tp, MII_BMSR, &tmp))
4481 					continue;
4482 				if (!(tmp & BMSR_LSTATUS)) {
4483 					udelay(40);
4484 					break;
4485 				}
4486 			}
4487 			tg3_writephy(tp, MII_BMCR, bmcr);
4488 			udelay(40);
4489 		}
4490 	}
4491 }
4492 
tg3_phy_pull_config(struct tg3 * tp)4493 static int tg3_phy_pull_config(struct tg3 *tp)
4494 {
4495 	int err;
4496 	u32 val;
4497 
4498 	err = tg3_readphy(tp, MII_BMCR, &val);
4499 	if (err)
4500 		goto done;
4501 
4502 	if (!(val & BMCR_ANENABLE)) {
4503 		tp->link_config.autoneg = AUTONEG_DISABLE;
4504 		tp->link_config.advertising = 0;
4505 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4506 
4507 		err = -EIO;
4508 
4509 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4510 		case 0:
4511 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 				goto done;
4513 
4514 			tp->link_config.speed = SPEED_10;
4515 			break;
4516 		case BMCR_SPEED100:
4517 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4518 				goto done;
4519 
4520 			tp->link_config.speed = SPEED_100;
4521 			break;
4522 		case BMCR_SPEED1000:
4523 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4524 				tp->link_config.speed = SPEED_1000;
4525 				break;
4526 			}
4527 			fallthrough;
4528 		default:
4529 			goto done;
4530 		}
4531 
4532 		if (val & BMCR_FULLDPLX)
4533 			tp->link_config.duplex = DUPLEX_FULL;
4534 		else
4535 			tp->link_config.duplex = DUPLEX_HALF;
4536 
4537 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4538 
4539 		err = 0;
4540 		goto done;
4541 	}
4542 
4543 	tp->link_config.autoneg = AUTONEG_ENABLE;
4544 	tp->link_config.advertising = ADVERTISED_Autoneg;
4545 	tg3_flag_set(tp, PAUSE_AUTONEG);
4546 
4547 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4548 		u32 adv;
4549 
4550 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4551 		if (err)
4552 			goto done;
4553 
4554 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4555 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4556 
4557 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4558 	} else {
4559 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4560 	}
4561 
4562 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4563 		u32 adv;
4564 
4565 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4566 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4567 			if (err)
4568 				goto done;
4569 
4570 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4571 		} else {
4572 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4573 			if (err)
4574 				goto done;
4575 
4576 			adv = tg3_decode_flowctrl_1000X(val);
4577 			tp->link_config.flowctrl = adv;
4578 
4579 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4580 			adv = mii_adv_to_ethtool_adv_x(val);
4581 		}
4582 
4583 		tp->link_config.advertising |= adv;
4584 	}
4585 
4586 done:
4587 	return err;
4588 }
4589 
tg3_init_5401phy_dsp(struct tg3 * tp)4590 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4591 {
4592 	int err;
4593 
4594 	/* Turn off tap power management. */
4595 	/* Set Extended packet length bit */
4596 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4597 
4598 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4599 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4600 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4601 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4602 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4603 
4604 	udelay(40);
4605 
4606 	return err;
4607 }
4608 
tg3_phy_eee_config_ok(struct tg3 * tp)4609 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4610 {
4611 	struct ethtool_keee eee = {};
4612 
4613 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4614 		return true;
4615 
4616 	tg3_eee_pull_config(tp, &eee);
4617 
4618 	if (tp->eee.eee_enabled) {
4619 		if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4620 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4621 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4622 			return false;
4623 	} else {
4624 		/* EEE is disabled but we're advertising */
4625 		if (!linkmode_empty(eee.advertised))
4626 			return false;
4627 	}
4628 
4629 	return true;
4630 }
4631 
tg3_phy_copper_an_config_ok(struct tg3 * tp,u32 * lcladv)4632 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4633 {
4634 	u32 advmsk, tgtadv, advertising;
4635 
4636 	advertising = tp->link_config.advertising;
4637 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4638 
4639 	advmsk = ADVERTISE_ALL;
4640 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4641 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4642 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4643 	}
4644 
4645 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4646 		return false;
4647 
4648 	if ((*lcladv & advmsk) != tgtadv)
4649 		return false;
4650 
4651 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4652 		u32 tg3_ctrl;
4653 
4654 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4655 
4656 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4657 			return false;
4658 
4659 		if (tgtadv &&
4660 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4661 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4662 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4663 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4664 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4665 		} else {
4666 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4667 		}
4668 
4669 		if (tg3_ctrl != tgtadv)
4670 			return false;
4671 	}
4672 
4673 	return true;
4674 }
4675 
tg3_phy_copper_fetch_rmtadv(struct tg3 * tp,u32 * rmtadv)4676 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4677 {
4678 	u32 lpeth = 0;
4679 
4680 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4681 		u32 val;
4682 
4683 		if (tg3_readphy(tp, MII_STAT1000, &val))
4684 			return false;
4685 
4686 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4687 	}
4688 
4689 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4690 		return false;
4691 
4692 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4693 	tp->link_config.rmt_adv = lpeth;
4694 
4695 	return true;
4696 }
4697 
tg3_test_and_report_link_chg(struct tg3 * tp,bool curr_link_up)4698 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4699 {
4700 	if (curr_link_up != tp->link_up) {
4701 		if (curr_link_up) {
4702 			netif_carrier_on(tp->dev);
4703 		} else {
4704 			netif_carrier_off(tp->dev);
4705 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4706 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4707 		}
4708 
4709 		tg3_link_report(tp);
4710 		return true;
4711 	}
4712 
4713 	return false;
4714 }
4715 
tg3_clear_mac_status(struct tg3 * tp)4716 static void tg3_clear_mac_status(struct tg3 *tp)
4717 {
4718 	tw32(MAC_EVENT, 0);
4719 
4720 	tw32_f(MAC_STATUS,
4721 	       MAC_STATUS_SYNC_CHANGED |
4722 	       MAC_STATUS_CFG_CHANGED |
4723 	       MAC_STATUS_MI_COMPLETION |
4724 	       MAC_STATUS_LNKSTATE_CHANGED);
4725 	udelay(40);
4726 }
4727 
tg3_setup_eee(struct tg3 * tp)4728 static void tg3_setup_eee(struct tg3 *tp)
4729 {
4730 	u32 val;
4731 
4732 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4733 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4734 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4735 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4736 
4737 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4738 
4739 	tw32_f(TG3_CPMU_EEE_CTRL,
4740 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4741 
4742 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4743 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4744 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4745 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4746 
4747 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4748 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4749 
4750 	if (tg3_flag(tp, ENABLE_APE))
4751 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4752 
4753 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4754 
4755 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4756 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4757 	       (tp->eee.tx_lpi_timer & 0xffff));
4758 
4759 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4760 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4761 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4762 }
4763 
tg3_setup_copper_phy(struct tg3 * tp,bool force_reset)4764 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4765 {
4766 	bool current_link_up;
4767 	u32 bmsr, val;
4768 	u32 lcl_adv, rmt_adv;
4769 	u32 current_speed;
4770 	u8 current_duplex;
4771 	int i, err;
4772 
4773 	tg3_clear_mac_status(tp);
4774 
4775 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4776 		tw32_f(MAC_MI_MODE,
4777 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4778 		udelay(80);
4779 	}
4780 
4781 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4782 
4783 	/* Some third-party PHYs need to be reset on link going
4784 	 * down.
4785 	 */
4786 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4787 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4788 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4789 	    tp->link_up) {
4790 		tg3_readphy(tp, MII_BMSR, &bmsr);
4791 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4792 		    !(bmsr & BMSR_LSTATUS))
4793 			force_reset = true;
4794 	}
4795 	if (force_reset)
4796 		tg3_phy_reset(tp);
4797 
4798 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4799 		tg3_readphy(tp, MII_BMSR, &bmsr);
4800 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4801 		    !tg3_flag(tp, INIT_COMPLETE))
4802 			bmsr = 0;
4803 
4804 		if (!(bmsr & BMSR_LSTATUS)) {
4805 			err = tg3_init_5401phy_dsp(tp);
4806 			if (err)
4807 				return err;
4808 
4809 			tg3_readphy(tp, MII_BMSR, &bmsr);
4810 			for (i = 0; i < 1000; i++) {
4811 				udelay(10);
4812 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4813 				    (bmsr & BMSR_LSTATUS)) {
4814 					udelay(40);
4815 					break;
4816 				}
4817 			}
4818 
4819 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4820 			    TG3_PHY_REV_BCM5401_B0 &&
4821 			    !(bmsr & BMSR_LSTATUS) &&
4822 			    tp->link_config.active_speed == SPEED_1000) {
4823 				err = tg3_phy_reset(tp);
4824 				if (!err)
4825 					err = tg3_init_5401phy_dsp(tp);
4826 				if (err)
4827 					return err;
4828 			}
4829 		}
4830 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4831 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4832 		/* 5701 {A0,B0} CRC bug workaround */
4833 		tg3_writephy(tp, 0x15, 0x0a75);
4834 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4835 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4836 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837 	}
4838 
4839 	/* Clear pending interrupts... */
4840 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4842 
4843 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4844 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4845 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4846 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4847 
4848 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4849 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4850 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4851 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4852 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4853 		else
4854 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4855 	}
4856 
4857 	current_link_up = false;
4858 	current_speed = SPEED_UNKNOWN;
4859 	current_duplex = DUPLEX_UNKNOWN;
4860 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4861 	tp->link_config.rmt_adv = 0;
4862 
4863 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4864 		err = tg3_phy_auxctl_read(tp,
4865 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4866 					  &val);
4867 		if (!err && !(val & (1 << 10))) {
4868 			tg3_phy_auxctl_write(tp,
4869 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 					     val | (1 << 10));
4871 			goto relink;
4872 		}
4873 	}
4874 
4875 	bmsr = 0;
4876 	for (i = 0; i < 100; i++) {
4877 		tg3_readphy(tp, MII_BMSR, &bmsr);
4878 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4879 		    (bmsr & BMSR_LSTATUS))
4880 			break;
4881 		udelay(40);
4882 	}
4883 
4884 	if (bmsr & BMSR_LSTATUS) {
4885 		u32 aux_stat, bmcr;
4886 
4887 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4888 		for (i = 0; i < 2000; i++) {
4889 			udelay(10);
4890 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4891 			    aux_stat)
4892 				break;
4893 		}
4894 
4895 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4896 					     &current_speed,
4897 					     &current_duplex);
4898 
4899 		bmcr = 0;
4900 		for (i = 0; i < 200; i++) {
4901 			tg3_readphy(tp, MII_BMCR, &bmcr);
4902 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4903 				continue;
4904 			if (bmcr && bmcr != 0x7fff)
4905 				break;
4906 			udelay(10);
4907 		}
4908 
4909 		lcl_adv = 0;
4910 		rmt_adv = 0;
4911 
4912 		tp->link_config.active_speed = current_speed;
4913 		tp->link_config.active_duplex = current_duplex;
4914 
4915 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4917 
4918 			if ((bmcr & BMCR_ANENABLE) &&
4919 			    eee_config_ok &&
4920 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4921 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4922 				current_link_up = true;
4923 
4924 			/* EEE settings changes take effect only after a phy
4925 			 * reset.  If we have skipped a reset due to Link Flap
4926 			 * Avoidance being enabled, do it now.
4927 			 */
4928 			if (!eee_config_ok &&
4929 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4930 			    !force_reset) {
4931 				tg3_setup_eee(tp);
4932 				tg3_phy_reset(tp);
4933 			}
4934 		} else {
4935 			if (!(bmcr & BMCR_ANENABLE) &&
4936 			    tp->link_config.speed == current_speed &&
4937 			    tp->link_config.duplex == current_duplex) {
4938 				current_link_up = true;
4939 			}
4940 		}
4941 
4942 		if (current_link_up &&
4943 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4944 			u32 reg, bit;
4945 
4946 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4947 				reg = MII_TG3_FET_GEN_STAT;
4948 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4949 			} else {
4950 				reg = MII_TG3_EXT_STAT;
4951 				bit = MII_TG3_EXT_STAT_MDIX;
4952 			}
4953 
4954 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4955 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4956 
4957 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4958 		}
4959 	}
4960 
4961 relink:
4962 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4963 		tg3_phy_copper_begin(tp);
4964 
4965 		if (tg3_flag(tp, ROBOSWITCH)) {
4966 			current_link_up = true;
4967 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4968 			current_speed = SPEED_1000;
4969 			current_duplex = DUPLEX_FULL;
4970 			tp->link_config.active_speed = current_speed;
4971 			tp->link_config.active_duplex = current_duplex;
4972 		}
4973 
4974 		tg3_readphy(tp, MII_BMSR, &bmsr);
4975 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4976 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4977 			current_link_up = true;
4978 	}
4979 
4980 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4981 	if (current_link_up) {
4982 		if (tp->link_config.active_speed == SPEED_100 ||
4983 		    tp->link_config.active_speed == SPEED_10)
4984 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4985 		else
4986 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4987 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4988 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 	else
4990 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991 
4992 	/* In order for the 5750 core in BCM4785 chip to work properly
4993 	 * in RGMII mode, the Led Control Register must be set up.
4994 	 */
4995 	if (tg3_flag(tp, RGMII_MODE)) {
4996 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4997 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4998 
4999 		if (tp->link_config.active_speed == SPEED_10)
5000 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5001 		else if (tp->link_config.active_speed == SPEED_100)
5002 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 				     LED_CTRL_100MBPS_ON);
5004 		else if (tp->link_config.active_speed == SPEED_1000)
5005 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5006 				     LED_CTRL_1000MBPS_ON);
5007 
5008 		tw32(MAC_LED_CTRL, led_ctrl);
5009 		udelay(40);
5010 	}
5011 
5012 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5013 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5014 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5015 
5016 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5017 		if (current_link_up &&
5018 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5019 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5020 		else
5021 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5022 	}
5023 
5024 	/* ??? Without this setting Netgear GA302T PHY does not
5025 	 * ??? send/receive packets...
5026 	 */
5027 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5028 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5029 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5030 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5031 		udelay(80);
5032 	}
5033 
5034 	tw32_f(MAC_MODE, tp->mac_mode);
5035 	udelay(40);
5036 
5037 	tg3_phy_eee_adjust(tp, current_link_up);
5038 
5039 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5040 		/* Polled via timer. */
5041 		tw32_f(MAC_EVENT, 0);
5042 	} else {
5043 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5044 	}
5045 	udelay(40);
5046 
5047 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5048 	    current_link_up &&
5049 	    tp->link_config.active_speed == SPEED_1000 &&
5050 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5051 		udelay(120);
5052 		tw32_f(MAC_STATUS,
5053 		     (MAC_STATUS_SYNC_CHANGED |
5054 		      MAC_STATUS_CFG_CHANGED));
5055 		udelay(40);
5056 		tg3_write_mem(tp,
5057 			      NIC_SRAM_FIRMWARE_MBOX,
5058 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5059 	}
5060 
5061 	/* Prevent send BD corruption. */
5062 	if (tg3_flag(tp, CLKREQ_BUG)) {
5063 		if (tp->link_config.active_speed == SPEED_100 ||
5064 		    tp->link_config.active_speed == SPEED_10)
5065 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5066 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5067 		else
5068 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5069 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5070 	}
5071 
5072 	tg3_test_and_report_link_chg(tp, current_link_up);
5073 
5074 	return 0;
5075 }
5076 
5077 struct tg3_fiber_aneginfo {
5078 	int state;
5079 #define ANEG_STATE_UNKNOWN		0
5080 #define ANEG_STATE_AN_ENABLE		1
5081 #define ANEG_STATE_RESTART_INIT		2
5082 #define ANEG_STATE_RESTART		3
5083 #define ANEG_STATE_DISABLE_LINK_OK	4
5084 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5085 #define ANEG_STATE_ABILITY_DETECT	6
5086 #define ANEG_STATE_ACK_DETECT_INIT	7
5087 #define ANEG_STATE_ACK_DETECT		8
5088 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5089 #define ANEG_STATE_COMPLETE_ACK		10
5090 #define ANEG_STATE_IDLE_DETECT_INIT	11
5091 #define ANEG_STATE_IDLE_DETECT		12
5092 #define ANEG_STATE_LINK_OK		13
5093 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5094 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5095 
5096 	u32 flags;
5097 #define MR_AN_ENABLE		0x00000001
5098 #define MR_RESTART_AN		0x00000002
5099 #define MR_AN_COMPLETE		0x00000004
5100 #define MR_PAGE_RX		0x00000008
5101 #define MR_NP_LOADED		0x00000010
5102 #define MR_TOGGLE_TX		0x00000020
5103 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5104 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5105 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5106 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5107 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5108 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5109 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5110 #define MR_TOGGLE_RX		0x00002000
5111 #define MR_NP_RX		0x00004000
5112 
5113 #define MR_LINK_OK		0x80000000
5114 
5115 	unsigned long link_time, cur_time;
5116 
5117 	u32 ability_match_cfg;
5118 	int ability_match_count;
5119 
5120 	char ability_match, idle_match, ack_match;
5121 
5122 	u32 txconfig, rxconfig;
5123 #define ANEG_CFG_NP		0x00000080
5124 #define ANEG_CFG_ACK		0x00000040
5125 #define ANEG_CFG_RF2		0x00000020
5126 #define ANEG_CFG_RF1		0x00000010
5127 #define ANEG_CFG_PS2		0x00000001
5128 #define ANEG_CFG_PS1		0x00008000
5129 #define ANEG_CFG_HD		0x00004000
5130 #define ANEG_CFG_FD		0x00002000
5131 #define ANEG_CFG_INVAL		0x00001f06
5132 
5133 };
5134 #define ANEG_OK		0
5135 #define ANEG_DONE	1
5136 #define ANEG_TIMER_ENAB	2
5137 #define ANEG_FAILED	-1
5138 
5139 #define ANEG_STATE_SETTLE_TIME	10000
5140 
tg3_fiber_aneg_smachine(struct tg3 * tp,struct tg3_fiber_aneginfo * ap)5141 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5142 				   struct tg3_fiber_aneginfo *ap)
5143 {
5144 	u16 flowctrl;
5145 	unsigned long delta;
5146 	u32 rx_cfg_reg;
5147 	int ret;
5148 
5149 	if (ap->state == ANEG_STATE_UNKNOWN) {
5150 		ap->rxconfig = 0;
5151 		ap->link_time = 0;
5152 		ap->cur_time = 0;
5153 		ap->ability_match_cfg = 0;
5154 		ap->ability_match_count = 0;
5155 		ap->ability_match = 0;
5156 		ap->idle_match = 0;
5157 		ap->ack_match = 0;
5158 	}
5159 	ap->cur_time++;
5160 
5161 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5162 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5163 
5164 		if (rx_cfg_reg != ap->ability_match_cfg) {
5165 			ap->ability_match_cfg = rx_cfg_reg;
5166 			ap->ability_match = 0;
5167 			ap->ability_match_count = 0;
5168 		} else {
5169 			if (++ap->ability_match_count > 1) {
5170 				ap->ability_match = 1;
5171 				ap->ability_match_cfg = rx_cfg_reg;
5172 			}
5173 		}
5174 		if (rx_cfg_reg & ANEG_CFG_ACK)
5175 			ap->ack_match = 1;
5176 		else
5177 			ap->ack_match = 0;
5178 
5179 		ap->idle_match = 0;
5180 	} else {
5181 		ap->idle_match = 1;
5182 		ap->ability_match_cfg = 0;
5183 		ap->ability_match_count = 0;
5184 		ap->ability_match = 0;
5185 		ap->ack_match = 0;
5186 
5187 		rx_cfg_reg = 0;
5188 	}
5189 
5190 	ap->rxconfig = rx_cfg_reg;
5191 	ret = ANEG_OK;
5192 
5193 	switch (ap->state) {
5194 	case ANEG_STATE_UNKNOWN:
5195 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5196 			ap->state = ANEG_STATE_AN_ENABLE;
5197 
5198 		fallthrough;
5199 	case ANEG_STATE_AN_ENABLE:
5200 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5201 		if (ap->flags & MR_AN_ENABLE) {
5202 			ap->link_time = 0;
5203 			ap->cur_time = 0;
5204 			ap->ability_match_cfg = 0;
5205 			ap->ability_match_count = 0;
5206 			ap->ability_match = 0;
5207 			ap->idle_match = 0;
5208 			ap->ack_match = 0;
5209 
5210 			ap->state = ANEG_STATE_RESTART_INIT;
5211 		} else {
5212 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5213 		}
5214 		break;
5215 
5216 	case ANEG_STATE_RESTART_INIT:
5217 		ap->link_time = ap->cur_time;
5218 		ap->flags &= ~(MR_NP_LOADED);
5219 		ap->txconfig = 0;
5220 		tw32(MAC_TX_AUTO_NEG, 0);
5221 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5222 		tw32_f(MAC_MODE, tp->mac_mode);
5223 		udelay(40);
5224 
5225 		ret = ANEG_TIMER_ENAB;
5226 		ap->state = ANEG_STATE_RESTART;
5227 
5228 		fallthrough;
5229 	case ANEG_STATE_RESTART:
5230 		delta = ap->cur_time - ap->link_time;
5231 		if (delta > ANEG_STATE_SETTLE_TIME)
5232 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5233 		else
5234 			ret = ANEG_TIMER_ENAB;
5235 		break;
5236 
5237 	case ANEG_STATE_DISABLE_LINK_OK:
5238 		ret = ANEG_DONE;
5239 		break;
5240 
5241 	case ANEG_STATE_ABILITY_DETECT_INIT:
5242 		ap->flags &= ~(MR_TOGGLE_TX);
5243 		ap->txconfig = ANEG_CFG_FD;
5244 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5245 		if (flowctrl & ADVERTISE_1000XPAUSE)
5246 			ap->txconfig |= ANEG_CFG_PS1;
5247 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5248 			ap->txconfig |= ANEG_CFG_PS2;
5249 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5250 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5251 		tw32_f(MAC_MODE, tp->mac_mode);
5252 		udelay(40);
5253 
5254 		ap->state = ANEG_STATE_ABILITY_DETECT;
5255 		break;
5256 
5257 	case ANEG_STATE_ABILITY_DETECT:
5258 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5259 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5260 		break;
5261 
5262 	case ANEG_STATE_ACK_DETECT_INIT:
5263 		ap->txconfig |= ANEG_CFG_ACK;
5264 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5265 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5266 		tw32_f(MAC_MODE, tp->mac_mode);
5267 		udelay(40);
5268 
5269 		ap->state = ANEG_STATE_ACK_DETECT;
5270 
5271 		fallthrough;
5272 	case ANEG_STATE_ACK_DETECT:
5273 		if (ap->ack_match != 0) {
5274 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5275 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5276 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5277 			} else {
5278 				ap->state = ANEG_STATE_AN_ENABLE;
5279 			}
5280 		} else if (ap->ability_match != 0 &&
5281 			   ap->rxconfig == 0) {
5282 			ap->state = ANEG_STATE_AN_ENABLE;
5283 		}
5284 		break;
5285 
5286 	case ANEG_STATE_COMPLETE_ACK_INIT:
5287 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5288 			ret = ANEG_FAILED;
5289 			break;
5290 		}
5291 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5292 			       MR_LP_ADV_HALF_DUPLEX |
5293 			       MR_LP_ADV_SYM_PAUSE |
5294 			       MR_LP_ADV_ASYM_PAUSE |
5295 			       MR_LP_ADV_REMOTE_FAULT1 |
5296 			       MR_LP_ADV_REMOTE_FAULT2 |
5297 			       MR_LP_ADV_NEXT_PAGE |
5298 			       MR_TOGGLE_RX |
5299 			       MR_NP_RX);
5300 		if (ap->rxconfig & ANEG_CFG_FD)
5301 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5302 		if (ap->rxconfig & ANEG_CFG_HD)
5303 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5304 		if (ap->rxconfig & ANEG_CFG_PS1)
5305 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5306 		if (ap->rxconfig & ANEG_CFG_PS2)
5307 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5308 		if (ap->rxconfig & ANEG_CFG_RF1)
5309 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5310 		if (ap->rxconfig & ANEG_CFG_RF2)
5311 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5312 		if (ap->rxconfig & ANEG_CFG_NP)
5313 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5314 
5315 		ap->link_time = ap->cur_time;
5316 
5317 		ap->flags ^= (MR_TOGGLE_TX);
5318 		if (ap->rxconfig & 0x0008)
5319 			ap->flags |= MR_TOGGLE_RX;
5320 		if (ap->rxconfig & ANEG_CFG_NP)
5321 			ap->flags |= MR_NP_RX;
5322 		ap->flags |= MR_PAGE_RX;
5323 
5324 		ap->state = ANEG_STATE_COMPLETE_ACK;
5325 		ret = ANEG_TIMER_ENAB;
5326 		break;
5327 
5328 	case ANEG_STATE_COMPLETE_ACK:
5329 		if (ap->ability_match != 0 &&
5330 		    ap->rxconfig == 0) {
5331 			ap->state = ANEG_STATE_AN_ENABLE;
5332 			break;
5333 		}
5334 		delta = ap->cur_time - ap->link_time;
5335 		if (delta > ANEG_STATE_SETTLE_TIME) {
5336 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5337 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5338 			} else {
5339 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5340 				    !(ap->flags & MR_NP_RX)) {
5341 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 				} else {
5343 					ret = ANEG_FAILED;
5344 				}
5345 			}
5346 		}
5347 		break;
5348 
5349 	case ANEG_STATE_IDLE_DETECT_INIT:
5350 		ap->link_time = ap->cur_time;
5351 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5352 		tw32_f(MAC_MODE, tp->mac_mode);
5353 		udelay(40);
5354 
5355 		ap->state = ANEG_STATE_IDLE_DETECT;
5356 		ret = ANEG_TIMER_ENAB;
5357 		break;
5358 
5359 	case ANEG_STATE_IDLE_DETECT:
5360 		if (ap->ability_match != 0 &&
5361 		    ap->rxconfig == 0) {
5362 			ap->state = ANEG_STATE_AN_ENABLE;
5363 			break;
5364 		}
5365 		delta = ap->cur_time - ap->link_time;
5366 		if (delta > ANEG_STATE_SETTLE_TIME) {
5367 			/* XXX another gem from the Broadcom driver :( */
5368 			ap->state = ANEG_STATE_LINK_OK;
5369 		}
5370 		break;
5371 
5372 	case ANEG_STATE_LINK_OK:
5373 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5374 		ret = ANEG_DONE;
5375 		break;
5376 
5377 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5378 		/* ??? unimplemented */
5379 		break;
5380 
5381 	case ANEG_STATE_NEXT_PAGE_WAIT:
5382 		/* ??? unimplemented */
5383 		break;
5384 
5385 	default:
5386 		ret = ANEG_FAILED;
5387 		break;
5388 	}
5389 
5390 	return ret;
5391 }
5392 
fiber_autoneg(struct tg3 * tp,u32 * txflags,u32 * rxflags)5393 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5394 {
5395 	int res = 0;
5396 	struct tg3_fiber_aneginfo aninfo;
5397 	int status = ANEG_FAILED;
5398 	unsigned int tick;
5399 	u32 tmp;
5400 
5401 	tw32_f(MAC_TX_AUTO_NEG, 0);
5402 
5403 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5404 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5405 	udelay(40);
5406 
5407 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5408 	udelay(40);
5409 
5410 	memset(&aninfo, 0, sizeof(aninfo));
5411 	aninfo.flags |= MR_AN_ENABLE;
5412 	aninfo.state = ANEG_STATE_UNKNOWN;
5413 	aninfo.cur_time = 0;
5414 	tick = 0;
5415 	while (++tick < 195000) {
5416 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5417 		if (status == ANEG_DONE || status == ANEG_FAILED)
5418 			break;
5419 
5420 		udelay(1);
5421 	}
5422 
5423 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5424 	tw32_f(MAC_MODE, tp->mac_mode);
5425 	udelay(40);
5426 
5427 	*txflags = aninfo.txconfig;
5428 	*rxflags = aninfo.flags;
5429 
5430 	if (status == ANEG_DONE &&
5431 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5432 			     MR_LP_ADV_FULL_DUPLEX)))
5433 		res = 1;
5434 
5435 	return res;
5436 }
5437 
tg3_init_bcm8002(struct tg3 * tp)5438 static void tg3_init_bcm8002(struct tg3 *tp)
5439 {
5440 	u32 mac_status = tr32(MAC_STATUS);
5441 	int i;
5442 
5443 	/* Reset when initting first time or we have a link. */
5444 	if (tg3_flag(tp, INIT_COMPLETE) &&
5445 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5446 		return;
5447 
5448 	/* Set PLL lock range. */
5449 	tg3_writephy(tp, 0x16, 0x8007);
5450 
5451 	/* SW reset */
5452 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5453 
5454 	/* Wait for reset to complete. */
5455 	/* XXX schedule_timeout() ... */
5456 	for (i = 0; i < 500; i++)
5457 		udelay(10);
5458 
5459 	/* Config mode; select PMA/Ch 1 regs. */
5460 	tg3_writephy(tp, 0x10, 0x8411);
5461 
5462 	/* Enable auto-lock and comdet, select txclk for tx. */
5463 	tg3_writephy(tp, 0x11, 0x0a10);
5464 
5465 	tg3_writephy(tp, 0x18, 0x00a0);
5466 	tg3_writephy(tp, 0x16, 0x41ff);
5467 
5468 	/* Assert and deassert POR. */
5469 	tg3_writephy(tp, 0x13, 0x0400);
5470 	udelay(40);
5471 	tg3_writephy(tp, 0x13, 0x0000);
5472 
5473 	tg3_writephy(tp, 0x11, 0x0a50);
5474 	udelay(40);
5475 	tg3_writephy(tp, 0x11, 0x0a10);
5476 
5477 	/* Wait for signal to stabilize */
5478 	/* XXX schedule_timeout() ... */
5479 	for (i = 0; i < 15000; i++)
5480 		udelay(10);
5481 
5482 	/* Deselect the channel register so we can read the PHYID
5483 	 * later.
5484 	 */
5485 	tg3_writephy(tp, 0x10, 0x8011);
5486 }
5487 
tg3_setup_fiber_hw_autoneg(struct tg3 * tp,u32 mac_status)5488 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5489 {
5490 	u16 flowctrl;
5491 	bool current_link_up;
5492 	u32 sg_dig_ctrl, sg_dig_status;
5493 	u32 serdes_cfg, expected_sg_dig_ctrl;
5494 	int workaround, port_a;
5495 
5496 	serdes_cfg = 0;
5497 	workaround = 0;
5498 	port_a = 1;
5499 	current_link_up = false;
5500 
5501 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5502 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5503 		workaround = 1;
5504 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5505 			port_a = 0;
5506 
5507 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5508 		/* preserve bits 20-23 for voltage regulator */
5509 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5510 	}
5511 
5512 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5513 
5514 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5515 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5516 			if (workaround) {
5517 				u32 val = serdes_cfg;
5518 
5519 				if (port_a)
5520 					val |= 0xc010000;
5521 				else
5522 					val |= 0x4010000;
5523 				tw32_f(MAC_SERDES_CFG, val);
5524 			}
5525 
5526 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5527 		}
5528 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5529 			tg3_setup_flow_control(tp, 0, 0);
5530 			current_link_up = true;
5531 		}
5532 		goto out;
5533 	}
5534 
5535 	/* Want auto-negotiation.  */
5536 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5537 
5538 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5539 	if (flowctrl & ADVERTISE_1000XPAUSE)
5540 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5541 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5542 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5543 
5544 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5545 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5546 		    tp->serdes_counter &&
5547 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5548 				    MAC_STATUS_RCVD_CFG)) ==
5549 		     MAC_STATUS_PCS_SYNCED)) {
5550 			tp->serdes_counter--;
5551 			current_link_up = true;
5552 			goto out;
5553 		}
5554 restart_autoneg:
5555 		if (workaround)
5556 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5557 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5558 		udelay(5);
5559 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5560 
5561 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5562 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5563 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5564 				 MAC_STATUS_SIGNAL_DET)) {
5565 		sg_dig_status = tr32(SG_DIG_STATUS);
5566 		mac_status = tr32(MAC_STATUS);
5567 
5568 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5569 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5570 			u32 local_adv = 0, remote_adv = 0;
5571 
5572 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5573 				local_adv |= ADVERTISE_1000XPAUSE;
5574 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5575 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5576 
5577 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5578 				remote_adv |= LPA_1000XPAUSE;
5579 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5580 				remote_adv |= LPA_1000XPAUSE_ASYM;
5581 
5582 			tp->link_config.rmt_adv =
5583 					   mii_adv_to_ethtool_adv_x(remote_adv);
5584 
5585 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5586 			current_link_up = true;
5587 			tp->serdes_counter = 0;
5588 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5589 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5590 			if (tp->serdes_counter)
5591 				tp->serdes_counter--;
5592 			else {
5593 				if (workaround) {
5594 					u32 val = serdes_cfg;
5595 
5596 					if (port_a)
5597 						val |= 0xc010000;
5598 					else
5599 						val |= 0x4010000;
5600 
5601 					tw32_f(MAC_SERDES_CFG, val);
5602 				}
5603 
5604 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5605 				udelay(40);
5606 
5607 				/* Link parallel detection - link is up */
5608 				/* only if we have PCS_SYNC and not */
5609 				/* receiving config code words */
5610 				mac_status = tr32(MAC_STATUS);
5611 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5612 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5613 					tg3_setup_flow_control(tp, 0, 0);
5614 					current_link_up = true;
5615 					tp->phy_flags |=
5616 						TG3_PHYFLG_PARALLEL_DETECT;
5617 					tp->serdes_counter =
5618 						SERDES_PARALLEL_DET_TIMEOUT;
5619 				} else
5620 					goto restart_autoneg;
5621 			}
5622 		}
5623 	} else {
5624 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5625 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 	}
5627 
5628 out:
5629 	return current_link_up;
5630 }
5631 
tg3_setup_fiber_by_hand(struct tg3 * tp,u32 mac_status)5632 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5633 {
5634 	bool current_link_up = false;
5635 
5636 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5637 		goto out;
5638 
5639 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5640 		u32 txflags, rxflags;
5641 		int i;
5642 
5643 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5644 			u32 local_adv = 0, remote_adv = 0;
5645 
5646 			if (txflags & ANEG_CFG_PS1)
5647 				local_adv |= ADVERTISE_1000XPAUSE;
5648 			if (txflags & ANEG_CFG_PS2)
5649 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5650 
5651 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5652 				remote_adv |= LPA_1000XPAUSE;
5653 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5654 				remote_adv |= LPA_1000XPAUSE_ASYM;
5655 
5656 			tp->link_config.rmt_adv =
5657 					   mii_adv_to_ethtool_adv_x(remote_adv);
5658 
5659 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5660 
5661 			current_link_up = true;
5662 		}
5663 		for (i = 0; i < 30; i++) {
5664 			udelay(20);
5665 			tw32_f(MAC_STATUS,
5666 			       (MAC_STATUS_SYNC_CHANGED |
5667 				MAC_STATUS_CFG_CHANGED));
5668 			udelay(40);
5669 			if ((tr32(MAC_STATUS) &
5670 			     (MAC_STATUS_SYNC_CHANGED |
5671 			      MAC_STATUS_CFG_CHANGED)) == 0)
5672 				break;
5673 		}
5674 
5675 		mac_status = tr32(MAC_STATUS);
5676 		if (!current_link_up &&
5677 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5678 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5679 			current_link_up = true;
5680 	} else {
5681 		tg3_setup_flow_control(tp, 0, 0);
5682 
5683 		/* Forcing 1000FD link up. */
5684 		current_link_up = true;
5685 
5686 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5687 		udelay(40);
5688 
5689 		tw32_f(MAC_MODE, tp->mac_mode);
5690 		udelay(40);
5691 	}
5692 
5693 out:
5694 	return current_link_up;
5695 }
5696 
tg3_setup_fiber_phy(struct tg3 * tp,bool force_reset)5697 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5698 {
5699 	u32 orig_pause_cfg;
5700 	u32 orig_active_speed;
5701 	u8 orig_active_duplex;
5702 	u32 mac_status;
5703 	bool current_link_up;
5704 	int i;
5705 
5706 	orig_pause_cfg = tp->link_config.active_flowctrl;
5707 	orig_active_speed = tp->link_config.active_speed;
5708 	orig_active_duplex = tp->link_config.active_duplex;
5709 
5710 	if (!tg3_flag(tp, HW_AUTONEG) &&
5711 	    tp->link_up &&
5712 	    tg3_flag(tp, INIT_COMPLETE)) {
5713 		mac_status = tr32(MAC_STATUS);
5714 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5715 			       MAC_STATUS_SIGNAL_DET |
5716 			       MAC_STATUS_CFG_CHANGED |
5717 			       MAC_STATUS_RCVD_CFG);
5718 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5719 				   MAC_STATUS_SIGNAL_DET)) {
5720 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5721 					    MAC_STATUS_CFG_CHANGED));
5722 			return 0;
5723 		}
5724 	}
5725 
5726 	tw32_f(MAC_TX_AUTO_NEG, 0);
5727 
5728 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5729 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5730 	tw32_f(MAC_MODE, tp->mac_mode);
5731 	udelay(40);
5732 
5733 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5734 		tg3_init_bcm8002(tp);
5735 
5736 	/* Enable link change event even when serdes polling.  */
5737 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5738 	udelay(40);
5739 
5740 	tp->link_config.rmt_adv = 0;
5741 	mac_status = tr32(MAC_STATUS);
5742 
5743 	if (tg3_flag(tp, HW_AUTONEG))
5744 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5745 	else
5746 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5747 
5748 	tp->napi[0].hw_status->status =
5749 		(SD_STATUS_UPDATED |
5750 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5751 
5752 	for (i = 0; i < 100; i++) {
5753 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5754 				    MAC_STATUS_CFG_CHANGED));
5755 		udelay(5);
5756 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5757 					 MAC_STATUS_CFG_CHANGED |
5758 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5759 			break;
5760 	}
5761 
5762 	mac_status = tr32(MAC_STATUS);
5763 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5764 		current_link_up = false;
5765 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5766 		    tp->serdes_counter == 0) {
5767 			tw32_f(MAC_MODE, (tp->mac_mode |
5768 					  MAC_MODE_SEND_CONFIGS));
5769 			udelay(1);
5770 			tw32_f(MAC_MODE, tp->mac_mode);
5771 		}
5772 	}
5773 
5774 	if (current_link_up) {
5775 		tp->link_config.active_speed = SPEED_1000;
5776 		tp->link_config.active_duplex = DUPLEX_FULL;
5777 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5778 				    LED_CTRL_LNKLED_OVERRIDE |
5779 				    LED_CTRL_1000MBPS_ON));
5780 	} else {
5781 		tp->link_config.active_speed = SPEED_UNKNOWN;
5782 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5783 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 				    LED_CTRL_LNKLED_OVERRIDE |
5785 				    LED_CTRL_TRAFFIC_OVERRIDE));
5786 	}
5787 
5788 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5789 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5790 		if (orig_pause_cfg != now_pause_cfg ||
5791 		    orig_active_speed != tp->link_config.active_speed ||
5792 		    orig_active_duplex != tp->link_config.active_duplex)
5793 			tg3_link_report(tp);
5794 	}
5795 
5796 	return 0;
5797 }
5798 
tg3_setup_fiber_mii_phy(struct tg3 * tp,bool force_reset)5799 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5800 {
5801 	int err = 0;
5802 	u32 bmsr, bmcr;
5803 	u32 current_speed = SPEED_UNKNOWN;
5804 	u8 current_duplex = DUPLEX_UNKNOWN;
5805 	bool current_link_up = false;
5806 	u32 local_adv, remote_adv, sgsr;
5807 
5808 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5809 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5810 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5811 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5812 
5813 		if (force_reset)
5814 			tg3_phy_reset(tp);
5815 
5816 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5817 
5818 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5819 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5820 		} else {
5821 			current_link_up = true;
5822 			if (sgsr & SERDES_TG3_SPEED_1000) {
5823 				current_speed = SPEED_1000;
5824 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5825 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5826 				current_speed = SPEED_100;
5827 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5828 			} else {
5829 				current_speed = SPEED_10;
5830 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5831 			}
5832 
5833 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5834 				current_duplex = DUPLEX_FULL;
5835 			else
5836 				current_duplex = DUPLEX_HALF;
5837 		}
5838 
5839 		tw32_f(MAC_MODE, tp->mac_mode);
5840 		udelay(40);
5841 
5842 		tg3_clear_mac_status(tp);
5843 
5844 		goto fiber_setup_done;
5845 	}
5846 
5847 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5848 	tw32_f(MAC_MODE, tp->mac_mode);
5849 	udelay(40);
5850 
5851 	tg3_clear_mac_status(tp);
5852 
5853 	if (force_reset)
5854 		tg3_phy_reset(tp);
5855 
5856 	tp->link_config.rmt_adv = 0;
5857 
5858 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5860 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5861 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5862 			bmsr |= BMSR_LSTATUS;
5863 		else
5864 			bmsr &= ~BMSR_LSTATUS;
5865 	}
5866 
5867 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5868 
5869 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5870 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5871 		/* do nothing, just check for link up at the end */
5872 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5873 		u32 adv, newadv;
5874 
5875 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5876 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5877 				 ADVERTISE_1000XPAUSE |
5878 				 ADVERTISE_1000XPSE_ASYM |
5879 				 ADVERTISE_SLCT);
5880 
5881 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5882 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5883 
5884 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5885 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5886 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5887 			tg3_writephy(tp, MII_BMCR, bmcr);
5888 
5889 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5890 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5891 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5892 
5893 			return err;
5894 		}
5895 	} else {
5896 		u32 new_bmcr;
5897 
5898 		bmcr &= ~BMCR_SPEED1000;
5899 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5900 
5901 		if (tp->link_config.duplex == DUPLEX_FULL)
5902 			new_bmcr |= BMCR_FULLDPLX;
5903 
5904 		if (new_bmcr != bmcr) {
5905 			/* BMCR_SPEED1000 is a reserved bit that needs
5906 			 * to be set on write.
5907 			 */
5908 			new_bmcr |= BMCR_SPEED1000;
5909 
5910 			/* Force a linkdown */
5911 			if (tp->link_up) {
5912 				u32 adv;
5913 
5914 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5915 				adv &= ~(ADVERTISE_1000XFULL |
5916 					 ADVERTISE_1000XHALF |
5917 					 ADVERTISE_SLCT);
5918 				tg3_writephy(tp, MII_ADVERTISE, adv);
5919 				tg3_writephy(tp, MII_BMCR, bmcr |
5920 							   BMCR_ANRESTART |
5921 							   BMCR_ANENABLE);
5922 				udelay(10);
5923 				tg3_carrier_off(tp);
5924 			}
5925 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5926 			bmcr = new_bmcr;
5927 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5929 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5930 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5931 					bmsr |= BMSR_LSTATUS;
5932 				else
5933 					bmsr &= ~BMSR_LSTATUS;
5934 			}
5935 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5936 		}
5937 	}
5938 
5939 	if (bmsr & BMSR_LSTATUS) {
5940 		current_speed = SPEED_1000;
5941 		current_link_up = true;
5942 		if (bmcr & BMCR_FULLDPLX)
5943 			current_duplex = DUPLEX_FULL;
5944 		else
5945 			current_duplex = DUPLEX_HALF;
5946 
5947 		local_adv = 0;
5948 		remote_adv = 0;
5949 
5950 		if (bmcr & BMCR_ANENABLE) {
5951 			u32 common;
5952 
5953 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5954 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5955 			common = local_adv & remote_adv;
5956 			if (common & (ADVERTISE_1000XHALF |
5957 				      ADVERTISE_1000XFULL)) {
5958 				if (common & ADVERTISE_1000XFULL)
5959 					current_duplex = DUPLEX_FULL;
5960 				else
5961 					current_duplex = DUPLEX_HALF;
5962 
5963 				tp->link_config.rmt_adv =
5964 					   mii_adv_to_ethtool_adv_x(remote_adv);
5965 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5966 				/* Link is up via parallel detect */
5967 			} else {
5968 				current_link_up = false;
5969 			}
5970 		}
5971 	}
5972 
5973 fiber_setup_done:
5974 	if (current_link_up && current_duplex == DUPLEX_FULL)
5975 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5976 
5977 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5978 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5979 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5980 
5981 	tw32_f(MAC_MODE, tp->mac_mode);
5982 	udelay(40);
5983 
5984 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5985 
5986 	tp->link_config.active_speed = current_speed;
5987 	tp->link_config.active_duplex = current_duplex;
5988 
5989 	tg3_test_and_report_link_chg(tp, current_link_up);
5990 	return err;
5991 }
5992 
tg3_serdes_parallel_detect(struct tg3 * tp)5993 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5994 {
5995 	if (tp->serdes_counter) {
5996 		/* Give autoneg time to complete. */
5997 		tp->serdes_counter--;
5998 		return;
5999 	}
6000 
6001 	if (!tp->link_up &&
6002 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6003 		u32 bmcr;
6004 
6005 		tg3_readphy(tp, MII_BMCR, &bmcr);
6006 		if (bmcr & BMCR_ANENABLE) {
6007 			u32 phy1, phy2;
6008 
6009 			/* Select shadow register 0x1f */
6010 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6011 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6012 
6013 			/* Select expansion interrupt status register */
6014 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6015 					 MII_TG3_DSP_EXP1_INT_STAT);
6016 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6018 
6019 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6020 				/* We have signal detect and not receiving
6021 				 * config code words, link is up by parallel
6022 				 * detection.
6023 				 */
6024 
6025 				bmcr &= ~BMCR_ANENABLE;
6026 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6027 				tg3_writephy(tp, MII_BMCR, bmcr);
6028 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6029 			}
6030 		}
6031 	} else if (tp->link_up &&
6032 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6033 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6034 		u32 phy2;
6035 
6036 		/* Select expansion interrupt status register */
6037 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6038 				 MII_TG3_DSP_EXP1_INT_STAT);
6039 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040 		if (phy2 & 0x20) {
6041 			u32 bmcr;
6042 
6043 			/* Config code words received, turn on autoneg. */
6044 			tg3_readphy(tp, MII_BMCR, &bmcr);
6045 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6046 
6047 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6048 
6049 		}
6050 	}
6051 }
6052 
tg3_setup_phy(struct tg3 * tp,bool force_reset)6053 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6054 {
6055 	u32 val;
6056 	int err;
6057 
6058 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6059 		err = tg3_setup_fiber_phy(tp, force_reset);
6060 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6061 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6062 	else
6063 		err = tg3_setup_copper_phy(tp, force_reset);
6064 
6065 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6066 		u32 scale;
6067 
6068 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6069 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6070 			scale = 65;
6071 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6072 			scale = 6;
6073 		else
6074 			scale = 12;
6075 
6076 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6077 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6078 		tw32(GRC_MISC_CFG, val);
6079 	}
6080 
6081 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6082 	      (6 << TX_LENGTHS_IPG_SHIFT);
6083 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6084 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6085 		val |= tr32(MAC_TX_LENGTHS) &
6086 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6087 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6088 
6089 	if (tp->link_config.active_speed == SPEED_1000 &&
6090 	    tp->link_config.active_duplex == DUPLEX_HALF)
6091 		tw32(MAC_TX_LENGTHS, val |
6092 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6093 	else
6094 		tw32(MAC_TX_LENGTHS, val |
6095 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6096 
6097 	if (!tg3_flag(tp, 5705_PLUS)) {
6098 		if (tp->link_up) {
6099 			tw32(HOSTCC_STAT_COAL_TICKS,
6100 			     tp->coal.stats_block_coalesce_usecs);
6101 		} else {
6102 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6103 		}
6104 	}
6105 
6106 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6107 		val = tr32(PCIE_PWR_MGMT_THRESH);
6108 		if (!tp->link_up)
6109 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6110 			      tp->pwrmgmt_thresh;
6111 		else
6112 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6113 		tw32(PCIE_PWR_MGMT_THRESH, val);
6114 	}
6115 
6116 	return err;
6117 }
6118 
6119 /* tp->lock must be held */
tg3_refclk_read(struct tg3 * tp,struct ptp_system_timestamp * sts)6120 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6121 {
6122 	u64 stamp;
6123 
6124 	ptp_read_system_prets(sts);
6125 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6126 	ptp_read_system_postts(sts);
6127 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6128 
6129 	return stamp;
6130 }
6131 
6132 /* tp->lock must be held */
tg3_refclk_write(struct tg3 * tp,u64 newval)6133 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6134 {
6135 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6136 
6137 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6138 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6139 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6140 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141 }
6142 
6143 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6144 static inline void tg3_full_unlock(struct tg3 *tp);
tg3_get_ts_info(struct net_device * dev,struct kernel_ethtool_ts_info * info)6145 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6146 {
6147 	struct tg3 *tp = netdev_priv(dev);
6148 
6149 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6150 
6151 	if (tg3_flag(tp, PTP_CAPABLE)) {
6152 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6153 					SOF_TIMESTAMPING_RX_HARDWARE |
6154 					SOF_TIMESTAMPING_RAW_HARDWARE;
6155 	}
6156 
6157 	if (tp->ptp_clock)
6158 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6159 
6160 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6161 
6162 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6163 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6164 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6165 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6166 	return 0;
6167 }
6168 
tg3_ptp_adjfine(struct ptp_clock_info * ptp,long scaled_ppm)6169 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6170 {
6171 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6172 	u64 correction;
6173 	bool neg_adj;
6174 
6175 	/* Frequency adjustment is performed using hardware with a 24 bit
6176 	 * accumulator and a programmable correction value. On each clk, the
6177 	 * correction value gets added to the accumulator and when it
6178 	 * overflows, the time counter is incremented/decremented.
6179 	 */
6180 	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6181 
6182 	tg3_full_lock(tp, 0);
6183 
6184 	if (correction)
6185 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6186 		     TG3_EAV_REF_CLK_CORRECT_EN |
6187 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6188 		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6189 	else
6190 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6191 
6192 	tg3_full_unlock(tp);
6193 
6194 	return 0;
6195 }
6196 
tg3_ptp_adjtime(struct ptp_clock_info * ptp,s64 delta)6197 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6198 {
6199 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6200 
6201 	tg3_full_lock(tp, 0);
6202 	tp->ptp_adjust += delta;
6203 	tg3_full_unlock(tp);
6204 
6205 	return 0;
6206 }
6207 
tg3_ptp_gettimex(struct ptp_clock_info * ptp,struct timespec64 * ts,struct ptp_system_timestamp * sts)6208 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6209 			    struct ptp_system_timestamp *sts)
6210 {
6211 	u64 ns;
6212 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213 
6214 	tg3_full_lock(tp, 0);
6215 	ns = tg3_refclk_read(tp, sts);
6216 	ns += tp->ptp_adjust;
6217 	tg3_full_unlock(tp);
6218 
6219 	*ts = ns_to_timespec64(ns);
6220 
6221 	return 0;
6222 }
6223 
tg3_ptp_settime(struct ptp_clock_info * ptp,const struct timespec64 * ts)6224 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6225 			   const struct timespec64 *ts)
6226 {
6227 	u64 ns;
6228 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6229 
6230 	ns = timespec64_to_ns(ts);
6231 
6232 	tg3_full_lock(tp, 0);
6233 	tg3_refclk_write(tp, ns);
6234 	tp->ptp_adjust = 0;
6235 	tg3_full_unlock(tp);
6236 
6237 	return 0;
6238 }
6239 
tg3_ptp_enable(struct ptp_clock_info * ptp,struct ptp_clock_request * rq,int on)6240 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6241 			  struct ptp_clock_request *rq, int on)
6242 {
6243 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 	u32 clock_ctl;
6245 	int rval = 0;
6246 
6247 	switch (rq->type) {
6248 	case PTP_CLK_REQ_PEROUT:
6249 		/* Reject requests with unsupported flags */
6250 		if (rq->perout.flags)
6251 			return -EOPNOTSUPP;
6252 
6253 		if (rq->perout.index != 0)
6254 			return -EINVAL;
6255 
6256 		tg3_full_lock(tp, 0);
6257 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6258 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6259 
6260 		if (on) {
6261 			u64 nsec;
6262 
6263 			nsec = rq->perout.start.sec * 1000000000ULL +
6264 			       rq->perout.start.nsec;
6265 
6266 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6267 				netdev_warn(tp->dev,
6268 					    "Device supports only a one-shot timesync output, period must be 0\n");
6269 				rval = -EINVAL;
6270 				goto err_out;
6271 			}
6272 
6273 			if (nsec & (1ULL << 63)) {
6274 				netdev_warn(tp->dev,
6275 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6276 				rval = -EINVAL;
6277 				goto err_out;
6278 			}
6279 
6280 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6281 			tw32(TG3_EAV_WATCHDOG0_MSB,
6282 			     TG3_EAV_WATCHDOG0_EN |
6283 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6284 
6285 			tw32(TG3_EAV_REF_CLCK_CTL,
6286 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6287 		} else {
6288 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6289 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6290 		}
6291 
6292 err_out:
6293 		tg3_full_unlock(tp);
6294 		return rval;
6295 
6296 	default:
6297 		break;
6298 	}
6299 
6300 	return -EOPNOTSUPP;
6301 }
6302 
tg3_hwclock_to_timestamp(struct tg3 * tp,u64 hwclock,struct skb_shared_hwtstamps * timestamp)6303 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6304 				     struct skb_shared_hwtstamps *timestamp)
6305 {
6306 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6307 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6308 					   tp->ptp_adjust);
6309 }
6310 
tg3_read_tx_tstamp(struct tg3 * tp,u64 * hwclock)6311 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6312 {
6313 	*hwclock = tr32(TG3_TX_TSTAMP_LSB);
6314 	*hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6315 }
6316 
tg3_ptp_ts_aux_work(struct ptp_clock_info * ptp)6317 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6318 {
6319 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6320 	struct skb_shared_hwtstamps timestamp;
6321 	u64 hwclock;
6322 
6323 	if (tp->ptp_txts_retrycnt > 2)
6324 		goto done;
6325 
6326 	tg3_read_tx_tstamp(tp, &hwclock);
6327 
6328 	if (hwclock != tp->pre_tx_ts) {
6329 		tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6330 		skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6331 		goto done;
6332 	}
6333 	tp->ptp_txts_retrycnt++;
6334 	return HZ / 10;
6335 done:
6336 	dev_consume_skb_any(tp->tx_tstamp_skb);
6337 	tp->tx_tstamp_skb = NULL;
6338 	tp->ptp_txts_retrycnt = 0;
6339 	tp->pre_tx_ts = 0;
6340 	return -1;
6341 }
6342 
6343 static const struct ptp_clock_info tg3_ptp_caps = {
6344 	.owner		= THIS_MODULE,
6345 	.name		= "tg3 clock",
6346 	.max_adj	= 250000000,
6347 	.n_alarm	= 0,
6348 	.n_ext_ts	= 0,
6349 	.n_per_out	= 1,
6350 	.n_pins		= 0,
6351 	.pps		= 0,
6352 	.adjfine	= tg3_ptp_adjfine,
6353 	.adjtime	= tg3_ptp_adjtime,
6354 	.do_aux_work	= tg3_ptp_ts_aux_work,
6355 	.gettimex64	= tg3_ptp_gettimex,
6356 	.settime64	= tg3_ptp_settime,
6357 	.enable		= tg3_ptp_enable,
6358 };
6359 
6360 /* tp->lock must be held */
tg3_ptp_init(struct tg3 * tp)6361 static void tg3_ptp_init(struct tg3 *tp)
6362 {
6363 	if (!tg3_flag(tp, PTP_CAPABLE))
6364 		return;
6365 
6366 	/* Initialize the hardware clock to the system time. */
6367 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6368 	tp->ptp_adjust = 0;
6369 	tp->ptp_info = tg3_ptp_caps;
6370 }
6371 
6372 /* tp->lock must be held */
tg3_ptp_resume(struct tg3 * tp)6373 static void tg3_ptp_resume(struct tg3 *tp)
6374 {
6375 	if (!tg3_flag(tp, PTP_CAPABLE))
6376 		return;
6377 
6378 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6379 	tp->ptp_adjust = 0;
6380 }
6381 
tg3_ptp_fini(struct tg3 * tp)6382 static void tg3_ptp_fini(struct tg3 *tp)
6383 {
6384 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6385 		return;
6386 
6387 	ptp_clock_unregister(tp->ptp_clock);
6388 	tp->ptp_clock = NULL;
6389 	tp->ptp_adjust = 0;
6390 	dev_consume_skb_any(tp->tx_tstamp_skb);
6391 	tp->tx_tstamp_skb = NULL;
6392 }
6393 
tg3_irq_sync(struct tg3 * tp)6394 static inline int tg3_irq_sync(struct tg3 *tp)
6395 {
6396 	return tp->irq_sync;
6397 }
6398 
tg3_rd32_loop(struct tg3 * tp,u32 * dst,u32 off,u32 len)6399 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6400 {
6401 	int i;
6402 
6403 	dst = (u32 *)((u8 *)dst + off);
6404 	for (i = 0; i < len; i += sizeof(u32))
6405 		*dst++ = tr32(off + i);
6406 }
6407 
tg3_dump_legacy_regs(struct tg3 * tp,u32 * regs)6408 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6409 {
6410 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6411 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6412 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6413 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6414 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6415 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6416 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6417 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6418 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6419 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6420 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6421 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6422 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6423 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6424 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6425 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6426 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6427 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6428 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6429 
6430 	if (tg3_flag(tp, SUPPORT_MSIX))
6431 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6432 
6433 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6434 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6435 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6436 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6437 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6438 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6439 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6440 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6441 
6442 	if (!tg3_flag(tp, 5705_PLUS)) {
6443 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6444 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6445 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6446 	}
6447 
6448 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6449 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6450 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6451 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6452 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6453 
6454 	if (tg3_flag(tp, NVRAM))
6455 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6456 }
6457 
tg3_dump_state(struct tg3 * tp)6458 static void tg3_dump_state(struct tg3 *tp)
6459 {
6460 	int i;
6461 	u32 *regs;
6462 
6463 	/* If it is a PCI error, all registers will be 0xffff,
6464 	 * we don't dump them out, just report the error and return
6465 	 */
6466 	if (tp->pdev->error_state != pci_channel_io_normal) {
6467 		netdev_err(tp->dev, "PCI channel ERROR!\n");
6468 		return;
6469 	}
6470 
6471 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6472 	if (!regs)
6473 		return;
6474 
6475 	if (tg3_flag(tp, PCI_EXPRESS)) {
6476 		/* Read up to but not including private PCI registers */
6477 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6478 			regs[i / sizeof(u32)] = tr32(i);
6479 	} else
6480 		tg3_dump_legacy_regs(tp, regs);
6481 
6482 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6483 		if (!regs[i + 0] && !regs[i + 1] &&
6484 		    !regs[i + 2] && !regs[i + 3])
6485 			continue;
6486 
6487 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6488 			   i * 4,
6489 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6490 	}
6491 
6492 	kfree(regs);
6493 
6494 	for (i = 0; i < tp->irq_cnt; i++) {
6495 		struct tg3_napi *tnapi = &tp->napi[i];
6496 
6497 		/* SW status block */
6498 		netdev_err(tp->dev,
6499 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6500 			   i,
6501 			   tnapi->hw_status->status,
6502 			   tnapi->hw_status->status_tag,
6503 			   tnapi->hw_status->rx_jumbo_consumer,
6504 			   tnapi->hw_status->rx_consumer,
6505 			   tnapi->hw_status->rx_mini_consumer,
6506 			   tnapi->hw_status->idx[0].rx_producer,
6507 			   tnapi->hw_status->idx[0].tx_consumer);
6508 
6509 		netdev_err(tp->dev,
6510 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6511 			   i,
6512 			   tnapi->last_tag, tnapi->last_irq_tag,
6513 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6514 			   tnapi->rx_rcb_ptr,
6515 			   tnapi->prodring.rx_std_prod_idx,
6516 			   tnapi->prodring.rx_std_cons_idx,
6517 			   tnapi->prodring.rx_jmb_prod_idx,
6518 			   tnapi->prodring.rx_jmb_cons_idx);
6519 	}
6520 }
6521 
6522 /* This is called whenever we suspect that the system chipset is re-
6523  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6524  * is bogus tx completions. We try to recover by setting the
6525  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6526  * in the workqueue.
6527  */
tg3_tx_recover(struct tg3 * tp)6528 static void tg3_tx_recover(struct tg3 *tp)
6529 {
6530 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6531 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6532 
6533 	netdev_warn(tp->dev,
6534 		    "The system may be re-ordering memory-mapped I/O "
6535 		    "cycles to the network device, attempting to recover. "
6536 		    "Please report the problem to the driver maintainer "
6537 		    "and include system chipset information.\n");
6538 
6539 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6540 }
6541 
tg3_tx_avail(struct tg3_napi * tnapi)6542 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6543 {
6544 	/* Tell compiler to fetch tx indices from memory. */
6545 	barrier();
6546 	return tnapi->tx_pending -
6547 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6548 }
6549 
6550 /* Tigon3 never reports partial packet sends.  So we do not
6551  * need special logic to handle SKBs that have not had all
6552  * of their frags sent yet, like SunGEM does.
6553  */
tg3_tx(struct tg3_napi * tnapi)6554 static void tg3_tx(struct tg3_napi *tnapi)
6555 {
6556 	struct tg3 *tp = tnapi->tp;
6557 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6558 	u32 sw_idx = tnapi->tx_cons;
6559 	struct netdev_queue *txq;
6560 	int index = tnapi - tp->napi;
6561 	unsigned int pkts_compl = 0, bytes_compl = 0;
6562 
6563 	if (tg3_flag(tp, ENABLE_TSS))
6564 		index--;
6565 
6566 	txq = netdev_get_tx_queue(tp->dev, index);
6567 
6568 	while (sw_idx != hw_idx) {
6569 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6570 		bool complete_skb_later = false;
6571 		struct sk_buff *skb = ri->skb;
6572 		int i, tx_bug = 0;
6573 
6574 		if (unlikely(skb == NULL)) {
6575 			tg3_tx_recover(tp);
6576 			return;
6577 		}
6578 
6579 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6580 			struct skb_shared_hwtstamps timestamp;
6581 			u64 hwclock;
6582 
6583 			tg3_read_tx_tstamp(tp, &hwclock);
6584 			if (hwclock != tp->pre_tx_ts) {
6585 				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6586 				skb_tstamp_tx(skb, &timestamp);
6587 				tp->pre_tx_ts = 0;
6588 			} else {
6589 				tp->tx_tstamp_skb = skb;
6590 				complete_skb_later = true;
6591 			}
6592 		}
6593 
6594 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6595 				 skb_headlen(skb), DMA_TO_DEVICE);
6596 
6597 		ri->skb = NULL;
6598 
6599 		while (ri->fragmented) {
6600 			ri->fragmented = false;
6601 			sw_idx = NEXT_TX(sw_idx);
6602 			ri = &tnapi->tx_buffers[sw_idx];
6603 		}
6604 
6605 		sw_idx = NEXT_TX(sw_idx);
6606 
6607 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6608 			ri = &tnapi->tx_buffers[sw_idx];
6609 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6610 				tx_bug = 1;
6611 
6612 			dma_unmap_page(&tp->pdev->dev,
6613 				       dma_unmap_addr(ri, mapping),
6614 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6615 				       DMA_TO_DEVICE);
6616 
6617 			while (ri->fragmented) {
6618 				ri->fragmented = false;
6619 				sw_idx = NEXT_TX(sw_idx);
6620 				ri = &tnapi->tx_buffers[sw_idx];
6621 			}
6622 
6623 			sw_idx = NEXT_TX(sw_idx);
6624 		}
6625 
6626 		pkts_compl++;
6627 		bytes_compl += skb->len;
6628 
6629 		if (!complete_skb_later)
6630 			dev_consume_skb_any(skb);
6631 		else
6632 			ptp_schedule_worker(tp->ptp_clock, 0);
6633 
6634 		if (unlikely(tx_bug)) {
6635 			tg3_tx_recover(tp);
6636 			return;
6637 		}
6638 	}
6639 
6640 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6641 
6642 	tnapi->tx_cons = sw_idx;
6643 
6644 	/* Need to make the tx_cons update visible to __tg3_start_xmit()
6645 	 * before checking for netif_queue_stopped().  Without the
6646 	 * memory barrier, there is a small possibility that __tg3_start_xmit()
6647 	 * will miss it and cause the queue to be stopped forever.
6648 	 */
6649 	smp_mb();
6650 
6651 	if (unlikely(netif_tx_queue_stopped(txq) &&
6652 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6653 		__netif_tx_lock(txq, smp_processor_id());
6654 		if (netif_tx_queue_stopped(txq) &&
6655 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6656 			netif_tx_wake_queue(txq);
6657 		__netif_tx_unlock(txq);
6658 	}
6659 }
6660 
tg3_frag_free(bool is_frag,void * data)6661 static void tg3_frag_free(bool is_frag, void *data)
6662 {
6663 	if (is_frag)
6664 		skb_free_frag(data);
6665 	else
6666 		kfree(data);
6667 }
6668 
tg3_rx_data_free(struct tg3 * tp,struct ring_info * ri,u32 map_sz)6669 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6670 {
6671 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6672 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6673 
6674 	if (!ri->data)
6675 		return;
6676 
6677 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6678 			 DMA_FROM_DEVICE);
6679 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6680 	ri->data = NULL;
6681 }
6682 
6683 
6684 /* Returns size of skb allocated or < 0 on error.
6685  *
6686  * We only need to fill in the address because the other members
6687  * of the RX descriptor are invariant, see tg3_init_rings.
6688  *
6689  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6690  * posting buffers we only dirty the first cache line of the RX
6691  * descriptor (containing the address).  Whereas for the RX status
6692  * buffers the cpu only reads the last cacheline of the RX descriptor
6693  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6694  */
tg3_alloc_rx_data(struct tg3 * tp,struct tg3_rx_prodring_set * tpr,u32 opaque_key,u32 dest_idx_unmasked,unsigned int * frag_size)6695 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6696 			     u32 opaque_key, u32 dest_idx_unmasked,
6697 			     unsigned int *frag_size)
6698 {
6699 	struct tg3_rx_buffer_desc *desc;
6700 	struct ring_info *map;
6701 	u8 *data;
6702 	dma_addr_t mapping;
6703 	int skb_size, data_size, dest_idx;
6704 
6705 	switch (opaque_key) {
6706 	case RXD_OPAQUE_RING_STD:
6707 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6708 		desc = &tpr->rx_std[dest_idx];
6709 		map = &tpr->rx_std_buffers[dest_idx];
6710 		data_size = tp->rx_pkt_map_sz;
6711 		break;
6712 
6713 	case RXD_OPAQUE_RING_JUMBO:
6714 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6715 		desc = &tpr->rx_jmb[dest_idx].std;
6716 		map = &tpr->rx_jmb_buffers[dest_idx];
6717 		data_size = TG3_RX_JMB_MAP_SZ;
6718 		break;
6719 
6720 	default:
6721 		return -EINVAL;
6722 	}
6723 
6724 	/* Do not overwrite any of the map or rp information
6725 	 * until we are sure we can commit to a new buffer.
6726 	 *
6727 	 * Callers depend upon this behavior and assume that
6728 	 * we leave everything unchanged if we fail.
6729 	 */
6730 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6731 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6732 	if (skb_size <= PAGE_SIZE) {
6733 		data = napi_alloc_frag(skb_size);
6734 		*frag_size = skb_size;
6735 	} else {
6736 		data = kmalloc(skb_size, GFP_ATOMIC);
6737 		*frag_size = 0;
6738 	}
6739 	if (!data)
6740 		return -ENOMEM;
6741 
6742 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6743 				 data_size, DMA_FROM_DEVICE);
6744 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6745 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6746 		return -EIO;
6747 	}
6748 
6749 	map->data = data;
6750 	dma_unmap_addr_set(map, mapping, mapping);
6751 
6752 	desc->addr_hi = ((u64)mapping >> 32);
6753 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6754 
6755 	return data_size;
6756 }
6757 
6758 /* We only need to move over in the address because the other
6759  * members of the RX descriptor are invariant.  See notes above
6760  * tg3_alloc_rx_data for full details.
6761  */
tg3_recycle_rx(struct tg3_napi * tnapi,struct tg3_rx_prodring_set * dpr,u32 opaque_key,int src_idx,u32 dest_idx_unmasked)6762 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6763 			   struct tg3_rx_prodring_set *dpr,
6764 			   u32 opaque_key, int src_idx,
6765 			   u32 dest_idx_unmasked)
6766 {
6767 	struct tg3 *tp = tnapi->tp;
6768 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6769 	struct ring_info *src_map, *dest_map;
6770 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6771 	int dest_idx;
6772 
6773 	switch (opaque_key) {
6774 	case RXD_OPAQUE_RING_STD:
6775 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6776 		dest_desc = &dpr->rx_std[dest_idx];
6777 		dest_map = &dpr->rx_std_buffers[dest_idx];
6778 		src_desc = &spr->rx_std[src_idx];
6779 		src_map = &spr->rx_std_buffers[src_idx];
6780 		break;
6781 
6782 	case RXD_OPAQUE_RING_JUMBO:
6783 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6784 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6785 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6786 		src_desc = &spr->rx_jmb[src_idx].std;
6787 		src_map = &spr->rx_jmb_buffers[src_idx];
6788 		break;
6789 
6790 	default:
6791 		return;
6792 	}
6793 
6794 	dest_map->data = src_map->data;
6795 	dma_unmap_addr_set(dest_map, mapping,
6796 			   dma_unmap_addr(src_map, mapping));
6797 	dest_desc->addr_hi = src_desc->addr_hi;
6798 	dest_desc->addr_lo = src_desc->addr_lo;
6799 
6800 	/* Ensure that the update to the skb happens after the physical
6801 	 * addresses have been transferred to the new BD location.
6802 	 */
6803 	smp_wmb();
6804 
6805 	src_map->data = NULL;
6806 }
6807 
6808 /* The RX ring scheme is composed of multiple rings which post fresh
6809  * buffers to the chip, and one special ring the chip uses to report
6810  * status back to the host.
6811  *
6812  * The special ring reports the status of received packets to the
6813  * host.  The chip does not write into the original descriptor the
6814  * RX buffer was obtained from.  The chip simply takes the original
6815  * descriptor as provided by the host, updates the status and length
6816  * field, then writes this into the next status ring entry.
6817  *
6818  * Each ring the host uses to post buffers to the chip is described
6819  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6820  * it is first placed into the on-chip ram.  When the packet's length
6821  * is known, it walks down the TG3_BDINFO entries to select the ring.
6822  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6823  * which is within the range of the new packet's length is chosen.
6824  *
6825  * The "separate ring for rx status" scheme may sound queer, but it makes
6826  * sense from a cache coherency perspective.  If only the host writes
6827  * to the buffer post rings, and only the chip writes to the rx status
6828  * rings, then cache lines never move beyond shared-modified state.
6829  * If both the host and chip were to write into the same ring, cache line
6830  * eviction could occur since both entities want it in an exclusive state.
6831  */
tg3_rx(struct tg3_napi * tnapi,int budget)6832 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6833 {
6834 	struct tg3 *tp = tnapi->tp;
6835 	u32 work_mask, rx_std_posted = 0;
6836 	u32 std_prod_idx, jmb_prod_idx;
6837 	u32 sw_idx = tnapi->rx_rcb_ptr;
6838 	u16 hw_idx;
6839 	int received;
6840 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6841 
6842 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6843 	/*
6844 	 * We need to order the read of hw_idx and the read of
6845 	 * the opaque cookie.
6846 	 */
6847 	rmb();
6848 	work_mask = 0;
6849 	received = 0;
6850 	std_prod_idx = tpr->rx_std_prod_idx;
6851 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6852 	while (sw_idx != hw_idx && budget > 0) {
6853 		struct ring_info *ri;
6854 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6855 		unsigned int len;
6856 		struct sk_buff *skb;
6857 		dma_addr_t dma_addr;
6858 		u32 opaque_key, desc_idx, *post_ptr;
6859 		u8 *data;
6860 		u64 tstamp = 0;
6861 
6862 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6863 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6864 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6865 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6866 			dma_addr = dma_unmap_addr(ri, mapping);
6867 			data = ri->data;
6868 			post_ptr = &std_prod_idx;
6869 			rx_std_posted++;
6870 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6871 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6872 			dma_addr = dma_unmap_addr(ri, mapping);
6873 			data = ri->data;
6874 			post_ptr = &jmb_prod_idx;
6875 		} else
6876 			goto next_pkt_nopost;
6877 
6878 		work_mask |= opaque_key;
6879 
6880 		if (desc->err_vlan & RXD_ERR_MASK) {
6881 		drop_it:
6882 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6883 				       desc_idx, *post_ptr);
6884 		drop_it_no_recycle:
6885 			/* Other statistics kept track of by card. */
6886 			tnapi->rx_dropped++;
6887 			goto next_pkt;
6888 		}
6889 
6890 		prefetch(data + TG3_RX_OFFSET(tp));
6891 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6892 		      ETH_FCS_LEN;
6893 
6894 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6895 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6896 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6897 		     RXD_FLAG_PTPSTAT_PTPV2) {
6898 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6899 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6900 		}
6901 
6902 		if (len > TG3_RX_COPY_THRESH(tp)) {
6903 			int skb_size;
6904 			unsigned int frag_size;
6905 
6906 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6907 						    *post_ptr, &frag_size);
6908 			if (skb_size < 0)
6909 				goto drop_it;
6910 
6911 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6912 					 DMA_FROM_DEVICE);
6913 
6914 			/* Ensure that the update to the data happens
6915 			 * after the usage of the old DMA mapping.
6916 			 */
6917 			smp_wmb();
6918 
6919 			ri->data = NULL;
6920 
6921 			if (frag_size)
6922 				skb = build_skb(data, frag_size);
6923 			else
6924 				skb = slab_build_skb(data);
6925 			if (!skb) {
6926 				tg3_frag_free(frag_size != 0, data);
6927 				goto drop_it_no_recycle;
6928 			}
6929 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6930 		} else {
6931 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6932 				       desc_idx, *post_ptr);
6933 
6934 			skb = netdev_alloc_skb(tp->dev,
6935 					       len + TG3_RAW_IP_ALIGN);
6936 			if (skb == NULL)
6937 				goto drop_it_no_recycle;
6938 
6939 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6940 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6941 						DMA_FROM_DEVICE);
6942 			memcpy(skb->data,
6943 			       data + TG3_RX_OFFSET(tp),
6944 			       len);
6945 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6946 						   len, DMA_FROM_DEVICE);
6947 		}
6948 
6949 		skb_put(skb, len);
6950 		if (tstamp)
6951 			tg3_hwclock_to_timestamp(tp, tstamp,
6952 						 skb_hwtstamps(skb));
6953 
6954 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6955 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6956 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6957 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6958 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6959 		else
6960 			skb_checksum_none_assert(skb);
6961 
6962 		skb->protocol = eth_type_trans(skb, tp->dev);
6963 
6964 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6965 		    skb->protocol != htons(ETH_P_8021Q) &&
6966 		    skb->protocol != htons(ETH_P_8021AD)) {
6967 			dev_kfree_skb_any(skb);
6968 			goto drop_it_no_recycle;
6969 		}
6970 
6971 		if (desc->type_flags & RXD_FLAG_VLAN &&
6972 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6973 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6974 					       desc->err_vlan & RXD_VLAN_MASK);
6975 
6976 		napi_gro_receive(&tnapi->napi, skb);
6977 
6978 		received++;
6979 		budget--;
6980 
6981 next_pkt:
6982 		(*post_ptr)++;
6983 
6984 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6985 			tpr->rx_std_prod_idx = std_prod_idx &
6986 					       tp->rx_std_ring_mask;
6987 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6988 				     tpr->rx_std_prod_idx);
6989 			work_mask &= ~RXD_OPAQUE_RING_STD;
6990 			rx_std_posted = 0;
6991 		}
6992 next_pkt_nopost:
6993 		sw_idx++;
6994 		sw_idx &= tp->rx_ret_ring_mask;
6995 
6996 		/* Refresh hw_idx to see if there is new work */
6997 		if (sw_idx == hw_idx) {
6998 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6999 			rmb();
7000 		}
7001 	}
7002 
7003 	/* ACK the status ring. */
7004 	tnapi->rx_rcb_ptr = sw_idx;
7005 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
7006 
7007 	/* Refill RX ring(s). */
7008 	if (!tg3_flag(tp, ENABLE_RSS)) {
7009 		/* Sync BD data before updating mailbox */
7010 		wmb();
7011 
7012 		if (work_mask & RXD_OPAQUE_RING_STD) {
7013 			tpr->rx_std_prod_idx = std_prod_idx &
7014 					       tp->rx_std_ring_mask;
7015 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7016 				     tpr->rx_std_prod_idx);
7017 		}
7018 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7019 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
7020 					       tp->rx_jmb_ring_mask;
7021 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7022 				     tpr->rx_jmb_prod_idx);
7023 		}
7024 	} else if (work_mask) {
7025 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7026 		 * updated before the producer indices can be updated.
7027 		 */
7028 		smp_wmb();
7029 
7030 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7031 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7032 
7033 		if (tnapi != &tp->napi[1]) {
7034 			tp->rx_refill = true;
7035 			napi_schedule(&tp->napi[1].napi);
7036 		}
7037 	}
7038 
7039 	return received;
7040 }
7041 
tg3_poll_link(struct tg3 * tp)7042 static void tg3_poll_link(struct tg3 *tp)
7043 {
7044 	/* handle link change and other phy events */
7045 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7046 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7047 
7048 		if (sblk->status & SD_STATUS_LINK_CHG) {
7049 			sblk->status = SD_STATUS_UPDATED |
7050 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7051 			spin_lock(&tp->lock);
7052 			if (tg3_flag(tp, USE_PHYLIB)) {
7053 				tw32_f(MAC_STATUS,
7054 				     (MAC_STATUS_SYNC_CHANGED |
7055 				      MAC_STATUS_CFG_CHANGED |
7056 				      MAC_STATUS_MI_COMPLETION |
7057 				      MAC_STATUS_LNKSTATE_CHANGED));
7058 				udelay(40);
7059 			} else
7060 				tg3_setup_phy(tp, false);
7061 			spin_unlock(&tp->lock);
7062 		}
7063 	}
7064 }
7065 
tg3_rx_prodring_xfer(struct tg3 * tp,struct tg3_rx_prodring_set * dpr,struct tg3_rx_prodring_set * spr)7066 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7067 				struct tg3_rx_prodring_set *dpr,
7068 				struct tg3_rx_prodring_set *spr)
7069 {
7070 	u32 si, di, cpycnt, src_prod_idx;
7071 	int i, err = 0;
7072 
7073 	while (1) {
7074 		src_prod_idx = spr->rx_std_prod_idx;
7075 
7076 		/* Make sure updates to the rx_std_buffers[] entries and the
7077 		 * standard producer index are seen in the correct order.
7078 		 */
7079 		smp_rmb();
7080 
7081 		if (spr->rx_std_cons_idx == src_prod_idx)
7082 			break;
7083 
7084 		if (spr->rx_std_cons_idx < src_prod_idx)
7085 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7086 		else
7087 			cpycnt = tp->rx_std_ring_mask + 1 -
7088 				 spr->rx_std_cons_idx;
7089 
7090 		cpycnt = min(cpycnt,
7091 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7092 
7093 		si = spr->rx_std_cons_idx;
7094 		di = dpr->rx_std_prod_idx;
7095 
7096 		for (i = di; i < di + cpycnt; i++) {
7097 			if (dpr->rx_std_buffers[i].data) {
7098 				cpycnt = i - di;
7099 				err = -ENOSPC;
7100 				break;
7101 			}
7102 		}
7103 
7104 		if (!cpycnt)
7105 			break;
7106 
7107 		/* Ensure that updates to the rx_std_buffers ring and the
7108 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7109 		 * ordered correctly WRT the skb check above.
7110 		 */
7111 		smp_rmb();
7112 
7113 		memcpy(&dpr->rx_std_buffers[di],
7114 		       &spr->rx_std_buffers[si],
7115 		       cpycnt * sizeof(struct ring_info));
7116 
7117 		for (i = 0; i < cpycnt; i++, di++, si++) {
7118 			struct tg3_rx_buffer_desc *sbd, *dbd;
7119 			sbd = &spr->rx_std[si];
7120 			dbd = &dpr->rx_std[di];
7121 			dbd->addr_hi = sbd->addr_hi;
7122 			dbd->addr_lo = sbd->addr_lo;
7123 		}
7124 
7125 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7126 				       tp->rx_std_ring_mask;
7127 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7128 				       tp->rx_std_ring_mask;
7129 	}
7130 
7131 	while (1) {
7132 		src_prod_idx = spr->rx_jmb_prod_idx;
7133 
7134 		/* Make sure updates to the rx_jmb_buffers[] entries and
7135 		 * the jumbo producer index are seen in the correct order.
7136 		 */
7137 		smp_rmb();
7138 
7139 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7140 			break;
7141 
7142 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7143 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7144 		else
7145 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7146 				 spr->rx_jmb_cons_idx;
7147 
7148 		cpycnt = min(cpycnt,
7149 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7150 
7151 		si = spr->rx_jmb_cons_idx;
7152 		di = dpr->rx_jmb_prod_idx;
7153 
7154 		for (i = di; i < di + cpycnt; i++) {
7155 			if (dpr->rx_jmb_buffers[i].data) {
7156 				cpycnt = i - di;
7157 				err = -ENOSPC;
7158 				break;
7159 			}
7160 		}
7161 
7162 		if (!cpycnt)
7163 			break;
7164 
7165 		/* Ensure that updates to the rx_jmb_buffers ring and the
7166 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7167 		 * ordered correctly WRT the skb check above.
7168 		 */
7169 		smp_rmb();
7170 
7171 		memcpy(&dpr->rx_jmb_buffers[di],
7172 		       &spr->rx_jmb_buffers[si],
7173 		       cpycnt * sizeof(struct ring_info));
7174 
7175 		for (i = 0; i < cpycnt; i++, di++, si++) {
7176 			struct tg3_rx_buffer_desc *sbd, *dbd;
7177 			sbd = &spr->rx_jmb[si].std;
7178 			dbd = &dpr->rx_jmb[di].std;
7179 			dbd->addr_hi = sbd->addr_hi;
7180 			dbd->addr_lo = sbd->addr_lo;
7181 		}
7182 
7183 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7184 				       tp->rx_jmb_ring_mask;
7185 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7186 				       tp->rx_jmb_ring_mask;
7187 	}
7188 
7189 	return err;
7190 }
7191 
tg3_poll_work(struct tg3_napi * tnapi,int work_done,int budget)7192 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7193 {
7194 	struct tg3 *tp = tnapi->tp;
7195 
7196 	/* run TX completion thread */
7197 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7198 		tg3_tx(tnapi);
7199 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7200 			return work_done;
7201 	}
7202 
7203 	if (!tnapi->rx_rcb_prod_idx)
7204 		return work_done;
7205 
7206 	/* run RX thread, within the bounds set by NAPI.
7207 	 * All RX "locking" is done by ensuring outside
7208 	 * code synchronizes with tg3->napi.poll()
7209 	 */
7210 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7211 		work_done += tg3_rx(tnapi, budget - work_done);
7212 
7213 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7214 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7215 		int i, err = 0;
7216 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7217 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7218 
7219 		tp->rx_refill = false;
7220 		for (i = 1; i <= tp->rxq_cnt; i++)
7221 			err |= tg3_rx_prodring_xfer(tp, dpr,
7222 						    &tp->napi[i].prodring);
7223 
7224 		wmb();
7225 
7226 		if (std_prod_idx != dpr->rx_std_prod_idx)
7227 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7228 				     dpr->rx_std_prod_idx);
7229 
7230 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7231 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7232 				     dpr->rx_jmb_prod_idx);
7233 
7234 		if (err)
7235 			tw32_f(HOSTCC_MODE, tp->coal_now);
7236 	}
7237 
7238 	return work_done;
7239 }
7240 
tg3_reset_task_schedule(struct tg3 * tp)7241 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7242 {
7243 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7244 		schedule_work(&tp->reset_task);
7245 }
7246 
tg3_reset_task_cancel(struct tg3 * tp)7247 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7248 {
7249 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7250 		cancel_work_sync(&tp->reset_task);
7251 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7252 }
7253 
tg3_poll_msix(struct napi_struct * napi,int budget)7254 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7255 {
7256 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7257 	struct tg3 *tp = tnapi->tp;
7258 	int work_done = 0;
7259 	struct tg3_hw_status *sblk = tnapi->hw_status;
7260 
7261 	while (1) {
7262 		work_done = tg3_poll_work(tnapi, work_done, budget);
7263 
7264 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7265 			goto tx_recovery;
7266 
7267 		if (unlikely(work_done >= budget))
7268 			break;
7269 
7270 		/* tp->last_tag is used in tg3_int_reenable() below
7271 		 * to tell the hw how much work has been processed,
7272 		 * so we must read it before checking for more work.
7273 		 */
7274 		tnapi->last_tag = sblk->status_tag;
7275 		tnapi->last_irq_tag = tnapi->last_tag;
7276 		rmb();
7277 
7278 		/* check for RX/TX work to do */
7279 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7280 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7281 
7282 			/* This test here is not race free, but will reduce
7283 			 * the number of interrupts by looping again.
7284 			 */
7285 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7286 				continue;
7287 
7288 			napi_complete_done(napi, work_done);
7289 			/* Reenable interrupts. */
7290 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7291 
7292 			/* This test here is synchronized by napi_schedule()
7293 			 * and napi_complete() to close the race condition.
7294 			 */
7295 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7296 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7297 						  HOSTCC_MODE_ENABLE |
7298 						  tnapi->coal_now);
7299 			}
7300 			break;
7301 		}
7302 	}
7303 
7304 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7305 	return work_done;
7306 
7307 tx_recovery:
7308 	/* work_done is guaranteed to be less than budget. */
7309 	napi_complete(napi);
7310 	tg3_reset_task_schedule(tp);
7311 	return work_done;
7312 }
7313 
tg3_process_error(struct tg3 * tp)7314 static void tg3_process_error(struct tg3 *tp)
7315 {
7316 	u32 val;
7317 	bool real_error = false;
7318 
7319 	if (tg3_flag(tp, ERROR_PROCESSED))
7320 		return;
7321 
7322 	/* Check Flow Attention register */
7323 	val = tr32(HOSTCC_FLOW_ATTN);
7324 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7325 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7326 		real_error = true;
7327 	}
7328 
7329 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7330 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7331 		real_error = true;
7332 	}
7333 
7334 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7335 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7336 		real_error = true;
7337 	}
7338 
7339 	if (!real_error)
7340 		return;
7341 
7342 	tg3_dump_state(tp);
7343 
7344 	tg3_flag_set(tp, ERROR_PROCESSED);
7345 	tg3_reset_task_schedule(tp);
7346 }
7347 
tg3_poll(struct napi_struct * napi,int budget)7348 static int tg3_poll(struct napi_struct *napi, int budget)
7349 {
7350 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7351 	struct tg3 *tp = tnapi->tp;
7352 	int work_done = 0;
7353 	struct tg3_hw_status *sblk = tnapi->hw_status;
7354 
7355 	while (1) {
7356 		if (sblk->status & SD_STATUS_ERROR)
7357 			tg3_process_error(tp);
7358 
7359 		tg3_poll_link(tp);
7360 
7361 		work_done = tg3_poll_work(tnapi, work_done, budget);
7362 
7363 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7364 			goto tx_recovery;
7365 
7366 		if (unlikely(work_done >= budget))
7367 			break;
7368 
7369 		if (tg3_flag(tp, TAGGED_STATUS)) {
7370 			/* tp->last_tag is used in tg3_int_reenable() below
7371 			 * to tell the hw how much work has been processed,
7372 			 * so we must read it before checking for more work.
7373 			 */
7374 			tnapi->last_tag = sblk->status_tag;
7375 			tnapi->last_irq_tag = tnapi->last_tag;
7376 			rmb();
7377 		} else
7378 			sblk->status &= ~SD_STATUS_UPDATED;
7379 
7380 		if (likely(!tg3_has_work(tnapi))) {
7381 			napi_complete_done(napi, work_done);
7382 			tg3_int_reenable(tnapi);
7383 			break;
7384 		}
7385 	}
7386 
7387 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7388 	return work_done;
7389 
7390 tx_recovery:
7391 	/* work_done is guaranteed to be less than budget. */
7392 	napi_complete(napi);
7393 	tg3_reset_task_schedule(tp);
7394 	return work_done;
7395 }
7396 
tg3_napi_disable(struct tg3 * tp)7397 static void tg3_napi_disable(struct tg3 *tp)
7398 {
7399 	int txq_idx = tp->txq_cnt - 1;
7400 	int rxq_idx = tp->rxq_cnt - 1;
7401 	struct tg3_napi *tnapi;
7402 	int i;
7403 
7404 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
7405 		tnapi = &tp->napi[i];
7406 		if (tnapi->tx_buffers) {
7407 			netif_queue_set_napi(tp->dev, txq_idx,
7408 					     NETDEV_QUEUE_TYPE_TX, NULL);
7409 			txq_idx--;
7410 		}
7411 		if (tnapi->rx_rcb) {
7412 			netif_queue_set_napi(tp->dev, rxq_idx,
7413 					     NETDEV_QUEUE_TYPE_RX, NULL);
7414 			rxq_idx--;
7415 		}
7416 		napi_disable(&tnapi->napi);
7417 	}
7418 }
7419 
tg3_napi_enable(struct tg3 * tp)7420 static void tg3_napi_enable(struct tg3 *tp)
7421 {
7422 	int txq_idx = 0, rxq_idx = 0;
7423 	struct tg3_napi *tnapi;
7424 	int i;
7425 
7426 	for (i = 0; i < tp->irq_cnt; i++) {
7427 		tnapi = &tp->napi[i];
7428 		napi_enable_locked(&tnapi->napi);
7429 		if (tnapi->tx_buffers) {
7430 			netif_queue_set_napi(tp->dev, txq_idx,
7431 					     NETDEV_QUEUE_TYPE_TX,
7432 					     &tnapi->napi);
7433 			txq_idx++;
7434 		}
7435 		if (tnapi->rx_rcb) {
7436 			netif_queue_set_napi(tp->dev, rxq_idx,
7437 					     NETDEV_QUEUE_TYPE_RX,
7438 					     &tnapi->napi);
7439 			rxq_idx++;
7440 		}
7441 	}
7442 }
7443 
tg3_napi_init(struct tg3 * tp)7444 static void tg3_napi_init(struct tg3 *tp)
7445 {
7446 	int i;
7447 
7448 	for (i = 0; i < tp->irq_cnt; i++) {
7449 		netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
7450 				      i ? tg3_poll_msix : tg3_poll);
7451 		netif_napi_set_irq_locked(&tp->napi[i].napi,
7452 					  tp->napi[i].irq_vec);
7453 	}
7454 }
7455 
tg3_napi_fini(struct tg3 * tp)7456 static void tg3_napi_fini(struct tg3 *tp)
7457 {
7458 	int i;
7459 
7460 	for (i = 0; i < tp->irq_cnt; i++)
7461 		netif_napi_del(&tp->napi[i].napi);
7462 }
7463 
tg3_netif_stop(struct tg3 * tp)7464 static inline void tg3_netif_stop(struct tg3 *tp)
7465 {
7466 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7467 	tg3_napi_disable(tp);
7468 	netif_carrier_off(tp->dev);
7469 	netif_tx_disable(tp->dev);
7470 }
7471 
7472 /* tp->lock must be held */
tg3_netif_start(struct tg3 * tp)7473 static inline void tg3_netif_start(struct tg3 *tp)
7474 {
7475 	tg3_ptp_resume(tp);
7476 
7477 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7478 	 * appropriate so long as all callers are assured to
7479 	 * have free tx slots (such as after tg3_init_hw)
7480 	 */
7481 	netif_tx_wake_all_queues(tp->dev);
7482 
7483 	if (tp->link_up)
7484 		netif_carrier_on(tp->dev);
7485 
7486 	tg3_napi_enable(tp);
7487 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7488 	tg3_enable_ints(tp);
7489 }
7490 
tg3_irq_quiesce(struct tg3 * tp)7491 static void tg3_irq_quiesce(struct tg3 *tp)
7492 	__releases(tp->lock)
7493 	__acquires(tp->lock)
7494 {
7495 	int i;
7496 
7497 	BUG_ON(tp->irq_sync);
7498 
7499 	tp->irq_sync = 1;
7500 	smp_mb();
7501 
7502 	spin_unlock_bh(&tp->lock);
7503 
7504 	for (i = 0; i < tp->irq_cnt; i++)
7505 		synchronize_irq(tp->napi[i].irq_vec);
7506 
7507 	spin_lock_bh(&tp->lock);
7508 }
7509 
7510 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7511  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7512  * with as well.  Most of the time, this is not necessary except when
7513  * shutting down the device.
7514  */
tg3_full_lock(struct tg3 * tp,int irq_sync)7515 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7516 {
7517 	spin_lock_bh(&tp->lock);
7518 	if (irq_sync)
7519 		tg3_irq_quiesce(tp);
7520 }
7521 
tg3_full_unlock(struct tg3 * tp)7522 static inline void tg3_full_unlock(struct tg3 *tp)
7523 {
7524 	spin_unlock_bh(&tp->lock);
7525 }
7526 
7527 /* One-shot MSI handler - Chip automatically disables interrupt
7528  * after sending MSI so driver doesn't have to do it.
7529  */
tg3_msi_1shot(int irq,void * dev_id)7530 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7531 {
7532 	struct tg3_napi *tnapi = dev_id;
7533 	struct tg3 *tp = tnapi->tp;
7534 
7535 	prefetch(tnapi->hw_status);
7536 	if (tnapi->rx_rcb)
7537 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7538 
7539 	if (likely(!tg3_irq_sync(tp)))
7540 		napi_schedule(&tnapi->napi);
7541 
7542 	return IRQ_HANDLED;
7543 }
7544 
7545 /* MSI ISR - No need to check for interrupt sharing and no need to
7546  * flush status block and interrupt mailbox. PCI ordering rules
7547  * guarantee that MSI will arrive after the status block.
7548  */
tg3_msi(int irq,void * dev_id)7549 static irqreturn_t tg3_msi(int irq, void *dev_id)
7550 {
7551 	struct tg3_napi *tnapi = dev_id;
7552 	struct tg3 *tp = tnapi->tp;
7553 
7554 	prefetch(tnapi->hw_status);
7555 	if (tnapi->rx_rcb)
7556 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7557 	/*
7558 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7559 	 * chip-internal interrupt pending events.
7560 	 * Writing non-zero to intr-mbox-0 additional tells the
7561 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7562 	 * event coalescing.
7563 	 */
7564 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7565 	if (likely(!tg3_irq_sync(tp)))
7566 		napi_schedule(&tnapi->napi);
7567 
7568 	return IRQ_RETVAL(1);
7569 }
7570 
tg3_interrupt(int irq,void * dev_id)7571 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7572 {
7573 	struct tg3_napi *tnapi = dev_id;
7574 	struct tg3 *tp = tnapi->tp;
7575 	struct tg3_hw_status *sblk = tnapi->hw_status;
7576 	unsigned int handled = 1;
7577 
7578 	/* In INTx mode, it is possible for the interrupt to arrive at
7579 	 * the CPU before the status block posted prior to the interrupt.
7580 	 * Reading the PCI State register will confirm whether the
7581 	 * interrupt is ours and will flush the status block.
7582 	 */
7583 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7584 		if (tg3_flag(tp, CHIP_RESETTING) ||
7585 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7586 			handled = 0;
7587 			goto out;
7588 		}
7589 	}
7590 
7591 	/*
7592 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7593 	 * chip-internal interrupt pending events.
7594 	 * Writing non-zero to intr-mbox-0 additional tells the
7595 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7596 	 * event coalescing.
7597 	 *
7598 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7599 	 * spurious interrupts.  The flush impacts performance but
7600 	 * excessive spurious interrupts can be worse in some cases.
7601 	 */
7602 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7603 	if (tg3_irq_sync(tp))
7604 		goto out;
7605 	sblk->status &= ~SD_STATUS_UPDATED;
7606 	if (likely(tg3_has_work(tnapi))) {
7607 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7608 		napi_schedule(&tnapi->napi);
7609 	} else {
7610 		/* No work, shared interrupt perhaps?  re-enable
7611 		 * interrupts, and flush that PCI write
7612 		 */
7613 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7614 			       0x00000000);
7615 	}
7616 out:
7617 	return IRQ_RETVAL(handled);
7618 }
7619 
tg3_interrupt_tagged(int irq,void * dev_id)7620 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7621 {
7622 	struct tg3_napi *tnapi = dev_id;
7623 	struct tg3 *tp = tnapi->tp;
7624 	struct tg3_hw_status *sblk = tnapi->hw_status;
7625 	unsigned int handled = 1;
7626 
7627 	/* In INTx mode, it is possible for the interrupt to arrive at
7628 	 * the CPU before the status block posted prior to the interrupt.
7629 	 * Reading the PCI State register will confirm whether the
7630 	 * interrupt is ours and will flush the status block.
7631 	 */
7632 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7633 		if (tg3_flag(tp, CHIP_RESETTING) ||
7634 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7635 			handled = 0;
7636 			goto out;
7637 		}
7638 	}
7639 
7640 	/*
7641 	 * writing any value to intr-mbox-0 clears PCI INTA# and
7642 	 * chip-internal interrupt pending events.
7643 	 * writing non-zero to intr-mbox-0 additional tells the
7644 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7645 	 * event coalescing.
7646 	 *
7647 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7648 	 * spurious interrupts.  The flush impacts performance but
7649 	 * excessive spurious interrupts can be worse in some cases.
7650 	 */
7651 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7652 
7653 	/*
7654 	 * In a shared interrupt configuration, sometimes other devices'
7655 	 * interrupts will scream.  We record the current status tag here
7656 	 * so that the above check can report that the screaming interrupts
7657 	 * are unhandled.  Eventually they will be silenced.
7658 	 */
7659 	tnapi->last_irq_tag = sblk->status_tag;
7660 
7661 	if (tg3_irq_sync(tp))
7662 		goto out;
7663 
7664 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7665 
7666 	napi_schedule(&tnapi->napi);
7667 
7668 out:
7669 	return IRQ_RETVAL(handled);
7670 }
7671 
7672 /* ISR for interrupt test */
tg3_test_isr(int irq,void * dev_id)7673 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7674 {
7675 	struct tg3_napi *tnapi = dev_id;
7676 	struct tg3 *tp = tnapi->tp;
7677 	struct tg3_hw_status *sblk = tnapi->hw_status;
7678 
7679 	if ((sblk->status & SD_STATUS_UPDATED) ||
7680 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7681 		tg3_disable_ints(tp);
7682 		return IRQ_RETVAL(1);
7683 	}
7684 	return IRQ_RETVAL(0);
7685 }
7686 
7687 #ifdef CONFIG_NET_POLL_CONTROLLER
tg3_poll_controller(struct net_device * dev)7688 static void tg3_poll_controller(struct net_device *dev)
7689 {
7690 	int i;
7691 	struct tg3 *tp = netdev_priv(dev);
7692 
7693 	if (tg3_irq_sync(tp))
7694 		return;
7695 
7696 	for (i = 0; i < tp->irq_cnt; i++)
7697 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7698 }
7699 #endif
7700 
tg3_tx_timeout(struct net_device * dev,unsigned int txqueue)7701 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7702 {
7703 	struct tg3 *tp = netdev_priv(dev);
7704 
7705 	if (netif_msg_tx_err(tp)) {
7706 		netdev_err(dev, "transmit timed out, resetting\n");
7707 		tg3_dump_state(tp);
7708 	}
7709 
7710 	tg3_reset_task_schedule(tp);
7711 }
7712 
7713 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
tg3_4g_overflow_test(dma_addr_t mapping,int len)7714 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7715 {
7716 	u32 base = (u32) mapping & 0xffffffff;
7717 
7718 	return base + len + 8 < base;
7719 }
7720 
7721 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7722  * of any 4GB boundaries: 4G, 8G, etc
7723  */
tg3_4g_tso_overflow_test(struct tg3 * tp,dma_addr_t mapping,u32 len,u32 mss)7724 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7725 					   u32 len, u32 mss)
7726 {
7727 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7728 		u32 base = (u32) mapping & 0xffffffff;
7729 
7730 		return ((base + len + (mss & 0x3fff)) < base);
7731 	}
7732 	return 0;
7733 }
7734 
7735 /* Test for DMA addresses > 40-bit */
tg3_40bit_overflow_test(struct tg3 * tp,dma_addr_t mapping,int len)7736 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7737 					  int len)
7738 {
7739 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7740 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7741 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7742 	return 0;
7743 #else
7744 	return 0;
7745 #endif
7746 }
7747 
tg3_tx_set_bd(struct tg3_tx_buffer_desc * txbd,dma_addr_t mapping,u32 len,u32 flags,u32 mss,u32 vlan)7748 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7749 				 dma_addr_t mapping, u32 len, u32 flags,
7750 				 u32 mss, u32 vlan)
7751 {
7752 	txbd->addr_hi = ((u64) mapping >> 32);
7753 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7754 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7755 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7756 }
7757 
tg3_tx_frag_set(struct tg3_napi * tnapi,u32 * entry,u32 * budget,dma_addr_t map,u32 len,u32 flags,u32 mss,u32 vlan)7758 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7759 			    dma_addr_t map, u32 len, u32 flags,
7760 			    u32 mss, u32 vlan)
7761 {
7762 	struct tg3 *tp = tnapi->tp;
7763 	bool hwbug = false;
7764 
7765 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7766 		hwbug = true;
7767 
7768 	if (tg3_4g_overflow_test(map, len))
7769 		hwbug = true;
7770 
7771 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7772 		hwbug = true;
7773 
7774 	if (tg3_40bit_overflow_test(tp, map, len))
7775 		hwbug = true;
7776 
7777 	if (tp->dma_limit) {
7778 		u32 prvidx = *entry;
7779 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7780 		while (len > tp->dma_limit && *budget) {
7781 			u32 frag_len = tp->dma_limit;
7782 			len -= tp->dma_limit;
7783 
7784 			/* Avoid the 8byte DMA problem */
7785 			if (len <= 8) {
7786 				len += tp->dma_limit / 2;
7787 				frag_len = tp->dma_limit / 2;
7788 			}
7789 
7790 			tnapi->tx_buffers[*entry].fragmented = true;
7791 
7792 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7793 				      frag_len, tmp_flag, mss, vlan);
7794 			*budget -= 1;
7795 			prvidx = *entry;
7796 			*entry = NEXT_TX(*entry);
7797 
7798 			map += frag_len;
7799 		}
7800 
7801 		if (len) {
7802 			if (*budget) {
7803 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7804 					      len, flags, mss, vlan);
7805 				*budget -= 1;
7806 				*entry = NEXT_TX(*entry);
7807 			} else {
7808 				hwbug = true;
7809 				tnapi->tx_buffers[prvidx].fragmented = false;
7810 			}
7811 		}
7812 	} else {
7813 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7814 			      len, flags, mss, vlan);
7815 		*entry = NEXT_TX(*entry);
7816 	}
7817 
7818 	return hwbug;
7819 }
7820 
tg3_tx_skb_unmap(struct tg3_napi * tnapi,u32 entry,int last)7821 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7822 {
7823 	int i;
7824 	struct sk_buff *skb;
7825 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7826 
7827 	skb = txb->skb;
7828 	txb->skb = NULL;
7829 
7830 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7831 			 skb_headlen(skb), DMA_TO_DEVICE);
7832 
7833 	while (txb->fragmented) {
7834 		txb->fragmented = false;
7835 		entry = NEXT_TX(entry);
7836 		txb = &tnapi->tx_buffers[entry];
7837 	}
7838 
7839 	for (i = 0; i <= last; i++) {
7840 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7841 
7842 		entry = NEXT_TX(entry);
7843 		txb = &tnapi->tx_buffers[entry];
7844 
7845 		dma_unmap_page(&tnapi->tp->pdev->dev,
7846 			       dma_unmap_addr(txb, mapping),
7847 			       skb_frag_size(frag), DMA_TO_DEVICE);
7848 
7849 		while (txb->fragmented) {
7850 			txb->fragmented = false;
7851 			entry = NEXT_TX(entry);
7852 			txb = &tnapi->tx_buffers[entry];
7853 		}
7854 	}
7855 }
7856 
7857 /* Workaround 4GB and 40-bit hardware DMA bugs. */
tigon3_dma_hwbug_workaround(struct tg3_napi * tnapi,struct sk_buff ** pskb,u32 * entry,u32 * budget,u32 base_flags,u32 mss,u32 vlan)7858 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7859 				       struct sk_buff **pskb,
7860 				       u32 *entry, u32 *budget,
7861 				       u32 base_flags, u32 mss, u32 vlan)
7862 {
7863 	struct tg3 *tp = tnapi->tp;
7864 	struct sk_buff *new_skb, *skb = *pskb;
7865 	dma_addr_t new_addr = 0;
7866 	int ret = 0;
7867 
7868 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7869 		new_skb = skb_copy(skb, GFP_ATOMIC);
7870 	else {
7871 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7872 
7873 		new_skb = skb_copy_expand(skb,
7874 					  skb_headroom(skb) + more_headroom,
7875 					  skb_tailroom(skb), GFP_ATOMIC);
7876 	}
7877 
7878 	if (!new_skb) {
7879 		ret = -1;
7880 	} else {
7881 		/* New SKB is guaranteed to be linear. */
7882 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7883 					  new_skb->len, DMA_TO_DEVICE);
7884 		/* Make sure the mapping succeeded */
7885 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7886 			dev_kfree_skb_any(new_skb);
7887 			ret = -1;
7888 		} else {
7889 			u32 save_entry = *entry;
7890 
7891 			base_flags |= TXD_FLAG_END;
7892 
7893 			tnapi->tx_buffers[*entry].skb = new_skb;
7894 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7895 					   mapping, new_addr);
7896 
7897 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7898 					    new_skb->len, base_flags,
7899 					    mss, vlan)) {
7900 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7901 				dev_kfree_skb_any(new_skb);
7902 				ret = -1;
7903 			}
7904 		}
7905 	}
7906 
7907 	dev_consume_skb_any(skb);
7908 	*pskb = new_skb;
7909 	return ret;
7910 }
7911 
tg3_tso_bug_gso_check(struct tg3_napi * tnapi,struct sk_buff * skb)7912 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7913 {
7914 	/* Check if we will never have enough descriptors,
7915 	 * as gso_segs can be more than current ring size
7916 	 */
7917 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7918 }
7919 
7920 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7921 
7922 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7923  * indicated in tg3_tx_frag_set()
7924  */
tg3_tso_bug(struct tg3 * tp,struct tg3_napi * tnapi,struct netdev_queue * txq,struct sk_buff * skb)7925 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7926 		       struct netdev_queue *txq, struct sk_buff *skb)
7927 {
7928 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7929 	struct sk_buff *segs, *seg, *next;
7930 
7931 	/* Estimate the number of fragments in the worst case */
7932 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7933 		netif_tx_stop_queue(txq);
7934 
7935 		/* netif_tx_stop_queue() must be done before checking
7936 		 * checking tx index in tg3_tx_avail() below, because in
7937 		 * tg3_tx(), we update tx index before checking for
7938 		 * netif_tx_queue_stopped().
7939 		 */
7940 		smp_mb();
7941 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7942 			return NETDEV_TX_BUSY;
7943 
7944 		netif_tx_wake_queue(txq);
7945 	}
7946 
7947 	segs = skb_gso_segment(skb, tp->dev->features &
7948 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7949 	if (IS_ERR(segs) || !segs) {
7950 		tnapi->tx_dropped++;
7951 		goto tg3_tso_bug_end;
7952 	}
7953 
7954 	skb_list_walk_safe(segs, seg, next) {
7955 		skb_mark_not_on_list(seg);
7956 		__tg3_start_xmit(seg, tp->dev);
7957 	}
7958 
7959 tg3_tso_bug_end:
7960 	dev_consume_skb_any(skb);
7961 
7962 	return NETDEV_TX_OK;
7963 }
7964 
7965 /* hard_start_xmit for all devices */
__tg3_start_xmit(struct sk_buff * skb,struct net_device * dev)7966 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7967 {
7968 	struct tg3 *tp = netdev_priv(dev);
7969 	u32 len, entry, base_flags, mss, vlan = 0;
7970 	u32 budget;
7971 	int i = -1, would_hit_hwbug;
7972 	dma_addr_t mapping;
7973 	struct tg3_napi *tnapi;
7974 	struct netdev_queue *txq;
7975 	unsigned int last;
7976 	struct iphdr *iph = NULL;
7977 	struct tcphdr *tcph = NULL;
7978 	__sum16 tcp_csum = 0, ip_csum = 0;
7979 	__be16 ip_tot_len = 0;
7980 
7981 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7982 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7983 	if (tg3_flag(tp, ENABLE_TSS))
7984 		tnapi++;
7985 
7986 	budget = tg3_tx_avail(tnapi);
7987 
7988 	/* We are running in BH disabled context with netif_tx_lock
7989 	 * and TX reclaim runs via tp->napi.poll inside of a software
7990 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7991 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7992 	 */
7993 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7994 		if (!netif_tx_queue_stopped(txq)) {
7995 			netif_tx_stop_queue(txq);
7996 
7997 			/* This is a hard error, log it. */
7998 			netdev_err(dev,
7999 				   "BUG! Tx Ring full when queue awake!\n");
8000 		}
8001 		return NETDEV_TX_BUSY;
8002 	}
8003 
8004 	entry = tnapi->tx_prod;
8005 	base_flags = 0;
8006 
8007 	mss = skb_shinfo(skb)->gso_size;
8008 	if (mss) {
8009 		u32 tcp_opt_len, hdr_len;
8010 
8011 		if (skb_cow_head(skb, 0))
8012 			goto drop;
8013 
8014 		iph = ip_hdr(skb);
8015 		tcp_opt_len = tcp_optlen(skb);
8016 
8017 		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
8018 
8019 		/* HW/FW can not correctly segment packets that have been
8020 		 * vlan encapsulated.
8021 		 */
8022 		if (skb->protocol == htons(ETH_P_8021Q) ||
8023 		    skb->protocol == htons(ETH_P_8021AD)) {
8024 			if (tg3_tso_bug_gso_check(tnapi, skb))
8025 				return tg3_tso_bug(tp, tnapi, txq, skb);
8026 			goto drop;
8027 		}
8028 
8029 		if (!skb_is_gso_v6(skb)) {
8030 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8031 			    tg3_flag(tp, TSO_BUG)) {
8032 				if (tg3_tso_bug_gso_check(tnapi, skb))
8033 					return tg3_tso_bug(tp, tnapi, txq, skb);
8034 				goto drop;
8035 			}
8036 			ip_csum = iph->check;
8037 			ip_tot_len = iph->tot_len;
8038 			iph->check = 0;
8039 			iph->tot_len = htons(mss + hdr_len);
8040 		}
8041 
8042 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8043 			       TXD_FLAG_CPU_POST_DMA);
8044 
8045 		tcph = tcp_hdr(skb);
8046 		tcp_csum = tcph->check;
8047 
8048 		if (tg3_flag(tp, HW_TSO_1) ||
8049 		    tg3_flag(tp, HW_TSO_2) ||
8050 		    tg3_flag(tp, HW_TSO_3)) {
8051 			tcph->check = 0;
8052 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8053 		} else {
8054 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8055 							 0, IPPROTO_TCP, 0);
8056 		}
8057 
8058 		if (tg3_flag(tp, HW_TSO_3)) {
8059 			mss |= (hdr_len & 0xc) << 12;
8060 			if (hdr_len & 0x10)
8061 				base_flags |= 0x00000010;
8062 			base_flags |= (hdr_len & 0x3e0) << 5;
8063 		} else if (tg3_flag(tp, HW_TSO_2))
8064 			mss |= hdr_len << 9;
8065 		else if (tg3_flag(tp, HW_TSO_1) ||
8066 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8067 			if (tcp_opt_len || iph->ihl > 5) {
8068 				int tsflags;
8069 
8070 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8071 				mss |= (tsflags << 11);
8072 			}
8073 		} else {
8074 			if (tcp_opt_len || iph->ihl > 5) {
8075 				int tsflags;
8076 
8077 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8078 				base_flags |= tsflags << 12;
8079 			}
8080 		}
8081 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8082 		/* HW/FW can not correctly checksum packets that have been
8083 		 * vlan encapsulated.
8084 		 */
8085 		if (skb->protocol == htons(ETH_P_8021Q) ||
8086 		    skb->protocol == htons(ETH_P_8021AD)) {
8087 			if (skb_checksum_help(skb))
8088 				goto drop;
8089 		} else  {
8090 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8091 		}
8092 	}
8093 
8094 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8095 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8096 		base_flags |= TXD_FLAG_JMB_PKT;
8097 
8098 	if (skb_vlan_tag_present(skb)) {
8099 		base_flags |= TXD_FLAG_VLAN;
8100 		vlan = skb_vlan_tag_get(skb);
8101 	}
8102 
8103 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8104 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8105 		tg3_full_lock(tp, 0);
8106 		if (!tp->pre_tx_ts) {
8107 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8108 			base_flags |= TXD_FLAG_HWTSTAMP;
8109 			tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8110 		}
8111 		tg3_full_unlock(tp);
8112 	}
8113 
8114 	len = skb_headlen(skb);
8115 
8116 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8117 				 DMA_TO_DEVICE);
8118 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8119 		goto drop;
8120 
8121 
8122 	tnapi->tx_buffers[entry].skb = skb;
8123 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8124 
8125 	would_hit_hwbug = 0;
8126 
8127 	if (tg3_flag(tp, 5701_DMA_BUG))
8128 		would_hit_hwbug = 1;
8129 
8130 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8131 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8132 			    mss, vlan)) {
8133 		would_hit_hwbug = 1;
8134 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8135 		u32 tmp_mss = mss;
8136 
8137 		if (!tg3_flag(tp, HW_TSO_1) &&
8138 		    !tg3_flag(tp, HW_TSO_2) &&
8139 		    !tg3_flag(tp, HW_TSO_3))
8140 			tmp_mss = 0;
8141 
8142 		/* Now loop through additional data
8143 		 * fragments, and queue them.
8144 		 */
8145 		last = skb_shinfo(skb)->nr_frags - 1;
8146 		for (i = 0; i <= last; i++) {
8147 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8148 
8149 			len = skb_frag_size(frag);
8150 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8151 						   len, DMA_TO_DEVICE);
8152 
8153 			tnapi->tx_buffers[entry].skb = NULL;
8154 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8155 					   mapping);
8156 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8157 				goto dma_error;
8158 
8159 			if (!budget ||
8160 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8161 					    len, base_flags |
8162 					    ((i == last) ? TXD_FLAG_END : 0),
8163 					    tmp_mss, vlan)) {
8164 				would_hit_hwbug = 1;
8165 				break;
8166 			}
8167 		}
8168 	}
8169 
8170 	if (would_hit_hwbug) {
8171 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8172 
8173 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8174 			/* If it's a TSO packet, do GSO instead of
8175 			 * allocating and copying to a large linear SKB
8176 			 */
8177 			if (ip_tot_len) {
8178 				iph->check = ip_csum;
8179 				iph->tot_len = ip_tot_len;
8180 			}
8181 			tcph->check = tcp_csum;
8182 			return tg3_tso_bug(tp, tnapi, txq, skb);
8183 		}
8184 
8185 		/* If the workaround fails due to memory/mapping
8186 		 * failure, silently drop this packet.
8187 		 */
8188 		entry = tnapi->tx_prod;
8189 		budget = tg3_tx_avail(tnapi);
8190 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8191 						base_flags, mss, vlan))
8192 			goto drop_nofree;
8193 	}
8194 
8195 	skb_tx_timestamp(skb);
8196 	netdev_tx_sent_queue(txq, skb->len);
8197 
8198 	/* Sync BD data before updating mailbox */
8199 	wmb();
8200 
8201 	tnapi->tx_prod = entry;
8202 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8203 		netif_tx_stop_queue(txq);
8204 
8205 		/* netif_tx_stop_queue() must be done before checking
8206 		 * checking tx index in tg3_tx_avail() below, because in
8207 		 * tg3_tx(), we update tx index before checking for
8208 		 * netif_tx_queue_stopped().
8209 		 */
8210 		smp_mb();
8211 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8212 			netif_tx_wake_queue(txq);
8213 	}
8214 
8215 	return NETDEV_TX_OK;
8216 
8217 dma_error:
8218 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8219 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8220 drop:
8221 	dev_kfree_skb_any(skb);
8222 drop_nofree:
8223 	tnapi->tx_dropped++;
8224 	return NETDEV_TX_OK;
8225 }
8226 
tg3_start_xmit(struct sk_buff * skb,struct net_device * dev)8227 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8228 {
8229 	struct netdev_queue *txq;
8230 	u16 skb_queue_mapping;
8231 	netdev_tx_t ret;
8232 
8233 	skb_queue_mapping = skb_get_queue_mapping(skb);
8234 	txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8235 
8236 	ret = __tg3_start_xmit(skb, dev);
8237 
8238 	/* Notify the hardware that packets are ready by updating the TX ring
8239 	 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8240 	 * the hardware for every packet. To guarantee forward progress the TX
8241 	 * ring must be drained when it is full as indicated by
8242 	 * netif_xmit_stopped(). This needs to happen even when the current
8243 	 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8244 	 * queued by previous __tg3_start_xmit() calls might get stuck in
8245 	 * the queue forever.
8246 	 */
8247 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8248 		struct tg3_napi *tnapi;
8249 		struct tg3 *tp;
8250 
8251 		tp = netdev_priv(dev);
8252 		tnapi = &tp->napi[skb_queue_mapping];
8253 
8254 		if (tg3_flag(tp, ENABLE_TSS))
8255 			tnapi++;
8256 
8257 		tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8258 	}
8259 
8260 	return ret;
8261 }
8262 
tg3_mac_loopback(struct tg3 * tp,bool enable)8263 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8264 {
8265 	if (enable) {
8266 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8267 				  MAC_MODE_PORT_MODE_MASK);
8268 
8269 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8270 
8271 		if (!tg3_flag(tp, 5705_PLUS))
8272 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8273 
8274 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8275 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8276 		else
8277 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8278 	} else {
8279 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8280 
8281 		if (tg3_flag(tp, 5705_PLUS) ||
8282 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8283 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8284 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8285 	}
8286 
8287 	tw32(MAC_MODE, tp->mac_mode);
8288 	udelay(40);
8289 }
8290 
tg3_phy_lpbk_set(struct tg3 * tp,u32 speed,bool extlpbk)8291 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8292 {
8293 	u32 val, bmcr, mac_mode, ptest = 0;
8294 
8295 	tg3_phy_toggle_apd(tp, false);
8296 	tg3_phy_toggle_automdix(tp, false);
8297 
8298 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8299 		return -EIO;
8300 
8301 	bmcr = BMCR_FULLDPLX;
8302 	switch (speed) {
8303 	case SPEED_10:
8304 		break;
8305 	case SPEED_100:
8306 		bmcr |= BMCR_SPEED100;
8307 		break;
8308 	case SPEED_1000:
8309 	default:
8310 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8311 			speed = SPEED_100;
8312 			bmcr |= BMCR_SPEED100;
8313 		} else {
8314 			speed = SPEED_1000;
8315 			bmcr |= BMCR_SPEED1000;
8316 		}
8317 	}
8318 
8319 	if (extlpbk) {
8320 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8321 			tg3_readphy(tp, MII_CTRL1000, &val);
8322 			val |= CTL1000_AS_MASTER |
8323 			       CTL1000_ENABLE_MASTER;
8324 			tg3_writephy(tp, MII_CTRL1000, val);
8325 		} else {
8326 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8327 				MII_TG3_FET_PTEST_TRIM_2;
8328 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8329 		}
8330 	} else
8331 		bmcr |= BMCR_LOOPBACK;
8332 
8333 	tg3_writephy(tp, MII_BMCR, bmcr);
8334 
8335 	/* The write needs to be flushed for the FETs */
8336 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8337 		tg3_readphy(tp, MII_BMCR, &bmcr);
8338 
8339 	udelay(40);
8340 
8341 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8342 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8343 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8344 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8345 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8346 
8347 		/* The write needs to be flushed for the AC131 */
8348 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8349 	}
8350 
8351 	/* Reset to prevent losing 1st rx packet intermittently */
8352 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8353 	    tg3_flag(tp, 5780_CLASS)) {
8354 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8355 		udelay(10);
8356 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8357 	}
8358 
8359 	mac_mode = tp->mac_mode &
8360 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8361 	if (speed == SPEED_1000)
8362 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8363 	else
8364 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8365 
8366 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8367 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8368 
8369 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8370 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8371 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8372 			mac_mode |= MAC_MODE_LINK_POLARITY;
8373 
8374 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8375 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8376 	}
8377 
8378 	tw32(MAC_MODE, mac_mode);
8379 	udelay(40);
8380 
8381 	return 0;
8382 }
8383 
tg3_set_loopback(struct net_device * dev,netdev_features_t features)8384 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8385 {
8386 	struct tg3 *tp = netdev_priv(dev);
8387 
8388 	if (features & NETIF_F_LOOPBACK) {
8389 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8390 			return;
8391 
8392 		spin_lock_bh(&tp->lock);
8393 		tg3_mac_loopback(tp, true);
8394 		netif_carrier_on(tp->dev);
8395 		spin_unlock_bh(&tp->lock);
8396 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8397 	} else {
8398 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8399 			return;
8400 
8401 		spin_lock_bh(&tp->lock);
8402 		tg3_mac_loopback(tp, false);
8403 		/* Force link status check */
8404 		tg3_setup_phy(tp, true);
8405 		spin_unlock_bh(&tp->lock);
8406 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8407 	}
8408 }
8409 
tg3_fix_features(struct net_device * dev,netdev_features_t features)8410 static netdev_features_t tg3_fix_features(struct net_device *dev,
8411 	netdev_features_t features)
8412 {
8413 	struct tg3 *tp = netdev_priv(dev);
8414 
8415 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8416 		features &= ~NETIF_F_ALL_TSO;
8417 
8418 	return features;
8419 }
8420 
tg3_set_features(struct net_device * dev,netdev_features_t features)8421 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8422 {
8423 	netdev_features_t changed = dev->features ^ features;
8424 
8425 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8426 		tg3_set_loopback(dev, features);
8427 
8428 	return 0;
8429 }
8430 
tg3_rx_prodring_free(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8431 static void tg3_rx_prodring_free(struct tg3 *tp,
8432 				 struct tg3_rx_prodring_set *tpr)
8433 {
8434 	int i;
8435 
8436 	if (tpr != &tp->napi[0].prodring) {
8437 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8438 		     i = (i + 1) & tp->rx_std_ring_mask)
8439 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8440 					tp->rx_pkt_map_sz);
8441 
8442 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8443 			for (i = tpr->rx_jmb_cons_idx;
8444 			     i != tpr->rx_jmb_prod_idx;
8445 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8446 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8447 						TG3_RX_JMB_MAP_SZ);
8448 			}
8449 		}
8450 
8451 		return;
8452 	}
8453 
8454 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8455 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8456 				tp->rx_pkt_map_sz);
8457 
8458 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8459 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8460 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8461 					TG3_RX_JMB_MAP_SZ);
8462 	}
8463 }
8464 
8465 /* Initialize rx rings for packet processing.
8466  *
8467  * The chip has been shut down and the driver detached from
8468  * the networking, so no interrupts or new tx packets will
8469  * end up in the driver.  tp->{tx,}lock are held and thus
8470  * we may not sleep.
8471  */
tg3_rx_prodring_alloc(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8472 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8473 				 struct tg3_rx_prodring_set *tpr)
8474 {
8475 	u32 i, rx_pkt_dma_sz;
8476 
8477 	tpr->rx_std_cons_idx = 0;
8478 	tpr->rx_std_prod_idx = 0;
8479 	tpr->rx_jmb_cons_idx = 0;
8480 	tpr->rx_jmb_prod_idx = 0;
8481 
8482 	if (tpr != &tp->napi[0].prodring) {
8483 		memset(&tpr->rx_std_buffers[0], 0,
8484 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8485 		if (tpr->rx_jmb_buffers)
8486 			memset(&tpr->rx_jmb_buffers[0], 0,
8487 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8488 		goto done;
8489 	}
8490 
8491 	/* Zero out all descriptors. */
8492 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8493 
8494 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8495 	if (tg3_flag(tp, 5780_CLASS) &&
8496 	    tp->dev->mtu > ETH_DATA_LEN)
8497 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8498 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8499 
8500 	/* Initialize invariants of the rings, we only set this
8501 	 * stuff once.  This works because the card does not
8502 	 * write into the rx buffer posting rings.
8503 	 */
8504 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8505 		struct tg3_rx_buffer_desc *rxd;
8506 
8507 		rxd = &tpr->rx_std[i];
8508 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8509 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8510 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8511 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8512 	}
8513 
8514 	/* Now allocate fresh SKBs for each rx ring. */
8515 	for (i = 0; i < tp->rx_pending; i++) {
8516 		unsigned int frag_size;
8517 
8518 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8519 				      &frag_size) < 0) {
8520 			netdev_warn(tp->dev,
8521 				    "Using a smaller RX standard ring. Only "
8522 				    "%d out of %d buffers were allocated "
8523 				    "successfully\n", i, tp->rx_pending);
8524 			if (i == 0)
8525 				goto initfail;
8526 			tp->rx_pending = i;
8527 			break;
8528 		}
8529 	}
8530 
8531 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8532 		goto done;
8533 
8534 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8535 
8536 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8537 		goto done;
8538 
8539 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8540 		struct tg3_rx_buffer_desc *rxd;
8541 
8542 		rxd = &tpr->rx_jmb[i].std;
8543 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8544 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8545 				  RXD_FLAG_JUMBO;
8546 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8547 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8548 	}
8549 
8550 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8551 		unsigned int frag_size;
8552 
8553 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8554 				      &frag_size) < 0) {
8555 			netdev_warn(tp->dev,
8556 				    "Using a smaller RX jumbo ring. Only %d "
8557 				    "out of %d buffers were allocated "
8558 				    "successfully\n", i, tp->rx_jumbo_pending);
8559 			if (i == 0)
8560 				goto initfail;
8561 			tp->rx_jumbo_pending = i;
8562 			break;
8563 		}
8564 	}
8565 
8566 done:
8567 	return 0;
8568 
8569 initfail:
8570 	tg3_rx_prodring_free(tp, tpr);
8571 	return -ENOMEM;
8572 }
8573 
tg3_rx_prodring_fini(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8574 static void tg3_rx_prodring_fini(struct tg3 *tp,
8575 				 struct tg3_rx_prodring_set *tpr)
8576 {
8577 	kfree(tpr->rx_std_buffers);
8578 	tpr->rx_std_buffers = NULL;
8579 	kfree(tpr->rx_jmb_buffers);
8580 	tpr->rx_jmb_buffers = NULL;
8581 	if (tpr->rx_std) {
8582 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8583 				  tpr->rx_std, tpr->rx_std_mapping);
8584 		tpr->rx_std = NULL;
8585 	}
8586 	if (tpr->rx_jmb) {
8587 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8588 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8589 		tpr->rx_jmb = NULL;
8590 	}
8591 }
8592 
tg3_rx_prodring_init(struct tg3 * tp,struct tg3_rx_prodring_set * tpr)8593 static int tg3_rx_prodring_init(struct tg3 *tp,
8594 				struct tg3_rx_prodring_set *tpr)
8595 {
8596 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8597 				      GFP_KERNEL);
8598 	if (!tpr->rx_std_buffers)
8599 		return -ENOMEM;
8600 
8601 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8602 					 TG3_RX_STD_RING_BYTES(tp),
8603 					 &tpr->rx_std_mapping,
8604 					 GFP_KERNEL);
8605 	if (!tpr->rx_std)
8606 		goto err_out;
8607 
8608 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8609 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8610 					      GFP_KERNEL);
8611 		if (!tpr->rx_jmb_buffers)
8612 			goto err_out;
8613 
8614 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8615 						 TG3_RX_JMB_RING_BYTES(tp),
8616 						 &tpr->rx_jmb_mapping,
8617 						 GFP_KERNEL);
8618 		if (!tpr->rx_jmb)
8619 			goto err_out;
8620 	}
8621 
8622 	return 0;
8623 
8624 err_out:
8625 	tg3_rx_prodring_fini(tp, tpr);
8626 	return -ENOMEM;
8627 }
8628 
8629 /* Free up pending packets in all rx/tx rings.
8630  *
8631  * The chip has been shut down and the driver detached from
8632  * the networking, so no interrupts or new tx packets will
8633  * end up in the driver.  tp->{tx,}lock is not held and we are not
8634  * in an interrupt context and thus may sleep.
8635  */
tg3_free_rings(struct tg3 * tp)8636 static void tg3_free_rings(struct tg3 *tp)
8637 {
8638 	int i, j;
8639 
8640 	for (j = 0; j < tp->irq_cnt; j++) {
8641 		struct tg3_napi *tnapi = &tp->napi[j];
8642 
8643 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8644 
8645 		if (!tnapi->tx_buffers)
8646 			continue;
8647 
8648 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8649 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8650 
8651 			if (!skb)
8652 				continue;
8653 
8654 			tg3_tx_skb_unmap(tnapi, i,
8655 					 skb_shinfo(skb)->nr_frags - 1);
8656 
8657 			dev_consume_skb_any(skb);
8658 		}
8659 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8660 	}
8661 }
8662 
8663 /* Initialize tx/rx rings for packet processing.
8664  *
8665  * The chip has been shut down and the driver detached from
8666  * the networking, so no interrupts or new tx packets will
8667  * end up in the driver.  tp->{tx,}lock are held and thus
8668  * we may not sleep.
8669  */
tg3_init_rings(struct tg3 * tp)8670 static int tg3_init_rings(struct tg3 *tp)
8671 {
8672 	int i;
8673 
8674 	/* Free up all the SKBs. */
8675 	tg3_free_rings(tp);
8676 
8677 	for (i = 0; i < tp->irq_cnt; i++) {
8678 		struct tg3_napi *tnapi = &tp->napi[i];
8679 
8680 		tnapi->last_tag = 0;
8681 		tnapi->last_irq_tag = 0;
8682 		tnapi->hw_status->status = 0;
8683 		tnapi->hw_status->status_tag = 0;
8684 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8685 
8686 		tnapi->tx_prod = 0;
8687 		tnapi->tx_cons = 0;
8688 		if (tnapi->tx_ring)
8689 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8690 
8691 		tnapi->rx_rcb_ptr = 0;
8692 		if (tnapi->rx_rcb)
8693 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8694 
8695 		if (tnapi->prodring.rx_std &&
8696 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8697 			tg3_free_rings(tp);
8698 			return -ENOMEM;
8699 		}
8700 	}
8701 
8702 	return 0;
8703 }
8704 
tg3_mem_tx_release(struct tg3 * tp)8705 static void tg3_mem_tx_release(struct tg3 *tp)
8706 {
8707 	int i;
8708 
8709 	for (i = 0; i < tp->irq_max; i++) {
8710 		struct tg3_napi *tnapi = &tp->napi[i];
8711 
8712 		if (tnapi->tx_ring) {
8713 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8714 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8715 			tnapi->tx_ring = NULL;
8716 		}
8717 
8718 		kfree(tnapi->tx_buffers);
8719 		tnapi->tx_buffers = NULL;
8720 	}
8721 }
8722 
tg3_mem_tx_acquire(struct tg3 * tp)8723 static int tg3_mem_tx_acquire(struct tg3 *tp)
8724 {
8725 	int i;
8726 	struct tg3_napi *tnapi = &tp->napi[0];
8727 
8728 	/* If multivector TSS is enabled, vector 0 does not handle
8729 	 * tx interrupts.  Don't allocate any resources for it.
8730 	 */
8731 	if (tg3_flag(tp, ENABLE_TSS))
8732 		tnapi++;
8733 
8734 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8735 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8736 					    sizeof(struct tg3_tx_ring_info),
8737 					    GFP_KERNEL);
8738 		if (!tnapi->tx_buffers)
8739 			goto err_out;
8740 
8741 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8742 						    TG3_TX_RING_BYTES,
8743 						    &tnapi->tx_desc_mapping,
8744 						    GFP_KERNEL);
8745 		if (!tnapi->tx_ring)
8746 			goto err_out;
8747 	}
8748 
8749 	return 0;
8750 
8751 err_out:
8752 	tg3_mem_tx_release(tp);
8753 	return -ENOMEM;
8754 }
8755 
tg3_mem_rx_release(struct tg3 * tp)8756 static void tg3_mem_rx_release(struct tg3 *tp)
8757 {
8758 	int i;
8759 
8760 	for (i = 0; i < tp->irq_max; i++) {
8761 		struct tg3_napi *tnapi = &tp->napi[i];
8762 
8763 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8764 
8765 		if (!tnapi->rx_rcb)
8766 			continue;
8767 
8768 		dma_free_coherent(&tp->pdev->dev,
8769 				  TG3_RX_RCB_RING_BYTES(tp),
8770 				  tnapi->rx_rcb,
8771 				  tnapi->rx_rcb_mapping);
8772 		tnapi->rx_rcb = NULL;
8773 	}
8774 }
8775 
tg3_mem_rx_acquire(struct tg3 * tp)8776 static int tg3_mem_rx_acquire(struct tg3 *tp)
8777 {
8778 	unsigned int i, limit;
8779 
8780 	limit = tp->rxq_cnt;
8781 
8782 	/* If RSS is enabled, we need a (dummy) producer ring
8783 	 * set on vector zero.  This is the true hw prodring.
8784 	 */
8785 	if (tg3_flag(tp, ENABLE_RSS))
8786 		limit++;
8787 
8788 	for (i = 0; i < limit; i++) {
8789 		struct tg3_napi *tnapi = &tp->napi[i];
8790 
8791 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8792 			goto err_out;
8793 
8794 		/* If multivector RSS is enabled, vector 0
8795 		 * does not handle rx or tx interrupts.
8796 		 * Don't allocate any resources for it.
8797 		 */
8798 		if (!i && tg3_flag(tp, ENABLE_RSS))
8799 			continue;
8800 
8801 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8802 						   TG3_RX_RCB_RING_BYTES(tp),
8803 						   &tnapi->rx_rcb_mapping,
8804 						   GFP_KERNEL);
8805 		if (!tnapi->rx_rcb)
8806 			goto err_out;
8807 	}
8808 
8809 	return 0;
8810 
8811 err_out:
8812 	tg3_mem_rx_release(tp);
8813 	return -ENOMEM;
8814 }
8815 
8816 /*
8817  * Must not be invoked with interrupt sources disabled and
8818  * the hardware shutdown down.
8819  */
tg3_free_consistent(struct tg3 * tp)8820 static void tg3_free_consistent(struct tg3 *tp)
8821 {
8822 	int i;
8823 
8824 	for (i = 0; i < tp->irq_cnt; i++) {
8825 		struct tg3_napi *tnapi = &tp->napi[i];
8826 
8827 		if (tnapi->hw_status) {
8828 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8829 					  tnapi->hw_status,
8830 					  tnapi->status_mapping);
8831 			tnapi->hw_status = NULL;
8832 		}
8833 	}
8834 
8835 	tg3_mem_rx_release(tp);
8836 	tg3_mem_tx_release(tp);
8837 
8838 	/* tp->hw_stats can be referenced safely:
8839 	 *     1. under rtnl_lock
8840 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8841 	 */
8842 	if (tp->hw_stats) {
8843 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8844 				  tp->hw_stats, tp->stats_mapping);
8845 		tp->hw_stats = NULL;
8846 	}
8847 }
8848 
8849 /*
8850  * Must not be invoked with interrupt sources disabled and
8851  * the hardware shutdown down.  Can sleep.
8852  */
tg3_alloc_consistent(struct tg3 * tp)8853 static int tg3_alloc_consistent(struct tg3 *tp)
8854 {
8855 	int i;
8856 
8857 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8858 					  sizeof(struct tg3_hw_stats),
8859 					  &tp->stats_mapping, GFP_KERNEL);
8860 	if (!tp->hw_stats)
8861 		goto err_out;
8862 
8863 	for (i = 0; i < tp->irq_cnt; i++) {
8864 		struct tg3_napi *tnapi = &tp->napi[i];
8865 		struct tg3_hw_status *sblk;
8866 
8867 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8868 						      TG3_HW_STATUS_SIZE,
8869 						      &tnapi->status_mapping,
8870 						      GFP_KERNEL);
8871 		if (!tnapi->hw_status)
8872 			goto err_out;
8873 
8874 		sblk = tnapi->hw_status;
8875 
8876 		if (tg3_flag(tp, ENABLE_RSS)) {
8877 			u16 *prodptr = NULL;
8878 
8879 			/*
8880 			 * When RSS is enabled, the status block format changes
8881 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8882 			 * and "rx_mini_consumer" members get mapped to the
8883 			 * other three rx return ring producer indexes.
8884 			 */
8885 			switch (i) {
8886 			case 1:
8887 				prodptr = &sblk->idx[0].rx_producer;
8888 				break;
8889 			case 2:
8890 				prodptr = &sblk->rx_jumbo_consumer;
8891 				break;
8892 			case 3:
8893 				prodptr = &sblk->reserved;
8894 				break;
8895 			case 4:
8896 				prodptr = &sblk->rx_mini_consumer;
8897 				break;
8898 			}
8899 			tnapi->rx_rcb_prod_idx = prodptr;
8900 		} else {
8901 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8902 		}
8903 	}
8904 
8905 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8906 		goto err_out;
8907 
8908 	return 0;
8909 
8910 err_out:
8911 	tg3_free_consistent(tp);
8912 	return -ENOMEM;
8913 }
8914 
8915 #define MAX_WAIT_CNT 1000
8916 
8917 /* To stop a block, clear the enable bit and poll till it
8918  * clears.  tp->lock is held.
8919  */
tg3_stop_block(struct tg3 * tp,unsigned long ofs,u32 enable_bit,bool silent)8920 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8921 {
8922 	unsigned int i;
8923 	u32 val;
8924 
8925 	if (tg3_flag(tp, 5705_PLUS)) {
8926 		switch (ofs) {
8927 		case RCVLSC_MODE:
8928 		case DMAC_MODE:
8929 		case MBFREE_MODE:
8930 		case BUFMGR_MODE:
8931 		case MEMARB_MODE:
8932 			/* We can't enable/disable these bits of the
8933 			 * 5705/5750, just say success.
8934 			 */
8935 			return 0;
8936 
8937 		default:
8938 			break;
8939 		}
8940 	}
8941 
8942 	val = tr32(ofs);
8943 	val &= ~enable_bit;
8944 	tw32_f(ofs, val);
8945 
8946 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8947 		if (pci_channel_offline(tp->pdev)) {
8948 			dev_err(&tp->pdev->dev,
8949 				"tg3_stop_block device offline, "
8950 				"ofs=%lx enable_bit=%x\n",
8951 				ofs, enable_bit);
8952 			return -ENODEV;
8953 		}
8954 
8955 		udelay(100);
8956 		val = tr32(ofs);
8957 		if ((val & enable_bit) == 0)
8958 			break;
8959 	}
8960 
8961 	if (i == MAX_WAIT_CNT && !silent) {
8962 		dev_err(&tp->pdev->dev,
8963 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8964 			ofs, enable_bit);
8965 		return -ENODEV;
8966 	}
8967 
8968 	return 0;
8969 }
8970 
8971 /* tp->lock is held. */
tg3_abort_hw(struct tg3 * tp,bool silent)8972 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8973 {
8974 	int i, err;
8975 
8976 	tg3_disable_ints(tp);
8977 
8978 	if (pci_channel_offline(tp->pdev)) {
8979 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8980 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8981 		err = -ENODEV;
8982 		goto err_no_dev;
8983 	}
8984 
8985 	tp->rx_mode &= ~RX_MODE_ENABLE;
8986 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8987 	udelay(10);
8988 
8989 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8990 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8991 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8992 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8993 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8994 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8995 
8996 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8997 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8998 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8999 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
9000 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
9001 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
9002 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
9003 
9004 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
9005 	tw32_f(MAC_MODE, tp->mac_mode);
9006 	udelay(40);
9007 
9008 	tp->tx_mode &= ~TX_MODE_ENABLE;
9009 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9010 
9011 	for (i = 0; i < MAX_WAIT_CNT; i++) {
9012 		udelay(100);
9013 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
9014 			break;
9015 	}
9016 	if (i >= MAX_WAIT_CNT) {
9017 		dev_err(&tp->pdev->dev,
9018 			"%s timed out, TX_MODE_ENABLE will not clear "
9019 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9020 		err |= -ENODEV;
9021 	}
9022 
9023 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9024 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9025 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9026 
9027 	tw32(FTQ_RESET, 0xffffffff);
9028 	tw32(FTQ_RESET, 0x00000000);
9029 
9030 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9031 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9032 
9033 err_no_dev:
9034 	for (i = 0; i < tp->irq_cnt; i++) {
9035 		struct tg3_napi *tnapi = &tp->napi[i];
9036 		if (tnapi->hw_status)
9037 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9038 	}
9039 
9040 	return err;
9041 }
9042 
9043 /* Save PCI command register before chip reset */
tg3_save_pci_state(struct tg3 * tp)9044 static void tg3_save_pci_state(struct tg3 *tp)
9045 {
9046 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9047 }
9048 
9049 /* Restore PCI state after chip reset */
tg3_restore_pci_state(struct tg3 * tp)9050 static void tg3_restore_pci_state(struct tg3 *tp)
9051 {
9052 	u32 val;
9053 
9054 	/* Re-enable indirect register accesses. */
9055 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9056 			       tp->misc_host_ctrl);
9057 
9058 	/* Set MAX PCI retry to zero. */
9059 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9060 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9061 	    tg3_flag(tp, PCIX_MODE))
9062 		val |= PCISTATE_RETRY_SAME_DMA;
9063 	/* Allow reads and writes to the APE register and memory space. */
9064 	if (tg3_flag(tp, ENABLE_APE))
9065 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9066 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9067 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9068 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9069 
9070 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9071 
9072 	if (!tg3_flag(tp, PCI_EXPRESS)) {
9073 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9074 				      tp->pci_cacheline_sz);
9075 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9076 				      tp->pci_lat_timer);
9077 	}
9078 
9079 	/* Make sure PCI-X relaxed ordering bit is clear. */
9080 	if (tg3_flag(tp, PCIX_MODE)) {
9081 		u16 pcix_cmd;
9082 
9083 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9084 				     &pcix_cmd);
9085 		pcix_cmd &= ~PCI_X_CMD_ERO;
9086 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9087 				      pcix_cmd);
9088 	}
9089 
9090 	if (tg3_flag(tp, 5780_CLASS)) {
9091 
9092 		/* Chip reset on 5780 will reset MSI enable bit,
9093 		 * so need to restore it.
9094 		 */
9095 		if (tg3_flag(tp, USING_MSI)) {
9096 			u16 ctrl;
9097 
9098 			pci_read_config_word(tp->pdev,
9099 					     tp->msi_cap + PCI_MSI_FLAGS,
9100 					     &ctrl);
9101 			pci_write_config_word(tp->pdev,
9102 					      tp->msi_cap + PCI_MSI_FLAGS,
9103 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9104 			val = tr32(MSGINT_MODE);
9105 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9106 		}
9107 	}
9108 }
9109 
tg3_override_clk(struct tg3 * tp)9110 static void tg3_override_clk(struct tg3 *tp)
9111 {
9112 	u32 val;
9113 
9114 	switch (tg3_asic_rev(tp)) {
9115 	case ASIC_REV_5717:
9116 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9117 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9118 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9119 		break;
9120 
9121 	case ASIC_REV_5719:
9122 	case ASIC_REV_5720:
9123 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9124 		break;
9125 
9126 	default:
9127 		return;
9128 	}
9129 }
9130 
tg3_restore_clk(struct tg3 * tp)9131 static void tg3_restore_clk(struct tg3 *tp)
9132 {
9133 	u32 val;
9134 
9135 	switch (tg3_asic_rev(tp)) {
9136 	case ASIC_REV_5717:
9137 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9138 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9139 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9140 		break;
9141 
9142 	case ASIC_REV_5719:
9143 	case ASIC_REV_5720:
9144 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9145 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9146 		break;
9147 
9148 	default:
9149 		return;
9150 	}
9151 }
9152 
9153 /* tp->lock is held. */
tg3_chip_reset(struct tg3 * tp)9154 static int tg3_chip_reset(struct tg3 *tp)
9155 	__releases(tp->lock)
9156 	__acquires(tp->lock)
9157 {
9158 	u32 val;
9159 	void (*write_op)(struct tg3 *, u32, u32);
9160 	int i, err;
9161 
9162 	if (!pci_device_is_present(tp->pdev))
9163 		return -ENODEV;
9164 
9165 	tg3_nvram_lock(tp);
9166 
9167 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9168 
9169 	/* No matching tg3_nvram_unlock() after this because
9170 	 * chip reset below will undo the nvram lock.
9171 	 */
9172 	tp->nvram_lock_cnt = 0;
9173 
9174 	/* GRC_MISC_CFG core clock reset will clear the memory
9175 	 * enable bit in PCI register 4 and the MSI enable bit
9176 	 * on some chips, so we save relevant registers here.
9177 	 */
9178 	tg3_save_pci_state(tp);
9179 
9180 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9181 	    tg3_flag(tp, 5755_PLUS))
9182 		tw32(GRC_FASTBOOT_PC, 0);
9183 
9184 	/*
9185 	 * We must avoid the readl() that normally takes place.
9186 	 * It locks machines, causes machine checks, and other
9187 	 * fun things.  So, temporarily disable the 5701
9188 	 * hardware workaround, while we do the reset.
9189 	 */
9190 	write_op = tp->write32;
9191 	if (write_op == tg3_write_flush_reg32)
9192 		tp->write32 = tg3_write32;
9193 
9194 	/* Prevent the irq handler from reading or writing PCI registers
9195 	 * during chip reset when the memory enable bit in the PCI command
9196 	 * register may be cleared.  The chip does not generate interrupt
9197 	 * at this time, but the irq handler may still be called due to irq
9198 	 * sharing or irqpoll.
9199 	 */
9200 	tg3_flag_set(tp, CHIP_RESETTING);
9201 	for (i = 0; i < tp->irq_cnt; i++) {
9202 		struct tg3_napi *tnapi = &tp->napi[i];
9203 		if (tnapi->hw_status) {
9204 			tnapi->hw_status->status = 0;
9205 			tnapi->hw_status->status_tag = 0;
9206 		}
9207 		tnapi->last_tag = 0;
9208 		tnapi->last_irq_tag = 0;
9209 	}
9210 	smp_mb();
9211 
9212 	tg3_full_unlock(tp);
9213 
9214 	for (i = 0; i < tp->irq_cnt; i++)
9215 		synchronize_irq(tp->napi[i].irq_vec);
9216 
9217 	tg3_full_lock(tp, 0);
9218 
9219 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9220 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9221 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9222 	}
9223 
9224 	/* do the reset */
9225 	val = GRC_MISC_CFG_CORECLK_RESET;
9226 
9227 	if (tg3_flag(tp, PCI_EXPRESS)) {
9228 		/* Force PCIe 1.0a mode */
9229 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9230 		    !tg3_flag(tp, 57765_PLUS) &&
9231 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9232 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9233 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9234 
9235 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9236 			tw32(GRC_MISC_CFG, (1 << 29));
9237 			val |= (1 << 29);
9238 		}
9239 	}
9240 
9241 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9242 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9243 		tw32(GRC_VCPU_EXT_CTRL,
9244 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9245 	}
9246 
9247 	/* Set the clock to the highest frequency to avoid timeouts. With link
9248 	 * aware mode, the clock speed could be slow and bootcode does not
9249 	 * complete within the expected time. Override the clock to allow the
9250 	 * bootcode to finish sooner and then restore it.
9251 	 */
9252 	tg3_override_clk(tp);
9253 
9254 	/* Manage gphy power for all CPMU absent PCIe devices. */
9255 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9256 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9257 
9258 	tw32(GRC_MISC_CFG, val);
9259 
9260 	/* restore 5701 hardware bug workaround write method */
9261 	tp->write32 = write_op;
9262 
9263 	/* Unfortunately, we have to delay before the PCI read back.
9264 	 * Some 575X chips even will not respond to a PCI cfg access
9265 	 * when the reset command is given to the chip.
9266 	 *
9267 	 * How do these hardware designers expect things to work
9268 	 * properly if the PCI write is posted for a long period
9269 	 * of time?  It is always necessary to have some method by
9270 	 * which a register read back can occur to push the write
9271 	 * out which does the reset.
9272 	 *
9273 	 * For most tg3 variants the trick below was working.
9274 	 * Ho hum...
9275 	 */
9276 	udelay(120);
9277 
9278 	/* Flush PCI posted writes.  The normal MMIO registers
9279 	 * are inaccessible at this time so this is the only
9280 	 * way to make this reliably (actually, this is no longer
9281 	 * the case, see above).  I tried to use indirect
9282 	 * register read/write but this upset some 5701 variants.
9283 	 */
9284 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9285 
9286 	udelay(120);
9287 
9288 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9289 		u16 val16;
9290 
9291 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9292 			int j;
9293 			u32 cfg_val;
9294 
9295 			/* Wait for link training to complete.  */
9296 			for (j = 0; j < 5000; j++)
9297 				udelay(100);
9298 
9299 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9300 			pci_write_config_dword(tp->pdev, 0xc4,
9301 					       cfg_val | (1 << 15));
9302 		}
9303 
9304 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9305 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9306 		/*
9307 		 * Older PCIe devices only support the 128 byte
9308 		 * MPS setting.  Enforce the restriction.
9309 		 */
9310 		if (!tg3_flag(tp, CPMU_PRESENT))
9311 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9312 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9313 
9314 		/* Clear error status */
9315 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9316 				      PCI_EXP_DEVSTA_CED |
9317 				      PCI_EXP_DEVSTA_NFED |
9318 				      PCI_EXP_DEVSTA_FED |
9319 				      PCI_EXP_DEVSTA_URD);
9320 	}
9321 
9322 	tg3_restore_pci_state(tp);
9323 
9324 	tg3_flag_clear(tp, CHIP_RESETTING);
9325 	tg3_flag_clear(tp, ERROR_PROCESSED);
9326 
9327 	val = 0;
9328 	if (tg3_flag(tp, 5780_CLASS))
9329 		val = tr32(MEMARB_MODE);
9330 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9331 
9332 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9333 		tg3_stop_fw(tp);
9334 		tw32(0x5000, 0x400);
9335 	}
9336 
9337 	if (tg3_flag(tp, IS_SSB_CORE)) {
9338 		/*
9339 		 * BCM4785: In order to avoid repercussions from using
9340 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9341 		 * which is not required.
9342 		 */
9343 		tg3_stop_fw(tp);
9344 		tg3_halt_cpu(tp, RX_CPU_BASE);
9345 	}
9346 
9347 	err = tg3_poll_fw(tp);
9348 	if (err)
9349 		return err;
9350 
9351 	tw32(GRC_MODE, tp->grc_mode);
9352 
9353 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9354 		val = tr32(0xc4);
9355 
9356 		tw32(0xc4, val | (1 << 15));
9357 	}
9358 
9359 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9360 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9361 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9362 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9363 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9364 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9365 	}
9366 
9367 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9368 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9369 		val = tp->mac_mode;
9370 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9371 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9372 		val = tp->mac_mode;
9373 	} else
9374 		val = 0;
9375 
9376 	tw32_f(MAC_MODE, val);
9377 	udelay(40);
9378 
9379 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9380 
9381 	tg3_mdio_start(tp);
9382 
9383 	if (tg3_flag(tp, PCI_EXPRESS) &&
9384 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9385 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9386 	    !tg3_flag(tp, 57765_PLUS)) {
9387 		val = tr32(0x7c00);
9388 
9389 		tw32(0x7c00, val | (1 << 25));
9390 	}
9391 
9392 	tg3_restore_clk(tp);
9393 
9394 	/* Increase the core clock speed to fix tx timeout issue for 5762
9395 	 * with 100Mbps link speed.
9396 	 */
9397 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9398 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9399 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9400 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9401 	}
9402 
9403 	/* Reprobe ASF enable state.  */
9404 	tg3_flag_clear(tp, ENABLE_ASF);
9405 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9406 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9407 
9408 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9409 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9410 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9411 		u32 nic_cfg;
9412 
9413 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9414 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9415 			tg3_flag_set(tp, ENABLE_ASF);
9416 			tp->last_event_jiffies = jiffies;
9417 			if (tg3_flag(tp, 5750_PLUS))
9418 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9419 
9420 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9421 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9422 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9423 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9424 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9425 		}
9426 	}
9427 
9428 	return 0;
9429 }
9430 
9431 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9432 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9433 static void __tg3_set_rx_mode(struct net_device *);
9434 
9435 /* tp->lock is held. */
tg3_halt(struct tg3 * tp,int kind,bool silent)9436 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9437 {
9438 	int err, i;
9439 
9440 	tg3_stop_fw(tp);
9441 
9442 	tg3_write_sig_pre_reset(tp, kind);
9443 
9444 	tg3_abort_hw(tp, silent);
9445 	err = tg3_chip_reset(tp);
9446 
9447 	__tg3_set_mac_addr(tp, false);
9448 
9449 	tg3_write_sig_legacy(tp, kind);
9450 	tg3_write_sig_post_reset(tp, kind);
9451 
9452 	if (tp->hw_stats) {
9453 		/* Save the stats across chip resets... */
9454 		tg3_get_nstats(tp, &tp->net_stats_prev);
9455 		tg3_get_estats(tp, &tp->estats_prev);
9456 
9457 		/* And make sure the next sample is new data */
9458 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9459 
9460 		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9461 			struct tg3_napi *tnapi = &tp->napi[i];
9462 
9463 			tnapi->rx_dropped = 0;
9464 			tnapi->tx_dropped = 0;
9465 		}
9466 	}
9467 
9468 	return err;
9469 }
9470 
tg3_set_mac_addr(struct net_device * dev,void * p)9471 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9472 {
9473 	struct tg3 *tp = netdev_priv(dev);
9474 	struct sockaddr *addr = p;
9475 	int err = 0;
9476 	bool skip_mac_1 = false;
9477 
9478 	if (!is_valid_ether_addr(addr->sa_data))
9479 		return -EADDRNOTAVAIL;
9480 
9481 	eth_hw_addr_set(dev, addr->sa_data);
9482 
9483 	if (!netif_running(dev))
9484 		return 0;
9485 
9486 	if (tg3_flag(tp, ENABLE_ASF)) {
9487 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9488 
9489 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9490 		addr0_low = tr32(MAC_ADDR_0_LOW);
9491 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9492 		addr1_low = tr32(MAC_ADDR_1_LOW);
9493 
9494 		/* Skip MAC addr 1 if ASF is using it. */
9495 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9496 		    !(addr1_high == 0 && addr1_low == 0))
9497 			skip_mac_1 = true;
9498 	}
9499 	spin_lock_bh(&tp->lock);
9500 	__tg3_set_mac_addr(tp, skip_mac_1);
9501 	__tg3_set_rx_mode(dev);
9502 	spin_unlock_bh(&tp->lock);
9503 
9504 	return err;
9505 }
9506 
9507 /* tp->lock is held. */
tg3_set_bdinfo(struct tg3 * tp,u32 bdinfo_addr,dma_addr_t mapping,u32 maxlen_flags,u32 nic_addr)9508 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9509 			   dma_addr_t mapping, u32 maxlen_flags,
9510 			   u32 nic_addr)
9511 {
9512 	tg3_write_mem(tp,
9513 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9514 		      ((u64) mapping >> 32));
9515 	tg3_write_mem(tp,
9516 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9517 		      ((u64) mapping & 0xffffffff));
9518 	tg3_write_mem(tp,
9519 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9520 		       maxlen_flags);
9521 
9522 	if (!tg3_flag(tp, 5705_PLUS))
9523 		tg3_write_mem(tp,
9524 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9525 			      nic_addr);
9526 }
9527 
9528 
tg3_coal_tx_init(struct tg3 * tp,struct ethtool_coalesce * ec)9529 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9530 {
9531 	int i = 0;
9532 
9533 	if (!tg3_flag(tp, ENABLE_TSS)) {
9534 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9535 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9536 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9537 	} else {
9538 		tw32(HOSTCC_TXCOL_TICKS, 0);
9539 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9540 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9541 
9542 		for (; i < tp->txq_cnt; i++) {
9543 			u32 reg;
9544 
9545 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9546 			tw32(reg, ec->tx_coalesce_usecs);
9547 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9548 			tw32(reg, ec->tx_max_coalesced_frames);
9549 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9550 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9551 		}
9552 	}
9553 
9554 	for (; i < tp->irq_max - 1; i++) {
9555 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9556 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9557 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9558 	}
9559 }
9560 
tg3_coal_rx_init(struct tg3 * tp,struct ethtool_coalesce * ec)9561 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9562 {
9563 	int i = 0;
9564 	u32 limit = tp->rxq_cnt;
9565 
9566 	if (!tg3_flag(tp, ENABLE_RSS)) {
9567 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9568 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9569 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9570 		limit--;
9571 	} else {
9572 		tw32(HOSTCC_RXCOL_TICKS, 0);
9573 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9574 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9575 	}
9576 
9577 	for (; i < limit; i++) {
9578 		u32 reg;
9579 
9580 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9581 		tw32(reg, ec->rx_coalesce_usecs);
9582 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9583 		tw32(reg, ec->rx_max_coalesced_frames);
9584 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9585 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9586 	}
9587 
9588 	for (; i < tp->irq_max - 1; i++) {
9589 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9590 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9591 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9592 	}
9593 }
9594 
__tg3_set_coalesce(struct tg3 * tp,struct ethtool_coalesce * ec)9595 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9596 {
9597 	tg3_coal_tx_init(tp, ec);
9598 	tg3_coal_rx_init(tp, ec);
9599 
9600 	if (!tg3_flag(tp, 5705_PLUS)) {
9601 		u32 val = ec->stats_block_coalesce_usecs;
9602 
9603 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9604 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9605 
9606 		if (!tp->link_up)
9607 			val = 0;
9608 
9609 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9610 	}
9611 }
9612 
9613 /* tp->lock is held. */
tg3_tx_rcbs_disable(struct tg3 * tp)9614 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9615 {
9616 	u32 txrcb, limit;
9617 
9618 	/* Disable all transmit rings but the first. */
9619 	if (!tg3_flag(tp, 5705_PLUS))
9620 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9621 	else if (tg3_flag(tp, 5717_PLUS))
9622 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9623 	else if (tg3_flag(tp, 57765_CLASS) ||
9624 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9625 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9626 	else
9627 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9628 
9629 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9630 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9631 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9632 			      BDINFO_FLAGS_DISABLED);
9633 }
9634 
9635 /* tp->lock is held. */
tg3_tx_rcbs_init(struct tg3 * tp)9636 static void tg3_tx_rcbs_init(struct tg3 *tp)
9637 {
9638 	int i = 0;
9639 	u32 txrcb = NIC_SRAM_SEND_RCB;
9640 
9641 	if (tg3_flag(tp, ENABLE_TSS))
9642 		i++;
9643 
9644 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9645 		struct tg3_napi *tnapi = &tp->napi[i];
9646 
9647 		if (!tnapi->tx_ring)
9648 			continue;
9649 
9650 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9651 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9652 			       NIC_SRAM_TX_BUFFER_DESC);
9653 	}
9654 }
9655 
9656 /* tp->lock is held. */
tg3_rx_ret_rcbs_disable(struct tg3 * tp)9657 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9658 {
9659 	u32 rxrcb, limit;
9660 
9661 	/* Disable all receive return rings but the first. */
9662 	if (tg3_flag(tp, 5717_PLUS))
9663 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9664 	else if (!tg3_flag(tp, 5705_PLUS))
9665 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9666 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9667 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9668 		 tg3_flag(tp, 57765_CLASS))
9669 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9670 	else
9671 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9672 
9673 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9674 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9675 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9676 			      BDINFO_FLAGS_DISABLED);
9677 }
9678 
9679 /* tp->lock is held. */
tg3_rx_ret_rcbs_init(struct tg3 * tp)9680 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9681 {
9682 	int i = 0;
9683 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9684 
9685 	if (tg3_flag(tp, ENABLE_RSS))
9686 		i++;
9687 
9688 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9689 		struct tg3_napi *tnapi = &tp->napi[i];
9690 
9691 		if (!tnapi->rx_rcb)
9692 			continue;
9693 
9694 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9695 			       (tp->rx_ret_ring_mask + 1) <<
9696 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9697 	}
9698 }
9699 
9700 /* tp->lock is held. */
tg3_rings_reset(struct tg3 * tp)9701 static void tg3_rings_reset(struct tg3 *tp)
9702 {
9703 	int i;
9704 	u32 stblk;
9705 	struct tg3_napi *tnapi = &tp->napi[0];
9706 
9707 	tg3_tx_rcbs_disable(tp);
9708 
9709 	tg3_rx_ret_rcbs_disable(tp);
9710 
9711 	/* Disable interrupts */
9712 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9713 	tp->napi[0].chk_msi_cnt = 0;
9714 	tp->napi[0].last_rx_cons = 0;
9715 	tp->napi[0].last_tx_cons = 0;
9716 
9717 	/* Zero mailbox registers. */
9718 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9719 		for (i = 1; i < tp->irq_max; i++) {
9720 			tp->napi[i].tx_prod = 0;
9721 			tp->napi[i].tx_cons = 0;
9722 			if (tg3_flag(tp, ENABLE_TSS))
9723 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9724 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9725 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9726 			tp->napi[i].chk_msi_cnt = 0;
9727 			tp->napi[i].last_rx_cons = 0;
9728 			tp->napi[i].last_tx_cons = 0;
9729 		}
9730 		if (!tg3_flag(tp, ENABLE_TSS))
9731 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9732 	} else {
9733 		tp->napi[0].tx_prod = 0;
9734 		tp->napi[0].tx_cons = 0;
9735 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9736 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9737 	}
9738 
9739 	/* Make sure the NIC-based send BD rings are disabled. */
9740 	if (!tg3_flag(tp, 5705_PLUS)) {
9741 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9742 		for (i = 0; i < 16; i++)
9743 			tw32_tx_mbox(mbox + i * 8, 0);
9744 	}
9745 
9746 	/* Clear status block in ram. */
9747 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9748 
9749 	/* Set status block DMA address */
9750 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9751 	     ((u64) tnapi->status_mapping >> 32));
9752 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9753 	     ((u64) tnapi->status_mapping & 0xffffffff));
9754 
9755 	stblk = HOSTCC_STATBLCK_RING1;
9756 
9757 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9758 		u64 mapping = (u64)tnapi->status_mapping;
9759 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9760 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9761 		stblk += 8;
9762 
9763 		/* Clear status block in ram. */
9764 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9765 	}
9766 
9767 	tg3_tx_rcbs_init(tp);
9768 	tg3_rx_ret_rcbs_init(tp);
9769 }
9770 
tg3_setup_rxbd_thresholds(struct tg3 * tp)9771 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9772 {
9773 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9774 
9775 	if (!tg3_flag(tp, 5750_PLUS) ||
9776 	    tg3_flag(tp, 5780_CLASS) ||
9777 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9778 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9779 	    tg3_flag(tp, 57765_PLUS))
9780 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9781 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9782 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9783 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9784 	else
9785 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9786 
9787 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9788 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9789 
9790 	val = min(nic_rep_thresh, host_rep_thresh);
9791 	tw32(RCVBDI_STD_THRESH, val);
9792 
9793 	if (tg3_flag(tp, 57765_PLUS))
9794 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9795 
9796 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9797 		return;
9798 
9799 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9800 
9801 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9802 
9803 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9804 	tw32(RCVBDI_JUMBO_THRESH, val);
9805 
9806 	if (tg3_flag(tp, 57765_PLUS))
9807 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9808 }
9809 
calc_crc(unsigned char * buf,int len)9810 static inline u32 calc_crc(unsigned char *buf, int len)
9811 {
9812 	u32 reg;
9813 	u32 tmp;
9814 	int j, k;
9815 
9816 	reg = 0xffffffff;
9817 
9818 	for (j = 0; j < len; j++) {
9819 		reg ^= buf[j];
9820 
9821 		for (k = 0; k < 8; k++) {
9822 			tmp = reg & 0x01;
9823 
9824 			reg >>= 1;
9825 
9826 			if (tmp)
9827 				reg ^= CRC32_POLY_LE;
9828 		}
9829 	}
9830 
9831 	return ~reg;
9832 }
9833 
tg3_set_multi(struct tg3 * tp,unsigned int accept_all)9834 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9835 {
9836 	/* accept or reject all multicast frames */
9837 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9838 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9839 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9840 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9841 }
9842 
__tg3_set_rx_mode(struct net_device * dev)9843 static void __tg3_set_rx_mode(struct net_device *dev)
9844 {
9845 	struct tg3 *tp = netdev_priv(dev);
9846 	u32 rx_mode;
9847 
9848 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9849 				  RX_MODE_KEEP_VLAN_TAG);
9850 
9851 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9852 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9853 	 * flag clear.
9854 	 */
9855 	if (!tg3_flag(tp, ENABLE_ASF))
9856 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9857 #endif
9858 
9859 	if (dev->flags & IFF_PROMISC) {
9860 		/* Promiscuous mode. */
9861 		rx_mode |= RX_MODE_PROMISC;
9862 	} else if (dev->flags & IFF_ALLMULTI) {
9863 		/* Accept all multicast. */
9864 		tg3_set_multi(tp, 1);
9865 	} else if (netdev_mc_empty(dev)) {
9866 		/* Reject all multicast. */
9867 		tg3_set_multi(tp, 0);
9868 	} else {
9869 		/* Accept one or more multicast(s). */
9870 		struct netdev_hw_addr *ha;
9871 		u32 mc_filter[4] = { 0, };
9872 		u32 regidx;
9873 		u32 bit;
9874 		u32 crc;
9875 
9876 		netdev_for_each_mc_addr(ha, dev) {
9877 			crc = calc_crc(ha->addr, ETH_ALEN);
9878 			bit = ~crc & 0x7f;
9879 			regidx = (bit & 0x60) >> 5;
9880 			bit &= 0x1f;
9881 			mc_filter[regidx] |= (1 << bit);
9882 		}
9883 
9884 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9885 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9886 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9887 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9888 	}
9889 
9890 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9891 		rx_mode |= RX_MODE_PROMISC;
9892 	} else if (!(dev->flags & IFF_PROMISC)) {
9893 		/* Add all entries into to the mac addr filter list */
9894 		int i = 0;
9895 		struct netdev_hw_addr *ha;
9896 
9897 		netdev_for_each_uc_addr(ha, dev) {
9898 			__tg3_set_one_mac_addr(tp, ha->addr,
9899 					       i + TG3_UCAST_ADDR_IDX(tp));
9900 			i++;
9901 		}
9902 	}
9903 
9904 	if (rx_mode != tp->rx_mode) {
9905 		tp->rx_mode = rx_mode;
9906 		tw32_f(MAC_RX_MODE, rx_mode);
9907 		udelay(10);
9908 	}
9909 }
9910 
tg3_rss_init_dflt_indir_tbl(struct tg3 * tp,u32 qcnt)9911 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9912 {
9913 	int i;
9914 
9915 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9916 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9917 }
9918 
tg3_rss_check_indir_tbl(struct tg3 * tp)9919 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9920 {
9921 	int i;
9922 
9923 	if (!tg3_flag(tp, SUPPORT_MSIX))
9924 		return;
9925 
9926 	if (tp->rxq_cnt == 1) {
9927 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9928 		return;
9929 	}
9930 
9931 	/* Validate table against current IRQ count */
9932 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9933 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9934 			break;
9935 	}
9936 
9937 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9938 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9939 }
9940 
tg3_rss_write_indir_tbl(struct tg3 * tp)9941 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9942 {
9943 	int i = 0;
9944 	u32 reg = MAC_RSS_INDIR_TBL_0;
9945 
9946 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9947 		u32 val = tp->rss_ind_tbl[i];
9948 		i++;
9949 		for (; i % 8; i++) {
9950 			val <<= 4;
9951 			val |= tp->rss_ind_tbl[i];
9952 		}
9953 		tw32(reg, val);
9954 		reg += 4;
9955 	}
9956 }
9957 
tg3_lso_rd_dma_workaround_bit(struct tg3 * tp)9958 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9959 {
9960 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9961 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9962 	else
9963 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9964 }
9965 
9966 /* tp->lock is held. */
tg3_reset_hw(struct tg3 * tp,bool reset_phy)9967 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9968 {
9969 	u32 val, rdmac_mode;
9970 	int i, err, limit;
9971 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9972 
9973 	tg3_disable_ints(tp);
9974 
9975 	tg3_stop_fw(tp);
9976 
9977 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9978 
9979 	if (tg3_flag(tp, INIT_COMPLETE))
9980 		tg3_abort_hw(tp, 1);
9981 
9982 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9983 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9984 		tg3_phy_pull_config(tp);
9985 		tg3_eee_pull_config(tp, NULL);
9986 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9987 	}
9988 
9989 	/* Enable MAC control of LPI */
9990 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9991 		tg3_setup_eee(tp);
9992 
9993 	if (reset_phy)
9994 		tg3_phy_reset(tp);
9995 
9996 	err = tg3_chip_reset(tp);
9997 	if (err)
9998 		return err;
9999 
10000 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
10001 
10002 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
10003 		val = tr32(TG3_CPMU_CTRL);
10004 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
10005 		tw32(TG3_CPMU_CTRL, val);
10006 
10007 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10008 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10009 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10010 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10011 
10012 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
10013 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
10014 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
10015 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
10016 
10017 		val = tr32(TG3_CPMU_HST_ACC);
10018 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
10019 		val |= CPMU_HST_ACC_MACCLK_6_25;
10020 		tw32(TG3_CPMU_HST_ACC, val);
10021 	}
10022 
10023 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10024 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10025 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10026 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
10027 		tw32(PCIE_PWR_MGMT_THRESH, val);
10028 
10029 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10030 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10031 
10032 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10033 
10034 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10035 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10036 	}
10037 
10038 	if (tg3_flag(tp, L1PLLPD_EN)) {
10039 		u32 grc_mode = tr32(GRC_MODE);
10040 
10041 		/* Access the lower 1K of PL PCIE block registers. */
10042 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10043 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10044 
10045 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10046 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10047 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10048 
10049 		tw32(GRC_MODE, grc_mode);
10050 	}
10051 
10052 	if (tg3_flag(tp, 57765_CLASS)) {
10053 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10054 			u32 grc_mode = tr32(GRC_MODE);
10055 
10056 			/* Access the lower 1K of PL PCIE block registers. */
10057 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10058 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10059 
10060 			val = tr32(TG3_PCIE_TLDLPL_PORT +
10061 				   TG3_PCIE_PL_LO_PHYCTL5);
10062 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10063 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10064 
10065 			tw32(GRC_MODE, grc_mode);
10066 		}
10067 
10068 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10069 			u32 grc_mode;
10070 
10071 			/* Fix transmit hangs */
10072 			val = tr32(TG3_CPMU_PADRNG_CTL);
10073 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10074 			tw32(TG3_CPMU_PADRNG_CTL, val);
10075 
10076 			grc_mode = tr32(GRC_MODE);
10077 
10078 			/* Access the lower 1K of DL PCIE block registers. */
10079 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10080 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10081 
10082 			val = tr32(TG3_PCIE_TLDLPL_PORT +
10083 				   TG3_PCIE_DL_LO_FTSMAX);
10084 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10085 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10086 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10087 
10088 			tw32(GRC_MODE, grc_mode);
10089 		}
10090 
10091 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10092 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10093 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10094 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10095 	}
10096 
10097 	/* This works around an issue with Athlon chipsets on
10098 	 * B3 tigon3 silicon.  This bit has no effect on any
10099 	 * other revision.  But do not set this on PCI Express
10100 	 * chips and don't even touch the clocks if the CPMU is present.
10101 	 */
10102 	if (!tg3_flag(tp, CPMU_PRESENT)) {
10103 		if (!tg3_flag(tp, PCI_EXPRESS))
10104 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10105 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10106 	}
10107 
10108 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10109 	    tg3_flag(tp, PCIX_MODE)) {
10110 		val = tr32(TG3PCI_PCISTATE);
10111 		val |= PCISTATE_RETRY_SAME_DMA;
10112 		tw32(TG3PCI_PCISTATE, val);
10113 	}
10114 
10115 	if (tg3_flag(tp, ENABLE_APE)) {
10116 		/* Allow reads and writes to the
10117 		 * APE register and memory space.
10118 		 */
10119 		val = tr32(TG3PCI_PCISTATE);
10120 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10121 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10122 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10123 		tw32(TG3PCI_PCISTATE, val);
10124 	}
10125 
10126 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10127 		/* Enable some hw fixes.  */
10128 		val = tr32(TG3PCI_MSI_DATA);
10129 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10130 		tw32(TG3PCI_MSI_DATA, val);
10131 	}
10132 
10133 	/* Descriptor ring init may make accesses to the
10134 	 * NIC SRAM area to setup the TX descriptors, so we
10135 	 * can only do this after the hardware has been
10136 	 * successfully reset.
10137 	 */
10138 	err = tg3_init_rings(tp);
10139 	if (err)
10140 		return err;
10141 
10142 	if (tg3_flag(tp, 57765_PLUS)) {
10143 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10144 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10145 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10146 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10147 		if (!tg3_flag(tp, 57765_CLASS) &&
10148 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10149 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10150 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10151 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10152 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10153 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10154 		/* This value is determined during the probe time DMA
10155 		 * engine test, tg3_test_dma.
10156 		 */
10157 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10158 	}
10159 
10160 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10161 			  GRC_MODE_4X_NIC_SEND_RINGS |
10162 			  GRC_MODE_NO_TX_PHDR_CSUM |
10163 			  GRC_MODE_NO_RX_PHDR_CSUM);
10164 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10165 
10166 	/* Pseudo-header checksum is done by hardware logic and not
10167 	 * the offload processers, so make the chip do the pseudo-
10168 	 * header checksums on receive.  For transmit it is more
10169 	 * convenient to do the pseudo-header checksum in software
10170 	 * as Linux does that on transmit for us in all cases.
10171 	 */
10172 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10173 
10174 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10175 	if (tp->rxptpctl)
10176 		tw32(TG3_RX_PTP_CTL,
10177 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10178 
10179 	if (tg3_flag(tp, PTP_CAPABLE))
10180 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10181 
10182 	tw32(GRC_MODE, tp->grc_mode | val);
10183 
10184 	/* On one of the AMD platform, MRRS is restricted to 4000 because of
10185 	 * south bridge limitation. As a workaround, Driver is setting MRRS
10186 	 * to 2048 instead of default 4096.
10187 	 */
10188 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10189 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10190 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10191 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10192 	}
10193 
10194 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10195 	val = tr32(GRC_MISC_CFG);
10196 	val &= ~0xff;
10197 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10198 	tw32(GRC_MISC_CFG, val);
10199 
10200 	/* Initialize MBUF/DESC pool. */
10201 	if (tg3_flag(tp, 5750_PLUS)) {
10202 		/* Do nothing.  */
10203 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10204 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10205 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10206 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10207 		else
10208 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10209 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10210 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10211 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10212 		int fw_len;
10213 
10214 		fw_len = tp->fw_len;
10215 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10216 		tw32(BUFMGR_MB_POOL_ADDR,
10217 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10218 		tw32(BUFMGR_MB_POOL_SIZE,
10219 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10220 	}
10221 
10222 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10223 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10224 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10225 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10226 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10227 		tw32(BUFMGR_MB_HIGH_WATER,
10228 		     tp->bufmgr_config.mbuf_high_water);
10229 	} else {
10230 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10231 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10232 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10233 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10234 		tw32(BUFMGR_MB_HIGH_WATER,
10235 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10236 	}
10237 	tw32(BUFMGR_DMA_LOW_WATER,
10238 	     tp->bufmgr_config.dma_low_water);
10239 	tw32(BUFMGR_DMA_HIGH_WATER,
10240 	     tp->bufmgr_config.dma_high_water);
10241 
10242 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10243 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10244 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10245 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10246 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10247 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10248 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10249 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10250 	tw32(BUFMGR_MODE, val);
10251 	for (i = 0; i < 2000; i++) {
10252 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10253 			break;
10254 		udelay(10);
10255 	}
10256 	if (i >= 2000) {
10257 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10258 		return -ENODEV;
10259 	}
10260 
10261 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10262 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10263 
10264 	tg3_setup_rxbd_thresholds(tp);
10265 
10266 	/* Initialize TG3_BDINFO's at:
10267 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10268 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10269 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10270 	 *
10271 	 * like so:
10272 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10273 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10274 	 *                              ring attribute flags
10275 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10276 	 *
10277 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10278 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10279 	 *
10280 	 * The size of each ring is fixed in the firmware, but the location is
10281 	 * configurable.
10282 	 */
10283 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10284 	     ((u64) tpr->rx_std_mapping >> 32));
10285 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10286 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10287 	if (!tg3_flag(tp, 5717_PLUS))
10288 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10289 		     NIC_SRAM_RX_BUFFER_DESC);
10290 
10291 	/* Disable the mini ring */
10292 	if (!tg3_flag(tp, 5705_PLUS))
10293 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10294 		     BDINFO_FLAGS_DISABLED);
10295 
10296 	/* Program the jumbo buffer descriptor ring control
10297 	 * blocks on those devices that have them.
10298 	 */
10299 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10300 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10301 
10302 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10303 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10304 			     ((u64) tpr->rx_jmb_mapping >> 32));
10305 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10306 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10307 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10308 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10309 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10310 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10311 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10312 			    tg3_flag(tp, 57765_CLASS) ||
10313 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10314 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10315 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10316 		} else {
10317 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10318 			     BDINFO_FLAGS_DISABLED);
10319 		}
10320 
10321 		if (tg3_flag(tp, 57765_PLUS)) {
10322 			val = TG3_RX_STD_RING_SIZE(tp);
10323 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10324 			val |= (TG3_RX_STD_DMA_SZ << 2);
10325 		} else
10326 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10327 	} else
10328 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10329 
10330 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10331 
10332 	tpr->rx_std_prod_idx = tp->rx_pending;
10333 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10334 
10335 	tpr->rx_jmb_prod_idx =
10336 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10337 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10338 
10339 	tg3_rings_reset(tp);
10340 
10341 	/* Initialize MAC address and backoff seed. */
10342 	__tg3_set_mac_addr(tp, false);
10343 
10344 	/* MTU + ethernet header + FCS + optional VLAN tag */
10345 	tw32(MAC_RX_MTU_SIZE,
10346 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10347 
10348 	/* The slot time is changed by tg3_setup_phy if we
10349 	 * run at gigabit with half duplex.
10350 	 */
10351 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10352 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10353 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10354 
10355 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10356 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10357 		val |= tr32(MAC_TX_LENGTHS) &
10358 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10359 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10360 
10361 	tw32(MAC_TX_LENGTHS, val);
10362 
10363 	/* Receive rules. */
10364 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10365 	tw32(RCVLPC_CONFIG, 0x0181);
10366 
10367 	/* Calculate RDMAC_MODE setting early, we need it to determine
10368 	 * the RCVLPC_STATE_ENABLE mask.
10369 	 */
10370 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10371 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10372 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10373 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10374 		      RDMAC_MODE_LNGREAD_ENAB);
10375 
10376 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10377 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10378 
10379 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10380 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10381 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10382 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10383 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10384 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10385 
10386 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10387 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10388 		if (tg3_flag(tp, TSO_CAPABLE)) {
10389 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10390 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10391 			   !tg3_flag(tp, IS_5788)) {
10392 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10393 		}
10394 	}
10395 
10396 	if (tg3_flag(tp, PCI_EXPRESS))
10397 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10398 
10399 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10400 		tp->dma_limit = 0;
10401 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10402 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10403 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10404 		}
10405 	}
10406 
10407 	if (tg3_flag(tp, HW_TSO_1) ||
10408 	    tg3_flag(tp, HW_TSO_2) ||
10409 	    tg3_flag(tp, HW_TSO_3))
10410 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10411 
10412 	if (tg3_flag(tp, 57765_PLUS) ||
10413 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10414 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10415 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10416 
10417 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10418 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10419 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10420 
10421 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10422 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10423 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10424 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10425 	    tg3_flag(tp, 57765_PLUS)) {
10426 		u32 tgtreg;
10427 
10428 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10429 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10430 		else
10431 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10432 
10433 		val = tr32(tgtreg);
10434 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10435 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10436 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10437 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10438 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10439 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10440 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10441 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10442 		}
10443 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10444 	}
10445 
10446 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10447 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10448 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10449 		u32 tgtreg;
10450 
10451 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10452 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10453 		else
10454 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10455 
10456 		val = tr32(tgtreg);
10457 		tw32(tgtreg, val |
10458 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10459 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10460 	}
10461 
10462 	/* Receive/send statistics. */
10463 	if (tg3_flag(tp, 5750_PLUS)) {
10464 		val = tr32(RCVLPC_STATS_ENABLE);
10465 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10466 		tw32(RCVLPC_STATS_ENABLE, val);
10467 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10468 		   tg3_flag(tp, TSO_CAPABLE)) {
10469 		val = tr32(RCVLPC_STATS_ENABLE);
10470 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10471 		tw32(RCVLPC_STATS_ENABLE, val);
10472 	} else {
10473 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10474 	}
10475 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10476 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10477 	tw32(SNDDATAI_STATSCTRL,
10478 	     (SNDDATAI_SCTRL_ENABLE |
10479 	      SNDDATAI_SCTRL_FASTUPD));
10480 
10481 	/* Setup host coalescing engine. */
10482 	tw32(HOSTCC_MODE, 0);
10483 	for (i = 0; i < 2000; i++) {
10484 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10485 			break;
10486 		udelay(10);
10487 	}
10488 
10489 	__tg3_set_coalesce(tp, &tp->coal);
10490 
10491 	if (!tg3_flag(tp, 5705_PLUS)) {
10492 		/* Status/statistics block address.  See tg3_timer,
10493 		 * the tg3_periodic_fetch_stats call there, and
10494 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10495 		 */
10496 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10497 		     ((u64) tp->stats_mapping >> 32));
10498 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10499 		     ((u64) tp->stats_mapping & 0xffffffff));
10500 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10501 
10502 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10503 
10504 		/* Clear statistics and status block memory areas */
10505 		for (i = NIC_SRAM_STATS_BLK;
10506 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10507 		     i += sizeof(u32)) {
10508 			tg3_write_mem(tp, i, 0);
10509 			udelay(40);
10510 		}
10511 	}
10512 
10513 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10514 
10515 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10516 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10517 	if (!tg3_flag(tp, 5705_PLUS))
10518 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10519 
10520 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10521 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10522 		/* reset to prevent losing 1st rx packet intermittently */
10523 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10524 		udelay(10);
10525 	}
10526 
10527 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10528 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10529 			MAC_MODE_FHDE_ENABLE;
10530 	if (tg3_flag(tp, ENABLE_APE))
10531 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10532 	if (!tg3_flag(tp, 5705_PLUS) &&
10533 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10534 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10535 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10536 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10537 	udelay(40);
10538 
10539 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10540 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10541 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10542 	 * whether used as inputs or outputs, are set by boot code after
10543 	 * reset.
10544 	 */
10545 	if (!tg3_flag(tp, IS_NIC)) {
10546 		u32 gpio_mask;
10547 
10548 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10549 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10550 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10551 
10552 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10553 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10554 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10555 
10556 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10557 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10558 
10559 		tp->grc_local_ctrl &= ~gpio_mask;
10560 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10561 
10562 		/* GPIO1 must be driven high for eeprom write protect */
10563 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10564 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10565 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10566 	}
10567 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10568 	udelay(100);
10569 
10570 	if (tg3_flag(tp, USING_MSIX)) {
10571 		val = tr32(MSGINT_MODE);
10572 		val |= MSGINT_MODE_ENABLE;
10573 		if (tp->irq_cnt > 1)
10574 			val |= MSGINT_MODE_MULTIVEC_EN;
10575 		if (!tg3_flag(tp, 1SHOT_MSI))
10576 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10577 		tw32(MSGINT_MODE, val);
10578 	}
10579 
10580 	if (!tg3_flag(tp, 5705_PLUS)) {
10581 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10582 		udelay(40);
10583 	}
10584 
10585 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10586 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10587 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10588 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10589 	       WDMAC_MODE_LNGREAD_ENAB);
10590 
10591 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10592 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10593 		if (tg3_flag(tp, TSO_CAPABLE) &&
10594 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10595 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10596 			/* nothing */
10597 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10598 			   !tg3_flag(tp, IS_5788)) {
10599 			val |= WDMAC_MODE_RX_ACCEL;
10600 		}
10601 	}
10602 
10603 	/* Enable host coalescing bug fix */
10604 	if (tg3_flag(tp, 5755_PLUS))
10605 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10606 
10607 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10608 		val |= WDMAC_MODE_BURST_ALL_DATA;
10609 
10610 	tw32_f(WDMAC_MODE, val);
10611 	udelay(40);
10612 
10613 	if (tg3_flag(tp, PCIX_MODE)) {
10614 		u16 pcix_cmd;
10615 
10616 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10617 				     &pcix_cmd);
10618 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10619 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10620 			pcix_cmd |= PCI_X_CMD_READ_2K;
10621 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10622 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10623 			pcix_cmd |= PCI_X_CMD_READ_2K;
10624 		}
10625 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10626 				      pcix_cmd);
10627 	}
10628 
10629 	tw32_f(RDMAC_MODE, rdmac_mode);
10630 	udelay(40);
10631 
10632 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10633 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10634 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10635 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10636 				break;
10637 		}
10638 		if (i < TG3_NUM_RDMA_CHANNELS) {
10639 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10640 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10641 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10642 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10643 		}
10644 	}
10645 
10646 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10647 	if (!tg3_flag(tp, 5705_PLUS))
10648 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10649 
10650 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10651 		tw32(SNDDATAC_MODE,
10652 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10653 	else
10654 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10655 
10656 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10657 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10658 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10659 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10660 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10661 	tw32(RCVDBDI_MODE, val);
10662 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10663 	if (tg3_flag(tp, HW_TSO_1) ||
10664 	    tg3_flag(tp, HW_TSO_2) ||
10665 	    tg3_flag(tp, HW_TSO_3))
10666 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10667 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10668 	if (tg3_flag(tp, ENABLE_TSS))
10669 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10670 	tw32(SNDBDI_MODE, val);
10671 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10672 
10673 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10674 		err = tg3_load_5701_a0_firmware_fix(tp);
10675 		if (err)
10676 			return err;
10677 	}
10678 
10679 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10680 		/* Ignore any errors for the firmware download. If download
10681 		 * fails, the device will operate with EEE disabled
10682 		 */
10683 		tg3_load_57766_firmware(tp);
10684 	}
10685 
10686 	if (tg3_flag(tp, TSO_CAPABLE)) {
10687 		err = tg3_load_tso_firmware(tp);
10688 		if (err)
10689 			return err;
10690 	}
10691 
10692 	tp->tx_mode = TX_MODE_ENABLE;
10693 
10694 	if (tg3_flag(tp, 5755_PLUS) ||
10695 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10696 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10697 
10698 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10699 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10700 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10701 		tp->tx_mode &= ~val;
10702 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10703 	}
10704 
10705 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10706 	udelay(100);
10707 
10708 	if (tg3_flag(tp, ENABLE_RSS)) {
10709 		u32 rss_key[10];
10710 
10711 		tg3_rss_write_indir_tbl(tp);
10712 
10713 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10714 
10715 		for (i = 0; i < 10 ; i++)
10716 			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10717 	}
10718 
10719 	tp->rx_mode = RX_MODE_ENABLE;
10720 	if (tg3_flag(tp, 5755_PLUS))
10721 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10722 
10723 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10724 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10725 
10726 	if (tg3_flag(tp, ENABLE_RSS))
10727 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10728 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10729 			       RX_MODE_RSS_IPV6_HASH_EN |
10730 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10731 			       RX_MODE_RSS_IPV4_HASH_EN |
10732 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10733 
10734 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10735 	udelay(10);
10736 
10737 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10738 
10739 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10740 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10741 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10742 		udelay(10);
10743 	}
10744 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10745 	udelay(10);
10746 
10747 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10748 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10749 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10750 			/* Set drive transmission level to 1.2V  */
10751 			/* only if the signal pre-emphasis bit is not set  */
10752 			val = tr32(MAC_SERDES_CFG);
10753 			val &= 0xfffff000;
10754 			val |= 0x880;
10755 			tw32(MAC_SERDES_CFG, val);
10756 		}
10757 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10758 			tw32(MAC_SERDES_CFG, 0x616000);
10759 	}
10760 
10761 	/* Prevent chip from dropping frames when flow control
10762 	 * is enabled.
10763 	 */
10764 	if (tg3_flag(tp, 57765_CLASS))
10765 		val = 1;
10766 	else
10767 		val = 2;
10768 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10769 
10770 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10771 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10772 		/* Use hardware link auto-negotiation */
10773 		tg3_flag_set(tp, HW_AUTONEG);
10774 	}
10775 
10776 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10777 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10778 		u32 tmp;
10779 
10780 		tmp = tr32(SERDES_RX_CTRL);
10781 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10782 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10783 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10784 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10785 	}
10786 
10787 	if (!tg3_flag(tp, USE_PHYLIB)) {
10788 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10789 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10790 
10791 		err = tg3_setup_phy(tp, false);
10792 		if (err)
10793 			return err;
10794 
10795 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10796 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10797 			u32 tmp;
10798 
10799 			/* Clear CRC stats. */
10800 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10801 				tg3_writephy(tp, MII_TG3_TEST1,
10802 					     tmp | MII_TG3_TEST1_CRC_EN);
10803 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10804 			}
10805 		}
10806 	}
10807 
10808 	__tg3_set_rx_mode(tp->dev);
10809 
10810 	/* Initialize receive rules. */
10811 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10812 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10813 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10814 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10815 
10816 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10817 		limit = 8;
10818 	else
10819 		limit = 16;
10820 	if (tg3_flag(tp, ENABLE_ASF))
10821 		limit -= 4;
10822 	switch (limit) {
10823 	case 16:
10824 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10825 		fallthrough;
10826 	case 15:
10827 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10828 		fallthrough;
10829 	case 14:
10830 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10831 		fallthrough;
10832 	case 13:
10833 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10834 		fallthrough;
10835 	case 12:
10836 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10837 		fallthrough;
10838 	case 11:
10839 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10840 		fallthrough;
10841 	case 10:
10842 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10843 		fallthrough;
10844 	case 9:
10845 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10846 		fallthrough;
10847 	case 8:
10848 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10849 		fallthrough;
10850 	case 7:
10851 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10852 		fallthrough;
10853 	case 6:
10854 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10855 		fallthrough;
10856 	case 5:
10857 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10858 		fallthrough;
10859 	case 4:
10860 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10861 	case 3:
10862 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10863 	case 2:
10864 	case 1:
10865 
10866 	default:
10867 		break;
10868 	}
10869 
10870 	if (tg3_flag(tp, ENABLE_APE))
10871 		/* Write our heartbeat update interval to APE. */
10872 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10873 				APE_HOST_HEARTBEAT_INT_5SEC);
10874 
10875 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10876 
10877 	return 0;
10878 }
10879 
10880 /* Called at device open time to get the chip ready for
10881  * packet processing.  Invoked with tp->lock held.
10882  */
tg3_init_hw(struct tg3 * tp,bool reset_phy)10883 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10884 {
10885 	/* Chip may have been just powered on. If so, the boot code may still
10886 	 * be running initialization. Wait for it to finish to avoid races in
10887 	 * accessing the hardware.
10888 	 */
10889 	tg3_enable_register_access(tp);
10890 	tg3_poll_fw(tp);
10891 
10892 	tg3_switch_clocks(tp);
10893 
10894 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10895 
10896 	return tg3_reset_hw(tp, reset_phy);
10897 }
10898 
10899 #ifdef CONFIG_TIGON3_HWMON
tg3_sd_scan_scratchpad(struct tg3 * tp,struct tg3_ocir * ocir)10900 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10901 {
10902 	u32 off, len = TG3_OCIR_LEN;
10903 	int i;
10904 
10905 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10906 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10907 
10908 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10909 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10910 			memset(ocir, 0, len);
10911 	}
10912 }
10913 
10914 /* sysfs attributes for hwmon */
tg3_show_temp(struct device * dev,struct device_attribute * devattr,char * buf)10915 static ssize_t tg3_show_temp(struct device *dev,
10916 			     struct device_attribute *devattr, char *buf)
10917 {
10918 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10919 	struct tg3 *tp = dev_get_drvdata(dev);
10920 	u32 temperature;
10921 
10922 	spin_lock_bh(&tp->lock);
10923 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10924 				sizeof(temperature));
10925 	spin_unlock_bh(&tp->lock);
10926 	return sprintf(buf, "%u\n", temperature * 1000);
10927 }
10928 
10929 
10930 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10931 			  TG3_TEMP_SENSOR_OFFSET);
10932 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10933 			  TG3_TEMP_CAUTION_OFFSET);
10934 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10935 			  TG3_TEMP_MAX_OFFSET);
10936 
10937 static struct attribute *tg3_attrs[] = {
10938 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10939 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10940 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10941 	NULL
10942 };
10943 ATTRIBUTE_GROUPS(tg3);
10944 
tg3_hwmon_close(struct tg3 * tp)10945 static void tg3_hwmon_close(struct tg3 *tp)
10946 {
10947 	if (tp->hwmon_dev) {
10948 		hwmon_device_unregister(tp->hwmon_dev);
10949 		tp->hwmon_dev = NULL;
10950 	}
10951 }
10952 
tg3_hwmon_open(struct tg3 * tp)10953 static void tg3_hwmon_open(struct tg3 *tp)
10954 {
10955 	int i;
10956 	u32 size = 0;
10957 	struct pci_dev *pdev = tp->pdev;
10958 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10959 
10960 	tg3_sd_scan_scratchpad(tp, ocirs);
10961 
10962 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10963 		if (!ocirs[i].src_data_length)
10964 			continue;
10965 
10966 		size += ocirs[i].src_hdr_length;
10967 		size += ocirs[i].src_data_length;
10968 	}
10969 
10970 	if (!size)
10971 		return;
10972 
10973 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10974 							  tp, tg3_groups);
10975 	if (IS_ERR(tp->hwmon_dev)) {
10976 		tp->hwmon_dev = NULL;
10977 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10978 	}
10979 }
10980 #else
tg3_hwmon_close(struct tg3 * tp)10981 static inline void tg3_hwmon_close(struct tg3 *tp) { }
tg3_hwmon_open(struct tg3 * tp)10982 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10983 #endif /* CONFIG_TIGON3_HWMON */
10984 
10985 
10986 #define TG3_STAT_ADD32(PSTAT, REG) \
10987 do {	u32 __val = tr32(REG); \
10988 	(PSTAT)->low += __val; \
10989 	if ((PSTAT)->low < __val) \
10990 		(PSTAT)->high += 1; \
10991 } while (0)
10992 
tg3_periodic_fetch_stats(struct tg3 * tp)10993 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10994 {
10995 	struct tg3_hw_stats *sp = tp->hw_stats;
10996 
10997 	if (!tp->link_up)
10998 		return;
10999 
11000 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
11001 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
11002 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
11003 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
11004 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
11005 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
11006 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
11007 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
11008 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
11009 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
11010 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
11011 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
11012 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
11013 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
11014 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
11015 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
11016 		u32 val;
11017 
11018 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
11019 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
11020 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
11021 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11022 	}
11023 
11024 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11025 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11026 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11027 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11028 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11029 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11030 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11031 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11032 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11033 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11034 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11035 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11036 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11037 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11038 
11039 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11040 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11041 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
11042 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11043 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11044 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11045 	} else {
11046 		u32 val = tr32(HOSTCC_FLOW_ATTN);
11047 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11048 		if (val) {
11049 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11050 			sp->rx_discards.low += val;
11051 			if (sp->rx_discards.low < val)
11052 				sp->rx_discards.high += 1;
11053 		}
11054 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11055 	}
11056 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11057 }
11058 
tg3_chk_missed_msi(struct tg3 * tp)11059 static void tg3_chk_missed_msi(struct tg3 *tp)
11060 {
11061 	u32 i;
11062 
11063 	for (i = 0; i < tp->irq_cnt; i++) {
11064 		struct tg3_napi *tnapi = &tp->napi[i];
11065 
11066 		if (tg3_has_work(tnapi)) {
11067 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11068 			    tnapi->last_tx_cons == tnapi->tx_cons) {
11069 				if (tnapi->chk_msi_cnt < 1) {
11070 					tnapi->chk_msi_cnt++;
11071 					return;
11072 				}
11073 				tg3_msi(0, tnapi);
11074 			}
11075 		}
11076 		tnapi->chk_msi_cnt = 0;
11077 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11078 		tnapi->last_tx_cons = tnapi->tx_cons;
11079 	}
11080 }
11081 
tg3_timer(struct timer_list * t)11082 static void tg3_timer(struct timer_list *t)
11083 {
11084 	struct tg3 *tp = from_timer(tp, t, timer);
11085 
11086 	spin_lock(&tp->lock);
11087 
11088 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11089 		spin_unlock(&tp->lock);
11090 		goto restart_timer;
11091 	}
11092 
11093 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11094 	    tg3_flag(tp, 57765_CLASS))
11095 		tg3_chk_missed_msi(tp);
11096 
11097 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11098 		/* BCM4785: Flush posted writes from GbE to host memory. */
11099 		tr32(HOSTCC_MODE);
11100 	}
11101 
11102 	if (!tg3_flag(tp, TAGGED_STATUS)) {
11103 		/* All of this garbage is because when using non-tagged
11104 		 * IRQ status the mailbox/status_block protocol the chip
11105 		 * uses with the cpu is race prone.
11106 		 */
11107 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11108 			tw32(GRC_LOCAL_CTRL,
11109 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11110 		} else {
11111 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11112 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11113 		}
11114 
11115 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11116 			spin_unlock(&tp->lock);
11117 			tg3_reset_task_schedule(tp);
11118 			goto restart_timer;
11119 		}
11120 	}
11121 
11122 	/* This part only runs once per second. */
11123 	if (!--tp->timer_counter) {
11124 		if (tg3_flag(tp, 5705_PLUS))
11125 			tg3_periodic_fetch_stats(tp);
11126 
11127 		if (tp->setlpicnt && !--tp->setlpicnt)
11128 			tg3_phy_eee_enable(tp);
11129 
11130 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11131 			u32 mac_stat;
11132 			int phy_event;
11133 
11134 			mac_stat = tr32(MAC_STATUS);
11135 
11136 			phy_event = 0;
11137 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11138 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11139 					phy_event = 1;
11140 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11141 				phy_event = 1;
11142 
11143 			if (phy_event)
11144 				tg3_setup_phy(tp, false);
11145 		} else if (tg3_flag(tp, POLL_SERDES)) {
11146 			u32 mac_stat = tr32(MAC_STATUS);
11147 			int need_setup = 0;
11148 
11149 			if (tp->link_up &&
11150 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11151 				need_setup = 1;
11152 			}
11153 			if (!tp->link_up &&
11154 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11155 					 MAC_STATUS_SIGNAL_DET))) {
11156 				need_setup = 1;
11157 			}
11158 			if (need_setup) {
11159 				if (!tp->serdes_counter) {
11160 					tw32_f(MAC_MODE,
11161 					     (tp->mac_mode &
11162 					      ~MAC_MODE_PORT_MODE_MASK));
11163 					udelay(40);
11164 					tw32_f(MAC_MODE, tp->mac_mode);
11165 					udelay(40);
11166 				}
11167 				tg3_setup_phy(tp, false);
11168 			}
11169 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11170 			   tg3_flag(tp, 5780_CLASS)) {
11171 			tg3_serdes_parallel_detect(tp);
11172 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11173 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11174 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11175 					 TG3_CPMU_STATUS_LINK_MASK);
11176 
11177 			if (link_up != tp->link_up)
11178 				tg3_setup_phy(tp, false);
11179 		}
11180 
11181 		tp->timer_counter = tp->timer_multiplier;
11182 	}
11183 
11184 	/* Heartbeat is only sent once every 2 seconds.
11185 	 *
11186 	 * The heartbeat is to tell the ASF firmware that the host
11187 	 * driver is still alive.  In the event that the OS crashes,
11188 	 * ASF needs to reset the hardware to free up the FIFO space
11189 	 * that may be filled with rx packets destined for the host.
11190 	 * If the FIFO is full, ASF will no longer function properly.
11191 	 *
11192 	 * Unintended resets have been reported on real time kernels
11193 	 * where the timer doesn't run on time.  Netpoll will also have
11194 	 * same problem.
11195 	 *
11196 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11197 	 * to check the ring condition when the heartbeat is expiring
11198 	 * before doing the reset.  This will prevent most unintended
11199 	 * resets.
11200 	 */
11201 	if (!--tp->asf_counter) {
11202 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11203 			tg3_wait_for_event_ack(tp);
11204 
11205 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11206 				      FWCMD_NICDRV_ALIVE3);
11207 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11208 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11209 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11210 
11211 			tg3_generate_fw_event(tp);
11212 		}
11213 		tp->asf_counter = tp->asf_multiplier;
11214 	}
11215 
11216 	/* Update the APE heartbeat every 5 seconds.*/
11217 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11218 
11219 	spin_unlock(&tp->lock);
11220 
11221 restart_timer:
11222 	tp->timer.expires = jiffies + tp->timer_offset;
11223 	add_timer(&tp->timer);
11224 }
11225 
tg3_timer_init(struct tg3 * tp)11226 static void tg3_timer_init(struct tg3 *tp)
11227 {
11228 	if (tg3_flag(tp, TAGGED_STATUS) &&
11229 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11230 	    !tg3_flag(tp, 57765_CLASS))
11231 		tp->timer_offset = HZ;
11232 	else
11233 		tp->timer_offset = HZ / 10;
11234 
11235 	BUG_ON(tp->timer_offset > HZ);
11236 
11237 	tp->timer_multiplier = (HZ / tp->timer_offset);
11238 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11239 			     TG3_FW_UPDATE_FREQ_SEC;
11240 
11241 	timer_setup(&tp->timer, tg3_timer, 0);
11242 }
11243 
tg3_timer_start(struct tg3 * tp)11244 static void tg3_timer_start(struct tg3 *tp)
11245 {
11246 	tp->asf_counter   = tp->asf_multiplier;
11247 	tp->timer_counter = tp->timer_multiplier;
11248 
11249 	tp->timer.expires = jiffies + tp->timer_offset;
11250 	add_timer(&tp->timer);
11251 }
11252 
tg3_timer_stop(struct tg3 * tp)11253 static void tg3_timer_stop(struct tg3 *tp)
11254 {
11255 	del_timer_sync(&tp->timer);
11256 }
11257 
11258 /* Restart hardware after configuration changes, self-test, etc.
11259  * Invoked with tp->lock held.
11260  */
tg3_restart_hw(struct tg3 * tp,bool reset_phy)11261 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11262 	__releases(tp->lock)
11263 	__acquires(tp->lock)
11264 	__releases(tp->dev->lock)
11265 	__acquires(tp->dev->lock)
11266 {
11267 	int err;
11268 
11269 	err = tg3_init_hw(tp, reset_phy);
11270 	if (err) {
11271 		netdev_err(tp->dev,
11272 			   "Failed to re-initialize device, aborting\n");
11273 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11274 		tg3_full_unlock(tp);
11275 		tg3_timer_stop(tp);
11276 		tp->irq_sync = 0;
11277 		tg3_napi_enable(tp);
11278 		netdev_unlock(tp->dev);
11279 		dev_close(tp->dev);
11280 		netdev_lock(tp->dev);
11281 		tg3_full_lock(tp, 0);
11282 	}
11283 	return err;
11284 }
11285 
tg3_reset_task(struct work_struct * work)11286 static void tg3_reset_task(struct work_struct *work)
11287 {
11288 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11289 	int err;
11290 
11291 	rtnl_lock();
11292 	tg3_full_lock(tp, 0);
11293 
11294 	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11295 	    tp->pdev->error_state != pci_channel_io_normal) {
11296 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11297 		tg3_full_unlock(tp);
11298 		rtnl_unlock();
11299 		return;
11300 	}
11301 
11302 	tg3_full_unlock(tp);
11303 
11304 	tg3_phy_stop(tp);
11305 
11306 	tg3_netif_stop(tp);
11307 
11308 	netdev_lock(tp->dev);
11309 	tg3_full_lock(tp, 1);
11310 
11311 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11312 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11313 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11314 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11315 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11316 	}
11317 
11318 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11319 	err = tg3_init_hw(tp, true);
11320 	if (err) {
11321 		tg3_full_unlock(tp);
11322 		tp->irq_sync = 0;
11323 		tg3_napi_enable(tp);
11324 		/* Clear this flag so that tg3_reset_task_cancel() will not
11325 		 * call cancel_work_sync() and wait forever.
11326 		 */
11327 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11328 		netdev_unlock(tp->dev);
11329 		dev_close(tp->dev);
11330 		goto out;
11331 	}
11332 
11333 	tg3_netif_start(tp);
11334 	tg3_full_unlock(tp);
11335 	netdev_unlock(tp->dev);
11336 	tg3_phy_start(tp);
11337 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11338 out:
11339 	rtnl_unlock();
11340 }
11341 
tg3_request_irq(struct tg3 * tp,int irq_num)11342 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11343 {
11344 	irq_handler_t fn;
11345 	unsigned long flags;
11346 	char *name;
11347 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11348 
11349 	if (tp->irq_cnt == 1)
11350 		name = tp->dev->name;
11351 	else {
11352 		name = &tnapi->irq_lbl[0];
11353 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11354 			snprintf(name, sizeof(tnapi->irq_lbl),
11355 				 "%s-txrx-%d", tp->dev->name, irq_num);
11356 		else if (tnapi->tx_buffers)
11357 			snprintf(name, sizeof(tnapi->irq_lbl),
11358 				 "%s-tx-%d", tp->dev->name, irq_num);
11359 		else if (tnapi->rx_rcb)
11360 			snprintf(name, sizeof(tnapi->irq_lbl),
11361 				 "%s-rx-%d", tp->dev->name, irq_num);
11362 		else
11363 			snprintf(name, sizeof(tnapi->irq_lbl),
11364 				 "%s-%d", tp->dev->name, irq_num);
11365 	}
11366 
11367 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11368 		fn = tg3_msi;
11369 		if (tg3_flag(tp, 1SHOT_MSI))
11370 			fn = tg3_msi_1shot;
11371 		flags = 0;
11372 	} else {
11373 		fn = tg3_interrupt;
11374 		if (tg3_flag(tp, TAGGED_STATUS))
11375 			fn = tg3_interrupt_tagged;
11376 		flags = IRQF_SHARED;
11377 	}
11378 
11379 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11380 }
11381 
tg3_test_interrupt(struct tg3 * tp)11382 static int tg3_test_interrupt(struct tg3 *tp)
11383 {
11384 	struct tg3_napi *tnapi = &tp->napi[0];
11385 	struct net_device *dev = tp->dev;
11386 	int err, i, intr_ok = 0;
11387 	u32 val;
11388 
11389 	if (!netif_running(dev))
11390 		return -ENODEV;
11391 
11392 	tg3_disable_ints(tp);
11393 
11394 	free_irq(tnapi->irq_vec, tnapi);
11395 
11396 	/*
11397 	 * Turn off MSI one shot mode.  Otherwise this test has no
11398 	 * observable way to know whether the interrupt was delivered.
11399 	 */
11400 	if (tg3_flag(tp, 57765_PLUS)) {
11401 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11402 		tw32(MSGINT_MODE, val);
11403 	}
11404 
11405 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11406 			  IRQF_SHARED, dev->name, tnapi);
11407 	if (err)
11408 		return err;
11409 
11410 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11411 	tg3_enable_ints(tp);
11412 
11413 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11414 	       tnapi->coal_now);
11415 
11416 	for (i = 0; i < 5; i++) {
11417 		u32 int_mbox, misc_host_ctrl;
11418 
11419 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11420 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11421 
11422 		if ((int_mbox != 0) ||
11423 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11424 			intr_ok = 1;
11425 			break;
11426 		}
11427 
11428 		if (tg3_flag(tp, 57765_PLUS) &&
11429 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11430 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11431 
11432 		msleep(10);
11433 	}
11434 
11435 	tg3_disable_ints(tp);
11436 
11437 	free_irq(tnapi->irq_vec, tnapi);
11438 
11439 	err = tg3_request_irq(tp, 0);
11440 
11441 	if (err)
11442 		return err;
11443 
11444 	if (intr_ok) {
11445 		/* Reenable MSI one shot mode. */
11446 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11447 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11448 			tw32(MSGINT_MODE, val);
11449 		}
11450 		return 0;
11451 	}
11452 
11453 	return -EIO;
11454 }
11455 
11456 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11457  * successfully restored
11458  */
tg3_test_msi(struct tg3 * tp)11459 static int tg3_test_msi(struct tg3 *tp)
11460 {
11461 	int err;
11462 	u16 pci_cmd;
11463 
11464 	if (!tg3_flag(tp, USING_MSI))
11465 		return 0;
11466 
11467 	/* Turn off SERR reporting in case MSI terminates with Master
11468 	 * Abort.
11469 	 */
11470 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11471 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11472 			      pci_cmd & ~PCI_COMMAND_SERR);
11473 
11474 	err = tg3_test_interrupt(tp);
11475 
11476 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11477 
11478 	if (!err)
11479 		return 0;
11480 
11481 	/* other failures */
11482 	if (err != -EIO)
11483 		return err;
11484 
11485 	/* MSI test failed, go back to INTx mode */
11486 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11487 		    "to INTx mode. Please report this failure to the PCI "
11488 		    "maintainer and include system chipset information\n");
11489 
11490 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11491 
11492 	pci_disable_msi(tp->pdev);
11493 
11494 	tg3_flag_clear(tp, USING_MSI);
11495 	tp->napi[0].irq_vec = tp->pdev->irq;
11496 
11497 	err = tg3_request_irq(tp, 0);
11498 	if (err)
11499 		return err;
11500 
11501 	/* Need to reset the chip because the MSI cycle may have terminated
11502 	 * with Master Abort.
11503 	 */
11504 	tg3_full_lock(tp, 1);
11505 
11506 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11507 	err = tg3_init_hw(tp, true);
11508 
11509 	tg3_full_unlock(tp);
11510 
11511 	if (err)
11512 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11513 
11514 	return err;
11515 }
11516 
tg3_request_firmware(struct tg3 * tp)11517 static int tg3_request_firmware(struct tg3 *tp)
11518 {
11519 	const struct tg3_firmware_hdr *fw_hdr;
11520 
11521 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11522 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11523 			   tp->fw_needed);
11524 		return -ENOENT;
11525 	}
11526 
11527 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11528 
11529 	/* Firmware blob starts with version numbers, followed by
11530 	 * start address and _full_ length including BSS sections
11531 	 * (which must be longer than the actual data, of course
11532 	 */
11533 
11534 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11535 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11536 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11537 			   tp->fw_len, tp->fw_needed);
11538 		release_firmware(tp->fw);
11539 		tp->fw = NULL;
11540 		return -EINVAL;
11541 	}
11542 
11543 	/* We no longer need firmware; we have it. */
11544 	tp->fw_needed = NULL;
11545 	return 0;
11546 }
11547 
tg3_irq_count(struct tg3 * tp)11548 static u32 tg3_irq_count(struct tg3 *tp)
11549 {
11550 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11551 
11552 	if (irq_cnt > 1) {
11553 		/* We want as many rx rings enabled as there are cpus.
11554 		 * In multiqueue MSI-X mode, the first MSI-X vector
11555 		 * only deals with link interrupts, etc, so we add
11556 		 * one to the number of vectors we are requesting.
11557 		 */
11558 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11559 	}
11560 
11561 	return irq_cnt;
11562 }
11563 
tg3_enable_msix(struct tg3 * tp)11564 static bool tg3_enable_msix(struct tg3 *tp)
11565 {
11566 	int i, rc;
11567 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11568 
11569 	tp->txq_cnt = tp->txq_req;
11570 	tp->rxq_cnt = tp->rxq_req;
11571 	if (!tp->rxq_cnt)
11572 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11573 	if (tp->rxq_cnt > tp->rxq_max)
11574 		tp->rxq_cnt = tp->rxq_max;
11575 
11576 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11577 	 * scheduling of the TX rings can cause starvation of rings with
11578 	 * small packets when other rings have TSO or jumbo packets.
11579 	 */
11580 	if (!tp->txq_req)
11581 		tp->txq_cnt = 1;
11582 
11583 	tp->irq_cnt = tg3_irq_count(tp);
11584 
11585 	for (i = 0; i < tp->irq_max; i++) {
11586 		msix_ent[i].entry  = i;
11587 		msix_ent[i].vector = 0;
11588 	}
11589 
11590 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11591 	if (rc < 0) {
11592 		return false;
11593 	} else if (rc < tp->irq_cnt) {
11594 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11595 			      tp->irq_cnt, rc);
11596 		tp->irq_cnt = rc;
11597 		tp->rxq_cnt = max(rc - 1, 1);
11598 		if (tp->txq_cnt)
11599 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11600 	}
11601 
11602 	for (i = 0; i < tp->irq_max; i++)
11603 		tp->napi[i].irq_vec = msix_ent[i].vector;
11604 
11605 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11606 		pci_disable_msix(tp->pdev);
11607 		return false;
11608 	}
11609 
11610 	if (tp->irq_cnt == 1)
11611 		return true;
11612 
11613 	tg3_flag_set(tp, ENABLE_RSS);
11614 
11615 	if (tp->txq_cnt > 1)
11616 		tg3_flag_set(tp, ENABLE_TSS);
11617 
11618 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11619 
11620 	return true;
11621 }
11622 
tg3_ints_init(struct tg3 * tp)11623 static void tg3_ints_init(struct tg3 *tp)
11624 {
11625 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11626 	    !tg3_flag(tp, TAGGED_STATUS)) {
11627 		/* All MSI supporting chips should support tagged
11628 		 * status.  Assert that this is the case.
11629 		 */
11630 		netdev_warn(tp->dev,
11631 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11632 		goto defcfg;
11633 	}
11634 
11635 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11636 		tg3_flag_set(tp, USING_MSIX);
11637 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11638 		tg3_flag_set(tp, USING_MSI);
11639 
11640 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11641 		u32 msi_mode = tr32(MSGINT_MODE);
11642 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11643 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11644 		if (!tg3_flag(tp, 1SHOT_MSI))
11645 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11646 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11647 	}
11648 defcfg:
11649 	if (!tg3_flag(tp, USING_MSIX)) {
11650 		tp->irq_cnt = 1;
11651 		tp->napi[0].irq_vec = tp->pdev->irq;
11652 	}
11653 
11654 	if (tp->irq_cnt == 1) {
11655 		tp->txq_cnt = 1;
11656 		tp->rxq_cnt = 1;
11657 		netif_set_real_num_tx_queues(tp->dev, 1);
11658 		netif_set_real_num_rx_queues(tp->dev, 1);
11659 	}
11660 }
11661 
tg3_ints_fini(struct tg3 * tp)11662 static void tg3_ints_fini(struct tg3 *tp)
11663 {
11664 	if (tg3_flag(tp, USING_MSIX))
11665 		pci_disable_msix(tp->pdev);
11666 	else if (tg3_flag(tp, USING_MSI))
11667 		pci_disable_msi(tp->pdev);
11668 	tg3_flag_clear(tp, USING_MSI);
11669 	tg3_flag_clear(tp, USING_MSIX);
11670 	tg3_flag_clear(tp, ENABLE_RSS);
11671 	tg3_flag_clear(tp, ENABLE_TSS);
11672 }
11673 
tg3_start(struct tg3 * tp,bool reset_phy,bool test_irq,bool init)11674 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11675 		     bool init)
11676 {
11677 	struct net_device *dev = tp->dev;
11678 	int i, err;
11679 
11680 	/*
11681 	 * Setup interrupts first so we know how
11682 	 * many NAPI resources to allocate
11683 	 */
11684 	tg3_ints_init(tp);
11685 
11686 	tg3_rss_check_indir_tbl(tp);
11687 
11688 	/* The placement of this call is tied
11689 	 * to the setup and use of Host TX descriptors.
11690 	 */
11691 	err = tg3_alloc_consistent(tp);
11692 	if (err)
11693 		goto out_ints_fini;
11694 
11695 	netdev_lock(dev);
11696 	tg3_napi_init(tp);
11697 
11698 	tg3_napi_enable(tp);
11699 	netdev_unlock(dev);
11700 
11701 	for (i = 0; i < tp->irq_cnt; i++) {
11702 		err = tg3_request_irq(tp, i);
11703 		if (err) {
11704 			for (i--; i >= 0; i--) {
11705 				struct tg3_napi *tnapi = &tp->napi[i];
11706 
11707 				free_irq(tnapi->irq_vec, tnapi);
11708 			}
11709 			goto out_napi_fini;
11710 		}
11711 	}
11712 
11713 	tg3_full_lock(tp, 0);
11714 
11715 	if (init)
11716 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11717 
11718 	err = tg3_init_hw(tp, reset_phy);
11719 	if (err) {
11720 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11721 		tg3_free_rings(tp);
11722 	}
11723 
11724 	tg3_full_unlock(tp);
11725 
11726 	if (err)
11727 		goto out_free_irq;
11728 
11729 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11730 		err = tg3_test_msi(tp);
11731 
11732 		if (err) {
11733 			tg3_full_lock(tp, 0);
11734 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11735 			tg3_free_rings(tp);
11736 			tg3_full_unlock(tp);
11737 
11738 			goto out_napi_fini;
11739 		}
11740 
11741 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11742 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11743 
11744 			tw32(PCIE_TRANSACTION_CFG,
11745 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11746 		}
11747 	}
11748 
11749 	tg3_phy_start(tp);
11750 
11751 	tg3_hwmon_open(tp);
11752 
11753 	tg3_full_lock(tp, 0);
11754 
11755 	tg3_timer_start(tp);
11756 	tg3_flag_set(tp, INIT_COMPLETE);
11757 	tg3_enable_ints(tp);
11758 
11759 	tg3_ptp_resume(tp);
11760 
11761 	tg3_full_unlock(tp);
11762 
11763 	netif_tx_start_all_queues(dev);
11764 
11765 	/*
11766 	 * Reset loopback feature if it was turned on while the device was down
11767 	 * make sure that it's installed properly now.
11768 	 */
11769 	if (dev->features & NETIF_F_LOOPBACK)
11770 		tg3_set_loopback(dev, dev->features);
11771 
11772 	return 0;
11773 
11774 out_free_irq:
11775 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11776 		struct tg3_napi *tnapi = &tp->napi[i];
11777 		free_irq(tnapi->irq_vec, tnapi);
11778 	}
11779 
11780 out_napi_fini:
11781 	tg3_napi_disable(tp);
11782 	tg3_napi_fini(tp);
11783 	tg3_free_consistent(tp);
11784 
11785 out_ints_fini:
11786 	tg3_ints_fini(tp);
11787 
11788 	return err;
11789 }
11790 
tg3_stop(struct tg3 * tp)11791 static void tg3_stop(struct tg3 *tp)
11792 {
11793 	int i;
11794 
11795 	tg3_reset_task_cancel(tp);
11796 	tg3_netif_stop(tp);
11797 
11798 	tg3_timer_stop(tp);
11799 
11800 	tg3_hwmon_close(tp);
11801 
11802 	tg3_phy_stop(tp);
11803 
11804 	tg3_full_lock(tp, 1);
11805 
11806 	tg3_disable_ints(tp);
11807 
11808 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11809 	tg3_free_rings(tp);
11810 	tg3_flag_clear(tp, INIT_COMPLETE);
11811 
11812 	tg3_full_unlock(tp);
11813 
11814 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11815 		struct tg3_napi *tnapi = &tp->napi[i];
11816 		free_irq(tnapi->irq_vec, tnapi);
11817 	}
11818 
11819 	tg3_ints_fini(tp);
11820 
11821 	tg3_napi_fini(tp);
11822 
11823 	tg3_free_consistent(tp);
11824 }
11825 
tg3_open(struct net_device * dev)11826 static int tg3_open(struct net_device *dev)
11827 {
11828 	struct tg3 *tp = netdev_priv(dev);
11829 	int err;
11830 
11831 	if (tp->pcierr_recovery) {
11832 		netdev_err(dev, "Failed to open device. PCI error recovery "
11833 			   "in progress\n");
11834 		return -EAGAIN;
11835 	}
11836 
11837 	if (tp->fw_needed) {
11838 		err = tg3_request_firmware(tp);
11839 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11840 			if (err) {
11841 				netdev_warn(tp->dev, "EEE capability disabled\n");
11842 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11843 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11844 				netdev_warn(tp->dev, "EEE capability restored\n");
11845 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11846 			}
11847 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11848 			if (err)
11849 				return err;
11850 		} else if (err) {
11851 			netdev_warn(tp->dev, "TSO capability disabled\n");
11852 			tg3_flag_clear(tp, TSO_CAPABLE);
11853 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11854 			netdev_notice(tp->dev, "TSO capability restored\n");
11855 			tg3_flag_set(tp, TSO_CAPABLE);
11856 		}
11857 	}
11858 
11859 	tg3_carrier_off(tp);
11860 
11861 	err = tg3_power_up(tp);
11862 	if (err)
11863 		return err;
11864 
11865 	tg3_full_lock(tp, 0);
11866 
11867 	tg3_disable_ints(tp);
11868 	tg3_flag_clear(tp, INIT_COMPLETE);
11869 
11870 	tg3_full_unlock(tp);
11871 
11872 	err = tg3_start(tp,
11873 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11874 			true, true);
11875 	if (err) {
11876 		tg3_frob_aux_power(tp, false);
11877 		pci_set_power_state(tp->pdev, PCI_D3hot);
11878 	}
11879 
11880 	return err;
11881 }
11882 
tg3_close(struct net_device * dev)11883 static int tg3_close(struct net_device *dev)
11884 {
11885 	struct tg3 *tp = netdev_priv(dev);
11886 
11887 	if (tp->pcierr_recovery) {
11888 		netdev_err(dev, "Failed to close device. PCI error recovery "
11889 			   "in progress\n");
11890 		return -EAGAIN;
11891 	}
11892 
11893 	tg3_stop(tp);
11894 
11895 	if (pci_device_is_present(tp->pdev)) {
11896 		tg3_power_down_prepare(tp);
11897 
11898 		tg3_carrier_off(tp);
11899 	}
11900 	return 0;
11901 }
11902 
get_stat64(tg3_stat64_t * val)11903 static inline u64 get_stat64(tg3_stat64_t *val)
11904 {
11905        return ((u64)val->high << 32) | ((u64)val->low);
11906 }
11907 
tg3_calc_crc_errors(struct tg3 * tp)11908 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11909 {
11910 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11911 
11912 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11913 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11914 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11915 		u32 val;
11916 
11917 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11918 			tg3_writephy(tp, MII_TG3_TEST1,
11919 				     val | MII_TG3_TEST1_CRC_EN);
11920 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11921 		} else
11922 			val = 0;
11923 
11924 		tp->phy_crc_errors += val;
11925 
11926 		return tp->phy_crc_errors;
11927 	}
11928 
11929 	return get_stat64(&hw_stats->rx_fcs_errors);
11930 }
11931 
11932 #define ESTAT_ADD(member) \
11933 	estats->member =	old_estats->member + \
11934 				get_stat64(&hw_stats->member)
11935 
tg3_get_estats(struct tg3 * tp,struct tg3_ethtool_stats * estats)11936 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11937 {
11938 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11939 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11940 
11941 	ESTAT_ADD(rx_octets);
11942 	ESTAT_ADD(rx_fragments);
11943 	ESTAT_ADD(rx_ucast_packets);
11944 	ESTAT_ADD(rx_mcast_packets);
11945 	ESTAT_ADD(rx_bcast_packets);
11946 	ESTAT_ADD(rx_fcs_errors);
11947 	ESTAT_ADD(rx_align_errors);
11948 	ESTAT_ADD(rx_xon_pause_rcvd);
11949 	ESTAT_ADD(rx_xoff_pause_rcvd);
11950 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11951 	ESTAT_ADD(rx_xoff_entered);
11952 	ESTAT_ADD(rx_frame_too_long_errors);
11953 	ESTAT_ADD(rx_jabbers);
11954 	ESTAT_ADD(rx_undersize_packets);
11955 	ESTAT_ADD(rx_in_length_errors);
11956 	ESTAT_ADD(rx_out_length_errors);
11957 	ESTAT_ADD(rx_64_or_less_octet_packets);
11958 	ESTAT_ADD(rx_65_to_127_octet_packets);
11959 	ESTAT_ADD(rx_128_to_255_octet_packets);
11960 	ESTAT_ADD(rx_256_to_511_octet_packets);
11961 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11962 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11963 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11964 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11965 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11966 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11967 
11968 	ESTAT_ADD(tx_octets);
11969 	ESTAT_ADD(tx_collisions);
11970 	ESTAT_ADD(tx_xon_sent);
11971 	ESTAT_ADD(tx_xoff_sent);
11972 	ESTAT_ADD(tx_flow_control);
11973 	ESTAT_ADD(tx_mac_errors);
11974 	ESTAT_ADD(tx_single_collisions);
11975 	ESTAT_ADD(tx_mult_collisions);
11976 	ESTAT_ADD(tx_deferred);
11977 	ESTAT_ADD(tx_excessive_collisions);
11978 	ESTAT_ADD(tx_late_collisions);
11979 	ESTAT_ADD(tx_collide_2times);
11980 	ESTAT_ADD(tx_collide_3times);
11981 	ESTAT_ADD(tx_collide_4times);
11982 	ESTAT_ADD(tx_collide_5times);
11983 	ESTAT_ADD(tx_collide_6times);
11984 	ESTAT_ADD(tx_collide_7times);
11985 	ESTAT_ADD(tx_collide_8times);
11986 	ESTAT_ADD(tx_collide_9times);
11987 	ESTAT_ADD(tx_collide_10times);
11988 	ESTAT_ADD(tx_collide_11times);
11989 	ESTAT_ADD(tx_collide_12times);
11990 	ESTAT_ADD(tx_collide_13times);
11991 	ESTAT_ADD(tx_collide_14times);
11992 	ESTAT_ADD(tx_collide_15times);
11993 	ESTAT_ADD(tx_ucast_packets);
11994 	ESTAT_ADD(tx_mcast_packets);
11995 	ESTAT_ADD(tx_bcast_packets);
11996 	ESTAT_ADD(tx_carrier_sense_errors);
11997 	ESTAT_ADD(tx_discards);
11998 	ESTAT_ADD(tx_errors);
11999 
12000 	ESTAT_ADD(dma_writeq_full);
12001 	ESTAT_ADD(dma_write_prioq_full);
12002 	ESTAT_ADD(rxbds_empty);
12003 	ESTAT_ADD(rx_discards);
12004 	ESTAT_ADD(rx_errors);
12005 	ESTAT_ADD(rx_threshold_hit);
12006 
12007 	ESTAT_ADD(dma_readq_full);
12008 	ESTAT_ADD(dma_read_prioq_full);
12009 	ESTAT_ADD(tx_comp_queue_full);
12010 
12011 	ESTAT_ADD(ring_set_send_prod_index);
12012 	ESTAT_ADD(ring_status_update);
12013 	ESTAT_ADD(nic_irqs);
12014 	ESTAT_ADD(nic_avoided_irqs);
12015 	ESTAT_ADD(nic_tx_threshold_hit);
12016 
12017 	ESTAT_ADD(mbuf_lwm_thresh_hit);
12018 }
12019 
tg3_get_nstats(struct tg3 * tp,struct rtnl_link_stats64 * stats)12020 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
12021 {
12022 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12023 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
12024 	unsigned long rx_dropped;
12025 	unsigned long tx_dropped;
12026 	int i;
12027 
12028 	stats->rx_packets = old_stats->rx_packets +
12029 		get_stat64(&hw_stats->rx_ucast_packets) +
12030 		get_stat64(&hw_stats->rx_mcast_packets) +
12031 		get_stat64(&hw_stats->rx_bcast_packets);
12032 
12033 	stats->tx_packets = old_stats->tx_packets +
12034 		get_stat64(&hw_stats->tx_ucast_packets) +
12035 		get_stat64(&hw_stats->tx_mcast_packets) +
12036 		get_stat64(&hw_stats->tx_bcast_packets);
12037 
12038 	stats->rx_bytes = old_stats->rx_bytes +
12039 		get_stat64(&hw_stats->rx_octets);
12040 	stats->tx_bytes = old_stats->tx_bytes +
12041 		get_stat64(&hw_stats->tx_octets);
12042 
12043 	stats->rx_errors = old_stats->rx_errors +
12044 		get_stat64(&hw_stats->rx_errors);
12045 	stats->tx_errors = old_stats->tx_errors +
12046 		get_stat64(&hw_stats->tx_errors) +
12047 		get_stat64(&hw_stats->tx_mac_errors) +
12048 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
12049 		get_stat64(&hw_stats->tx_discards);
12050 
12051 	stats->multicast = old_stats->multicast +
12052 		get_stat64(&hw_stats->rx_mcast_packets);
12053 	stats->collisions = old_stats->collisions +
12054 		get_stat64(&hw_stats->tx_collisions);
12055 
12056 	stats->rx_length_errors = old_stats->rx_length_errors +
12057 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
12058 		get_stat64(&hw_stats->rx_undersize_packets);
12059 
12060 	stats->rx_frame_errors = old_stats->rx_frame_errors +
12061 		get_stat64(&hw_stats->rx_align_errors);
12062 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12063 		get_stat64(&hw_stats->tx_discards);
12064 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12065 		get_stat64(&hw_stats->tx_carrier_sense_errors);
12066 
12067 	stats->rx_crc_errors = old_stats->rx_crc_errors +
12068 		tg3_calc_crc_errors(tp);
12069 
12070 	stats->rx_missed_errors = old_stats->rx_missed_errors +
12071 		get_stat64(&hw_stats->rx_discards);
12072 
12073 	/* Aggregate per-queue counters. The per-queue counters are updated
12074 	 * by a single writer, race-free. The result computed by this loop
12075 	 * might not be 100% accurate (counters can be updated in the middle of
12076 	 * the loop) but the next tg3_get_nstats() will recompute the current
12077 	 * value so it is acceptable.
12078 	 *
12079 	 * Note that these counters wrap around at 4G on 32bit machines.
12080 	 */
12081 	rx_dropped = (unsigned long)(old_stats->rx_dropped);
12082 	tx_dropped = (unsigned long)(old_stats->tx_dropped);
12083 
12084 	for (i = 0; i < tp->irq_cnt; i++) {
12085 		struct tg3_napi *tnapi = &tp->napi[i];
12086 
12087 		rx_dropped += tnapi->rx_dropped;
12088 		tx_dropped += tnapi->tx_dropped;
12089 	}
12090 
12091 	stats->rx_dropped = rx_dropped;
12092 	stats->tx_dropped = tx_dropped;
12093 }
12094 
tg3_get_regs_len(struct net_device * dev)12095 static int tg3_get_regs_len(struct net_device *dev)
12096 {
12097 	return TG3_REG_BLK_SIZE;
12098 }
12099 
tg3_get_regs(struct net_device * dev,struct ethtool_regs * regs,void * _p)12100 static void tg3_get_regs(struct net_device *dev,
12101 		struct ethtool_regs *regs, void *_p)
12102 {
12103 	struct tg3 *tp = netdev_priv(dev);
12104 
12105 	regs->version = 0;
12106 
12107 	memset(_p, 0, TG3_REG_BLK_SIZE);
12108 
12109 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12110 		return;
12111 
12112 	tg3_full_lock(tp, 0);
12113 
12114 	tg3_dump_legacy_regs(tp, (u32 *)_p);
12115 
12116 	tg3_full_unlock(tp);
12117 }
12118 
tg3_get_eeprom_len(struct net_device * dev)12119 static int tg3_get_eeprom_len(struct net_device *dev)
12120 {
12121 	struct tg3 *tp = netdev_priv(dev);
12122 
12123 	return tp->nvram_size;
12124 }
12125 
tg3_get_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * data)12126 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12127 {
12128 	struct tg3 *tp = netdev_priv(dev);
12129 	int ret, cpmu_restore = 0;
12130 	u8  *pd;
12131 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12132 	__be32 val;
12133 
12134 	if (tg3_flag(tp, NO_NVRAM))
12135 		return -EINVAL;
12136 
12137 	offset = eeprom->offset;
12138 	len = eeprom->len;
12139 	eeprom->len = 0;
12140 
12141 	eeprom->magic = TG3_EEPROM_MAGIC;
12142 
12143 	/* Override clock, link aware and link idle modes */
12144 	if (tg3_flag(tp, CPMU_PRESENT)) {
12145 		cpmu_val = tr32(TG3_CPMU_CTRL);
12146 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12147 				CPMU_CTRL_LINK_IDLE_MODE)) {
12148 			tw32(TG3_CPMU_CTRL, cpmu_val &
12149 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12150 					     CPMU_CTRL_LINK_IDLE_MODE));
12151 			cpmu_restore = 1;
12152 		}
12153 	}
12154 	tg3_override_clk(tp);
12155 
12156 	if (offset & 3) {
12157 		/* adjustments to start on required 4 byte boundary */
12158 		b_offset = offset & 3;
12159 		b_count = 4 - b_offset;
12160 		if (b_count > len) {
12161 			/* i.e. offset=1 len=2 */
12162 			b_count = len;
12163 		}
12164 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12165 		if (ret)
12166 			goto eeprom_done;
12167 		memcpy(data, ((char *)&val) + b_offset, b_count);
12168 		len -= b_count;
12169 		offset += b_count;
12170 		eeprom->len += b_count;
12171 	}
12172 
12173 	/* read bytes up to the last 4 byte boundary */
12174 	pd = &data[eeprom->len];
12175 	for (i = 0; i < (len - (len & 3)); i += 4) {
12176 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12177 		if (ret) {
12178 			if (i)
12179 				i -= 4;
12180 			eeprom->len += i;
12181 			goto eeprom_done;
12182 		}
12183 		memcpy(pd + i, &val, 4);
12184 		if (need_resched()) {
12185 			if (signal_pending(current)) {
12186 				eeprom->len += i;
12187 				ret = -EINTR;
12188 				goto eeprom_done;
12189 			}
12190 			cond_resched();
12191 		}
12192 	}
12193 	eeprom->len += i;
12194 
12195 	if (len & 3) {
12196 		/* read last bytes not ending on 4 byte boundary */
12197 		pd = &data[eeprom->len];
12198 		b_count = len & 3;
12199 		b_offset = offset + len - b_count;
12200 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12201 		if (ret)
12202 			goto eeprom_done;
12203 		memcpy(pd, &val, b_count);
12204 		eeprom->len += b_count;
12205 	}
12206 	ret = 0;
12207 
12208 eeprom_done:
12209 	/* Restore clock, link aware and link idle modes */
12210 	tg3_restore_clk(tp);
12211 	if (cpmu_restore)
12212 		tw32(TG3_CPMU_CTRL, cpmu_val);
12213 
12214 	return ret;
12215 }
12216 
tg3_set_eeprom(struct net_device * dev,struct ethtool_eeprom * eeprom,u8 * data)12217 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12218 {
12219 	struct tg3 *tp = netdev_priv(dev);
12220 	int ret;
12221 	u32 offset, len, b_offset, odd_len;
12222 	u8 *buf;
12223 	__be32 start = 0, end;
12224 
12225 	if (tg3_flag(tp, NO_NVRAM) ||
12226 	    eeprom->magic != TG3_EEPROM_MAGIC)
12227 		return -EINVAL;
12228 
12229 	offset = eeprom->offset;
12230 	len = eeprom->len;
12231 
12232 	if ((b_offset = (offset & 3))) {
12233 		/* adjustments to start on required 4 byte boundary */
12234 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12235 		if (ret)
12236 			return ret;
12237 		len += b_offset;
12238 		offset &= ~3;
12239 		if (len < 4)
12240 			len = 4;
12241 	}
12242 
12243 	odd_len = 0;
12244 	if (len & 3) {
12245 		/* adjustments to end on required 4 byte boundary */
12246 		odd_len = 1;
12247 		len = (len + 3) & ~3;
12248 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12249 		if (ret)
12250 			return ret;
12251 	}
12252 
12253 	buf = data;
12254 	if (b_offset || odd_len) {
12255 		buf = kmalloc(len, GFP_KERNEL);
12256 		if (!buf)
12257 			return -ENOMEM;
12258 		if (b_offset)
12259 			memcpy(buf, &start, 4);
12260 		if (odd_len)
12261 			memcpy(buf+len-4, &end, 4);
12262 		memcpy(buf + b_offset, data, eeprom->len);
12263 	}
12264 
12265 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12266 
12267 	if (buf != data)
12268 		kfree(buf);
12269 
12270 	return ret;
12271 }
12272 
tg3_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)12273 static int tg3_get_link_ksettings(struct net_device *dev,
12274 				  struct ethtool_link_ksettings *cmd)
12275 {
12276 	struct tg3 *tp = netdev_priv(dev);
12277 	u32 supported, advertising;
12278 
12279 	if (tg3_flag(tp, USE_PHYLIB)) {
12280 		struct phy_device *phydev;
12281 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12282 			return -EAGAIN;
12283 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12284 		phy_ethtool_ksettings_get(phydev, cmd);
12285 
12286 		return 0;
12287 	}
12288 
12289 	supported = (SUPPORTED_Autoneg);
12290 
12291 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12292 		supported |= (SUPPORTED_1000baseT_Half |
12293 			      SUPPORTED_1000baseT_Full);
12294 
12295 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12296 		supported |= (SUPPORTED_100baseT_Half |
12297 			      SUPPORTED_100baseT_Full |
12298 			      SUPPORTED_10baseT_Half |
12299 			      SUPPORTED_10baseT_Full |
12300 			      SUPPORTED_TP);
12301 		cmd->base.port = PORT_TP;
12302 	} else {
12303 		supported |= SUPPORTED_FIBRE;
12304 		cmd->base.port = PORT_FIBRE;
12305 	}
12306 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12307 						supported);
12308 
12309 	advertising = tp->link_config.advertising;
12310 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12311 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12312 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12313 				advertising |= ADVERTISED_Pause;
12314 			} else {
12315 				advertising |= ADVERTISED_Pause |
12316 					ADVERTISED_Asym_Pause;
12317 			}
12318 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12319 			advertising |= ADVERTISED_Asym_Pause;
12320 		}
12321 	}
12322 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12323 						advertising);
12324 
12325 	if (netif_running(dev) && tp->link_up) {
12326 		cmd->base.speed = tp->link_config.active_speed;
12327 		cmd->base.duplex = tp->link_config.active_duplex;
12328 		ethtool_convert_legacy_u32_to_link_mode(
12329 			cmd->link_modes.lp_advertising,
12330 			tp->link_config.rmt_adv);
12331 
12332 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12333 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12334 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12335 			else
12336 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12337 		}
12338 	} else {
12339 		cmd->base.speed = SPEED_UNKNOWN;
12340 		cmd->base.duplex = DUPLEX_UNKNOWN;
12341 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12342 	}
12343 	cmd->base.phy_address = tp->phy_addr;
12344 	cmd->base.autoneg = tp->link_config.autoneg;
12345 	return 0;
12346 }
12347 
tg3_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)12348 static int tg3_set_link_ksettings(struct net_device *dev,
12349 				  const struct ethtool_link_ksettings *cmd)
12350 {
12351 	struct tg3 *tp = netdev_priv(dev);
12352 	u32 speed = cmd->base.speed;
12353 	u32 advertising;
12354 
12355 	if (tg3_flag(tp, USE_PHYLIB)) {
12356 		struct phy_device *phydev;
12357 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12358 			return -EAGAIN;
12359 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12360 		return phy_ethtool_ksettings_set(phydev, cmd);
12361 	}
12362 
12363 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12364 	    cmd->base.autoneg != AUTONEG_DISABLE)
12365 		return -EINVAL;
12366 
12367 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12368 	    cmd->base.duplex != DUPLEX_FULL &&
12369 	    cmd->base.duplex != DUPLEX_HALF)
12370 		return -EINVAL;
12371 
12372 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12373 						cmd->link_modes.advertising);
12374 
12375 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12376 		u32 mask = ADVERTISED_Autoneg |
12377 			   ADVERTISED_Pause |
12378 			   ADVERTISED_Asym_Pause;
12379 
12380 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12381 			mask |= ADVERTISED_1000baseT_Half |
12382 				ADVERTISED_1000baseT_Full;
12383 
12384 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12385 			mask |= ADVERTISED_100baseT_Half |
12386 				ADVERTISED_100baseT_Full |
12387 				ADVERTISED_10baseT_Half |
12388 				ADVERTISED_10baseT_Full |
12389 				ADVERTISED_TP;
12390 		else
12391 			mask |= ADVERTISED_FIBRE;
12392 
12393 		if (advertising & ~mask)
12394 			return -EINVAL;
12395 
12396 		mask &= (ADVERTISED_1000baseT_Half |
12397 			 ADVERTISED_1000baseT_Full |
12398 			 ADVERTISED_100baseT_Half |
12399 			 ADVERTISED_100baseT_Full |
12400 			 ADVERTISED_10baseT_Half |
12401 			 ADVERTISED_10baseT_Full);
12402 
12403 		advertising &= mask;
12404 	} else {
12405 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12406 			if (speed != SPEED_1000)
12407 				return -EINVAL;
12408 
12409 			if (cmd->base.duplex != DUPLEX_FULL)
12410 				return -EINVAL;
12411 		} else {
12412 			if (speed != SPEED_100 &&
12413 			    speed != SPEED_10)
12414 				return -EINVAL;
12415 		}
12416 	}
12417 
12418 	tg3_full_lock(tp, 0);
12419 
12420 	tp->link_config.autoneg = cmd->base.autoneg;
12421 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12422 		tp->link_config.advertising = (advertising |
12423 					      ADVERTISED_Autoneg);
12424 		tp->link_config.speed = SPEED_UNKNOWN;
12425 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12426 	} else {
12427 		tp->link_config.advertising = 0;
12428 		tp->link_config.speed = speed;
12429 		tp->link_config.duplex = cmd->base.duplex;
12430 	}
12431 
12432 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12433 
12434 	tg3_warn_mgmt_link_flap(tp);
12435 
12436 	if (netif_running(dev))
12437 		tg3_setup_phy(tp, true);
12438 
12439 	tg3_full_unlock(tp);
12440 
12441 	return 0;
12442 }
12443 
tg3_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)12444 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12445 {
12446 	struct tg3 *tp = netdev_priv(dev);
12447 
12448 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12449 	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12450 	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12451 }
12452 
tg3_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)12453 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12454 {
12455 	struct tg3 *tp = netdev_priv(dev);
12456 
12457 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12458 		wol->supported = WAKE_MAGIC;
12459 	else
12460 		wol->supported = 0;
12461 	wol->wolopts = 0;
12462 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12463 		wol->wolopts = WAKE_MAGIC;
12464 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12465 }
12466 
tg3_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)12467 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12468 {
12469 	struct tg3 *tp = netdev_priv(dev);
12470 	struct device *dp = &tp->pdev->dev;
12471 
12472 	if (wol->wolopts & ~WAKE_MAGIC)
12473 		return -EINVAL;
12474 	if ((wol->wolopts & WAKE_MAGIC) &&
12475 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12476 		return -EINVAL;
12477 
12478 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12479 
12480 	if (device_may_wakeup(dp))
12481 		tg3_flag_set(tp, WOL_ENABLE);
12482 	else
12483 		tg3_flag_clear(tp, WOL_ENABLE);
12484 
12485 	return 0;
12486 }
12487 
tg3_get_msglevel(struct net_device * dev)12488 static u32 tg3_get_msglevel(struct net_device *dev)
12489 {
12490 	struct tg3 *tp = netdev_priv(dev);
12491 	return tp->msg_enable;
12492 }
12493 
tg3_set_msglevel(struct net_device * dev,u32 value)12494 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12495 {
12496 	struct tg3 *tp = netdev_priv(dev);
12497 	tp->msg_enable = value;
12498 }
12499 
tg3_nway_reset(struct net_device * dev)12500 static int tg3_nway_reset(struct net_device *dev)
12501 {
12502 	struct tg3 *tp = netdev_priv(dev);
12503 	int r;
12504 
12505 	if (!netif_running(dev))
12506 		return -EAGAIN;
12507 
12508 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12509 		return -EINVAL;
12510 
12511 	tg3_warn_mgmt_link_flap(tp);
12512 
12513 	if (tg3_flag(tp, USE_PHYLIB)) {
12514 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12515 			return -EAGAIN;
12516 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12517 	} else {
12518 		u32 bmcr;
12519 
12520 		spin_lock_bh(&tp->lock);
12521 		r = -EINVAL;
12522 		tg3_readphy(tp, MII_BMCR, &bmcr);
12523 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12524 		    ((bmcr & BMCR_ANENABLE) ||
12525 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12526 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12527 						   BMCR_ANENABLE);
12528 			r = 0;
12529 		}
12530 		spin_unlock_bh(&tp->lock);
12531 	}
12532 
12533 	return r;
12534 }
12535 
tg3_get_ringparam(struct net_device * dev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)12536 static void tg3_get_ringparam(struct net_device *dev,
12537 			      struct ethtool_ringparam *ering,
12538 			      struct kernel_ethtool_ringparam *kernel_ering,
12539 			      struct netlink_ext_ack *extack)
12540 {
12541 	struct tg3 *tp = netdev_priv(dev);
12542 
12543 	ering->rx_max_pending = tp->rx_std_ring_mask;
12544 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12545 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12546 	else
12547 		ering->rx_jumbo_max_pending = 0;
12548 
12549 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12550 
12551 	ering->rx_pending = tp->rx_pending;
12552 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12553 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12554 	else
12555 		ering->rx_jumbo_pending = 0;
12556 
12557 	ering->tx_pending = tp->napi[0].tx_pending;
12558 }
12559 
tg3_set_ringparam(struct net_device * dev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)12560 static int tg3_set_ringparam(struct net_device *dev,
12561 			     struct ethtool_ringparam *ering,
12562 			     struct kernel_ethtool_ringparam *kernel_ering,
12563 			     struct netlink_ext_ack *extack)
12564 {
12565 	struct tg3 *tp = netdev_priv(dev);
12566 	int i, irq_sync = 0, err = 0;
12567 	bool reset_phy = false;
12568 
12569 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12570 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12571 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12572 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12573 	    (tg3_flag(tp, TSO_BUG) &&
12574 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12575 		return -EINVAL;
12576 
12577 	if (netif_running(dev)) {
12578 		tg3_phy_stop(tp);
12579 		tg3_netif_stop(tp);
12580 		irq_sync = 1;
12581 	}
12582 
12583 	netdev_lock(dev);
12584 	tg3_full_lock(tp, irq_sync);
12585 
12586 	tp->rx_pending = ering->rx_pending;
12587 
12588 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12589 	    tp->rx_pending > 63)
12590 		tp->rx_pending = 63;
12591 
12592 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12593 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12594 
12595 	for (i = 0; i < tp->irq_max; i++)
12596 		tp->napi[i].tx_pending = ering->tx_pending;
12597 
12598 	if (netif_running(dev)) {
12599 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12600 		/* Reset PHY to avoid PHY lock up */
12601 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12602 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12603 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12604 			reset_phy = true;
12605 
12606 		err = tg3_restart_hw(tp, reset_phy);
12607 		if (!err)
12608 			tg3_netif_start(tp);
12609 	}
12610 
12611 	tg3_full_unlock(tp);
12612 	netdev_unlock(dev);
12613 
12614 	if (irq_sync && !err)
12615 		tg3_phy_start(tp);
12616 
12617 	return err;
12618 }
12619 
tg3_get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)12620 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12621 {
12622 	struct tg3 *tp = netdev_priv(dev);
12623 
12624 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12625 
12626 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12627 		epause->rx_pause = 1;
12628 	else
12629 		epause->rx_pause = 0;
12630 
12631 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12632 		epause->tx_pause = 1;
12633 	else
12634 		epause->tx_pause = 0;
12635 }
12636 
tg3_set_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)12637 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12638 {
12639 	struct tg3 *tp = netdev_priv(dev);
12640 	int err = 0;
12641 	bool reset_phy = false;
12642 
12643 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12644 		tg3_warn_mgmt_link_flap(tp);
12645 
12646 	if (tg3_flag(tp, USE_PHYLIB)) {
12647 		struct phy_device *phydev;
12648 
12649 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12650 
12651 		if (!phy_validate_pause(phydev, epause))
12652 			return -EINVAL;
12653 
12654 		tp->link_config.flowctrl = 0;
12655 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12656 		if (epause->rx_pause) {
12657 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12658 
12659 			if (epause->tx_pause) {
12660 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12661 			}
12662 		} else if (epause->tx_pause) {
12663 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12664 		}
12665 
12666 		if (epause->autoneg)
12667 			tg3_flag_set(tp, PAUSE_AUTONEG);
12668 		else
12669 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12670 
12671 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12672 			if (phydev->autoneg) {
12673 				/* phy_set_asym_pause() will
12674 				 * renegotiate the link to inform our
12675 				 * link partner of our flow control
12676 				 * settings, even if the flow control
12677 				 * is forced.  Let tg3_adjust_link()
12678 				 * do the final flow control setup.
12679 				 */
12680 				return 0;
12681 			}
12682 
12683 			if (!epause->autoneg)
12684 				tg3_setup_flow_control(tp, 0, 0);
12685 		}
12686 	} else {
12687 		int irq_sync = 0;
12688 
12689 		if (netif_running(dev)) {
12690 			tg3_netif_stop(tp);
12691 			irq_sync = 1;
12692 		}
12693 
12694 		netdev_lock(dev);
12695 		tg3_full_lock(tp, irq_sync);
12696 
12697 		if (epause->autoneg)
12698 			tg3_flag_set(tp, PAUSE_AUTONEG);
12699 		else
12700 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12701 		if (epause->rx_pause)
12702 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12703 		else
12704 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12705 		if (epause->tx_pause)
12706 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12707 		else
12708 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12709 
12710 		if (netif_running(dev)) {
12711 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12712 			/* Reset PHY to avoid PHY lock up */
12713 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12714 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12715 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12716 				reset_phy = true;
12717 
12718 			err = tg3_restart_hw(tp, reset_phy);
12719 			if (!err)
12720 				tg3_netif_start(tp);
12721 		}
12722 
12723 		tg3_full_unlock(tp);
12724 		netdev_unlock(dev);
12725 	}
12726 
12727 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12728 
12729 	return err;
12730 }
12731 
tg3_get_sset_count(struct net_device * dev,int sset)12732 static int tg3_get_sset_count(struct net_device *dev, int sset)
12733 {
12734 	switch (sset) {
12735 	case ETH_SS_TEST:
12736 		return TG3_NUM_TEST;
12737 	case ETH_SS_STATS:
12738 		return TG3_NUM_STATS;
12739 	default:
12740 		return -EOPNOTSUPP;
12741 	}
12742 }
12743 
tg3_get_rxnfc(struct net_device * dev,struct ethtool_rxnfc * info,u32 * rules __always_unused)12744 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12745 			 u32 *rules __always_unused)
12746 {
12747 	struct tg3 *tp = netdev_priv(dev);
12748 
12749 	if (!tg3_flag(tp, SUPPORT_MSIX))
12750 		return -EOPNOTSUPP;
12751 
12752 	switch (info->cmd) {
12753 	case ETHTOOL_GRXRINGS:
12754 		if (netif_running(tp->dev))
12755 			info->data = tp->rxq_cnt;
12756 		else {
12757 			info->data = num_online_cpus();
12758 			if (info->data > TG3_RSS_MAX_NUM_QS)
12759 				info->data = TG3_RSS_MAX_NUM_QS;
12760 		}
12761 
12762 		return 0;
12763 
12764 	default:
12765 		return -EOPNOTSUPP;
12766 	}
12767 }
12768 
tg3_get_rxfh_indir_size(struct net_device * dev)12769 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12770 {
12771 	u32 size = 0;
12772 	struct tg3 *tp = netdev_priv(dev);
12773 
12774 	if (tg3_flag(tp, SUPPORT_MSIX))
12775 		size = TG3_RSS_INDIR_TBL_SIZE;
12776 
12777 	return size;
12778 }
12779 
tg3_get_rxfh(struct net_device * dev,struct ethtool_rxfh_param * rxfh)12780 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12781 {
12782 	struct tg3 *tp = netdev_priv(dev);
12783 	int i;
12784 
12785 	rxfh->hfunc = ETH_RSS_HASH_TOP;
12786 	if (!rxfh->indir)
12787 		return 0;
12788 
12789 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12790 		rxfh->indir[i] = tp->rss_ind_tbl[i];
12791 
12792 	return 0;
12793 }
12794 
tg3_set_rxfh(struct net_device * dev,struct ethtool_rxfh_param * rxfh,struct netlink_ext_ack * extack)12795 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12796 			struct netlink_ext_ack *extack)
12797 {
12798 	struct tg3 *tp = netdev_priv(dev);
12799 	size_t i;
12800 
12801 	/* We require at least one supported parameter to be changed and no
12802 	 * change in any of the unsupported parameters
12803 	 */
12804 	if (rxfh->key ||
12805 	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12806 	     rxfh->hfunc != ETH_RSS_HASH_TOP))
12807 		return -EOPNOTSUPP;
12808 
12809 	if (!rxfh->indir)
12810 		return 0;
12811 
12812 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12813 		tp->rss_ind_tbl[i] = rxfh->indir[i];
12814 
12815 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12816 		return 0;
12817 
12818 	/* It is legal to write the indirection
12819 	 * table while the device is running.
12820 	 */
12821 	tg3_full_lock(tp, 0);
12822 	tg3_rss_write_indir_tbl(tp);
12823 	tg3_full_unlock(tp);
12824 
12825 	return 0;
12826 }
12827 
tg3_get_channels(struct net_device * dev,struct ethtool_channels * channel)12828 static void tg3_get_channels(struct net_device *dev,
12829 			     struct ethtool_channels *channel)
12830 {
12831 	struct tg3 *tp = netdev_priv(dev);
12832 	u32 deflt_qs = netif_get_num_default_rss_queues();
12833 
12834 	channel->max_rx = tp->rxq_max;
12835 	channel->max_tx = tp->txq_max;
12836 
12837 	if (netif_running(dev)) {
12838 		channel->rx_count = tp->rxq_cnt;
12839 		channel->tx_count = tp->txq_cnt;
12840 	} else {
12841 		if (tp->rxq_req)
12842 			channel->rx_count = tp->rxq_req;
12843 		else
12844 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12845 
12846 		if (tp->txq_req)
12847 			channel->tx_count = tp->txq_req;
12848 		else
12849 			channel->tx_count = min(deflt_qs, tp->txq_max);
12850 	}
12851 }
12852 
tg3_set_channels(struct net_device * dev,struct ethtool_channels * channel)12853 static int tg3_set_channels(struct net_device *dev,
12854 			    struct ethtool_channels *channel)
12855 {
12856 	struct tg3 *tp = netdev_priv(dev);
12857 
12858 	if (!tg3_flag(tp, SUPPORT_MSIX))
12859 		return -EOPNOTSUPP;
12860 
12861 	if (channel->rx_count > tp->rxq_max ||
12862 	    channel->tx_count > tp->txq_max)
12863 		return -EINVAL;
12864 
12865 	tp->rxq_req = channel->rx_count;
12866 	tp->txq_req = channel->tx_count;
12867 
12868 	if (!netif_running(dev))
12869 		return 0;
12870 
12871 	tg3_stop(tp);
12872 
12873 	tg3_carrier_off(tp);
12874 
12875 	tg3_start(tp, true, false, false);
12876 
12877 	return 0;
12878 }
12879 
tg3_get_strings(struct net_device * dev,u32 stringset,u8 * buf)12880 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12881 {
12882 	switch (stringset) {
12883 	case ETH_SS_STATS:
12884 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12885 		break;
12886 	case ETH_SS_TEST:
12887 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12888 		break;
12889 	default:
12890 		WARN_ON(1);	/* we need a WARN() */
12891 		break;
12892 	}
12893 }
12894 
tg3_set_phys_id(struct net_device * dev,enum ethtool_phys_id_state state)12895 static int tg3_set_phys_id(struct net_device *dev,
12896 			    enum ethtool_phys_id_state state)
12897 {
12898 	struct tg3 *tp = netdev_priv(dev);
12899 
12900 	switch (state) {
12901 	case ETHTOOL_ID_ACTIVE:
12902 		return 1;	/* cycle on/off once per second */
12903 
12904 	case ETHTOOL_ID_ON:
12905 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12906 		     LED_CTRL_1000MBPS_ON |
12907 		     LED_CTRL_100MBPS_ON |
12908 		     LED_CTRL_10MBPS_ON |
12909 		     LED_CTRL_TRAFFIC_OVERRIDE |
12910 		     LED_CTRL_TRAFFIC_BLINK |
12911 		     LED_CTRL_TRAFFIC_LED);
12912 		break;
12913 
12914 	case ETHTOOL_ID_OFF:
12915 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12916 		     LED_CTRL_TRAFFIC_OVERRIDE);
12917 		break;
12918 
12919 	case ETHTOOL_ID_INACTIVE:
12920 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12921 		break;
12922 	}
12923 
12924 	return 0;
12925 }
12926 
tg3_get_ethtool_stats(struct net_device * dev,struct ethtool_stats * estats,u64 * tmp_stats)12927 static void tg3_get_ethtool_stats(struct net_device *dev,
12928 				   struct ethtool_stats *estats, u64 *tmp_stats)
12929 {
12930 	struct tg3 *tp = netdev_priv(dev);
12931 
12932 	if (tp->hw_stats)
12933 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12934 	else
12935 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12936 }
12937 
tg3_vpd_readblock(struct tg3 * tp,unsigned int * vpdlen)12938 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12939 {
12940 	int i;
12941 	__be32 *buf;
12942 	u32 offset = 0, len = 0;
12943 	u32 magic, val;
12944 
12945 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12946 		return NULL;
12947 
12948 	if (magic == TG3_EEPROM_MAGIC) {
12949 		for (offset = TG3_NVM_DIR_START;
12950 		     offset < TG3_NVM_DIR_END;
12951 		     offset += TG3_NVM_DIRENT_SIZE) {
12952 			if (tg3_nvram_read(tp, offset, &val))
12953 				return NULL;
12954 
12955 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12956 			    TG3_NVM_DIRTYPE_EXTVPD)
12957 				break;
12958 		}
12959 
12960 		if (offset != TG3_NVM_DIR_END) {
12961 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12962 			if (tg3_nvram_read(tp, offset + 4, &offset))
12963 				return NULL;
12964 
12965 			offset = tg3_nvram_logical_addr(tp, offset);
12966 		}
12967 
12968 		if (!offset || !len) {
12969 			offset = TG3_NVM_VPD_OFF;
12970 			len = TG3_NVM_VPD_LEN;
12971 		}
12972 
12973 		buf = kmalloc(len, GFP_KERNEL);
12974 		if (!buf)
12975 			return NULL;
12976 
12977 		for (i = 0; i < len; i += 4) {
12978 			/* The data is in little-endian format in NVRAM.
12979 			 * Use the big-endian read routines to preserve
12980 			 * the byte order as it exists in NVRAM.
12981 			 */
12982 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12983 				goto error;
12984 		}
12985 		*vpdlen = len;
12986 	} else {
12987 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12988 		if (IS_ERR(buf))
12989 			return NULL;
12990 	}
12991 
12992 	return buf;
12993 
12994 error:
12995 	kfree(buf);
12996 	return NULL;
12997 }
12998 
12999 #define NVRAM_TEST_SIZE 0x100
13000 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
13001 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
13002 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
13003 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
13004 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
13005 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
13006 #define NVRAM_SELFBOOT_HW_SIZE 0x20
13007 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
13008 
tg3_test_nvram(struct tg3 * tp)13009 static int tg3_test_nvram(struct tg3 *tp)
13010 {
13011 	u32 csum, magic;
13012 	__be32 *buf;
13013 	int i, j, k, err = 0, size;
13014 	unsigned int len;
13015 
13016 	if (tg3_flag(tp, NO_NVRAM))
13017 		return 0;
13018 
13019 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13020 		return -EIO;
13021 
13022 	if (magic == TG3_EEPROM_MAGIC)
13023 		size = NVRAM_TEST_SIZE;
13024 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
13025 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
13026 		    TG3_EEPROM_SB_FORMAT_1) {
13027 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
13028 			case TG3_EEPROM_SB_REVISION_0:
13029 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
13030 				break;
13031 			case TG3_EEPROM_SB_REVISION_2:
13032 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13033 				break;
13034 			case TG3_EEPROM_SB_REVISION_3:
13035 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13036 				break;
13037 			case TG3_EEPROM_SB_REVISION_4:
13038 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13039 				break;
13040 			case TG3_EEPROM_SB_REVISION_5:
13041 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13042 				break;
13043 			case TG3_EEPROM_SB_REVISION_6:
13044 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13045 				break;
13046 			default:
13047 				return -EIO;
13048 			}
13049 		} else
13050 			return 0;
13051 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13052 		size = NVRAM_SELFBOOT_HW_SIZE;
13053 	else
13054 		return -EIO;
13055 
13056 	buf = kmalloc(size, GFP_KERNEL);
13057 	if (buf == NULL)
13058 		return -ENOMEM;
13059 
13060 	err = -EIO;
13061 	for (i = 0, j = 0; i < size; i += 4, j++) {
13062 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
13063 		if (err)
13064 			break;
13065 	}
13066 	if (i < size)
13067 		goto out;
13068 
13069 	/* Selfboot format */
13070 	magic = be32_to_cpu(buf[0]);
13071 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13072 	    TG3_EEPROM_MAGIC_FW) {
13073 		u8 *buf8 = (u8 *) buf, csum8 = 0;
13074 
13075 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13076 		    TG3_EEPROM_SB_REVISION_2) {
13077 			/* For rev 2, the csum doesn't include the MBA. */
13078 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13079 				csum8 += buf8[i];
13080 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13081 				csum8 += buf8[i];
13082 		} else {
13083 			for (i = 0; i < size; i++)
13084 				csum8 += buf8[i];
13085 		}
13086 
13087 		if (csum8 == 0) {
13088 			err = 0;
13089 			goto out;
13090 		}
13091 
13092 		err = -EIO;
13093 		goto out;
13094 	}
13095 
13096 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13097 	    TG3_EEPROM_MAGIC_HW) {
13098 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13099 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13100 		u8 *buf8 = (u8 *) buf;
13101 
13102 		/* Separate the parity bits and the data bytes.  */
13103 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13104 			if ((i == 0) || (i == 8)) {
13105 				int l;
13106 				u8 msk;
13107 
13108 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13109 					parity[k++] = buf8[i] & msk;
13110 				i++;
13111 			} else if (i == 16) {
13112 				int l;
13113 				u8 msk;
13114 
13115 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13116 					parity[k++] = buf8[i] & msk;
13117 				i++;
13118 
13119 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13120 					parity[k++] = buf8[i] & msk;
13121 				i++;
13122 			}
13123 			data[j++] = buf8[i];
13124 		}
13125 
13126 		err = -EIO;
13127 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13128 			u8 hw8 = hweight8(data[i]);
13129 
13130 			if ((hw8 & 0x1) && parity[i])
13131 				goto out;
13132 			else if (!(hw8 & 0x1) && !parity[i])
13133 				goto out;
13134 		}
13135 		err = 0;
13136 		goto out;
13137 	}
13138 
13139 	err = -EIO;
13140 
13141 	/* Bootstrap checksum at offset 0x10 */
13142 	csum = calc_crc((unsigned char *) buf, 0x10);
13143 
13144 	/* The type of buf is __be32 *, but this value is __le32 */
13145 	if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13146 		goto out;
13147 
13148 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13149 	csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13150 
13151 	/* The type of buf is __be32 *, but this value is __le32 */
13152 	if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13153 		goto out;
13154 
13155 	kfree(buf);
13156 
13157 	buf = tg3_vpd_readblock(tp, &len);
13158 	if (!buf)
13159 		return -ENOMEM;
13160 
13161 	err = pci_vpd_check_csum(buf, len);
13162 	/* go on if no checksum found */
13163 	if (err == 1)
13164 		err = 0;
13165 out:
13166 	kfree(buf);
13167 	return err;
13168 }
13169 
13170 #define TG3_SERDES_TIMEOUT_SEC	2
13171 #define TG3_COPPER_TIMEOUT_SEC	6
13172 
tg3_test_link(struct tg3 * tp)13173 static int tg3_test_link(struct tg3 *tp)
13174 {
13175 	int i, max;
13176 
13177 	if (!netif_running(tp->dev))
13178 		return -ENODEV;
13179 
13180 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13181 		max = TG3_SERDES_TIMEOUT_SEC;
13182 	else
13183 		max = TG3_COPPER_TIMEOUT_SEC;
13184 
13185 	for (i = 0; i < max; i++) {
13186 		if (tp->link_up)
13187 			return 0;
13188 
13189 		if (msleep_interruptible(1000))
13190 			break;
13191 	}
13192 
13193 	return -EIO;
13194 }
13195 
13196 /* Only test the commonly used registers */
tg3_test_registers(struct tg3 * tp)13197 static int tg3_test_registers(struct tg3 *tp)
13198 {
13199 	int i, is_5705, is_5750;
13200 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13201 	static struct {
13202 		u16 offset;
13203 		u16 flags;
13204 #define TG3_FL_5705	0x1
13205 #define TG3_FL_NOT_5705	0x2
13206 #define TG3_FL_NOT_5788	0x4
13207 #define TG3_FL_NOT_5750	0x8
13208 		u32 read_mask;
13209 		u32 write_mask;
13210 	} reg_tbl[] = {
13211 		/* MAC Control Registers */
13212 		{ MAC_MODE, TG3_FL_NOT_5705,
13213 			0x00000000, 0x00ef6f8c },
13214 		{ MAC_MODE, TG3_FL_5705,
13215 			0x00000000, 0x01ef6b8c },
13216 		{ MAC_STATUS, TG3_FL_NOT_5705,
13217 			0x03800107, 0x00000000 },
13218 		{ MAC_STATUS, TG3_FL_5705,
13219 			0x03800100, 0x00000000 },
13220 		{ MAC_ADDR_0_HIGH, 0x0000,
13221 			0x00000000, 0x0000ffff },
13222 		{ MAC_ADDR_0_LOW, 0x0000,
13223 			0x00000000, 0xffffffff },
13224 		{ MAC_RX_MTU_SIZE, 0x0000,
13225 			0x00000000, 0x0000ffff },
13226 		{ MAC_TX_MODE, 0x0000,
13227 			0x00000000, 0x00000070 },
13228 		{ MAC_TX_LENGTHS, 0x0000,
13229 			0x00000000, 0x00003fff },
13230 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13231 			0x00000000, 0x000007fc },
13232 		{ MAC_RX_MODE, TG3_FL_5705,
13233 			0x00000000, 0x000007dc },
13234 		{ MAC_HASH_REG_0, 0x0000,
13235 			0x00000000, 0xffffffff },
13236 		{ MAC_HASH_REG_1, 0x0000,
13237 			0x00000000, 0xffffffff },
13238 		{ MAC_HASH_REG_2, 0x0000,
13239 			0x00000000, 0xffffffff },
13240 		{ MAC_HASH_REG_3, 0x0000,
13241 			0x00000000, 0xffffffff },
13242 
13243 		/* Receive Data and Receive BD Initiator Control Registers. */
13244 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13245 			0x00000000, 0xffffffff },
13246 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13247 			0x00000000, 0xffffffff },
13248 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13249 			0x00000000, 0x00000003 },
13250 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13251 			0x00000000, 0xffffffff },
13252 		{ RCVDBDI_STD_BD+0, 0x0000,
13253 			0x00000000, 0xffffffff },
13254 		{ RCVDBDI_STD_BD+4, 0x0000,
13255 			0x00000000, 0xffffffff },
13256 		{ RCVDBDI_STD_BD+8, 0x0000,
13257 			0x00000000, 0xffff0002 },
13258 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13259 			0x00000000, 0xffffffff },
13260 
13261 		/* Receive BD Initiator Control Registers. */
13262 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13263 			0x00000000, 0xffffffff },
13264 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13265 			0x00000000, 0x000003ff },
13266 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13267 			0x00000000, 0xffffffff },
13268 
13269 		/* Host Coalescing Control Registers. */
13270 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13271 			0x00000000, 0x00000004 },
13272 		{ HOSTCC_MODE, TG3_FL_5705,
13273 			0x00000000, 0x000000f6 },
13274 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13275 			0x00000000, 0xffffffff },
13276 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13277 			0x00000000, 0x000003ff },
13278 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13279 			0x00000000, 0xffffffff },
13280 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13281 			0x00000000, 0x000003ff },
13282 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13283 			0x00000000, 0xffffffff },
13284 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13285 			0x00000000, 0x000000ff },
13286 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13287 			0x00000000, 0xffffffff },
13288 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13289 			0x00000000, 0x000000ff },
13290 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13291 			0x00000000, 0xffffffff },
13292 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13293 			0x00000000, 0xffffffff },
13294 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13295 			0x00000000, 0xffffffff },
13296 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13297 			0x00000000, 0x000000ff },
13298 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13299 			0x00000000, 0xffffffff },
13300 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13301 			0x00000000, 0x000000ff },
13302 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13303 			0x00000000, 0xffffffff },
13304 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13305 			0x00000000, 0xffffffff },
13306 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13307 			0x00000000, 0xffffffff },
13308 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13309 			0x00000000, 0xffffffff },
13310 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13311 			0x00000000, 0xffffffff },
13312 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13313 			0xffffffff, 0x00000000 },
13314 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13315 			0xffffffff, 0x00000000 },
13316 
13317 		/* Buffer Manager Control Registers. */
13318 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13319 			0x00000000, 0x007fff80 },
13320 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13321 			0x00000000, 0x007fffff },
13322 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13323 			0x00000000, 0x0000003f },
13324 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13325 			0x00000000, 0x000001ff },
13326 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13327 			0x00000000, 0x000001ff },
13328 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13329 			0xffffffff, 0x00000000 },
13330 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13331 			0xffffffff, 0x00000000 },
13332 
13333 		/* Mailbox Registers */
13334 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13335 			0x00000000, 0x000001ff },
13336 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13337 			0x00000000, 0x000001ff },
13338 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13339 			0x00000000, 0x000007ff },
13340 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13341 			0x00000000, 0x000001ff },
13342 
13343 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13344 	};
13345 
13346 	is_5705 = is_5750 = 0;
13347 	if (tg3_flag(tp, 5705_PLUS)) {
13348 		is_5705 = 1;
13349 		if (tg3_flag(tp, 5750_PLUS))
13350 			is_5750 = 1;
13351 	}
13352 
13353 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13354 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13355 			continue;
13356 
13357 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13358 			continue;
13359 
13360 		if (tg3_flag(tp, IS_5788) &&
13361 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13362 			continue;
13363 
13364 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13365 			continue;
13366 
13367 		offset = (u32) reg_tbl[i].offset;
13368 		read_mask = reg_tbl[i].read_mask;
13369 		write_mask = reg_tbl[i].write_mask;
13370 
13371 		/* Save the original register content */
13372 		save_val = tr32(offset);
13373 
13374 		/* Determine the read-only value. */
13375 		read_val = save_val & read_mask;
13376 
13377 		/* Write zero to the register, then make sure the read-only bits
13378 		 * are not changed and the read/write bits are all zeros.
13379 		 */
13380 		tw32(offset, 0);
13381 
13382 		val = tr32(offset);
13383 
13384 		/* Test the read-only and read/write bits. */
13385 		if (((val & read_mask) != read_val) || (val & write_mask))
13386 			goto out;
13387 
13388 		/* Write ones to all the bits defined by RdMask and WrMask, then
13389 		 * make sure the read-only bits are not changed and the
13390 		 * read/write bits are all ones.
13391 		 */
13392 		tw32(offset, read_mask | write_mask);
13393 
13394 		val = tr32(offset);
13395 
13396 		/* Test the read-only bits. */
13397 		if ((val & read_mask) != read_val)
13398 			goto out;
13399 
13400 		/* Test the read/write bits. */
13401 		if ((val & write_mask) != write_mask)
13402 			goto out;
13403 
13404 		tw32(offset, save_val);
13405 	}
13406 
13407 	return 0;
13408 
13409 out:
13410 	if (netif_msg_hw(tp))
13411 		netdev_err(tp->dev,
13412 			   "Register test failed at offset %x\n", offset);
13413 	tw32(offset, save_val);
13414 	return -EIO;
13415 }
13416 
tg3_do_mem_test(struct tg3 * tp,u32 offset,u32 len)13417 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13418 {
13419 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13420 	int i;
13421 	u32 j;
13422 
13423 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13424 		for (j = 0; j < len; j += 4) {
13425 			u32 val;
13426 
13427 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13428 			tg3_read_mem(tp, offset + j, &val);
13429 			if (val != test_pattern[i])
13430 				return -EIO;
13431 		}
13432 	}
13433 	return 0;
13434 }
13435 
tg3_test_memory(struct tg3 * tp)13436 static int tg3_test_memory(struct tg3 *tp)
13437 {
13438 	static struct mem_entry {
13439 		u32 offset;
13440 		u32 len;
13441 	} mem_tbl_570x[] = {
13442 		{ 0x00000000, 0x00b50},
13443 		{ 0x00002000, 0x1c000},
13444 		{ 0xffffffff, 0x00000}
13445 	}, mem_tbl_5705[] = {
13446 		{ 0x00000100, 0x0000c},
13447 		{ 0x00000200, 0x00008},
13448 		{ 0x00004000, 0x00800},
13449 		{ 0x00006000, 0x01000},
13450 		{ 0x00008000, 0x02000},
13451 		{ 0x00010000, 0x0e000},
13452 		{ 0xffffffff, 0x00000}
13453 	}, mem_tbl_5755[] = {
13454 		{ 0x00000200, 0x00008},
13455 		{ 0x00004000, 0x00800},
13456 		{ 0x00006000, 0x00800},
13457 		{ 0x00008000, 0x02000},
13458 		{ 0x00010000, 0x0c000},
13459 		{ 0xffffffff, 0x00000}
13460 	}, mem_tbl_5906[] = {
13461 		{ 0x00000200, 0x00008},
13462 		{ 0x00004000, 0x00400},
13463 		{ 0x00006000, 0x00400},
13464 		{ 0x00008000, 0x01000},
13465 		{ 0x00010000, 0x01000},
13466 		{ 0xffffffff, 0x00000}
13467 	}, mem_tbl_5717[] = {
13468 		{ 0x00000200, 0x00008},
13469 		{ 0x00010000, 0x0a000},
13470 		{ 0x00020000, 0x13c00},
13471 		{ 0xffffffff, 0x00000}
13472 	}, mem_tbl_57765[] = {
13473 		{ 0x00000200, 0x00008},
13474 		{ 0x00004000, 0x00800},
13475 		{ 0x00006000, 0x09800},
13476 		{ 0x00010000, 0x0a000},
13477 		{ 0xffffffff, 0x00000}
13478 	};
13479 	struct mem_entry *mem_tbl;
13480 	int err = 0;
13481 	int i;
13482 
13483 	if (tg3_flag(tp, 5717_PLUS))
13484 		mem_tbl = mem_tbl_5717;
13485 	else if (tg3_flag(tp, 57765_CLASS) ||
13486 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13487 		mem_tbl = mem_tbl_57765;
13488 	else if (tg3_flag(tp, 5755_PLUS))
13489 		mem_tbl = mem_tbl_5755;
13490 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13491 		mem_tbl = mem_tbl_5906;
13492 	else if (tg3_flag(tp, 5705_PLUS))
13493 		mem_tbl = mem_tbl_5705;
13494 	else
13495 		mem_tbl = mem_tbl_570x;
13496 
13497 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13498 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13499 		if (err)
13500 			break;
13501 	}
13502 
13503 	return err;
13504 }
13505 
13506 #define TG3_TSO_MSS		500
13507 
13508 #define TG3_TSO_IP_HDR_LEN	20
13509 #define TG3_TSO_TCP_HDR_LEN	20
13510 #define TG3_TSO_TCP_OPT_LEN	12
13511 
13512 static const u8 tg3_tso_header[] = {
13513 0x08, 0x00,
13514 0x45, 0x00, 0x00, 0x00,
13515 0x00, 0x00, 0x40, 0x00,
13516 0x40, 0x06, 0x00, 0x00,
13517 0x0a, 0x00, 0x00, 0x01,
13518 0x0a, 0x00, 0x00, 0x02,
13519 0x0d, 0x00, 0xe0, 0x00,
13520 0x00, 0x00, 0x01, 0x00,
13521 0x00, 0x00, 0x02, 0x00,
13522 0x80, 0x10, 0x10, 0x00,
13523 0x14, 0x09, 0x00, 0x00,
13524 0x01, 0x01, 0x08, 0x0a,
13525 0x11, 0x11, 0x11, 0x11,
13526 0x11, 0x11, 0x11, 0x11,
13527 };
13528 
tg3_run_loopback(struct tg3 * tp,u32 pktsz,bool tso_loopback)13529 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13530 {
13531 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13532 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13533 	u32 budget;
13534 	struct sk_buff *skb;
13535 	u8 *tx_data, *rx_data;
13536 	dma_addr_t map;
13537 	int num_pkts, tx_len, rx_len, i, err;
13538 	struct tg3_rx_buffer_desc *desc;
13539 	struct tg3_napi *tnapi, *rnapi;
13540 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13541 
13542 	tnapi = &tp->napi[0];
13543 	rnapi = &tp->napi[0];
13544 	if (tp->irq_cnt > 1) {
13545 		if (tg3_flag(tp, ENABLE_RSS))
13546 			rnapi = &tp->napi[1];
13547 		if (tg3_flag(tp, ENABLE_TSS))
13548 			tnapi = &tp->napi[1];
13549 	}
13550 	coal_now = tnapi->coal_now | rnapi->coal_now;
13551 
13552 	err = -EIO;
13553 
13554 	tx_len = pktsz;
13555 	skb = netdev_alloc_skb(tp->dev, tx_len);
13556 	if (!skb)
13557 		return -ENOMEM;
13558 
13559 	tx_data = skb_put(skb, tx_len);
13560 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13561 	memset(tx_data + ETH_ALEN, 0x0, 8);
13562 
13563 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13564 
13565 	if (tso_loopback) {
13566 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13567 
13568 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13569 			      TG3_TSO_TCP_OPT_LEN;
13570 
13571 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13572 		       sizeof(tg3_tso_header));
13573 		mss = TG3_TSO_MSS;
13574 
13575 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13576 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13577 
13578 		/* Set the total length field in the IP header */
13579 		iph->tot_len = htons((u16)(mss + hdr_len));
13580 
13581 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13582 			      TXD_FLAG_CPU_POST_DMA);
13583 
13584 		if (tg3_flag(tp, HW_TSO_1) ||
13585 		    tg3_flag(tp, HW_TSO_2) ||
13586 		    tg3_flag(tp, HW_TSO_3)) {
13587 			struct tcphdr *th;
13588 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13589 			th = (struct tcphdr *)&tx_data[val];
13590 			th->check = 0;
13591 		} else
13592 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13593 
13594 		if (tg3_flag(tp, HW_TSO_3)) {
13595 			mss |= (hdr_len & 0xc) << 12;
13596 			if (hdr_len & 0x10)
13597 				base_flags |= 0x00000010;
13598 			base_flags |= (hdr_len & 0x3e0) << 5;
13599 		} else if (tg3_flag(tp, HW_TSO_2))
13600 			mss |= hdr_len << 9;
13601 		else if (tg3_flag(tp, HW_TSO_1) ||
13602 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13603 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13604 		} else {
13605 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13606 		}
13607 
13608 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13609 	} else {
13610 		num_pkts = 1;
13611 		data_off = ETH_HLEN;
13612 
13613 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13614 		    tx_len > VLAN_ETH_FRAME_LEN)
13615 			base_flags |= TXD_FLAG_JMB_PKT;
13616 	}
13617 
13618 	for (i = data_off; i < tx_len; i++)
13619 		tx_data[i] = (u8) (i & 0xff);
13620 
13621 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13622 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13623 		dev_kfree_skb(skb);
13624 		return -EIO;
13625 	}
13626 
13627 	val = tnapi->tx_prod;
13628 	tnapi->tx_buffers[val].skb = skb;
13629 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13630 
13631 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13632 	       rnapi->coal_now);
13633 
13634 	udelay(10);
13635 
13636 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13637 
13638 	budget = tg3_tx_avail(tnapi);
13639 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13640 			    base_flags | TXD_FLAG_END, mss, 0)) {
13641 		tnapi->tx_buffers[val].skb = NULL;
13642 		dev_kfree_skb(skb);
13643 		return -EIO;
13644 	}
13645 
13646 	tnapi->tx_prod++;
13647 
13648 	/* Sync BD data before updating mailbox */
13649 	wmb();
13650 
13651 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13652 	tr32_mailbox(tnapi->prodmbox);
13653 
13654 	udelay(10);
13655 
13656 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13657 	for (i = 0; i < 35; i++) {
13658 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13659 		       coal_now);
13660 
13661 		udelay(10);
13662 
13663 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13664 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13665 		if ((tx_idx == tnapi->tx_prod) &&
13666 		    (rx_idx == (rx_start_idx + num_pkts)))
13667 			break;
13668 	}
13669 
13670 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13671 	dev_kfree_skb(skb);
13672 
13673 	if (tx_idx != tnapi->tx_prod)
13674 		goto out;
13675 
13676 	if (rx_idx != rx_start_idx + num_pkts)
13677 		goto out;
13678 
13679 	val = data_off;
13680 	while (rx_idx != rx_start_idx) {
13681 		desc = &rnapi->rx_rcb[rx_start_idx++];
13682 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13683 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13684 
13685 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13686 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13687 			goto out;
13688 
13689 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13690 			 - ETH_FCS_LEN;
13691 
13692 		if (!tso_loopback) {
13693 			if (rx_len != tx_len)
13694 				goto out;
13695 
13696 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13697 				if (opaque_key != RXD_OPAQUE_RING_STD)
13698 					goto out;
13699 			} else {
13700 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13701 					goto out;
13702 			}
13703 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13704 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13705 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13706 			goto out;
13707 		}
13708 
13709 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13710 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13711 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13712 					     mapping);
13713 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13714 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13715 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13716 					     mapping);
13717 		} else
13718 			goto out;
13719 
13720 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13721 					DMA_FROM_DEVICE);
13722 
13723 		rx_data += TG3_RX_OFFSET(tp);
13724 		for (i = data_off; i < rx_len; i++, val++) {
13725 			if (*(rx_data + i) != (u8) (val & 0xff))
13726 				goto out;
13727 		}
13728 	}
13729 
13730 	err = 0;
13731 
13732 	/* tg3_free_rings will unmap and free the rx_data */
13733 out:
13734 	return err;
13735 }
13736 
13737 #define TG3_STD_LOOPBACK_FAILED		1
13738 #define TG3_JMB_LOOPBACK_FAILED		2
13739 #define TG3_TSO_LOOPBACK_FAILED		4
13740 #define TG3_LOOPBACK_FAILED \
13741 	(TG3_STD_LOOPBACK_FAILED | \
13742 	 TG3_JMB_LOOPBACK_FAILED | \
13743 	 TG3_TSO_LOOPBACK_FAILED)
13744 
tg3_test_loopback(struct tg3 * tp,u64 * data,bool do_extlpbk)13745 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13746 {
13747 	int err = -EIO;
13748 	u32 eee_cap;
13749 	u32 jmb_pkt_sz = 9000;
13750 
13751 	if (tp->dma_limit)
13752 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13753 
13754 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13755 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13756 
13757 	if (!netif_running(tp->dev)) {
13758 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13759 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13760 		if (do_extlpbk)
13761 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13762 		goto done;
13763 	}
13764 
13765 	err = tg3_reset_hw(tp, true);
13766 	if (err) {
13767 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13768 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13769 		if (do_extlpbk)
13770 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13771 		goto done;
13772 	}
13773 
13774 	if (tg3_flag(tp, ENABLE_RSS)) {
13775 		int i;
13776 
13777 		/* Reroute all rx packets to the 1st queue */
13778 		for (i = MAC_RSS_INDIR_TBL_0;
13779 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13780 			tw32(i, 0x0);
13781 	}
13782 
13783 	/* HW errata - mac loopback fails in some cases on 5780.
13784 	 * Normal traffic and PHY loopback are not affected by
13785 	 * errata.  Also, the MAC loopback test is deprecated for
13786 	 * all newer ASIC revisions.
13787 	 */
13788 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13789 	    !tg3_flag(tp, CPMU_PRESENT)) {
13790 		tg3_mac_loopback(tp, true);
13791 
13792 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13793 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13794 
13795 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13796 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13797 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13798 
13799 		tg3_mac_loopback(tp, false);
13800 	}
13801 
13802 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13803 	    !tg3_flag(tp, USE_PHYLIB)) {
13804 		int i;
13805 
13806 		tg3_phy_lpbk_set(tp, 0, false);
13807 
13808 		/* Wait for link */
13809 		for (i = 0; i < 100; i++) {
13810 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13811 				break;
13812 			mdelay(1);
13813 		}
13814 
13815 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13816 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13817 		if (tg3_flag(tp, TSO_CAPABLE) &&
13818 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13819 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13820 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13821 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13822 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13823 
13824 		if (do_extlpbk) {
13825 			tg3_phy_lpbk_set(tp, 0, true);
13826 
13827 			/* All link indications report up, but the hardware
13828 			 * isn't really ready for about 20 msec.  Double it
13829 			 * to be sure.
13830 			 */
13831 			mdelay(40);
13832 
13833 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13834 				data[TG3_EXT_LOOPB_TEST] |=
13835 							TG3_STD_LOOPBACK_FAILED;
13836 			if (tg3_flag(tp, TSO_CAPABLE) &&
13837 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13838 				data[TG3_EXT_LOOPB_TEST] |=
13839 							TG3_TSO_LOOPBACK_FAILED;
13840 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13841 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13842 				data[TG3_EXT_LOOPB_TEST] |=
13843 							TG3_JMB_LOOPBACK_FAILED;
13844 		}
13845 
13846 		/* Re-enable gphy autopowerdown. */
13847 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13848 			tg3_phy_toggle_apd(tp, true);
13849 	}
13850 
13851 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13852 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13853 
13854 done:
13855 	tp->phy_flags |= eee_cap;
13856 
13857 	return err;
13858 }
13859 
tg3_self_test(struct net_device * dev,struct ethtool_test * etest,u64 * data)13860 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13861 			  u64 *data)
13862 {
13863 	struct tg3 *tp = netdev_priv(dev);
13864 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13865 
13866 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13867 		if (tg3_power_up(tp)) {
13868 			etest->flags |= ETH_TEST_FL_FAILED;
13869 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13870 			return;
13871 		}
13872 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13873 	}
13874 
13875 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13876 
13877 	if (tg3_test_nvram(tp) != 0) {
13878 		etest->flags |= ETH_TEST_FL_FAILED;
13879 		data[TG3_NVRAM_TEST] = 1;
13880 	}
13881 	if (!doextlpbk && tg3_test_link(tp)) {
13882 		etest->flags |= ETH_TEST_FL_FAILED;
13883 		data[TG3_LINK_TEST] = 1;
13884 	}
13885 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13886 		int err, err2 = 0, irq_sync = 0;
13887 
13888 		if (netif_running(dev)) {
13889 			tg3_phy_stop(tp);
13890 			tg3_netif_stop(tp);
13891 			irq_sync = 1;
13892 		}
13893 
13894 		tg3_full_lock(tp, irq_sync);
13895 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13896 		err = tg3_nvram_lock(tp);
13897 		tg3_halt_cpu(tp, RX_CPU_BASE);
13898 		if (!tg3_flag(tp, 5705_PLUS))
13899 			tg3_halt_cpu(tp, TX_CPU_BASE);
13900 		if (!err)
13901 			tg3_nvram_unlock(tp);
13902 
13903 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13904 			tg3_phy_reset(tp);
13905 
13906 		if (tg3_test_registers(tp) != 0) {
13907 			etest->flags |= ETH_TEST_FL_FAILED;
13908 			data[TG3_REGISTER_TEST] = 1;
13909 		}
13910 
13911 		if (tg3_test_memory(tp) != 0) {
13912 			etest->flags |= ETH_TEST_FL_FAILED;
13913 			data[TG3_MEMORY_TEST] = 1;
13914 		}
13915 
13916 		if (doextlpbk)
13917 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13918 
13919 		if (tg3_test_loopback(tp, data, doextlpbk))
13920 			etest->flags |= ETH_TEST_FL_FAILED;
13921 
13922 		tg3_full_unlock(tp);
13923 
13924 		if (tg3_test_interrupt(tp) != 0) {
13925 			etest->flags |= ETH_TEST_FL_FAILED;
13926 			data[TG3_INTERRUPT_TEST] = 1;
13927 		}
13928 
13929 		netdev_lock(dev);
13930 		tg3_full_lock(tp, 0);
13931 
13932 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13933 		if (netif_running(dev)) {
13934 			tg3_flag_set(tp, INIT_COMPLETE);
13935 			err2 = tg3_restart_hw(tp, true);
13936 			if (!err2)
13937 				tg3_netif_start(tp);
13938 		}
13939 
13940 		tg3_full_unlock(tp);
13941 		netdev_unlock(dev);
13942 
13943 		if (irq_sync && !err2)
13944 			tg3_phy_start(tp);
13945 	}
13946 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13947 		tg3_power_down_prepare(tp);
13948 
13949 }
13950 
tg3_hwtstamp_set(struct net_device * dev,struct ifreq * ifr)13951 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13952 {
13953 	struct tg3 *tp = netdev_priv(dev);
13954 	struct hwtstamp_config stmpconf;
13955 
13956 	if (!tg3_flag(tp, PTP_CAPABLE))
13957 		return -EOPNOTSUPP;
13958 
13959 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13960 		return -EFAULT;
13961 
13962 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13963 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13964 		return -ERANGE;
13965 
13966 	switch (stmpconf.rx_filter) {
13967 	case HWTSTAMP_FILTER_NONE:
13968 		tp->rxptpctl = 0;
13969 		break;
13970 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13971 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13972 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13973 		break;
13974 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13975 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13976 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13977 		break;
13978 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13979 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13980 			       TG3_RX_PTP_CTL_DELAY_REQ;
13981 		break;
13982 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13983 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13984 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13985 		break;
13986 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13987 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13988 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13989 		break;
13990 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13991 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13992 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13993 		break;
13994 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13995 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13996 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13997 		break;
13998 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13999 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
14000 			       TG3_RX_PTP_CTL_SYNC_EVNT;
14001 		break;
14002 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
14003 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14004 			       TG3_RX_PTP_CTL_SYNC_EVNT;
14005 		break;
14006 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
14007 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
14008 			       TG3_RX_PTP_CTL_DELAY_REQ;
14009 		break;
14010 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
14011 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
14012 			       TG3_RX_PTP_CTL_DELAY_REQ;
14013 		break;
14014 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
14015 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
14016 			       TG3_RX_PTP_CTL_DELAY_REQ;
14017 		break;
14018 	default:
14019 		return -ERANGE;
14020 	}
14021 
14022 	if (netif_running(dev) && tp->rxptpctl)
14023 		tw32(TG3_RX_PTP_CTL,
14024 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14025 
14026 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
14027 		tg3_flag_set(tp, TX_TSTAMP_EN);
14028 	else
14029 		tg3_flag_clear(tp, TX_TSTAMP_EN);
14030 
14031 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14032 		-EFAULT : 0;
14033 }
14034 
tg3_hwtstamp_get(struct net_device * dev,struct ifreq * ifr)14035 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
14036 {
14037 	struct tg3 *tp = netdev_priv(dev);
14038 	struct hwtstamp_config stmpconf;
14039 
14040 	if (!tg3_flag(tp, PTP_CAPABLE))
14041 		return -EOPNOTSUPP;
14042 
14043 	stmpconf.flags = 0;
14044 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14045 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
14046 
14047 	switch (tp->rxptpctl) {
14048 	case 0:
14049 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14050 		break;
14051 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14052 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14053 		break;
14054 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14055 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14056 		break;
14057 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14058 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14059 		break;
14060 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14061 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14062 		break;
14063 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14064 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14065 		break;
14066 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14067 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14068 		break;
14069 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14070 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14071 		break;
14072 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14073 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14074 		break;
14075 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14076 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14077 		break;
14078 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14079 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14080 		break;
14081 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14082 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14083 		break;
14084 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14085 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14086 		break;
14087 	default:
14088 		WARN_ON_ONCE(1);
14089 		return -ERANGE;
14090 	}
14091 
14092 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14093 		-EFAULT : 0;
14094 }
14095 
tg3_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)14096 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14097 {
14098 	struct mii_ioctl_data *data = if_mii(ifr);
14099 	struct tg3 *tp = netdev_priv(dev);
14100 	int err;
14101 
14102 	if (tg3_flag(tp, USE_PHYLIB)) {
14103 		struct phy_device *phydev;
14104 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14105 			return -EAGAIN;
14106 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14107 		return phy_mii_ioctl(phydev, ifr, cmd);
14108 	}
14109 
14110 	switch (cmd) {
14111 	case SIOCGMIIPHY:
14112 		data->phy_id = tp->phy_addr;
14113 
14114 		fallthrough;
14115 	case SIOCGMIIREG: {
14116 		u32 mii_regval;
14117 
14118 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14119 			break;			/* We have no PHY */
14120 
14121 		if (!netif_running(dev))
14122 			return -EAGAIN;
14123 
14124 		spin_lock_bh(&tp->lock);
14125 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14126 				    data->reg_num & 0x1f, &mii_regval);
14127 		spin_unlock_bh(&tp->lock);
14128 
14129 		data->val_out = mii_regval;
14130 
14131 		return err;
14132 	}
14133 
14134 	case SIOCSMIIREG:
14135 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14136 			break;			/* We have no PHY */
14137 
14138 		if (!netif_running(dev))
14139 			return -EAGAIN;
14140 
14141 		spin_lock_bh(&tp->lock);
14142 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14143 				     data->reg_num & 0x1f, data->val_in);
14144 		spin_unlock_bh(&tp->lock);
14145 
14146 		return err;
14147 
14148 	case SIOCSHWTSTAMP:
14149 		return tg3_hwtstamp_set(dev, ifr);
14150 
14151 	case SIOCGHWTSTAMP:
14152 		return tg3_hwtstamp_get(dev, ifr);
14153 
14154 	default:
14155 		/* do nothing */
14156 		break;
14157 	}
14158 	return -EOPNOTSUPP;
14159 }
14160 
tg3_get_coalesce(struct net_device * dev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)14161 static int tg3_get_coalesce(struct net_device *dev,
14162 			    struct ethtool_coalesce *ec,
14163 			    struct kernel_ethtool_coalesce *kernel_coal,
14164 			    struct netlink_ext_ack *extack)
14165 {
14166 	struct tg3 *tp = netdev_priv(dev);
14167 
14168 	memcpy(ec, &tp->coal, sizeof(*ec));
14169 	return 0;
14170 }
14171 
tg3_set_coalesce(struct net_device * dev,struct ethtool_coalesce * ec,struct kernel_ethtool_coalesce * kernel_coal,struct netlink_ext_ack * extack)14172 static int tg3_set_coalesce(struct net_device *dev,
14173 			    struct ethtool_coalesce *ec,
14174 			    struct kernel_ethtool_coalesce *kernel_coal,
14175 			    struct netlink_ext_ack *extack)
14176 {
14177 	struct tg3 *tp = netdev_priv(dev);
14178 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14179 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14180 
14181 	if (!tg3_flag(tp, 5705_PLUS)) {
14182 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14183 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14184 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14185 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14186 	}
14187 
14188 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14189 	    (!ec->rx_coalesce_usecs) ||
14190 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14191 	    (!ec->tx_coalesce_usecs) ||
14192 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14193 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14194 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14195 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14196 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14197 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14198 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14199 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14200 		return -EINVAL;
14201 
14202 	/* Only copy relevant parameters, ignore all others. */
14203 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14204 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14205 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14206 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14207 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14208 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14209 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14210 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14211 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14212 
14213 	if (netif_running(dev)) {
14214 		tg3_full_lock(tp, 0);
14215 		__tg3_set_coalesce(tp, &tp->coal);
14216 		tg3_full_unlock(tp);
14217 	}
14218 	return 0;
14219 }
14220 
tg3_set_eee(struct net_device * dev,struct ethtool_keee * edata)14221 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14222 {
14223 	struct tg3 *tp = netdev_priv(dev);
14224 
14225 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14226 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14227 		return -EOPNOTSUPP;
14228 	}
14229 
14230 	if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14231 		netdev_warn(tp->dev,
14232 			    "Direct manipulation of EEE advertisement is not supported\n");
14233 		return -EINVAL;
14234 	}
14235 
14236 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14237 		netdev_warn(tp->dev,
14238 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14239 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14240 		return -EINVAL;
14241 	}
14242 
14243 	tp->eee.eee_enabled = edata->eee_enabled;
14244 	tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14245 	tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14246 
14247 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14248 	tg3_warn_mgmt_link_flap(tp);
14249 
14250 	if (netif_running(tp->dev)) {
14251 		tg3_full_lock(tp, 0);
14252 		tg3_setup_eee(tp);
14253 		tg3_phy_reset(tp);
14254 		tg3_full_unlock(tp);
14255 	}
14256 
14257 	return 0;
14258 }
14259 
tg3_get_eee(struct net_device * dev,struct ethtool_keee * edata)14260 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14261 {
14262 	struct tg3 *tp = netdev_priv(dev);
14263 
14264 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14265 		netdev_warn(tp->dev,
14266 			    "Board does not support EEE!\n");
14267 		return -EOPNOTSUPP;
14268 	}
14269 
14270 	*edata = tp->eee;
14271 	return 0;
14272 }
14273 
14274 static const struct ethtool_ops tg3_ethtool_ops = {
14275 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14276 				     ETHTOOL_COALESCE_MAX_FRAMES |
14277 				     ETHTOOL_COALESCE_USECS_IRQ |
14278 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14279 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14280 	.get_drvinfo		= tg3_get_drvinfo,
14281 	.get_regs_len		= tg3_get_regs_len,
14282 	.get_regs		= tg3_get_regs,
14283 	.get_wol		= tg3_get_wol,
14284 	.set_wol		= tg3_set_wol,
14285 	.get_msglevel		= tg3_get_msglevel,
14286 	.set_msglevel		= tg3_set_msglevel,
14287 	.nway_reset		= tg3_nway_reset,
14288 	.get_link		= ethtool_op_get_link,
14289 	.get_eeprom_len		= tg3_get_eeprom_len,
14290 	.get_eeprom		= tg3_get_eeprom,
14291 	.set_eeprom		= tg3_set_eeprom,
14292 	.get_ringparam		= tg3_get_ringparam,
14293 	.set_ringparam		= tg3_set_ringparam,
14294 	.get_pauseparam		= tg3_get_pauseparam,
14295 	.set_pauseparam		= tg3_set_pauseparam,
14296 	.self_test		= tg3_self_test,
14297 	.get_strings		= tg3_get_strings,
14298 	.set_phys_id		= tg3_set_phys_id,
14299 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14300 	.get_coalesce		= tg3_get_coalesce,
14301 	.set_coalesce		= tg3_set_coalesce,
14302 	.get_sset_count		= tg3_get_sset_count,
14303 	.get_rxnfc		= tg3_get_rxnfc,
14304 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14305 	.get_rxfh		= tg3_get_rxfh,
14306 	.set_rxfh		= tg3_set_rxfh,
14307 	.get_channels		= tg3_get_channels,
14308 	.set_channels		= tg3_set_channels,
14309 	.get_ts_info		= tg3_get_ts_info,
14310 	.get_eee		= tg3_get_eee,
14311 	.set_eee		= tg3_set_eee,
14312 	.get_link_ksettings	= tg3_get_link_ksettings,
14313 	.set_link_ksettings	= tg3_set_link_ksettings,
14314 };
14315 
tg3_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)14316 static void tg3_get_stats64(struct net_device *dev,
14317 			    struct rtnl_link_stats64 *stats)
14318 {
14319 	struct tg3 *tp = netdev_priv(dev);
14320 
14321 	spin_lock_bh(&tp->lock);
14322 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14323 		*stats = tp->net_stats_prev;
14324 		spin_unlock_bh(&tp->lock);
14325 		return;
14326 	}
14327 
14328 	tg3_get_nstats(tp, stats);
14329 	spin_unlock_bh(&tp->lock);
14330 }
14331 
tg3_set_rx_mode(struct net_device * dev)14332 static void tg3_set_rx_mode(struct net_device *dev)
14333 {
14334 	struct tg3 *tp = netdev_priv(dev);
14335 
14336 	if (!netif_running(dev))
14337 		return;
14338 
14339 	tg3_full_lock(tp, 0);
14340 	__tg3_set_rx_mode(dev);
14341 	tg3_full_unlock(tp);
14342 }
14343 
tg3_set_mtu(struct net_device * dev,struct tg3 * tp,int new_mtu)14344 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14345 			       int new_mtu)
14346 {
14347 	WRITE_ONCE(dev->mtu, new_mtu);
14348 
14349 	if (new_mtu > ETH_DATA_LEN) {
14350 		if (tg3_flag(tp, 5780_CLASS)) {
14351 			netdev_update_features(dev);
14352 			tg3_flag_clear(tp, TSO_CAPABLE);
14353 		} else {
14354 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14355 		}
14356 	} else {
14357 		if (tg3_flag(tp, 5780_CLASS)) {
14358 			tg3_flag_set(tp, TSO_CAPABLE);
14359 			netdev_update_features(dev);
14360 		}
14361 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14362 	}
14363 }
14364 
tg3_change_mtu(struct net_device * dev,int new_mtu)14365 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14366 {
14367 	struct tg3 *tp = netdev_priv(dev);
14368 	int err;
14369 	bool reset_phy = false;
14370 
14371 	if (!netif_running(dev)) {
14372 		/* We'll just catch it later when the
14373 		 * device is up'd.
14374 		 */
14375 		tg3_set_mtu(dev, tp, new_mtu);
14376 		return 0;
14377 	}
14378 
14379 	tg3_phy_stop(tp);
14380 
14381 	tg3_netif_stop(tp);
14382 
14383 	tg3_set_mtu(dev, tp, new_mtu);
14384 
14385 	netdev_lock(dev);
14386 	tg3_full_lock(tp, 1);
14387 
14388 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14389 
14390 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14391 	 * breaks all requests to 256 bytes.
14392 	 */
14393 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14394 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14395 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14396 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14397 		reset_phy = true;
14398 
14399 	err = tg3_restart_hw(tp, reset_phy);
14400 
14401 	if (!err)
14402 		tg3_netif_start(tp);
14403 
14404 	tg3_full_unlock(tp);
14405 	netdev_unlock(dev);
14406 
14407 	if (!err)
14408 		tg3_phy_start(tp);
14409 
14410 	return err;
14411 }
14412 
14413 static const struct net_device_ops tg3_netdev_ops = {
14414 	.ndo_open		= tg3_open,
14415 	.ndo_stop		= tg3_close,
14416 	.ndo_start_xmit		= tg3_start_xmit,
14417 	.ndo_get_stats64	= tg3_get_stats64,
14418 	.ndo_validate_addr	= eth_validate_addr,
14419 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14420 	.ndo_set_mac_address	= tg3_set_mac_addr,
14421 	.ndo_eth_ioctl		= tg3_ioctl,
14422 	.ndo_tx_timeout		= tg3_tx_timeout,
14423 	.ndo_change_mtu		= tg3_change_mtu,
14424 	.ndo_fix_features	= tg3_fix_features,
14425 	.ndo_set_features	= tg3_set_features,
14426 #ifdef CONFIG_NET_POLL_CONTROLLER
14427 	.ndo_poll_controller	= tg3_poll_controller,
14428 #endif
14429 };
14430 
tg3_get_eeprom_size(struct tg3 * tp)14431 static void tg3_get_eeprom_size(struct tg3 *tp)
14432 {
14433 	u32 cursize, val, magic;
14434 
14435 	tp->nvram_size = EEPROM_CHIP_SIZE;
14436 
14437 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14438 		return;
14439 
14440 	if ((magic != TG3_EEPROM_MAGIC) &&
14441 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14442 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14443 		return;
14444 
14445 	/*
14446 	 * Size the chip by reading offsets at increasing powers of two.
14447 	 * When we encounter our validation signature, we know the addressing
14448 	 * has wrapped around, and thus have our chip size.
14449 	 */
14450 	cursize = 0x10;
14451 
14452 	while (cursize < tp->nvram_size) {
14453 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14454 			return;
14455 
14456 		if (val == magic)
14457 			break;
14458 
14459 		cursize <<= 1;
14460 	}
14461 
14462 	tp->nvram_size = cursize;
14463 }
14464 
tg3_get_nvram_size(struct tg3 * tp)14465 static void tg3_get_nvram_size(struct tg3 *tp)
14466 {
14467 	u32 val;
14468 
14469 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14470 		return;
14471 
14472 	/* Selfboot format */
14473 	if (val != TG3_EEPROM_MAGIC) {
14474 		tg3_get_eeprom_size(tp);
14475 		return;
14476 	}
14477 
14478 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14479 		if (val != 0) {
14480 			/* This is confusing.  We want to operate on the
14481 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14482 			 * call will read from NVRAM and byteswap the data
14483 			 * according to the byteswapping settings for all
14484 			 * other register accesses.  This ensures the data we
14485 			 * want will always reside in the lower 16-bits.
14486 			 * However, the data in NVRAM is in LE format, which
14487 			 * means the data from the NVRAM read will always be
14488 			 * opposite the endianness of the CPU.  The 16-bit
14489 			 * byteswap then brings the data to CPU endianness.
14490 			 */
14491 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14492 			return;
14493 		}
14494 	}
14495 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14496 }
14497 
tg3_get_nvram_info(struct tg3 * tp)14498 static void tg3_get_nvram_info(struct tg3 *tp)
14499 {
14500 	u32 nvcfg1;
14501 
14502 	nvcfg1 = tr32(NVRAM_CFG1);
14503 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14504 		tg3_flag_set(tp, FLASH);
14505 	} else {
14506 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14507 		tw32(NVRAM_CFG1, nvcfg1);
14508 	}
14509 
14510 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14511 	    tg3_flag(tp, 5780_CLASS)) {
14512 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14513 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14514 			tp->nvram_jedecnum = JEDEC_ATMEL;
14515 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14516 			tg3_flag_set(tp, NVRAM_BUFFERED);
14517 			break;
14518 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14519 			tp->nvram_jedecnum = JEDEC_ATMEL;
14520 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14521 			break;
14522 		case FLASH_VENDOR_ATMEL_EEPROM:
14523 			tp->nvram_jedecnum = JEDEC_ATMEL;
14524 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14525 			tg3_flag_set(tp, NVRAM_BUFFERED);
14526 			break;
14527 		case FLASH_VENDOR_ST:
14528 			tp->nvram_jedecnum = JEDEC_ST;
14529 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14530 			tg3_flag_set(tp, NVRAM_BUFFERED);
14531 			break;
14532 		case FLASH_VENDOR_SAIFUN:
14533 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14534 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14535 			break;
14536 		case FLASH_VENDOR_SST_SMALL:
14537 		case FLASH_VENDOR_SST_LARGE:
14538 			tp->nvram_jedecnum = JEDEC_SST;
14539 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14540 			break;
14541 		}
14542 	} else {
14543 		tp->nvram_jedecnum = JEDEC_ATMEL;
14544 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14545 		tg3_flag_set(tp, NVRAM_BUFFERED);
14546 	}
14547 }
14548 
tg3_nvram_get_pagesize(struct tg3 * tp,u32 nvmcfg1)14549 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14550 {
14551 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14552 	case FLASH_5752PAGE_SIZE_256:
14553 		tp->nvram_pagesize = 256;
14554 		break;
14555 	case FLASH_5752PAGE_SIZE_512:
14556 		tp->nvram_pagesize = 512;
14557 		break;
14558 	case FLASH_5752PAGE_SIZE_1K:
14559 		tp->nvram_pagesize = 1024;
14560 		break;
14561 	case FLASH_5752PAGE_SIZE_2K:
14562 		tp->nvram_pagesize = 2048;
14563 		break;
14564 	case FLASH_5752PAGE_SIZE_4K:
14565 		tp->nvram_pagesize = 4096;
14566 		break;
14567 	case FLASH_5752PAGE_SIZE_264:
14568 		tp->nvram_pagesize = 264;
14569 		break;
14570 	case FLASH_5752PAGE_SIZE_528:
14571 		tp->nvram_pagesize = 528;
14572 		break;
14573 	}
14574 }
14575 
tg3_get_5752_nvram_info(struct tg3 * tp)14576 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14577 {
14578 	u32 nvcfg1;
14579 
14580 	nvcfg1 = tr32(NVRAM_CFG1);
14581 
14582 	/* NVRAM protection for TPM */
14583 	if (nvcfg1 & (1 << 27))
14584 		tg3_flag_set(tp, PROTECTED_NVRAM);
14585 
14586 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14587 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14588 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14589 		tp->nvram_jedecnum = JEDEC_ATMEL;
14590 		tg3_flag_set(tp, NVRAM_BUFFERED);
14591 		break;
14592 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14593 		tp->nvram_jedecnum = JEDEC_ATMEL;
14594 		tg3_flag_set(tp, NVRAM_BUFFERED);
14595 		tg3_flag_set(tp, FLASH);
14596 		break;
14597 	case FLASH_5752VENDOR_ST_M45PE10:
14598 	case FLASH_5752VENDOR_ST_M45PE20:
14599 	case FLASH_5752VENDOR_ST_M45PE40:
14600 		tp->nvram_jedecnum = JEDEC_ST;
14601 		tg3_flag_set(tp, NVRAM_BUFFERED);
14602 		tg3_flag_set(tp, FLASH);
14603 		break;
14604 	}
14605 
14606 	if (tg3_flag(tp, FLASH)) {
14607 		tg3_nvram_get_pagesize(tp, nvcfg1);
14608 	} else {
14609 		/* For eeprom, set pagesize to maximum eeprom size */
14610 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14611 
14612 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14613 		tw32(NVRAM_CFG1, nvcfg1);
14614 	}
14615 }
14616 
tg3_get_5755_nvram_info(struct tg3 * tp)14617 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14618 {
14619 	u32 nvcfg1, protect = 0;
14620 
14621 	nvcfg1 = tr32(NVRAM_CFG1);
14622 
14623 	/* NVRAM protection for TPM */
14624 	if (nvcfg1 & (1 << 27)) {
14625 		tg3_flag_set(tp, PROTECTED_NVRAM);
14626 		protect = 1;
14627 	}
14628 
14629 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14630 	switch (nvcfg1) {
14631 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14632 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14633 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14634 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14635 		tp->nvram_jedecnum = JEDEC_ATMEL;
14636 		tg3_flag_set(tp, NVRAM_BUFFERED);
14637 		tg3_flag_set(tp, FLASH);
14638 		tp->nvram_pagesize = 264;
14639 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14640 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14641 			tp->nvram_size = (protect ? 0x3e200 :
14642 					  TG3_NVRAM_SIZE_512KB);
14643 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14644 			tp->nvram_size = (protect ? 0x1f200 :
14645 					  TG3_NVRAM_SIZE_256KB);
14646 		else
14647 			tp->nvram_size = (protect ? 0x1f200 :
14648 					  TG3_NVRAM_SIZE_128KB);
14649 		break;
14650 	case FLASH_5752VENDOR_ST_M45PE10:
14651 	case FLASH_5752VENDOR_ST_M45PE20:
14652 	case FLASH_5752VENDOR_ST_M45PE40:
14653 		tp->nvram_jedecnum = JEDEC_ST;
14654 		tg3_flag_set(tp, NVRAM_BUFFERED);
14655 		tg3_flag_set(tp, FLASH);
14656 		tp->nvram_pagesize = 256;
14657 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14658 			tp->nvram_size = (protect ?
14659 					  TG3_NVRAM_SIZE_64KB :
14660 					  TG3_NVRAM_SIZE_128KB);
14661 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14662 			tp->nvram_size = (protect ?
14663 					  TG3_NVRAM_SIZE_64KB :
14664 					  TG3_NVRAM_SIZE_256KB);
14665 		else
14666 			tp->nvram_size = (protect ?
14667 					  TG3_NVRAM_SIZE_128KB :
14668 					  TG3_NVRAM_SIZE_512KB);
14669 		break;
14670 	}
14671 }
14672 
tg3_get_5787_nvram_info(struct tg3 * tp)14673 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14674 {
14675 	u32 nvcfg1;
14676 
14677 	nvcfg1 = tr32(NVRAM_CFG1);
14678 
14679 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14680 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14681 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14682 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14683 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14684 		tp->nvram_jedecnum = JEDEC_ATMEL;
14685 		tg3_flag_set(tp, NVRAM_BUFFERED);
14686 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14687 
14688 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14689 		tw32(NVRAM_CFG1, nvcfg1);
14690 		break;
14691 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14692 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14693 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14694 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14695 		tp->nvram_jedecnum = JEDEC_ATMEL;
14696 		tg3_flag_set(tp, NVRAM_BUFFERED);
14697 		tg3_flag_set(tp, FLASH);
14698 		tp->nvram_pagesize = 264;
14699 		break;
14700 	case FLASH_5752VENDOR_ST_M45PE10:
14701 	case FLASH_5752VENDOR_ST_M45PE20:
14702 	case FLASH_5752VENDOR_ST_M45PE40:
14703 		tp->nvram_jedecnum = JEDEC_ST;
14704 		tg3_flag_set(tp, NVRAM_BUFFERED);
14705 		tg3_flag_set(tp, FLASH);
14706 		tp->nvram_pagesize = 256;
14707 		break;
14708 	}
14709 }
14710 
tg3_get_5761_nvram_info(struct tg3 * tp)14711 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14712 {
14713 	u32 nvcfg1, protect = 0;
14714 
14715 	nvcfg1 = tr32(NVRAM_CFG1);
14716 
14717 	/* NVRAM protection for TPM */
14718 	if (nvcfg1 & (1 << 27)) {
14719 		tg3_flag_set(tp, PROTECTED_NVRAM);
14720 		protect = 1;
14721 	}
14722 
14723 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14724 	switch (nvcfg1) {
14725 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14726 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14727 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14728 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14729 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14730 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14731 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14732 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14733 		tp->nvram_jedecnum = JEDEC_ATMEL;
14734 		tg3_flag_set(tp, NVRAM_BUFFERED);
14735 		tg3_flag_set(tp, FLASH);
14736 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14737 		tp->nvram_pagesize = 256;
14738 		break;
14739 	case FLASH_5761VENDOR_ST_A_M45PE20:
14740 	case FLASH_5761VENDOR_ST_A_M45PE40:
14741 	case FLASH_5761VENDOR_ST_A_M45PE80:
14742 	case FLASH_5761VENDOR_ST_A_M45PE16:
14743 	case FLASH_5761VENDOR_ST_M_M45PE20:
14744 	case FLASH_5761VENDOR_ST_M_M45PE40:
14745 	case FLASH_5761VENDOR_ST_M_M45PE80:
14746 	case FLASH_5761VENDOR_ST_M_M45PE16:
14747 		tp->nvram_jedecnum = JEDEC_ST;
14748 		tg3_flag_set(tp, NVRAM_BUFFERED);
14749 		tg3_flag_set(tp, FLASH);
14750 		tp->nvram_pagesize = 256;
14751 		break;
14752 	}
14753 
14754 	if (protect) {
14755 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14756 	} else {
14757 		switch (nvcfg1) {
14758 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14759 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14760 		case FLASH_5761VENDOR_ST_A_M45PE16:
14761 		case FLASH_5761VENDOR_ST_M_M45PE16:
14762 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14763 			break;
14764 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14765 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14766 		case FLASH_5761VENDOR_ST_A_M45PE80:
14767 		case FLASH_5761VENDOR_ST_M_M45PE80:
14768 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14769 			break;
14770 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14771 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14772 		case FLASH_5761VENDOR_ST_A_M45PE40:
14773 		case FLASH_5761VENDOR_ST_M_M45PE40:
14774 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14775 			break;
14776 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14777 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14778 		case FLASH_5761VENDOR_ST_A_M45PE20:
14779 		case FLASH_5761VENDOR_ST_M_M45PE20:
14780 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14781 			break;
14782 		}
14783 	}
14784 }
14785 
tg3_get_5906_nvram_info(struct tg3 * tp)14786 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14787 {
14788 	tp->nvram_jedecnum = JEDEC_ATMEL;
14789 	tg3_flag_set(tp, NVRAM_BUFFERED);
14790 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14791 }
14792 
tg3_get_57780_nvram_info(struct tg3 * tp)14793 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14794 {
14795 	u32 nvcfg1;
14796 
14797 	nvcfg1 = tr32(NVRAM_CFG1);
14798 
14799 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14800 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14801 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14802 		tp->nvram_jedecnum = JEDEC_ATMEL;
14803 		tg3_flag_set(tp, NVRAM_BUFFERED);
14804 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14805 
14806 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14807 		tw32(NVRAM_CFG1, nvcfg1);
14808 		return;
14809 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14810 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14811 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14812 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14813 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14814 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14815 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14816 		tp->nvram_jedecnum = JEDEC_ATMEL;
14817 		tg3_flag_set(tp, NVRAM_BUFFERED);
14818 		tg3_flag_set(tp, FLASH);
14819 
14820 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14821 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14822 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14823 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14824 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14825 			break;
14826 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14827 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14828 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14829 			break;
14830 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14831 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14832 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14833 			break;
14834 		}
14835 		break;
14836 	case FLASH_5752VENDOR_ST_M45PE10:
14837 	case FLASH_5752VENDOR_ST_M45PE20:
14838 	case FLASH_5752VENDOR_ST_M45PE40:
14839 		tp->nvram_jedecnum = JEDEC_ST;
14840 		tg3_flag_set(tp, NVRAM_BUFFERED);
14841 		tg3_flag_set(tp, FLASH);
14842 
14843 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14844 		case FLASH_5752VENDOR_ST_M45PE10:
14845 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14846 			break;
14847 		case FLASH_5752VENDOR_ST_M45PE20:
14848 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14849 			break;
14850 		case FLASH_5752VENDOR_ST_M45PE40:
14851 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14852 			break;
14853 		}
14854 		break;
14855 	default:
14856 		tg3_flag_set(tp, NO_NVRAM);
14857 		return;
14858 	}
14859 
14860 	tg3_nvram_get_pagesize(tp, nvcfg1);
14861 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14862 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14863 }
14864 
14865 
tg3_get_5717_nvram_info(struct tg3 * tp)14866 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14867 {
14868 	u32 nvcfg1;
14869 
14870 	nvcfg1 = tr32(NVRAM_CFG1);
14871 
14872 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14873 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14874 	case FLASH_5717VENDOR_MICRO_EEPROM:
14875 		tp->nvram_jedecnum = JEDEC_ATMEL;
14876 		tg3_flag_set(tp, NVRAM_BUFFERED);
14877 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14878 
14879 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14880 		tw32(NVRAM_CFG1, nvcfg1);
14881 		return;
14882 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14883 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14884 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14885 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14886 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14887 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14888 	case FLASH_5717VENDOR_ATMEL_45USPT:
14889 		tp->nvram_jedecnum = JEDEC_ATMEL;
14890 		tg3_flag_set(tp, NVRAM_BUFFERED);
14891 		tg3_flag_set(tp, FLASH);
14892 
14893 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14894 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14895 			/* Detect size with tg3_nvram_get_size() */
14896 			break;
14897 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14898 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14899 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14900 			break;
14901 		default:
14902 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14903 			break;
14904 		}
14905 		break;
14906 	case FLASH_5717VENDOR_ST_M_M25PE10:
14907 	case FLASH_5717VENDOR_ST_A_M25PE10:
14908 	case FLASH_5717VENDOR_ST_M_M45PE10:
14909 	case FLASH_5717VENDOR_ST_A_M45PE10:
14910 	case FLASH_5717VENDOR_ST_M_M25PE20:
14911 	case FLASH_5717VENDOR_ST_A_M25PE20:
14912 	case FLASH_5717VENDOR_ST_M_M45PE20:
14913 	case FLASH_5717VENDOR_ST_A_M45PE20:
14914 	case FLASH_5717VENDOR_ST_25USPT:
14915 	case FLASH_5717VENDOR_ST_45USPT:
14916 		tp->nvram_jedecnum = JEDEC_ST;
14917 		tg3_flag_set(tp, NVRAM_BUFFERED);
14918 		tg3_flag_set(tp, FLASH);
14919 
14920 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14921 		case FLASH_5717VENDOR_ST_M_M25PE20:
14922 		case FLASH_5717VENDOR_ST_M_M45PE20:
14923 			/* Detect size with tg3_nvram_get_size() */
14924 			break;
14925 		case FLASH_5717VENDOR_ST_A_M25PE20:
14926 		case FLASH_5717VENDOR_ST_A_M45PE20:
14927 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14928 			break;
14929 		default:
14930 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14931 			break;
14932 		}
14933 		break;
14934 	default:
14935 		tg3_flag_set(tp, NO_NVRAM);
14936 		return;
14937 	}
14938 
14939 	tg3_nvram_get_pagesize(tp, nvcfg1);
14940 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14941 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14942 }
14943 
tg3_get_5720_nvram_info(struct tg3 * tp)14944 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14945 {
14946 	u32 nvcfg1, nvmpinstrp, nv_status;
14947 
14948 	nvcfg1 = tr32(NVRAM_CFG1);
14949 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14950 
14951 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14952 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14953 			tg3_flag_set(tp, NO_NVRAM);
14954 			return;
14955 		}
14956 
14957 		switch (nvmpinstrp) {
14958 		case FLASH_5762_MX25L_100:
14959 		case FLASH_5762_MX25L_200:
14960 		case FLASH_5762_MX25L_400:
14961 		case FLASH_5762_MX25L_800:
14962 		case FLASH_5762_MX25L_160_320:
14963 			tp->nvram_pagesize = 4096;
14964 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14965 			tg3_flag_set(tp, NVRAM_BUFFERED);
14966 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14967 			tg3_flag_set(tp, FLASH);
14968 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14969 			tp->nvram_size =
14970 				(1 << (nv_status >> AUTOSENSE_DEVID &
14971 						AUTOSENSE_DEVID_MASK)
14972 					<< AUTOSENSE_SIZE_IN_MB);
14973 			return;
14974 
14975 		case FLASH_5762_EEPROM_HD:
14976 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14977 			break;
14978 		case FLASH_5762_EEPROM_LD:
14979 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14980 			break;
14981 		case FLASH_5720VENDOR_M_ST_M45PE20:
14982 			/* This pinstrap supports multiple sizes, so force it
14983 			 * to read the actual size from location 0xf0.
14984 			 */
14985 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14986 			break;
14987 		}
14988 	}
14989 
14990 	switch (nvmpinstrp) {
14991 	case FLASH_5720_EEPROM_HD:
14992 	case FLASH_5720_EEPROM_LD:
14993 		tp->nvram_jedecnum = JEDEC_ATMEL;
14994 		tg3_flag_set(tp, NVRAM_BUFFERED);
14995 
14996 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14997 		tw32(NVRAM_CFG1, nvcfg1);
14998 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14999 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
15000 		else
15001 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
15002 		return;
15003 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
15004 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
15005 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
15006 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
15007 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
15008 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
15009 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
15010 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
15011 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
15012 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
15013 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
15014 	case FLASH_5720VENDOR_ATMEL_45USPT:
15015 		tp->nvram_jedecnum = JEDEC_ATMEL;
15016 		tg3_flag_set(tp, NVRAM_BUFFERED);
15017 		tg3_flag_set(tp, FLASH);
15018 
15019 		switch (nvmpinstrp) {
15020 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
15021 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
15022 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
15023 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15024 			break;
15025 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
15026 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
15027 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
15028 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15029 			break;
15030 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
15031 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
15032 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15033 			break;
15034 		default:
15035 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15036 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15037 			break;
15038 		}
15039 		break;
15040 	case FLASH_5720VENDOR_M_ST_M25PE10:
15041 	case FLASH_5720VENDOR_M_ST_M45PE10:
15042 	case FLASH_5720VENDOR_A_ST_M25PE10:
15043 	case FLASH_5720VENDOR_A_ST_M45PE10:
15044 	case FLASH_5720VENDOR_M_ST_M25PE20:
15045 	case FLASH_5720VENDOR_M_ST_M45PE20:
15046 	case FLASH_5720VENDOR_A_ST_M25PE20:
15047 	case FLASH_5720VENDOR_A_ST_M45PE20:
15048 	case FLASH_5720VENDOR_M_ST_M25PE40:
15049 	case FLASH_5720VENDOR_M_ST_M45PE40:
15050 	case FLASH_5720VENDOR_A_ST_M25PE40:
15051 	case FLASH_5720VENDOR_A_ST_M45PE40:
15052 	case FLASH_5720VENDOR_M_ST_M25PE80:
15053 	case FLASH_5720VENDOR_M_ST_M45PE80:
15054 	case FLASH_5720VENDOR_A_ST_M25PE80:
15055 	case FLASH_5720VENDOR_A_ST_M45PE80:
15056 	case FLASH_5720VENDOR_ST_25USPT:
15057 	case FLASH_5720VENDOR_ST_45USPT:
15058 		tp->nvram_jedecnum = JEDEC_ST;
15059 		tg3_flag_set(tp, NVRAM_BUFFERED);
15060 		tg3_flag_set(tp, FLASH);
15061 
15062 		switch (nvmpinstrp) {
15063 		case FLASH_5720VENDOR_M_ST_M25PE20:
15064 		case FLASH_5720VENDOR_M_ST_M45PE20:
15065 		case FLASH_5720VENDOR_A_ST_M25PE20:
15066 		case FLASH_5720VENDOR_A_ST_M45PE20:
15067 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15068 			break;
15069 		case FLASH_5720VENDOR_M_ST_M25PE40:
15070 		case FLASH_5720VENDOR_M_ST_M45PE40:
15071 		case FLASH_5720VENDOR_A_ST_M25PE40:
15072 		case FLASH_5720VENDOR_A_ST_M45PE40:
15073 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15074 			break;
15075 		case FLASH_5720VENDOR_M_ST_M25PE80:
15076 		case FLASH_5720VENDOR_M_ST_M45PE80:
15077 		case FLASH_5720VENDOR_A_ST_M25PE80:
15078 		case FLASH_5720VENDOR_A_ST_M45PE80:
15079 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15080 			break;
15081 		default:
15082 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15083 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15084 			break;
15085 		}
15086 		break;
15087 	default:
15088 		tg3_flag_set(tp, NO_NVRAM);
15089 		return;
15090 	}
15091 
15092 	tg3_nvram_get_pagesize(tp, nvcfg1);
15093 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15094 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15095 
15096 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15097 		u32 val;
15098 
15099 		if (tg3_nvram_read(tp, 0, &val))
15100 			return;
15101 
15102 		if (val != TG3_EEPROM_MAGIC &&
15103 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15104 			tg3_flag_set(tp, NO_NVRAM);
15105 	}
15106 }
15107 
15108 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
tg3_nvram_init(struct tg3 * tp)15109 static void tg3_nvram_init(struct tg3 *tp)
15110 {
15111 	if (tg3_flag(tp, IS_SSB_CORE)) {
15112 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15113 		tg3_flag_clear(tp, NVRAM);
15114 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15115 		tg3_flag_set(tp, NO_NVRAM);
15116 		return;
15117 	}
15118 
15119 	tw32_f(GRC_EEPROM_ADDR,
15120 	     (EEPROM_ADDR_FSM_RESET |
15121 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
15122 	       EEPROM_ADDR_CLKPERD_SHIFT)));
15123 
15124 	msleep(1);
15125 
15126 	/* Enable seeprom accesses. */
15127 	tw32_f(GRC_LOCAL_CTRL,
15128 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15129 	udelay(100);
15130 
15131 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15132 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15133 		tg3_flag_set(tp, NVRAM);
15134 
15135 		if (tg3_nvram_lock(tp)) {
15136 			netdev_warn(tp->dev,
15137 				    "Cannot get nvram lock, %s failed\n",
15138 				    __func__);
15139 			return;
15140 		}
15141 		tg3_enable_nvram_access(tp);
15142 
15143 		tp->nvram_size = 0;
15144 
15145 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15146 			tg3_get_5752_nvram_info(tp);
15147 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15148 			tg3_get_5755_nvram_info(tp);
15149 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15150 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15151 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15152 			tg3_get_5787_nvram_info(tp);
15153 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15154 			tg3_get_5761_nvram_info(tp);
15155 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15156 			tg3_get_5906_nvram_info(tp);
15157 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15158 			 tg3_flag(tp, 57765_CLASS))
15159 			tg3_get_57780_nvram_info(tp);
15160 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15161 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15162 			tg3_get_5717_nvram_info(tp);
15163 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15164 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15165 			tg3_get_5720_nvram_info(tp);
15166 		else
15167 			tg3_get_nvram_info(tp);
15168 
15169 		if (tp->nvram_size == 0)
15170 			tg3_get_nvram_size(tp);
15171 
15172 		tg3_disable_nvram_access(tp);
15173 		tg3_nvram_unlock(tp);
15174 
15175 	} else {
15176 		tg3_flag_clear(tp, NVRAM);
15177 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15178 
15179 		tg3_get_eeprom_size(tp);
15180 	}
15181 }
15182 
15183 struct subsys_tbl_ent {
15184 	u16 subsys_vendor, subsys_devid;
15185 	u32 phy_id;
15186 };
15187 
15188 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15189 	/* Broadcom boards. */
15190 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15191 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15192 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15193 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15194 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15195 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15196 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15197 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15198 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15199 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15200 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15201 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15202 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15203 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15204 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15205 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15206 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15207 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15208 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15209 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15210 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15211 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15212 
15213 	/* 3com boards. */
15214 	{ TG3PCI_SUBVENDOR_ID_3COM,
15215 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15216 	{ TG3PCI_SUBVENDOR_ID_3COM,
15217 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15218 	{ TG3PCI_SUBVENDOR_ID_3COM,
15219 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15220 	{ TG3PCI_SUBVENDOR_ID_3COM,
15221 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15222 	{ TG3PCI_SUBVENDOR_ID_3COM,
15223 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15224 
15225 	/* DELL boards. */
15226 	{ TG3PCI_SUBVENDOR_ID_DELL,
15227 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15228 	{ TG3PCI_SUBVENDOR_ID_DELL,
15229 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15230 	{ TG3PCI_SUBVENDOR_ID_DELL,
15231 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15232 	{ TG3PCI_SUBVENDOR_ID_DELL,
15233 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15234 
15235 	/* Compaq boards. */
15236 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15237 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15238 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15239 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15240 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15241 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15242 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15243 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15244 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15245 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15246 
15247 	/* IBM boards. */
15248 	{ TG3PCI_SUBVENDOR_ID_IBM,
15249 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15250 };
15251 
tg3_lookup_by_subsys(struct tg3 * tp)15252 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15253 {
15254 	int i;
15255 
15256 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15257 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15258 		     tp->pdev->subsystem_vendor) &&
15259 		    (subsys_id_to_phy_id[i].subsys_devid ==
15260 		     tp->pdev->subsystem_device))
15261 			return &subsys_id_to_phy_id[i];
15262 	}
15263 	return NULL;
15264 }
15265 
tg3_get_eeprom_hw_cfg(struct tg3 * tp)15266 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15267 {
15268 	u32 val;
15269 
15270 	tp->phy_id = TG3_PHY_ID_INVALID;
15271 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15272 
15273 	/* Assume an onboard device and WOL capable by default.  */
15274 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15275 	tg3_flag_set(tp, WOL_CAP);
15276 
15277 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15278 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15279 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15280 			tg3_flag_set(tp, IS_NIC);
15281 		}
15282 		val = tr32(VCPU_CFGSHDW);
15283 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15284 			tg3_flag_set(tp, ASPM_WORKAROUND);
15285 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15286 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15287 			tg3_flag_set(tp, WOL_ENABLE);
15288 			device_set_wakeup_enable(&tp->pdev->dev, true);
15289 		}
15290 		goto done;
15291 	}
15292 
15293 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15294 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15295 		u32 nic_cfg, led_cfg;
15296 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15297 		u32 nic_phy_id, ver, eeprom_phy_id;
15298 		int eeprom_phy_serdes = 0;
15299 
15300 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15301 		tp->nic_sram_data_cfg = nic_cfg;
15302 
15303 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15304 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15305 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15306 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15307 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15308 		    (ver > 0) && (ver < 0x100))
15309 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15310 
15311 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15312 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15313 
15314 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15315 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15316 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15317 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15318 
15319 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15320 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15321 			eeprom_phy_serdes = 1;
15322 
15323 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15324 		if (nic_phy_id != 0) {
15325 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15326 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15327 
15328 			eeprom_phy_id  = (id1 >> 16) << 10;
15329 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15330 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15331 		} else
15332 			eeprom_phy_id = 0;
15333 
15334 		tp->phy_id = eeprom_phy_id;
15335 		if (eeprom_phy_serdes) {
15336 			if (!tg3_flag(tp, 5705_PLUS))
15337 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15338 			else
15339 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15340 		}
15341 
15342 		if (tg3_flag(tp, 5750_PLUS))
15343 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15344 				    SHASTA_EXT_LED_MODE_MASK);
15345 		else
15346 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15347 
15348 		switch (led_cfg) {
15349 		default:
15350 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15351 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15352 			break;
15353 
15354 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15355 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15356 			break;
15357 
15358 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15359 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15360 
15361 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15362 			 * read on some older 5700/5701 bootcode.
15363 			 */
15364 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15365 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15366 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15367 
15368 			break;
15369 
15370 		case SHASTA_EXT_LED_SHARED:
15371 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15372 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15373 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15374 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15375 						 LED_CTRL_MODE_PHY_2);
15376 
15377 			if (tg3_flag(tp, 5717_PLUS) ||
15378 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15379 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15380 						LED_CTRL_BLINK_RATE_MASK;
15381 
15382 			break;
15383 
15384 		case SHASTA_EXT_LED_MAC:
15385 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15386 			break;
15387 
15388 		case SHASTA_EXT_LED_COMBO:
15389 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15390 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15391 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15392 						 LED_CTRL_MODE_PHY_2);
15393 			break;
15394 
15395 		}
15396 
15397 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15398 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15399 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15400 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15401 
15402 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15403 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15404 
15405 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15406 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15407 			if ((tp->pdev->subsystem_vendor ==
15408 			     PCI_VENDOR_ID_ARIMA) &&
15409 			    (tp->pdev->subsystem_device == 0x205a ||
15410 			     tp->pdev->subsystem_device == 0x2063))
15411 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15412 		} else {
15413 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15414 			tg3_flag_set(tp, IS_NIC);
15415 		}
15416 
15417 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15418 			tg3_flag_set(tp, ENABLE_ASF);
15419 			if (tg3_flag(tp, 5750_PLUS))
15420 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15421 		}
15422 
15423 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15424 		    tg3_flag(tp, 5750_PLUS))
15425 			tg3_flag_set(tp, ENABLE_APE);
15426 
15427 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15428 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15429 			tg3_flag_clear(tp, WOL_CAP);
15430 
15431 		if (tg3_flag(tp, WOL_CAP) &&
15432 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15433 			tg3_flag_set(tp, WOL_ENABLE);
15434 			device_set_wakeup_enable(&tp->pdev->dev, true);
15435 		}
15436 
15437 		if (cfg2 & (1 << 17))
15438 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15439 
15440 		/* serdes signal pre-emphasis in register 0x590 set by */
15441 		/* bootcode if bit 18 is set */
15442 		if (cfg2 & (1 << 18))
15443 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15444 
15445 		if ((tg3_flag(tp, 57765_PLUS) ||
15446 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15447 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15448 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15449 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15450 
15451 		if (tg3_flag(tp, PCI_EXPRESS)) {
15452 			u32 cfg3;
15453 
15454 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15455 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15456 			    !tg3_flag(tp, 57765_PLUS) &&
15457 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15458 				tg3_flag_set(tp, ASPM_WORKAROUND);
15459 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15460 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15461 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15462 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15463 		}
15464 
15465 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15466 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15467 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15468 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15469 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15470 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15471 
15472 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15473 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15474 	}
15475 done:
15476 	if (tg3_flag(tp, WOL_CAP))
15477 		device_set_wakeup_enable(&tp->pdev->dev,
15478 					 tg3_flag(tp, WOL_ENABLE));
15479 	else
15480 		device_set_wakeup_capable(&tp->pdev->dev, false);
15481 }
15482 
tg3_ape_otp_read(struct tg3 * tp,u32 offset,u32 * val)15483 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15484 {
15485 	int i, err;
15486 	u32 val2, off = offset * 8;
15487 
15488 	err = tg3_nvram_lock(tp);
15489 	if (err)
15490 		return err;
15491 
15492 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15493 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15494 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15495 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15496 	udelay(10);
15497 
15498 	for (i = 0; i < 100; i++) {
15499 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15500 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15501 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15502 			break;
15503 		}
15504 		udelay(10);
15505 	}
15506 
15507 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15508 
15509 	tg3_nvram_unlock(tp);
15510 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15511 		return 0;
15512 
15513 	return -EBUSY;
15514 }
15515 
tg3_issue_otp_command(struct tg3 * tp,u32 cmd)15516 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15517 {
15518 	int i;
15519 	u32 val;
15520 
15521 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15522 	tw32(OTP_CTRL, cmd);
15523 
15524 	/* Wait for up to 1 ms for command to execute. */
15525 	for (i = 0; i < 100; i++) {
15526 		val = tr32(OTP_STATUS);
15527 		if (val & OTP_STATUS_CMD_DONE)
15528 			break;
15529 		udelay(10);
15530 	}
15531 
15532 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15533 }
15534 
15535 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15536  * configuration is a 32-bit value that straddles the alignment boundary.
15537  * We do two 32-bit reads and then shift and merge the results.
15538  */
tg3_read_otp_phycfg(struct tg3 * tp)15539 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15540 {
15541 	u32 bhalf_otp, thalf_otp;
15542 
15543 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15544 
15545 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15546 		return 0;
15547 
15548 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15549 
15550 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15551 		return 0;
15552 
15553 	thalf_otp = tr32(OTP_READ_DATA);
15554 
15555 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15556 
15557 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15558 		return 0;
15559 
15560 	bhalf_otp = tr32(OTP_READ_DATA);
15561 
15562 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15563 }
15564 
tg3_phy_init_link_config(struct tg3 * tp)15565 static void tg3_phy_init_link_config(struct tg3 *tp)
15566 {
15567 	u32 adv = ADVERTISED_Autoneg;
15568 
15569 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15570 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15571 			adv |= ADVERTISED_1000baseT_Half;
15572 		adv |= ADVERTISED_1000baseT_Full;
15573 	}
15574 
15575 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15576 		adv |= ADVERTISED_100baseT_Half |
15577 		       ADVERTISED_100baseT_Full |
15578 		       ADVERTISED_10baseT_Half |
15579 		       ADVERTISED_10baseT_Full |
15580 		       ADVERTISED_TP;
15581 	else
15582 		adv |= ADVERTISED_FIBRE;
15583 
15584 	tp->link_config.advertising = adv;
15585 	tp->link_config.speed = SPEED_UNKNOWN;
15586 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15587 	tp->link_config.autoneg = AUTONEG_ENABLE;
15588 	tp->link_config.active_speed = SPEED_UNKNOWN;
15589 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15590 
15591 	tp->old_link = -1;
15592 }
15593 
tg3_phy_probe(struct tg3 * tp)15594 static int tg3_phy_probe(struct tg3 *tp)
15595 {
15596 	u32 hw_phy_id_1, hw_phy_id_2;
15597 	u32 hw_phy_id, hw_phy_id_masked;
15598 	int err;
15599 
15600 	/* flow control autonegotiation is default behavior */
15601 	tg3_flag_set(tp, PAUSE_AUTONEG);
15602 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15603 
15604 	if (tg3_flag(tp, ENABLE_APE)) {
15605 		switch (tp->pci_fn) {
15606 		case 0:
15607 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15608 			break;
15609 		case 1:
15610 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15611 			break;
15612 		case 2:
15613 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15614 			break;
15615 		case 3:
15616 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15617 			break;
15618 		}
15619 	}
15620 
15621 	if (!tg3_flag(tp, ENABLE_ASF) &&
15622 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15623 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15624 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15625 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15626 
15627 	if (tg3_flag(tp, USE_PHYLIB))
15628 		return tg3_phy_init(tp);
15629 
15630 	/* Reading the PHY ID register can conflict with ASF
15631 	 * firmware access to the PHY hardware.
15632 	 */
15633 	err = 0;
15634 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15635 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15636 	} else {
15637 		/* Now read the physical PHY_ID from the chip and verify
15638 		 * that it is sane.  If it doesn't look good, we fall back
15639 		 * to either the hard-coded table based PHY_ID and failing
15640 		 * that the value found in the eeprom area.
15641 		 */
15642 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15643 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15644 
15645 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15646 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15647 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15648 
15649 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15650 	}
15651 
15652 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15653 		tp->phy_id = hw_phy_id;
15654 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15655 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15656 		else
15657 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15658 	} else {
15659 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15660 			/* Do nothing, phy ID already set up in
15661 			 * tg3_get_eeprom_hw_cfg().
15662 			 */
15663 		} else {
15664 			struct subsys_tbl_ent *p;
15665 
15666 			/* No eeprom signature?  Try the hardcoded
15667 			 * subsys device table.
15668 			 */
15669 			p = tg3_lookup_by_subsys(tp);
15670 			if (p) {
15671 				tp->phy_id = p->phy_id;
15672 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15673 				/* For now we saw the IDs 0xbc050cd0,
15674 				 * 0xbc050f80 and 0xbc050c30 on devices
15675 				 * connected to an BCM4785 and there are
15676 				 * probably more. Just assume that the phy is
15677 				 * supported when it is connected to a SSB core
15678 				 * for now.
15679 				 */
15680 				return -ENODEV;
15681 			}
15682 
15683 			if (!tp->phy_id ||
15684 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15685 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15686 		}
15687 	}
15688 
15689 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15690 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15691 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15692 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15693 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15694 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15695 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15696 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15697 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15698 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15699 
15700 		linkmode_zero(tp->eee.supported);
15701 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15702 				 tp->eee.supported);
15703 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15704 				 tp->eee.supported);
15705 		linkmode_copy(tp->eee.advertised, tp->eee.supported);
15706 
15707 		tp->eee.eee_enabled = 1;
15708 		tp->eee.tx_lpi_enabled = 1;
15709 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15710 	}
15711 
15712 	tg3_phy_init_link_config(tp);
15713 
15714 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15715 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15716 	    !tg3_flag(tp, ENABLE_APE) &&
15717 	    !tg3_flag(tp, ENABLE_ASF)) {
15718 		u32 bmsr, dummy;
15719 
15720 		tg3_readphy(tp, MII_BMSR, &bmsr);
15721 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15722 		    (bmsr & BMSR_LSTATUS))
15723 			goto skip_phy_reset;
15724 
15725 		err = tg3_phy_reset(tp);
15726 		if (err)
15727 			return err;
15728 
15729 		tg3_phy_set_wirespeed(tp);
15730 
15731 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15732 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15733 					    tp->link_config.flowctrl);
15734 
15735 			tg3_writephy(tp, MII_BMCR,
15736 				     BMCR_ANENABLE | BMCR_ANRESTART);
15737 		}
15738 	}
15739 
15740 skip_phy_reset:
15741 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15742 		err = tg3_init_5401phy_dsp(tp);
15743 		if (err)
15744 			return err;
15745 
15746 		err = tg3_init_5401phy_dsp(tp);
15747 	}
15748 
15749 	return err;
15750 }
15751 
tg3_read_vpd(struct tg3 * tp)15752 static void tg3_read_vpd(struct tg3 *tp)
15753 {
15754 	u8 *vpd_data;
15755 	unsigned int len, vpdlen;
15756 	int i;
15757 
15758 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15759 	if (!vpd_data)
15760 		goto out_no_vpd;
15761 
15762 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15763 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15764 	if (i < 0)
15765 		goto partno;
15766 
15767 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15768 		goto partno;
15769 
15770 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15771 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15772 	if (i < 0)
15773 		goto partno;
15774 
15775 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15776 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15777 
15778 partno:
15779 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15780 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15781 	if (i < 0)
15782 		goto out_not_found;
15783 
15784 	if (len > TG3_BPN_SIZE)
15785 		goto out_not_found;
15786 
15787 	memcpy(tp->board_part_number, &vpd_data[i], len);
15788 
15789 out_not_found:
15790 	kfree(vpd_data);
15791 	if (tp->board_part_number[0])
15792 		return;
15793 
15794 out_no_vpd:
15795 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15796 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15797 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15798 			strcpy(tp->board_part_number, "BCM5717");
15799 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15800 			strcpy(tp->board_part_number, "BCM5718");
15801 		else
15802 			goto nomatch;
15803 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15804 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15805 			strcpy(tp->board_part_number, "BCM57780");
15806 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15807 			strcpy(tp->board_part_number, "BCM57760");
15808 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15809 			strcpy(tp->board_part_number, "BCM57790");
15810 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15811 			strcpy(tp->board_part_number, "BCM57788");
15812 		else
15813 			goto nomatch;
15814 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15815 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15816 			strcpy(tp->board_part_number, "BCM57761");
15817 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15818 			strcpy(tp->board_part_number, "BCM57765");
15819 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15820 			strcpy(tp->board_part_number, "BCM57781");
15821 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15822 			strcpy(tp->board_part_number, "BCM57785");
15823 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15824 			strcpy(tp->board_part_number, "BCM57791");
15825 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15826 			strcpy(tp->board_part_number, "BCM57795");
15827 		else
15828 			goto nomatch;
15829 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15830 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15831 			strcpy(tp->board_part_number, "BCM57762");
15832 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15833 			strcpy(tp->board_part_number, "BCM57766");
15834 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15835 			strcpy(tp->board_part_number, "BCM57782");
15836 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15837 			strcpy(tp->board_part_number, "BCM57786");
15838 		else
15839 			goto nomatch;
15840 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15841 		strcpy(tp->board_part_number, "BCM95906");
15842 	} else {
15843 nomatch:
15844 		strcpy(tp->board_part_number, "none");
15845 	}
15846 }
15847 
tg3_fw_img_is_valid(struct tg3 * tp,u32 offset)15848 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15849 {
15850 	u32 val;
15851 
15852 	if (tg3_nvram_read(tp, offset, &val) ||
15853 	    (val & 0xfc000000) != 0x0c000000 ||
15854 	    tg3_nvram_read(tp, offset + 4, &val) ||
15855 	    val != 0)
15856 		return 0;
15857 
15858 	return 1;
15859 }
15860 
tg3_read_bc_ver(struct tg3 * tp)15861 static void tg3_read_bc_ver(struct tg3 *tp)
15862 {
15863 	u32 val, offset, start, ver_offset;
15864 	int i, dst_off;
15865 	bool newver = false;
15866 
15867 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15868 	    tg3_nvram_read(tp, 0x4, &start))
15869 		return;
15870 
15871 	offset = tg3_nvram_logical_addr(tp, offset);
15872 
15873 	if (tg3_nvram_read(tp, offset, &val))
15874 		return;
15875 
15876 	if ((val & 0xfc000000) == 0x0c000000) {
15877 		if (tg3_nvram_read(tp, offset + 4, &val))
15878 			return;
15879 
15880 		if (val == 0)
15881 			newver = true;
15882 	}
15883 
15884 	dst_off = strlen(tp->fw_ver);
15885 
15886 	if (newver) {
15887 		if (TG3_VER_SIZE - dst_off < 16 ||
15888 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15889 			return;
15890 
15891 		offset = offset + ver_offset - start;
15892 		for (i = 0; i < 16; i += 4) {
15893 			__be32 v;
15894 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15895 				return;
15896 
15897 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15898 		}
15899 	} else {
15900 		u32 major, minor;
15901 
15902 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15903 			return;
15904 
15905 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15906 			TG3_NVM_BCVER_MAJSFT;
15907 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15908 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15909 			 "v%d.%02d", major, minor);
15910 	}
15911 }
15912 
tg3_read_hwsb_ver(struct tg3 * tp)15913 static void tg3_read_hwsb_ver(struct tg3 *tp)
15914 {
15915 	u32 val, major, minor;
15916 
15917 	/* Use native endian representation */
15918 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15919 		return;
15920 
15921 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15922 		TG3_NVM_HWSB_CFG1_MAJSFT;
15923 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15924 		TG3_NVM_HWSB_CFG1_MINSFT;
15925 
15926 	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15927 }
15928 
tg3_read_sb_ver(struct tg3 * tp,u32 val)15929 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15930 {
15931 	u32 offset, major, minor, build;
15932 
15933 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15934 
15935 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15936 		return;
15937 
15938 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15939 	case TG3_EEPROM_SB_REVISION_0:
15940 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15941 		break;
15942 	case TG3_EEPROM_SB_REVISION_2:
15943 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15944 		break;
15945 	case TG3_EEPROM_SB_REVISION_3:
15946 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15947 		break;
15948 	case TG3_EEPROM_SB_REVISION_4:
15949 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15950 		break;
15951 	case TG3_EEPROM_SB_REVISION_5:
15952 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15953 		break;
15954 	case TG3_EEPROM_SB_REVISION_6:
15955 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15956 		break;
15957 	default:
15958 		return;
15959 	}
15960 
15961 	if (tg3_nvram_read(tp, offset, &val))
15962 		return;
15963 
15964 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15965 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15966 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15967 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15968 	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15969 
15970 	if (minor > 99 || build > 26)
15971 		return;
15972 
15973 	offset = strlen(tp->fw_ver);
15974 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15975 		 " v%d.%02d", major, minor);
15976 
15977 	if (build > 0) {
15978 		offset = strlen(tp->fw_ver);
15979 		if (offset < TG3_VER_SIZE - 1)
15980 			tp->fw_ver[offset] = 'a' + build - 1;
15981 	}
15982 }
15983 
tg3_read_mgmtfw_ver(struct tg3 * tp)15984 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15985 {
15986 	u32 val, offset, start;
15987 	int i, vlen;
15988 
15989 	for (offset = TG3_NVM_DIR_START;
15990 	     offset < TG3_NVM_DIR_END;
15991 	     offset += TG3_NVM_DIRENT_SIZE) {
15992 		if (tg3_nvram_read(tp, offset, &val))
15993 			return;
15994 
15995 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15996 			break;
15997 	}
15998 
15999 	if (offset == TG3_NVM_DIR_END)
16000 		return;
16001 
16002 	if (!tg3_flag(tp, 5705_PLUS))
16003 		start = 0x08000000;
16004 	else if (tg3_nvram_read(tp, offset - 4, &start))
16005 		return;
16006 
16007 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
16008 	    !tg3_fw_img_is_valid(tp, offset) ||
16009 	    tg3_nvram_read(tp, offset + 8, &val))
16010 		return;
16011 
16012 	offset += val - start;
16013 
16014 	vlen = strlen(tp->fw_ver);
16015 
16016 	tp->fw_ver[vlen++] = ',';
16017 	tp->fw_ver[vlen++] = ' ';
16018 
16019 	for (i = 0; i < 4; i++) {
16020 		__be32 v;
16021 		if (tg3_nvram_read_be32(tp, offset, &v))
16022 			return;
16023 
16024 		offset += sizeof(v);
16025 
16026 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
16027 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
16028 			break;
16029 		}
16030 
16031 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
16032 		vlen += sizeof(v);
16033 	}
16034 }
16035 
tg3_probe_ncsi(struct tg3 * tp)16036 static void tg3_probe_ncsi(struct tg3 *tp)
16037 {
16038 	u32 apedata;
16039 
16040 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
16041 	if (apedata != APE_SEG_SIG_MAGIC)
16042 		return;
16043 
16044 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
16045 	if (!(apedata & APE_FW_STATUS_READY))
16046 		return;
16047 
16048 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16049 		tg3_flag_set(tp, APE_HAS_NCSI);
16050 }
16051 
tg3_read_dash_ver(struct tg3 * tp)16052 static void tg3_read_dash_ver(struct tg3 *tp)
16053 {
16054 	int vlen;
16055 	u32 apedata;
16056 	char *fwtype;
16057 
16058 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16059 
16060 	if (tg3_flag(tp, APE_HAS_NCSI))
16061 		fwtype = "NCSI";
16062 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16063 		fwtype = "SMASH";
16064 	else
16065 		fwtype = "DASH";
16066 
16067 	vlen = strlen(tp->fw_ver);
16068 
16069 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16070 		 fwtype,
16071 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16072 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16073 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16074 		 (apedata & APE_FW_VERSION_BLDMSK));
16075 }
16076 
tg3_read_otp_ver(struct tg3 * tp)16077 static void tg3_read_otp_ver(struct tg3 *tp)
16078 {
16079 	u32 val, val2;
16080 
16081 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
16082 		return;
16083 
16084 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16085 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16086 	    TG3_OTP_MAGIC0_VALID(val)) {
16087 		u64 val64 = (u64) val << 32 | val2;
16088 		u32 ver = 0;
16089 		int i, vlen;
16090 
16091 		for (i = 0; i < 7; i++) {
16092 			if ((val64 & 0xff) == 0)
16093 				break;
16094 			ver = val64 & 0xff;
16095 			val64 >>= 8;
16096 		}
16097 		vlen = strlen(tp->fw_ver);
16098 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16099 	}
16100 }
16101 
tg3_read_fw_ver(struct tg3 * tp)16102 static void tg3_read_fw_ver(struct tg3 *tp)
16103 {
16104 	u32 val;
16105 	bool vpd_vers = false;
16106 
16107 	if (tp->fw_ver[0] != 0)
16108 		vpd_vers = true;
16109 
16110 	if (tg3_flag(tp, NO_NVRAM)) {
16111 		strcat(tp->fw_ver, "sb");
16112 		tg3_read_otp_ver(tp);
16113 		return;
16114 	}
16115 
16116 	if (tg3_nvram_read(tp, 0, &val))
16117 		return;
16118 
16119 	if (val == TG3_EEPROM_MAGIC)
16120 		tg3_read_bc_ver(tp);
16121 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16122 		tg3_read_sb_ver(tp, val);
16123 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16124 		tg3_read_hwsb_ver(tp);
16125 
16126 	if (tg3_flag(tp, ENABLE_ASF)) {
16127 		if (tg3_flag(tp, ENABLE_APE)) {
16128 			tg3_probe_ncsi(tp);
16129 			if (!vpd_vers)
16130 				tg3_read_dash_ver(tp);
16131 		} else if (!vpd_vers) {
16132 			tg3_read_mgmtfw_ver(tp);
16133 		}
16134 	}
16135 
16136 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16137 }
16138 
tg3_rx_ret_ring_size(struct tg3 * tp)16139 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16140 {
16141 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16142 		return TG3_RX_RET_MAX_SIZE_5717;
16143 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16144 		return TG3_RX_RET_MAX_SIZE_5700;
16145 	else
16146 		return TG3_RX_RET_MAX_SIZE_5705;
16147 }
16148 
16149 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16150 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16151 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16152 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16153 	{ },
16154 };
16155 
tg3_find_peer(struct tg3 * tp)16156 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16157 {
16158 	struct pci_dev *peer;
16159 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16160 
16161 	for (func = 0; func < 8; func++) {
16162 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16163 		if (peer && peer != tp->pdev)
16164 			break;
16165 		pci_dev_put(peer);
16166 	}
16167 	/* 5704 can be configured in single-port mode, set peer to
16168 	 * tp->pdev in that case.
16169 	 */
16170 	if (!peer) {
16171 		peer = tp->pdev;
16172 		return peer;
16173 	}
16174 
16175 	/*
16176 	 * We don't need to keep the refcount elevated; there's no way
16177 	 * to remove one half of this device without removing the other
16178 	 */
16179 	pci_dev_put(peer);
16180 
16181 	return peer;
16182 }
16183 
tg3_detect_asic_rev(struct tg3 * tp,u32 misc_ctrl_reg)16184 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16185 {
16186 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16187 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16188 		u32 reg;
16189 
16190 		/* All devices that use the alternate
16191 		 * ASIC REV location have a CPMU.
16192 		 */
16193 		tg3_flag_set(tp, CPMU_PRESENT);
16194 
16195 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16196 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16197 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16198 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16199 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16200 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16201 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16202 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16203 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16204 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16205 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16206 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16207 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16208 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16209 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16210 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16211 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16212 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16213 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16214 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16215 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16216 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16217 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16218 		else
16219 			reg = TG3PCI_PRODID_ASICREV;
16220 
16221 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16222 	}
16223 
16224 	/* Wrong chip ID in 5752 A0. This code can be removed later
16225 	 * as A0 is not in production.
16226 	 */
16227 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16228 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16229 
16230 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16231 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16232 
16233 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16234 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16235 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16236 		tg3_flag_set(tp, 5717_PLUS);
16237 
16238 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16239 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16240 		tg3_flag_set(tp, 57765_CLASS);
16241 
16242 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16243 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16244 		tg3_flag_set(tp, 57765_PLUS);
16245 
16246 	/* Intentionally exclude ASIC_REV_5906 */
16247 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16248 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16249 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16250 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16251 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16252 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16253 	    tg3_flag(tp, 57765_PLUS))
16254 		tg3_flag_set(tp, 5755_PLUS);
16255 
16256 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16257 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16258 		tg3_flag_set(tp, 5780_CLASS);
16259 
16260 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16261 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16262 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16263 	    tg3_flag(tp, 5755_PLUS) ||
16264 	    tg3_flag(tp, 5780_CLASS))
16265 		tg3_flag_set(tp, 5750_PLUS);
16266 
16267 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16268 	    tg3_flag(tp, 5750_PLUS))
16269 		tg3_flag_set(tp, 5705_PLUS);
16270 }
16271 
tg3_10_100_only_device(struct tg3 * tp,const struct pci_device_id * ent)16272 static bool tg3_10_100_only_device(struct tg3 *tp,
16273 				   const struct pci_device_id *ent)
16274 {
16275 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16276 
16277 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16278 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16279 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16280 		return true;
16281 
16282 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16283 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16284 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16285 				return true;
16286 		} else {
16287 			return true;
16288 		}
16289 	}
16290 
16291 	return false;
16292 }
16293 
tg3_get_invariants(struct tg3 * tp,const struct pci_device_id * ent)16294 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16295 {
16296 	u32 misc_ctrl_reg;
16297 	u32 pci_state_reg, grc_misc_cfg;
16298 	u32 val;
16299 	u16 pci_cmd;
16300 	int err;
16301 
16302 	/* Force memory write invalidate off.  If we leave it on,
16303 	 * then on 5700_BX chips we have to enable a workaround.
16304 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16305 	 * to match the cacheline size.  The Broadcom driver have this
16306 	 * workaround but turns MWI off all the times so never uses
16307 	 * it.  This seems to suggest that the workaround is insufficient.
16308 	 */
16309 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16310 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16311 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16312 
16313 	/* Important! -- Make sure register accesses are byteswapped
16314 	 * correctly.  Also, for those chips that require it, make
16315 	 * sure that indirect register accesses are enabled before
16316 	 * the first operation.
16317 	 */
16318 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16319 			      &misc_ctrl_reg);
16320 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16321 			       MISC_HOST_CTRL_CHIPREV);
16322 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16323 			       tp->misc_host_ctrl);
16324 
16325 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16326 
16327 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16328 	 * we need to disable memory and use config. cycles
16329 	 * only to access all registers. The 5702/03 chips
16330 	 * can mistakenly decode the special cycles from the
16331 	 * ICH chipsets as memory write cycles, causing corruption
16332 	 * of register and memory space. Only certain ICH bridges
16333 	 * will drive special cycles with non-zero data during the
16334 	 * address phase which can fall within the 5703's address
16335 	 * range. This is not an ICH bug as the PCI spec allows
16336 	 * non-zero address during special cycles. However, only
16337 	 * these ICH bridges are known to drive non-zero addresses
16338 	 * during special cycles.
16339 	 *
16340 	 * Since special cycles do not cross PCI bridges, we only
16341 	 * enable this workaround if the 5703 is on the secondary
16342 	 * bus of these ICH bridges.
16343 	 */
16344 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16345 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16346 		static struct tg3_dev_id {
16347 			u32	vendor;
16348 			u32	device;
16349 			u32	rev;
16350 		} ich_chipsets[] = {
16351 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16352 			  PCI_ANY_ID },
16353 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16354 			  PCI_ANY_ID },
16355 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16356 			  0xa },
16357 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16358 			  PCI_ANY_ID },
16359 			{ },
16360 		};
16361 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16362 		struct pci_dev *bridge = NULL;
16363 
16364 		while (pci_id->vendor != 0) {
16365 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16366 						bridge);
16367 			if (!bridge) {
16368 				pci_id++;
16369 				continue;
16370 			}
16371 			if (pci_id->rev != PCI_ANY_ID) {
16372 				if (bridge->revision > pci_id->rev)
16373 					continue;
16374 			}
16375 			if (bridge->subordinate &&
16376 			    (bridge->subordinate->number ==
16377 			     tp->pdev->bus->number)) {
16378 				tg3_flag_set(tp, ICH_WORKAROUND);
16379 				pci_dev_put(bridge);
16380 				break;
16381 			}
16382 		}
16383 	}
16384 
16385 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16386 		static struct tg3_dev_id {
16387 			u32	vendor;
16388 			u32	device;
16389 		} bridge_chipsets[] = {
16390 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16391 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16392 			{ },
16393 		};
16394 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16395 		struct pci_dev *bridge = NULL;
16396 
16397 		while (pci_id->vendor != 0) {
16398 			bridge = pci_get_device(pci_id->vendor,
16399 						pci_id->device,
16400 						bridge);
16401 			if (!bridge) {
16402 				pci_id++;
16403 				continue;
16404 			}
16405 			if (bridge->subordinate &&
16406 			    (bridge->subordinate->number <=
16407 			     tp->pdev->bus->number) &&
16408 			    (bridge->subordinate->busn_res.end >=
16409 			     tp->pdev->bus->number)) {
16410 				tg3_flag_set(tp, 5701_DMA_BUG);
16411 				pci_dev_put(bridge);
16412 				break;
16413 			}
16414 		}
16415 	}
16416 
16417 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16418 	 * DMA addresses > 40-bit. This bridge may have other additional
16419 	 * 57xx devices behind it in some 4-port NIC designs for example.
16420 	 * Any tg3 device found behind the bridge will also need the 40-bit
16421 	 * DMA workaround.
16422 	 */
16423 	if (tg3_flag(tp, 5780_CLASS)) {
16424 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16425 		tp->msi_cap = tp->pdev->msi_cap;
16426 	} else {
16427 		struct pci_dev *bridge = NULL;
16428 
16429 		do {
16430 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16431 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16432 						bridge);
16433 			if (bridge && bridge->subordinate &&
16434 			    (bridge->subordinate->number <=
16435 			     tp->pdev->bus->number) &&
16436 			    (bridge->subordinate->busn_res.end >=
16437 			     tp->pdev->bus->number)) {
16438 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16439 				pci_dev_put(bridge);
16440 				break;
16441 			}
16442 		} while (bridge);
16443 	}
16444 
16445 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16446 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16447 		tp->pdev_peer = tg3_find_peer(tp);
16448 
16449 	/* Determine TSO capabilities */
16450 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16451 		; /* Do nothing. HW bug. */
16452 	else if (tg3_flag(tp, 57765_PLUS))
16453 		tg3_flag_set(tp, HW_TSO_3);
16454 	else if (tg3_flag(tp, 5755_PLUS) ||
16455 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16456 		tg3_flag_set(tp, HW_TSO_2);
16457 	else if (tg3_flag(tp, 5750_PLUS)) {
16458 		tg3_flag_set(tp, HW_TSO_1);
16459 		tg3_flag_set(tp, TSO_BUG);
16460 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16461 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16462 			tg3_flag_clear(tp, TSO_BUG);
16463 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16464 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16465 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16466 		tg3_flag_set(tp, FW_TSO);
16467 		tg3_flag_set(tp, TSO_BUG);
16468 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16469 			tp->fw_needed = FIRMWARE_TG3TSO5;
16470 		else
16471 			tp->fw_needed = FIRMWARE_TG3TSO;
16472 	}
16473 
16474 	/* Selectively allow TSO based on operating conditions */
16475 	if (tg3_flag(tp, HW_TSO_1) ||
16476 	    tg3_flag(tp, HW_TSO_2) ||
16477 	    tg3_flag(tp, HW_TSO_3) ||
16478 	    tg3_flag(tp, FW_TSO)) {
16479 		/* For firmware TSO, assume ASF is disabled.
16480 		 * We'll disable TSO later if we discover ASF
16481 		 * is enabled in tg3_get_eeprom_hw_cfg().
16482 		 */
16483 		tg3_flag_set(tp, TSO_CAPABLE);
16484 	} else {
16485 		tg3_flag_clear(tp, TSO_CAPABLE);
16486 		tg3_flag_clear(tp, TSO_BUG);
16487 		tp->fw_needed = NULL;
16488 	}
16489 
16490 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16491 		tp->fw_needed = FIRMWARE_TG3;
16492 
16493 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16494 		tp->fw_needed = FIRMWARE_TG357766;
16495 
16496 	tp->irq_max = 1;
16497 
16498 	if (tg3_flag(tp, 5750_PLUS)) {
16499 		tg3_flag_set(tp, SUPPORT_MSI);
16500 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16501 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16502 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16503 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16504 		     tp->pdev_peer == tp->pdev))
16505 			tg3_flag_clear(tp, SUPPORT_MSI);
16506 
16507 		if (tg3_flag(tp, 5755_PLUS) ||
16508 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16509 			tg3_flag_set(tp, 1SHOT_MSI);
16510 		}
16511 
16512 		if (tg3_flag(tp, 57765_PLUS)) {
16513 			tg3_flag_set(tp, SUPPORT_MSIX);
16514 			tp->irq_max = TG3_IRQ_MAX_VECS;
16515 		}
16516 	}
16517 
16518 	tp->txq_max = 1;
16519 	tp->rxq_max = 1;
16520 	if (tp->irq_max > 1) {
16521 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16522 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16523 
16524 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16525 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16526 			tp->txq_max = tp->irq_max - 1;
16527 	}
16528 
16529 	if (tg3_flag(tp, 5755_PLUS) ||
16530 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16531 		tg3_flag_set(tp, SHORT_DMA_BUG);
16532 
16533 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16534 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16535 
16536 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16537 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16538 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16539 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16540 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16541 
16542 	if (tg3_flag(tp, 57765_PLUS) &&
16543 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16544 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16545 
16546 	if (!tg3_flag(tp, 5705_PLUS) ||
16547 	    tg3_flag(tp, 5780_CLASS) ||
16548 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16549 		tg3_flag_set(tp, JUMBO_CAPABLE);
16550 
16551 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16552 			      &pci_state_reg);
16553 
16554 	if (pci_is_pcie(tp->pdev)) {
16555 		u16 lnkctl;
16556 
16557 		tg3_flag_set(tp, PCI_EXPRESS);
16558 
16559 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16560 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16561 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16562 				tg3_flag_clear(tp, HW_TSO_2);
16563 				tg3_flag_clear(tp, TSO_CAPABLE);
16564 			}
16565 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16566 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16567 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16568 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16569 				tg3_flag_set(tp, CLKREQ_BUG);
16570 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16571 			tg3_flag_set(tp, L1PLLPD_EN);
16572 		}
16573 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16574 		/* BCM5785 devices are effectively PCIe devices, and should
16575 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16576 		 * section.
16577 		 */
16578 		tg3_flag_set(tp, PCI_EXPRESS);
16579 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16580 		   tg3_flag(tp, 5780_CLASS)) {
16581 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16582 		if (!tp->pcix_cap) {
16583 			dev_err(&tp->pdev->dev,
16584 				"Cannot find PCI-X capability, aborting\n");
16585 			return -EIO;
16586 		}
16587 
16588 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16589 			tg3_flag_set(tp, PCIX_MODE);
16590 	}
16591 
16592 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16593 	 * reordering to the mailbox registers done by the host
16594 	 * controller can cause major troubles.  We read back from
16595 	 * every mailbox register write to force the writes to be
16596 	 * posted to the chip in order.
16597 	 */
16598 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16599 	    !tg3_flag(tp, PCI_EXPRESS))
16600 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16601 
16602 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16603 			     &tp->pci_cacheline_sz);
16604 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16605 			     &tp->pci_lat_timer);
16606 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16607 	    tp->pci_lat_timer < 64) {
16608 		tp->pci_lat_timer = 64;
16609 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16610 				      tp->pci_lat_timer);
16611 	}
16612 
16613 	/* Important! -- It is critical that the PCI-X hw workaround
16614 	 * situation is decided before the first MMIO register access.
16615 	 */
16616 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16617 		/* 5700 BX chips need to have their TX producer index
16618 		 * mailboxes written twice to workaround a bug.
16619 		 */
16620 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16621 
16622 		/* If we are in PCI-X mode, enable register write workaround.
16623 		 *
16624 		 * The workaround is to use indirect register accesses
16625 		 * for all chip writes not to mailbox registers.
16626 		 */
16627 		if (tg3_flag(tp, PCIX_MODE)) {
16628 			u32 pm_reg;
16629 
16630 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16631 
16632 			/* The chip can have it's power management PCI config
16633 			 * space registers clobbered due to this bug.
16634 			 * So explicitly force the chip into D0 here.
16635 			 */
16636 			pci_read_config_dword(tp->pdev,
16637 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16638 					      &pm_reg);
16639 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16640 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16641 			pci_write_config_dword(tp->pdev,
16642 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16643 					       pm_reg);
16644 
16645 			/* Also, force SERR#/PERR# in PCI command. */
16646 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16647 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16648 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16649 		}
16650 	}
16651 
16652 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16653 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16654 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16655 		tg3_flag_set(tp, PCI_32BIT);
16656 
16657 	/* Chip-specific fixup from Broadcom driver */
16658 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16659 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16660 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16661 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16662 	}
16663 
16664 	/* Default fast path register access methods */
16665 	tp->read32 = tg3_read32;
16666 	tp->write32 = tg3_write32;
16667 	tp->read32_mbox = tg3_read32;
16668 	tp->write32_mbox = tg3_write32;
16669 	tp->write32_tx_mbox = tg3_write32;
16670 	tp->write32_rx_mbox = tg3_write32;
16671 
16672 	/* Various workaround register access methods */
16673 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16674 		tp->write32 = tg3_write_indirect_reg32;
16675 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16676 		 (tg3_flag(tp, PCI_EXPRESS) &&
16677 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16678 		/*
16679 		 * Back to back register writes can cause problems on these
16680 		 * chips, the workaround is to read back all reg writes
16681 		 * except those to mailbox regs.
16682 		 *
16683 		 * See tg3_write_indirect_reg32().
16684 		 */
16685 		tp->write32 = tg3_write_flush_reg32;
16686 	}
16687 
16688 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16689 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16690 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16691 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16692 	}
16693 
16694 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16695 		tp->read32 = tg3_read_indirect_reg32;
16696 		tp->write32 = tg3_write_indirect_reg32;
16697 		tp->read32_mbox = tg3_read_indirect_mbox;
16698 		tp->write32_mbox = tg3_write_indirect_mbox;
16699 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16700 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16701 
16702 		iounmap(tp->regs);
16703 		tp->regs = NULL;
16704 
16705 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16706 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16707 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16708 	}
16709 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16710 		tp->read32_mbox = tg3_read32_mbox_5906;
16711 		tp->write32_mbox = tg3_write32_mbox_5906;
16712 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16713 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16714 	}
16715 
16716 	if (tp->write32 == tg3_write_indirect_reg32 ||
16717 	    (tg3_flag(tp, PCIX_MODE) &&
16718 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16719 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16720 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16721 
16722 	/* The memory arbiter has to be enabled in order for SRAM accesses
16723 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16724 	 * sure it is enabled, but other entities such as system netboot
16725 	 * code might disable it.
16726 	 */
16727 	val = tr32(MEMARB_MODE);
16728 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16729 
16730 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16731 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16732 	    tg3_flag(tp, 5780_CLASS)) {
16733 		if (tg3_flag(tp, PCIX_MODE)) {
16734 			pci_read_config_dword(tp->pdev,
16735 					      tp->pcix_cap + PCI_X_STATUS,
16736 					      &val);
16737 			tp->pci_fn = val & 0x7;
16738 		}
16739 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16740 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16741 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16742 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16743 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16744 			val = tr32(TG3_CPMU_STATUS);
16745 
16746 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16747 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16748 		else
16749 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16750 				     TG3_CPMU_STATUS_FSHFT_5719;
16751 	}
16752 
16753 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16754 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16755 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16756 	}
16757 
16758 	/* Get eeprom hw config before calling tg3_set_power_state().
16759 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16760 	 * determined before calling tg3_set_power_state() so that
16761 	 * we know whether or not to switch out of Vaux power.
16762 	 * When the flag is set, it means that GPIO1 is used for eeprom
16763 	 * write protect and also implies that it is a LOM where GPIOs
16764 	 * are not used to switch power.
16765 	 */
16766 	tg3_get_eeprom_hw_cfg(tp);
16767 
16768 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16769 		tg3_flag_clear(tp, TSO_CAPABLE);
16770 		tg3_flag_clear(tp, TSO_BUG);
16771 		tp->fw_needed = NULL;
16772 	}
16773 
16774 	if (tg3_flag(tp, ENABLE_APE)) {
16775 		/* Allow reads and writes to the
16776 		 * APE register and memory space.
16777 		 */
16778 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16779 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16780 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16781 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16782 				       pci_state_reg);
16783 
16784 		tg3_ape_lock_init(tp);
16785 		tp->ape_hb_interval =
16786 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16787 	}
16788 
16789 	/* Set up tp->grc_local_ctrl before calling
16790 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16791 	 * will bring 5700's external PHY out of reset.
16792 	 * It is also used as eeprom write protect on LOMs.
16793 	 */
16794 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16795 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16796 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16797 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16798 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16799 	/* Unused GPIO3 must be driven as output on 5752 because there
16800 	 * are no pull-up resistors on unused GPIO pins.
16801 	 */
16802 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16803 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16804 
16805 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16806 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16807 	    tg3_flag(tp, 57765_CLASS))
16808 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16809 
16810 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16811 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16812 		/* Turn off the debug UART. */
16813 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16814 		if (tg3_flag(tp, IS_NIC))
16815 			/* Keep VMain power. */
16816 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16817 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16818 	}
16819 
16820 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16821 		tp->grc_local_ctrl |=
16822 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16823 
16824 	/* Switch out of Vaux if it is a NIC */
16825 	tg3_pwrsrc_switch_to_vmain(tp);
16826 
16827 	/* Derive initial jumbo mode from MTU assigned in
16828 	 * ether_setup() via the alloc_etherdev() call
16829 	 */
16830 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16831 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16832 
16833 	/* Determine WakeOnLan speed to use. */
16834 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16835 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16836 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16837 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16838 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16839 	} else {
16840 		tg3_flag_set(tp, WOL_SPEED_100MB);
16841 	}
16842 
16843 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16844 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16845 
16846 	/* A few boards don't want Ethernet@WireSpeed phy feature */
16847 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16848 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16849 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16850 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16851 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16852 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16853 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16854 
16855 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16856 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16857 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16858 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16859 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16860 
16861 	if (tg3_flag(tp, 5705_PLUS) &&
16862 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16863 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16864 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16865 	    !tg3_flag(tp, 57765_PLUS)) {
16866 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16867 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16868 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16869 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16870 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16871 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16872 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16873 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16874 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16875 		} else
16876 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16877 	}
16878 
16879 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16880 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16881 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16882 		if (tp->phy_otp == 0)
16883 			tp->phy_otp = TG3_OTP_DEFAULT;
16884 	}
16885 
16886 	if (tg3_flag(tp, CPMU_PRESENT))
16887 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16888 	else
16889 		tp->mi_mode = MAC_MI_MODE_BASE;
16890 
16891 	tp->coalesce_mode = 0;
16892 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16893 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16894 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16895 
16896 	/* Set these bits to enable statistics workaround. */
16897 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16898 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16899 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16900 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16901 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16902 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16903 	}
16904 
16905 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16906 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16907 		tg3_flag_set(tp, USE_PHYLIB);
16908 
16909 	err = tg3_mdio_init(tp);
16910 	if (err)
16911 		return err;
16912 
16913 	/* Initialize data/descriptor byte/word swapping. */
16914 	val = tr32(GRC_MODE);
16915 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16916 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16917 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16918 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16919 			GRC_MODE_B2HRX_ENABLE |
16920 			GRC_MODE_HTX2B_ENABLE |
16921 			GRC_MODE_HOST_STACKUP);
16922 	else
16923 		val &= GRC_MODE_HOST_STACKUP;
16924 
16925 	tw32(GRC_MODE, val | tp->grc_mode);
16926 
16927 	tg3_switch_clocks(tp);
16928 
16929 	/* Clear this out for sanity. */
16930 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16931 
16932 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16933 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16934 
16935 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16936 			      &pci_state_reg);
16937 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16938 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16939 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16940 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16941 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16942 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16943 			void __iomem *sram_base;
16944 
16945 			/* Write some dummy words into the SRAM status block
16946 			 * area, see if it reads back correctly.  If the return
16947 			 * value is bad, force enable the PCIX workaround.
16948 			 */
16949 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16950 
16951 			writel(0x00000000, sram_base);
16952 			writel(0x00000000, sram_base + 4);
16953 			writel(0xffffffff, sram_base + 4);
16954 			if (readl(sram_base) != 0x00000000)
16955 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16956 		}
16957 	}
16958 
16959 	udelay(50);
16960 	tg3_nvram_init(tp);
16961 
16962 	/* If the device has an NVRAM, no need to load patch firmware */
16963 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16964 	    !tg3_flag(tp, NO_NVRAM))
16965 		tp->fw_needed = NULL;
16966 
16967 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16968 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16969 
16970 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16971 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16972 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16973 		tg3_flag_set(tp, IS_5788);
16974 
16975 	if (!tg3_flag(tp, IS_5788) &&
16976 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16977 		tg3_flag_set(tp, TAGGED_STATUS);
16978 	if (tg3_flag(tp, TAGGED_STATUS)) {
16979 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16980 				      HOSTCC_MODE_CLRTICK_TXBD);
16981 
16982 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16983 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16984 				       tp->misc_host_ctrl);
16985 	}
16986 
16987 	/* Preserve the APE MAC_MODE bits */
16988 	if (tg3_flag(tp, ENABLE_APE))
16989 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16990 	else
16991 		tp->mac_mode = 0;
16992 
16993 	if (tg3_10_100_only_device(tp, ent))
16994 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16995 
16996 	err = tg3_phy_probe(tp);
16997 	if (err) {
16998 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16999 		/* ... but do not return immediately ... */
17000 		tg3_mdio_fini(tp);
17001 	}
17002 
17003 	tg3_read_vpd(tp);
17004 	tg3_read_fw_ver(tp);
17005 
17006 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
17007 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
17008 	} else {
17009 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
17010 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17011 		else
17012 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
17013 	}
17014 
17015 	/* 5700 {AX,BX} chips have a broken status block link
17016 	 * change bit implementation, so we must use the
17017 	 * status register in those cases.
17018 	 */
17019 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
17020 		tg3_flag_set(tp, USE_LINKCHG_REG);
17021 	else
17022 		tg3_flag_clear(tp, USE_LINKCHG_REG);
17023 
17024 	/* The led_ctrl is set during tg3_phy_probe, here we might
17025 	 * have to force the link status polling mechanism based
17026 	 * upon subsystem IDs.
17027 	 */
17028 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
17029 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
17030 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
17031 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17032 		tg3_flag_set(tp, USE_LINKCHG_REG);
17033 	}
17034 
17035 	/* For all SERDES we poll the MAC status register. */
17036 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
17037 		tg3_flag_set(tp, POLL_SERDES);
17038 	else
17039 		tg3_flag_clear(tp, POLL_SERDES);
17040 
17041 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17042 		tg3_flag_set(tp, POLL_CPMU_LINK);
17043 
17044 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17045 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17046 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17047 	    tg3_flag(tp, PCIX_MODE)) {
17048 		tp->rx_offset = NET_SKB_PAD;
17049 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17050 		tp->rx_copy_thresh = ~(u16)0;
17051 #endif
17052 	}
17053 
17054 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17055 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17056 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17057 
17058 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17059 
17060 	/* Increment the rx prod index on the rx std ring by at most
17061 	 * 8 for these chips to workaround hw errata.
17062 	 */
17063 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17064 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
17065 	    tg3_asic_rev(tp) == ASIC_REV_5755)
17066 		tp->rx_std_max_post = 8;
17067 
17068 	if (tg3_flag(tp, ASPM_WORKAROUND))
17069 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17070 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
17071 
17072 	return err;
17073 }
17074 
tg3_get_device_address(struct tg3 * tp,u8 * addr)17075 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17076 {
17077 	u32 hi, lo, mac_offset;
17078 	int addr_ok = 0;
17079 	int err;
17080 
17081 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17082 		return 0;
17083 
17084 	if (tg3_flag(tp, IS_SSB_CORE)) {
17085 		err = ssb_gige_get_macaddr(tp->pdev, addr);
17086 		if (!err && is_valid_ether_addr(addr))
17087 			return 0;
17088 	}
17089 
17090 	mac_offset = 0x7c;
17091 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17092 	    tg3_flag(tp, 5780_CLASS)) {
17093 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17094 			mac_offset = 0xcc;
17095 		if (tg3_nvram_lock(tp))
17096 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17097 		else
17098 			tg3_nvram_unlock(tp);
17099 	} else if (tg3_flag(tp, 5717_PLUS)) {
17100 		if (tp->pci_fn & 1)
17101 			mac_offset = 0xcc;
17102 		if (tp->pci_fn > 1)
17103 			mac_offset += 0x18c;
17104 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17105 		mac_offset = 0x10;
17106 
17107 	/* First try to get it from MAC address mailbox. */
17108 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17109 	if ((hi >> 16) == 0x484b) {
17110 		addr[0] = (hi >>  8) & 0xff;
17111 		addr[1] = (hi >>  0) & 0xff;
17112 
17113 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17114 		addr[2] = (lo >> 24) & 0xff;
17115 		addr[3] = (lo >> 16) & 0xff;
17116 		addr[4] = (lo >>  8) & 0xff;
17117 		addr[5] = (lo >>  0) & 0xff;
17118 
17119 		/* Some old bootcode may report a 0 MAC address in SRAM */
17120 		addr_ok = is_valid_ether_addr(addr);
17121 	}
17122 	if (!addr_ok) {
17123 		__be32 be_hi, be_lo;
17124 
17125 		/* Next, try NVRAM. */
17126 		if (!tg3_flag(tp, NO_NVRAM) &&
17127 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17128 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17129 			memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17130 			memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17131 		}
17132 		/* Finally just fetch it out of the MAC control regs. */
17133 		else {
17134 			hi = tr32(MAC_ADDR_0_HIGH);
17135 			lo = tr32(MAC_ADDR_0_LOW);
17136 
17137 			addr[5] = lo & 0xff;
17138 			addr[4] = (lo >> 8) & 0xff;
17139 			addr[3] = (lo >> 16) & 0xff;
17140 			addr[2] = (lo >> 24) & 0xff;
17141 			addr[1] = hi & 0xff;
17142 			addr[0] = (hi >> 8) & 0xff;
17143 		}
17144 	}
17145 
17146 	if (!is_valid_ether_addr(addr))
17147 		return -EINVAL;
17148 	return 0;
17149 }
17150 
17151 #define BOUNDARY_SINGLE_CACHELINE	1
17152 #define BOUNDARY_MULTI_CACHELINE	2
17153 
tg3_calc_dma_bndry(struct tg3 * tp,u32 val)17154 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17155 {
17156 	int cacheline_size;
17157 	u8 byte;
17158 	int goal;
17159 
17160 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17161 	if (byte == 0)
17162 		cacheline_size = 1024;
17163 	else
17164 		cacheline_size = (int) byte * 4;
17165 
17166 	/* On 5703 and later chips, the boundary bits have no
17167 	 * effect.
17168 	 */
17169 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17170 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17171 	    !tg3_flag(tp, PCI_EXPRESS))
17172 		goto out;
17173 
17174 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17175 	goal = BOUNDARY_MULTI_CACHELINE;
17176 #else
17177 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17178 	goal = BOUNDARY_SINGLE_CACHELINE;
17179 #else
17180 	goal = 0;
17181 #endif
17182 #endif
17183 
17184 	if (tg3_flag(tp, 57765_PLUS)) {
17185 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17186 		goto out;
17187 	}
17188 
17189 	if (!goal)
17190 		goto out;
17191 
17192 	/* PCI controllers on most RISC systems tend to disconnect
17193 	 * when a device tries to burst across a cache-line boundary.
17194 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17195 	 *
17196 	 * Unfortunately, for PCI-E there are only limited
17197 	 * write-side controls for this, and thus for reads
17198 	 * we will still get the disconnects.  We'll also waste
17199 	 * these PCI cycles for both read and write for chips
17200 	 * other than 5700 and 5701 which do not implement the
17201 	 * boundary bits.
17202 	 */
17203 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17204 		switch (cacheline_size) {
17205 		case 16:
17206 		case 32:
17207 		case 64:
17208 		case 128:
17209 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17210 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17211 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17212 			} else {
17213 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17214 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17215 			}
17216 			break;
17217 
17218 		case 256:
17219 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17220 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17221 			break;
17222 
17223 		default:
17224 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17225 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17226 			break;
17227 		}
17228 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17229 		switch (cacheline_size) {
17230 		case 16:
17231 		case 32:
17232 		case 64:
17233 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17234 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17235 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17236 				break;
17237 			}
17238 			fallthrough;
17239 		case 128:
17240 		default:
17241 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17242 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17243 			break;
17244 		}
17245 	} else {
17246 		switch (cacheline_size) {
17247 		case 16:
17248 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17249 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17250 					DMA_RWCTRL_WRITE_BNDRY_16);
17251 				break;
17252 			}
17253 			fallthrough;
17254 		case 32:
17255 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17256 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17257 					DMA_RWCTRL_WRITE_BNDRY_32);
17258 				break;
17259 			}
17260 			fallthrough;
17261 		case 64:
17262 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17263 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17264 					DMA_RWCTRL_WRITE_BNDRY_64);
17265 				break;
17266 			}
17267 			fallthrough;
17268 		case 128:
17269 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17270 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17271 					DMA_RWCTRL_WRITE_BNDRY_128);
17272 				break;
17273 			}
17274 			fallthrough;
17275 		case 256:
17276 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17277 				DMA_RWCTRL_WRITE_BNDRY_256);
17278 			break;
17279 		case 512:
17280 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17281 				DMA_RWCTRL_WRITE_BNDRY_512);
17282 			break;
17283 		case 1024:
17284 		default:
17285 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17286 				DMA_RWCTRL_WRITE_BNDRY_1024);
17287 			break;
17288 		}
17289 	}
17290 
17291 out:
17292 	return val;
17293 }
17294 
tg3_do_test_dma(struct tg3 * tp,u32 * buf,dma_addr_t buf_dma,int size,bool to_device)17295 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17296 			   int size, bool to_device)
17297 {
17298 	struct tg3_internal_buffer_desc test_desc;
17299 	u32 sram_dma_descs;
17300 	int i, ret;
17301 
17302 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17303 
17304 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17305 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17306 	tw32(RDMAC_STATUS, 0);
17307 	tw32(WDMAC_STATUS, 0);
17308 
17309 	tw32(BUFMGR_MODE, 0);
17310 	tw32(FTQ_RESET, 0);
17311 
17312 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17313 	test_desc.addr_lo = buf_dma & 0xffffffff;
17314 	test_desc.nic_mbuf = 0x00002100;
17315 	test_desc.len = size;
17316 
17317 	/*
17318 	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17319 	 * the *second* time the tg3 driver was getting loaded after an
17320 	 * initial scan.
17321 	 *
17322 	 * Broadcom tells me:
17323 	 *   ...the DMA engine is connected to the GRC block and a DMA
17324 	 *   reset may affect the GRC block in some unpredictable way...
17325 	 *   The behavior of resets to individual blocks has not been tested.
17326 	 *
17327 	 * Broadcom noted the GRC reset will also reset all sub-components.
17328 	 */
17329 	if (to_device) {
17330 		test_desc.cqid_sqid = (13 << 8) | 2;
17331 
17332 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17333 		udelay(40);
17334 	} else {
17335 		test_desc.cqid_sqid = (16 << 8) | 7;
17336 
17337 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17338 		udelay(40);
17339 	}
17340 	test_desc.flags = 0x00000005;
17341 
17342 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17343 		u32 val;
17344 
17345 		val = *(((u32 *)&test_desc) + i);
17346 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17347 				       sram_dma_descs + (i * sizeof(u32)));
17348 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17349 	}
17350 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17351 
17352 	if (to_device)
17353 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17354 	else
17355 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17356 
17357 	ret = -ENODEV;
17358 	for (i = 0; i < 40; i++) {
17359 		u32 val;
17360 
17361 		if (to_device)
17362 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17363 		else
17364 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17365 		if ((val & 0xffff) == sram_dma_descs) {
17366 			ret = 0;
17367 			break;
17368 		}
17369 
17370 		udelay(100);
17371 	}
17372 
17373 	return ret;
17374 }
17375 
17376 #define TEST_BUFFER_SIZE	0x2000
17377 
17378 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17379 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17380 	{ },
17381 };
17382 
tg3_test_dma(struct tg3 * tp)17383 static int tg3_test_dma(struct tg3 *tp)
17384 {
17385 	dma_addr_t buf_dma;
17386 	u32 *buf, saved_dma_rwctrl;
17387 	int ret = 0;
17388 
17389 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17390 				 &buf_dma, GFP_KERNEL);
17391 	if (!buf) {
17392 		ret = -ENOMEM;
17393 		goto out_nofree;
17394 	}
17395 
17396 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17397 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17398 
17399 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17400 
17401 	if (tg3_flag(tp, 57765_PLUS))
17402 		goto out;
17403 
17404 	if (tg3_flag(tp, PCI_EXPRESS)) {
17405 		/* DMA read watermark not used on PCIE */
17406 		tp->dma_rwctrl |= 0x00180000;
17407 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17408 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17409 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17410 			tp->dma_rwctrl |= 0x003f0000;
17411 		else
17412 			tp->dma_rwctrl |= 0x003f000f;
17413 	} else {
17414 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17415 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17416 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17417 			u32 read_water = 0x7;
17418 
17419 			/* If the 5704 is behind the EPB bridge, we can
17420 			 * do the less restrictive ONE_DMA workaround for
17421 			 * better performance.
17422 			 */
17423 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17424 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17425 				tp->dma_rwctrl |= 0x8000;
17426 			else if (ccval == 0x6 || ccval == 0x7)
17427 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17428 
17429 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17430 				read_water = 4;
17431 			/* Set bit 23 to enable PCIX hw bug fix */
17432 			tp->dma_rwctrl |=
17433 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17434 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17435 				(1 << 23);
17436 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17437 			/* 5780 always in PCIX mode */
17438 			tp->dma_rwctrl |= 0x00144000;
17439 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17440 			/* 5714 always in PCIX mode */
17441 			tp->dma_rwctrl |= 0x00148000;
17442 		} else {
17443 			tp->dma_rwctrl |= 0x001b000f;
17444 		}
17445 	}
17446 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17447 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17448 
17449 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17450 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17451 		tp->dma_rwctrl &= 0xfffffff0;
17452 
17453 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17454 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17455 		/* Remove this if it causes problems for some boards. */
17456 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17457 
17458 		/* On 5700/5701 chips, we need to set this bit.
17459 		 * Otherwise the chip will issue cacheline transactions
17460 		 * to streamable DMA memory with not all the byte
17461 		 * enables turned on.  This is an error on several
17462 		 * RISC PCI controllers, in particular sparc64.
17463 		 *
17464 		 * On 5703/5704 chips, this bit has been reassigned
17465 		 * a different meaning.  In particular, it is used
17466 		 * on those chips to enable a PCI-X workaround.
17467 		 */
17468 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17469 	}
17470 
17471 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17472 
17473 
17474 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17475 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17476 		goto out;
17477 
17478 	/* It is best to perform DMA test with maximum write burst size
17479 	 * to expose the 5700/5701 write DMA bug.
17480 	 */
17481 	saved_dma_rwctrl = tp->dma_rwctrl;
17482 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17483 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17484 
17485 	while (1) {
17486 		u32 *p = buf, i;
17487 
17488 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17489 			p[i] = i;
17490 
17491 		/* Send the buffer to the chip. */
17492 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17493 		if (ret) {
17494 			dev_err(&tp->pdev->dev,
17495 				"%s: Buffer write failed. err = %d\n",
17496 				__func__, ret);
17497 			break;
17498 		}
17499 
17500 		/* Now read it back. */
17501 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17502 		if (ret) {
17503 			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17504 				"err = %d\n", __func__, ret);
17505 			break;
17506 		}
17507 
17508 		/* Verify it. */
17509 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17510 			if (p[i] == i)
17511 				continue;
17512 
17513 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17514 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17515 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17516 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17517 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17518 				break;
17519 			} else {
17520 				dev_err(&tp->pdev->dev,
17521 					"%s: Buffer corrupted on read back! "
17522 					"(%d != %d)\n", __func__, p[i], i);
17523 				ret = -ENODEV;
17524 				goto out;
17525 			}
17526 		}
17527 
17528 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17529 			/* Success. */
17530 			ret = 0;
17531 			break;
17532 		}
17533 	}
17534 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17535 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17536 		/* DMA test passed without adjusting DMA boundary,
17537 		 * now look for chipsets that are known to expose the
17538 		 * DMA bug without failing the test.
17539 		 */
17540 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17541 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17542 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17543 		} else {
17544 			/* Safe to use the calculated DMA boundary. */
17545 			tp->dma_rwctrl = saved_dma_rwctrl;
17546 		}
17547 
17548 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17549 	}
17550 
17551 out:
17552 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17553 out_nofree:
17554 	return ret;
17555 }
17556 
tg3_init_bufmgr_config(struct tg3 * tp)17557 static void tg3_init_bufmgr_config(struct tg3 *tp)
17558 {
17559 	if (tg3_flag(tp, 57765_PLUS)) {
17560 		tp->bufmgr_config.mbuf_read_dma_low_water =
17561 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17562 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17563 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17564 		tp->bufmgr_config.mbuf_high_water =
17565 			DEFAULT_MB_HIGH_WATER_57765;
17566 
17567 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17568 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17569 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17570 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17571 		tp->bufmgr_config.mbuf_high_water_jumbo =
17572 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17573 	} else if (tg3_flag(tp, 5705_PLUS)) {
17574 		tp->bufmgr_config.mbuf_read_dma_low_water =
17575 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17576 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17577 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17578 		tp->bufmgr_config.mbuf_high_water =
17579 			DEFAULT_MB_HIGH_WATER_5705;
17580 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17581 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17582 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17583 			tp->bufmgr_config.mbuf_high_water =
17584 				DEFAULT_MB_HIGH_WATER_5906;
17585 		}
17586 
17587 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17588 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17589 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17590 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17591 		tp->bufmgr_config.mbuf_high_water_jumbo =
17592 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17593 	} else {
17594 		tp->bufmgr_config.mbuf_read_dma_low_water =
17595 			DEFAULT_MB_RDMA_LOW_WATER;
17596 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17597 			DEFAULT_MB_MACRX_LOW_WATER;
17598 		tp->bufmgr_config.mbuf_high_water =
17599 			DEFAULT_MB_HIGH_WATER;
17600 
17601 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17602 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17603 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17604 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17605 		tp->bufmgr_config.mbuf_high_water_jumbo =
17606 			DEFAULT_MB_HIGH_WATER_JUMBO;
17607 	}
17608 
17609 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17610 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17611 }
17612 
tg3_phy_string(struct tg3 * tp)17613 static char *tg3_phy_string(struct tg3 *tp)
17614 {
17615 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17616 	case TG3_PHY_ID_BCM5400:	return "5400";
17617 	case TG3_PHY_ID_BCM5401:	return "5401";
17618 	case TG3_PHY_ID_BCM5411:	return "5411";
17619 	case TG3_PHY_ID_BCM5701:	return "5701";
17620 	case TG3_PHY_ID_BCM5703:	return "5703";
17621 	case TG3_PHY_ID_BCM5704:	return "5704";
17622 	case TG3_PHY_ID_BCM5705:	return "5705";
17623 	case TG3_PHY_ID_BCM5750:	return "5750";
17624 	case TG3_PHY_ID_BCM5752:	return "5752";
17625 	case TG3_PHY_ID_BCM5714:	return "5714";
17626 	case TG3_PHY_ID_BCM5780:	return "5780";
17627 	case TG3_PHY_ID_BCM5755:	return "5755";
17628 	case TG3_PHY_ID_BCM5787:	return "5787";
17629 	case TG3_PHY_ID_BCM5784:	return "5784";
17630 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17631 	case TG3_PHY_ID_BCM5906:	return "5906";
17632 	case TG3_PHY_ID_BCM5761:	return "5761";
17633 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17634 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17635 	case TG3_PHY_ID_BCM57765:	return "57765";
17636 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17637 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17638 	case TG3_PHY_ID_BCM5762:	return "5762C";
17639 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17640 	case 0:			return "serdes";
17641 	default:		return "unknown";
17642 	}
17643 }
17644 
tg3_bus_string(struct tg3 * tp,char * str)17645 static char *tg3_bus_string(struct tg3 *tp, char *str)
17646 {
17647 	if (tg3_flag(tp, PCI_EXPRESS)) {
17648 		strcpy(str, "PCI Express");
17649 		return str;
17650 	} else if (tg3_flag(tp, PCIX_MODE)) {
17651 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17652 
17653 		strcpy(str, "PCIX:");
17654 
17655 		if ((clock_ctrl == 7) ||
17656 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17657 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17658 			strcat(str, "133MHz");
17659 		else if (clock_ctrl == 0)
17660 			strcat(str, "33MHz");
17661 		else if (clock_ctrl == 2)
17662 			strcat(str, "50MHz");
17663 		else if (clock_ctrl == 4)
17664 			strcat(str, "66MHz");
17665 		else if (clock_ctrl == 6)
17666 			strcat(str, "100MHz");
17667 	} else {
17668 		strcpy(str, "PCI:");
17669 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17670 			strcat(str, "66MHz");
17671 		else
17672 			strcat(str, "33MHz");
17673 	}
17674 	if (tg3_flag(tp, PCI_32BIT))
17675 		strcat(str, ":32-bit");
17676 	else
17677 		strcat(str, ":64-bit");
17678 	return str;
17679 }
17680 
tg3_init_coal(struct tg3 * tp)17681 static void tg3_init_coal(struct tg3 *tp)
17682 {
17683 	struct ethtool_coalesce *ec = &tp->coal;
17684 
17685 	memset(ec, 0, sizeof(*ec));
17686 	ec->cmd = ETHTOOL_GCOALESCE;
17687 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17688 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17689 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17690 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17691 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17692 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17693 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17694 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17695 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17696 
17697 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17698 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17699 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17700 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17701 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17702 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17703 	}
17704 
17705 	if (tg3_flag(tp, 5705_PLUS)) {
17706 		ec->rx_coalesce_usecs_irq = 0;
17707 		ec->tx_coalesce_usecs_irq = 0;
17708 		ec->stats_block_coalesce_usecs = 0;
17709 	}
17710 }
17711 
tg3_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)17712 static int tg3_init_one(struct pci_dev *pdev,
17713 				  const struct pci_device_id *ent)
17714 {
17715 	struct net_device *dev;
17716 	struct tg3 *tp;
17717 	int i, err;
17718 	u32 sndmbx, rcvmbx, intmbx;
17719 	char str[40];
17720 	u64 dma_mask, persist_dma_mask;
17721 	netdev_features_t features = 0;
17722 	u8 addr[ETH_ALEN] __aligned(2);
17723 
17724 	err = pci_enable_device(pdev);
17725 	if (err) {
17726 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17727 		return err;
17728 	}
17729 
17730 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17731 	if (err) {
17732 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17733 		goto err_out_disable_pdev;
17734 	}
17735 
17736 	pci_set_master(pdev);
17737 
17738 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17739 	if (!dev) {
17740 		err = -ENOMEM;
17741 		goto err_out_free_res;
17742 	}
17743 
17744 	SET_NETDEV_DEV(dev, &pdev->dev);
17745 
17746 	tp = netdev_priv(dev);
17747 	tp->pdev = pdev;
17748 	tp->dev = dev;
17749 	tp->rx_mode = TG3_DEF_RX_MODE;
17750 	tp->tx_mode = TG3_DEF_TX_MODE;
17751 	tp->irq_sync = 1;
17752 	tp->pcierr_recovery = false;
17753 
17754 	if (tg3_debug > 0)
17755 		tp->msg_enable = tg3_debug;
17756 	else
17757 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17758 
17759 	if (pdev_is_ssb_gige_core(pdev)) {
17760 		tg3_flag_set(tp, IS_SSB_CORE);
17761 		if (ssb_gige_must_flush_posted_writes(pdev))
17762 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17763 		if (ssb_gige_one_dma_at_once(pdev))
17764 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17765 		if (ssb_gige_have_roboswitch(pdev)) {
17766 			tg3_flag_set(tp, USE_PHYLIB);
17767 			tg3_flag_set(tp, ROBOSWITCH);
17768 		}
17769 		if (ssb_gige_is_rgmii(pdev))
17770 			tg3_flag_set(tp, RGMII_MODE);
17771 	}
17772 
17773 	/* The word/byte swap controls here control register access byte
17774 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17775 	 * setting below.
17776 	 */
17777 	tp->misc_host_ctrl =
17778 		MISC_HOST_CTRL_MASK_PCI_INT |
17779 		MISC_HOST_CTRL_WORD_SWAP |
17780 		MISC_HOST_CTRL_INDIR_ACCESS |
17781 		MISC_HOST_CTRL_PCISTATE_RW;
17782 
17783 	/* The NONFRM (non-frame) byte/word swap controls take effect
17784 	 * on descriptor entries, anything which isn't packet data.
17785 	 *
17786 	 * The StrongARM chips on the board (one for tx, one for rx)
17787 	 * are running in big-endian mode.
17788 	 */
17789 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17790 			GRC_MODE_WSWAP_NONFRM_DATA);
17791 #ifdef __BIG_ENDIAN
17792 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17793 #endif
17794 	spin_lock_init(&tp->lock);
17795 	spin_lock_init(&tp->indirect_lock);
17796 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17797 
17798 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17799 	if (!tp->regs) {
17800 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17801 		err = -ENOMEM;
17802 		goto err_out_free_dev;
17803 	}
17804 
17805 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17806 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17807 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17808 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17809 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17810 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17811 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17812 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17813 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17814 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17815 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17816 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17817 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17818 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17819 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17820 		tg3_flag_set(tp, ENABLE_APE);
17821 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17822 		if (!tp->aperegs) {
17823 			dev_err(&pdev->dev,
17824 				"Cannot map APE registers, aborting\n");
17825 			err = -ENOMEM;
17826 			goto err_out_iounmap;
17827 		}
17828 	}
17829 
17830 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17831 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17832 
17833 	dev->ethtool_ops = &tg3_ethtool_ops;
17834 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17835 	dev->netdev_ops = &tg3_netdev_ops;
17836 	dev->irq = pdev->irq;
17837 
17838 	err = tg3_get_invariants(tp, ent);
17839 	if (err) {
17840 		dev_err(&pdev->dev,
17841 			"Problem fetching invariants of chip, aborting\n");
17842 		goto err_out_apeunmap;
17843 	}
17844 
17845 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17846 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17847 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17848 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17849 	 * do DMA address check in __tg3_start_xmit().
17850 	 */
17851 	if (tg3_flag(tp, IS_5788))
17852 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17853 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17854 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17855 #ifdef CONFIG_HIGHMEM
17856 		dma_mask = DMA_BIT_MASK(64);
17857 #endif
17858 	} else
17859 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17860 
17861 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
17862 		persist_dma_mask = DMA_BIT_MASK(31);
17863 
17864 	/* Configure DMA attributes. */
17865 	if (dma_mask > DMA_BIT_MASK(32)) {
17866 		err = dma_set_mask(&pdev->dev, dma_mask);
17867 		if (!err) {
17868 			features |= NETIF_F_HIGHDMA;
17869 			err = dma_set_coherent_mask(&pdev->dev,
17870 						    persist_dma_mask);
17871 			if (err < 0) {
17872 				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17873 					"DMA for consistent allocations\n");
17874 				goto err_out_apeunmap;
17875 			}
17876 		}
17877 	}
17878 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17879 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17880 		if (err) {
17881 			dev_err(&pdev->dev,
17882 				"No usable DMA configuration, aborting\n");
17883 			goto err_out_apeunmap;
17884 		}
17885 	}
17886 
17887 	tg3_init_bufmgr_config(tp);
17888 
17889 	/* 5700 B0 chips do not support checksumming correctly due
17890 	 * to hardware bugs.
17891 	 */
17892 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17893 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17894 
17895 		if (tg3_flag(tp, 5755_PLUS))
17896 			features |= NETIF_F_IPV6_CSUM;
17897 	}
17898 
17899 	/* TSO is on by default on chips that support hardware TSO.
17900 	 * Firmware TSO on older chips gives lower performance, so it
17901 	 * is off by default, but can be enabled using ethtool.
17902 	 */
17903 	if ((tg3_flag(tp, HW_TSO_1) ||
17904 	     tg3_flag(tp, HW_TSO_2) ||
17905 	     tg3_flag(tp, HW_TSO_3)) &&
17906 	    (features & NETIF_F_IP_CSUM))
17907 		features |= NETIF_F_TSO;
17908 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17909 		if (features & NETIF_F_IPV6_CSUM)
17910 			features |= NETIF_F_TSO6;
17911 		if (tg3_flag(tp, HW_TSO_3) ||
17912 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17913 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17914 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17915 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17916 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17917 			features |= NETIF_F_TSO_ECN;
17918 	}
17919 
17920 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17921 			 NETIF_F_HW_VLAN_CTAG_RX;
17922 	dev->vlan_features |= features;
17923 
17924 	/*
17925 	 * Add loopback capability only for a subset of devices that support
17926 	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17927 	 * loopback for the remaining devices.
17928 	 */
17929 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17930 	    !tg3_flag(tp, CPMU_PRESENT))
17931 		/* Add the loopback capability */
17932 		features |= NETIF_F_LOOPBACK;
17933 
17934 	dev->hw_features |= features;
17935 	dev->priv_flags |= IFF_UNICAST_FLT;
17936 
17937 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17938 	dev->min_mtu = TG3_MIN_MTU;
17939 	dev->max_mtu = TG3_MAX_MTU(tp);
17940 
17941 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17942 	    !tg3_flag(tp, TSO_CAPABLE) &&
17943 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17944 		tg3_flag_set(tp, MAX_RXPEND_64);
17945 		tp->rx_pending = 63;
17946 	}
17947 
17948 	err = tg3_get_device_address(tp, addr);
17949 	if (err) {
17950 		dev_err(&pdev->dev,
17951 			"Could not obtain valid ethernet address, aborting\n");
17952 		goto err_out_apeunmap;
17953 	}
17954 	eth_hw_addr_set(dev, addr);
17955 
17956 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17957 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17958 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17959 	for (i = 0; i < tp->irq_max; i++) {
17960 		struct tg3_napi *tnapi = &tp->napi[i];
17961 
17962 		tnapi->tp = tp;
17963 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17964 
17965 		tnapi->int_mbox = intmbx;
17966 		intmbx += 0x8;
17967 
17968 		tnapi->consmbox = rcvmbx;
17969 		tnapi->prodmbox = sndmbx;
17970 
17971 		if (i)
17972 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17973 		else
17974 			tnapi->coal_now = HOSTCC_MODE_NOW;
17975 
17976 		if (!tg3_flag(tp, SUPPORT_MSIX))
17977 			break;
17978 
17979 		/*
17980 		 * If we support MSIX, we'll be using RSS.  If we're using
17981 		 * RSS, the first vector only handles link interrupts and the
17982 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17983 		 * mailbox values for the next iteration.  The values we setup
17984 		 * above are still useful for the single vectored mode.
17985 		 */
17986 		if (!i)
17987 			continue;
17988 
17989 		rcvmbx += 0x8;
17990 
17991 		if (sndmbx & 0x4)
17992 			sndmbx -= 0x4;
17993 		else
17994 			sndmbx += 0xc;
17995 	}
17996 
17997 	/*
17998 	 * Reset chip in case UNDI or EFI driver did not shutdown
17999 	 * DMA self test will enable WDMAC and we'll see (spurious)
18000 	 * pending DMA on the PCI bus at that point.
18001 	 */
18002 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
18003 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
18004 		tg3_full_lock(tp, 0);
18005 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
18006 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18007 		tg3_full_unlock(tp);
18008 	}
18009 
18010 	err = tg3_test_dma(tp);
18011 	if (err) {
18012 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
18013 		goto err_out_apeunmap;
18014 	}
18015 
18016 	tg3_init_coal(tp);
18017 
18018 	pci_set_drvdata(pdev, dev);
18019 
18020 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
18021 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
18022 	    tg3_asic_rev(tp) == ASIC_REV_5762)
18023 		tg3_flag_set(tp, PTP_CAPABLE);
18024 
18025 	tg3_timer_init(tp);
18026 
18027 	tg3_carrier_off(tp);
18028 
18029 	err = register_netdev(dev);
18030 	if (err) {
18031 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
18032 		goto err_out_apeunmap;
18033 	}
18034 
18035 	if (tg3_flag(tp, PTP_CAPABLE)) {
18036 		tg3_ptp_init(tp);
18037 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
18038 						   &tp->pdev->dev);
18039 		if (IS_ERR(tp->ptp_clock))
18040 			tp->ptp_clock = NULL;
18041 	}
18042 
18043 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18044 		    tp->board_part_number,
18045 		    tg3_chip_rev_id(tp),
18046 		    tg3_bus_string(tp, str),
18047 		    dev->dev_addr);
18048 
18049 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18050 		char *ethtype;
18051 
18052 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18053 			ethtype = "10/100Base-TX";
18054 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18055 			ethtype = "1000Base-SX";
18056 		else
18057 			ethtype = "10/100/1000Base-T";
18058 
18059 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18060 			    "(WireSpeed[%d], EEE[%d])\n",
18061 			    tg3_phy_string(tp), ethtype,
18062 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18063 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18064 	}
18065 
18066 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18067 		    (dev->features & NETIF_F_RXCSUM) != 0,
18068 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
18069 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18070 		    tg3_flag(tp, ENABLE_ASF) != 0,
18071 		    tg3_flag(tp, TSO_CAPABLE) != 0);
18072 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18073 		    tp->dma_rwctrl,
18074 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18075 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18076 
18077 	pci_save_state(pdev);
18078 
18079 	return 0;
18080 
18081 err_out_apeunmap:
18082 	if (tp->aperegs) {
18083 		iounmap(tp->aperegs);
18084 		tp->aperegs = NULL;
18085 	}
18086 
18087 err_out_iounmap:
18088 	if (tp->regs) {
18089 		iounmap(tp->regs);
18090 		tp->regs = NULL;
18091 	}
18092 
18093 err_out_free_dev:
18094 	free_netdev(dev);
18095 
18096 err_out_free_res:
18097 	pci_release_regions(pdev);
18098 
18099 err_out_disable_pdev:
18100 	if (pci_is_enabled(pdev))
18101 		pci_disable_device(pdev);
18102 	return err;
18103 }
18104 
tg3_remove_one(struct pci_dev * pdev)18105 static void tg3_remove_one(struct pci_dev *pdev)
18106 {
18107 	struct net_device *dev = pci_get_drvdata(pdev);
18108 
18109 	if (dev) {
18110 		struct tg3 *tp = netdev_priv(dev);
18111 
18112 		tg3_ptp_fini(tp);
18113 
18114 		release_firmware(tp->fw);
18115 
18116 		tg3_reset_task_cancel(tp);
18117 
18118 		if (tg3_flag(tp, USE_PHYLIB)) {
18119 			tg3_phy_fini(tp);
18120 			tg3_mdio_fini(tp);
18121 		}
18122 
18123 		unregister_netdev(dev);
18124 		if (tp->aperegs) {
18125 			iounmap(tp->aperegs);
18126 			tp->aperegs = NULL;
18127 		}
18128 		if (tp->regs) {
18129 			iounmap(tp->regs);
18130 			tp->regs = NULL;
18131 		}
18132 		free_netdev(dev);
18133 		pci_release_regions(pdev);
18134 		pci_disable_device(pdev);
18135 	}
18136 }
18137 
18138 #ifdef CONFIG_PM_SLEEP
tg3_suspend(struct device * device)18139 static int tg3_suspend(struct device *device)
18140 {
18141 	struct net_device *dev = dev_get_drvdata(device);
18142 	struct tg3 *tp = netdev_priv(dev);
18143 
18144 	rtnl_lock();
18145 
18146 	if (!netif_running(dev))
18147 		goto unlock;
18148 
18149 	tg3_reset_task_cancel(tp);
18150 	tg3_phy_stop(tp);
18151 	tg3_netif_stop(tp);
18152 
18153 	tg3_timer_stop(tp);
18154 
18155 	tg3_full_lock(tp, 1);
18156 	tg3_disable_ints(tp);
18157 	tg3_full_unlock(tp);
18158 
18159 	netif_device_detach(dev);
18160 
18161 	tg3_full_lock(tp, 0);
18162 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18163 	tg3_flag_clear(tp, INIT_COMPLETE);
18164 	tg3_full_unlock(tp);
18165 
18166 	tg3_power_down_prepare(tp);
18167 
18168 unlock:
18169 	rtnl_unlock();
18170 	return 0;
18171 }
18172 
tg3_resume(struct device * device)18173 static int tg3_resume(struct device *device)
18174 {
18175 	struct net_device *dev = dev_get_drvdata(device);
18176 	struct tg3 *tp = netdev_priv(dev);
18177 	int err = 0;
18178 
18179 	rtnl_lock();
18180 
18181 	if (!netif_running(dev))
18182 		goto unlock;
18183 
18184 	netif_device_attach(dev);
18185 
18186 	netdev_lock(dev);
18187 	tg3_full_lock(tp, 0);
18188 
18189 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18190 
18191 	tg3_flag_set(tp, INIT_COMPLETE);
18192 	err = tg3_restart_hw(tp,
18193 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18194 	if (err)
18195 		goto out;
18196 
18197 	tg3_timer_start(tp);
18198 
18199 	tg3_netif_start(tp);
18200 
18201 out:
18202 	tg3_full_unlock(tp);
18203 	netdev_unlock(dev);
18204 
18205 	if (!err)
18206 		tg3_phy_start(tp);
18207 
18208 unlock:
18209 	rtnl_unlock();
18210 	return err;
18211 }
18212 #endif /* CONFIG_PM_SLEEP */
18213 
18214 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18215 
18216 /* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
18217  * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
18218  * be, powered down.
18219  */
18220 static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
18221 	{
18222 		.matches = {
18223 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18224 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
18225 		},
18226 	},
18227 	{
18228 		.matches = {
18229 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18230 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
18231 		},
18232 	},
18233 	{
18234 		.matches = {
18235 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18236 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
18237 		},
18238 	},
18239 	{
18240 		.matches = {
18241 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18242 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
18243 		},
18244 	},
18245 	{
18246 		.matches = {
18247 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18248 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
18249 		},
18250 	},
18251 	{
18252 		.matches = {
18253 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18254 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
18255 		},
18256 	},
18257 	{}
18258 };
18259 
tg3_shutdown(struct pci_dev * pdev)18260 static void tg3_shutdown(struct pci_dev *pdev)
18261 {
18262 	struct net_device *dev = pci_get_drvdata(pdev);
18263 	struct tg3 *tp = netdev_priv(dev);
18264 
18265 	tg3_reset_task_cancel(tp);
18266 
18267 	rtnl_lock();
18268 
18269 	netif_device_detach(dev);
18270 
18271 	if (netif_running(dev))
18272 		dev_close(dev);
18273 
18274 	if (system_state == SYSTEM_POWER_OFF)
18275 		tg3_power_down(tp);
18276 	else if (system_state == SYSTEM_RESTART &&
18277 		 dmi_first_match(tg3_restart_aer_quirk_table) &&
18278 		 pdev->current_state != PCI_D3cold &&
18279 		 pdev->current_state != PCI_UNKNOWN) {
18280 		/* Disable PCIe AER on the tg3 to avoid a fatal
18281 		 * error during this system restart.
18282 		 */
18283 		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
18284 					   PCI_EXP_DEVCTL_CERE |
18285 					   PCI_EXP_DEVCTL_NFERE |
18286 					   PCI_EXP_DEVCTL_FERE |
18287 					   PCI_EXP_DEVCTL_URRE);
18288 	}
18289 
18290 	rtnl_unlock();
18291 
18292 	pci_disable_device(pdev);
18293 }
18294 
18295 /**
18296  * tg3_io_error_detected - called when PCI error is detected
18297  * @pdev: Pointer to PCI device
18298  * @state: The current pci connection state
18299  *
18300  * This function is called after a PCI bus error affecting
18301  * this device has been detected.
18302  */
tg3_io_error_detected(struct pci_dev * pdev,pci_channel_state_t state)18303 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18304 					      pci_channel_state_t state)
18305 {
18306 	struct net_device *netdev = pci_get_drvdata(pdev);
18307 	struct tg3 *tp = netdev_priv(netdev);
18308 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18309 
18310 	netdev_info(netdev, "PCI I/O error detected\n");
18311 
18312 	/* Want to make sure that the reset task doesn't run */
18313 	tg3_reset_task_cancel(tp);
18314 
18315 	rtnl_lock();
18316 
18317 	/* Could be second call or maybe we don't have netdev yet */
18318 	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18319 		goto done;
18320 
18321 	/* We needn't recover from permanent error */
18322 	if (state == pci_channel_io_frozen)
18323 		tp->pcierr_recovery = true;
18324 
18325 	tg3_phy_stop(tp);
18326 
18327 	tg3_netif_stop(tp);
18328 
18329 	tg3_timer_stop(tp);
18330 
18331 	netif_device_detach(netdev);
18332 
18333 	/* Clean up software state, even if MMIO is blocked */
18334 	tg3_full_lock(tp, 0);
18335 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18336 	tg3_full_unlock(tp);
18337 
18338 done:
18339 	if (state == pci_channel_io_perm_failure) {
18340 		if (netdev) {
18341 			netdev_lock(netdev);
18342 			tg3_napi_enable(tp);
18343 			netdev_unlock(netdev);
18344 			dev_close(netdev);
18345 		}
18346 		err = PCI_ERS_RESULT_DISCONNECT;
18347 	} else {
18348 		pci_disable_device(pdev);
18349 	}
18350 
18351 	rtnl_unlock();
18352 
18353 	return err;
18354 }
18355 
18356 /**
18357  * tg3_io_slot_reset - called after the pci bus has been reset.
18358  * @pdev: Pointer to PCI device
18359  *
18360  * Restart the card from scratch, as if from a cold-boot.
18361  * At this point, the card has experienced a hard reset,
18362  * followed by fixups by BIOS, and has its config space
18363  * set up identically to what it was at cold boot.
18364  */
tg3_io_slot_reset(struct pci_dev * pdev)18365 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18366 {
18367 	struct net_device *netdev = pci_get_drvdata(pdev);
18368 	struct tg3 *tp = netdev_priv(netdev);
18369 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18370 	int err;
18371 
18372 	rtnl_lock();
18373 
18374 	if (pci_enable_device(pdev)) {
18375 		dev_err(&pdev->dev,
18376 			"Cannot re-enable PCI device after reset.\n");
18377 		goto done;
18378 	}
18379 
18380 	pci_set_master(pdev);
18381 	pci_restore_state(pdev);
18382 	pci_save_state(pdev);
18383 
18384 	if (!netdev || !netif_running(netdev)) {
18385 		rc = PCI_ERS_RESULT_RECOVERED;
18386 		goto done;
18387 	}
18388 
18389 	err = tg3_power_up(tp);
18390 	if (err)
18391 		goto done;
18392 
18393 	rc = PCI_ERS_RESULT_RECOVERED;
18394 
18395 done:
18396 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18397 		netdev_lock(netdev);
18398 		tg3_napi_enable(tp);
18399 		netdev_unlock(netdev);
18400 		dev_close(netdev);
18401 	}
18402 	rtnl_unlock();
18403 
18404 	return rc;
18405 }
18406 
18407 /**
18408  * tg3_io_resume - called when traffic can start flowing again.
18409  * @pdev: Pointer to PCI device
18410  *
18411  * This callback is called when the error recovery driver tells
18412  * us that its OK to resume normal operation.
18413  */
tg3_io_resume(struct pci_dev * pdev)18414 static void tg3_io_resume(struct pci_dev *pdev)
18415 {
18416 	struct net_device *netdev = pci_get_drvdata(pdev);
18417 	struct tg3 *tp = netdev_priv(netdev);
18418 	int err;
18419 
18420 	rtnl_lock();
18421 
18422 	if (!netdev || !netif_running(netdev))
18423 		goto done;
18424 
18425 	netdev_lock(netdev);
18426 	tg3_full_lock(tp, 0);
18427 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18428 	tg3_flag_set(tp, INIT_COMPLETE);
18429 	err = tg3_restart_hw(tp, true);
18430 	if (err) {
18431 		tg3_full_unlock(tp);
18432 		netdev_unlock(netdev);
18433 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18434 		goto done;
18435 	}
18436 
18437 	netif_device_attach(netdev);
18438 
18439 	tg3_timer_start(tp);
18440 
18441 	tg3_netif_start(tp);
18442 
18443 	tg3_full_unlock(tp);
18444 	netdev_unlock(netdev);
18445 
18446 	tg3_phy_start(tp);
18447 
18448 done:
18449 	tp->pcierr_recovery = false;
18450 	rtnl_unlock();
18451 }
18452 
18453 static const struct pci_error_handlers tg3_err_handler = {
18454 	.error_detected	= tg3_io_error_detected,
18455 	.slot_reset	= tg3_io_slot_reset,
18456 	.resume		= tg3_io_resume
18457 };
18458 
18459 static struct pci_driver tg3_driver = {
18460 	.name		= DRV_MODULE_NAME,
18461 	.id_table	= tg3_pci_tbl,
18462 	.probe		= tg3_init_one,
18463 	.remove		= tg3_remove_one,
18464 	.err_handler	= &tg3_err_handler,
18465 	.driver.pm	= &tg3_pm_ops,
18466 	.shutdown	= tg3_shutdown,
18467 };
18468 
18469 module_pci_driver(tg3_driver);
18470