/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32.h>
#include <linux/dmi.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
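
/* Example: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device flag bitmap, so callers never spell out
 * the TG3_FLAG_ prefix or touch the bitmap directly.
 */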

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
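
/* Example: TG3_TX_RING_SIZE is 512, a power of two, so
 * NEXT_TX(511) == (511 + 1) & 511 == 0 and the producer index wraps
 * around with a single AND instead of a hardware modulo.
 */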

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
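
/* Example: on an architecture with cheap unaligned loads (x86-64
 * selects CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), TG3_RX_COPY_THRESH(tp)
 * folds to the constant 256 and the per-device rx_copy_thresh
 * dereference disappears from the rx fast path.
 */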

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
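/* Example: with the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * the queue is woken once roughly 511 / 4 = 127 descriptors are free
 * again.
 */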
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
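
/* Typical use, as in tg3_switch_clocks() below:
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * guarantees that at least 40 usec elapse after the write on either
 * path, so the register has settled before the next access.
 */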

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't have any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
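
/* Callers are expected to pair tg3_ape_lock() with tg3_ape_unlock();
 * tg3_ape_event_lock() further down is a typical user:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */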

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check if the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}
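
/* The sequence above is the usual indirect MMD access pattern: select
 * the MMD device in MMD_CTRL, latch the register address through
 * MMD_ADDRESS, switch MMD_CTRL to no-increment data mode, then move
 * the data through MMD_ADDRESS.  tg3_phy_cl45_read() below mirrors
 * the same steps for the read direction.
 */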

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;
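	/* Each polling pass below waits 8 usec, hence the divide-by-8
	 * (plus one) above when converting the remaining usecs into a
	 * loop count.
	 */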

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}
1657 
1658 /* tp->lock is held. */
tg3_phy_gather_ump_data(struct tg3 * tp,u32 * data)1659 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1660 {
1661 	u32 reg, val;
1662 
1663 	val = 0;
1664 	if (!tg3_readphy(tp, MII_BMCR, &reg))
1665 		val = reg << 16;
1666 	if (!tg3_readphy(tp, MII_BMSR, &reg))
1667 		val |= (reg & 0xffff);
1668 	*data++ = val;
1669 
1670 	val = 0;
1671 	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1672 		val = reg << 16;
1673 	if (!tg3_readphy(tp, MII_LPA, &reg))
1674 		val |= (reg & 0xffff);
1675 	*data++ = val;
1676 
1677 	val = 0;
1678 	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1679 		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1680 			val = reg << 16;
1681 		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1682 			val |= (reg & 0xffff);
1683 	}
1684 	*data++ = val;
1685 
1686 	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1687 		val = reg << 16;
1688 	else
1689 		val = 0;
1690 	*data++ = val;
1691 }
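
/* Editor's note: layout of the four words gathered above, with the first
 * register of each pair in the upper 16 bits:
 *   data[0] = BMCR:BMSR
 *   data[1] = ADVERTISE:LPA
 *   data[2] = CTRL1000:STAT1000 (zero for MII serdes PHYs)
 *   data[3] = PHYADDR:0
 */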
1692 
1693 /* tp->lock is held. */
1694 static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696 	u32 data[4];
1697 
1698 	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 		return;
1700 
1701 	tg3_phy_gather_ump_data(tp, data);
1702 
1703 	tg3_wait_for_event_ack(tp);
1704 
1705 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710 	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711 
1712 	tg3_generate_fw_event(tp);
1713 }
1714 
1715 /* tp->lock is held. */
1716 static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718 	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719 		/* Wait for RX cpu to ACK the previous event. */
1720 		tg3_wait_for_event_ack(tp);
1721 
1722 		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723 
1724 		tg3_generate_fw_event(tp);
1725 
1726 		/* Wait for RX cpu to ACK this event. */
1727 		tg3_wait_for_event_ack(tp);
1728 	}
1729 }
1730 
1731 /* tp->lock is held. */
1732 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734 	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735 		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736 
1737 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738 		switch (kind) {
1739 		case RESET_KIND_INIT:
1740 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 				      DRV_STATE_START);
1742 			break;
1743 
1744 		case RESET_KIND_SHUTDOWN:
1745 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 				      DRV_STATE_UNLOAD);
1747 			break;
1748 
1749 		case RESET_KIND_SUSPEND:
1750 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 				      DRV_STATE_SUSPEND);
1752 			break;
1753 
1754 		default:
1755 			break;
1756 		}
1757 	}
1758 }
1759 
1760 /* tp->lock is held. */
1761 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763 	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764 		switch (kind) {
1765 		case RESET_KIND_INIT:
1766 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 				      DRV_STATE_START_DONE);
1768 			break;
1769 
1770 		case RESET_KIND_SHUTDOWN:
1771 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 				      DRV_STATE_UNLOAD_DONE);
1773 			break;
1774 
1775 		default:
1776 			break;
1777 		}
1778 	}
1779 }
1780 
1781 /* tp->lock is held. */
1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784 	if (tg3_flag(tp, ENABLE_ASF)) {
1785 		switch (kind) {
1786 		case RESET_KIND_INIT:
1787 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 				      DRV_STATE_START);
1789 			break;
1790 
1791 		case RESET_KIND_SHUTDOWN:
1792 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 				      DRV_STATE_UNLOAD);
1794 			break;
1795 
1796 		case RESET_KIND_SUSPEND:
1797 			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 				      DRV_STATE_SUSPEND);
1799 			break;
1800 
1801 		default:
1802 			break;
1803 		}
1804 	}
1805 }
1806 
1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809 	int i;
1810 	u32 val;
1811 
1812 	if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 		return 0;
1814 
1815 	if (tg3_flag(tp, IS_SSB_CORE)) {
1816 		/* We don't use firmware. */
1817 		return 0;
1818 	}
1819 
1820 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821 		/* Wait up to 20ms for init done. */
1822 		for (i = 0; i < 200; i++) {
1823 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824 				return 0;
1825 			if (pci_channel_offline(tp->pdev))
1826 				return -ENODEV;
1827 
1828 			udelay(100);
1829 		}
1830 		return -ENODEV;
1831 	}
1832 
1833 	/* Wait for firmware initialization to complete. */
1834 	for (i = 0; i < 100000; i++) {
1835 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837 			break;
1838 		if (pci_channel_offline(tp->pdev)) {
1839 			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840 				tg3_flag_set(tp, NO_FWARE_REPORTED);
1841 				netdev_info(tp->dev, "No firmware running\n");
1842 			}
1843 
1844 			break;
1845 		}
1846 
1847 		udelay(10);
1848 	}
1849 
1850 	/* Chip might not be fitted with firmware.  Some Sun onboard
1851 	 * parts are configured like that.  So don't signal the timeout
1852 	 * of the above loop as an error, but do report the lack of
1853 	 * running firmware once.
1854 	 */
1855 	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 		tg3_flag_set(tp, NO_FWARE_REPORTED);
1857 
1858 		netdev_info(tp->dev, "No firmware running\n");
1859 	}
1860 
1861 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862 		/* The 57765 A0 needs a little more
1863 		 * time to do some important work.
1864 		 */
1865 		mdelay(10);
1866 	}
1867 
1868 	return 0;
1869 }
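
/* Editor's note: the polling budgets above work out to 200 * 100 usec =
 * 20 msec for the 5906 VCPU path, and up to 100000 * 10 usec = 1 sec for
 * the firmware mailbox handshake on all other chips.
 */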
1870 
1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873 	if (!netif_carrier_ok(tp->dev)) {
1874 		netif_info(tp, link, tp->dev, "Link is down\n");
1875 		tg3_ump_link_report(tp);
1876 	} else if (netif_msg_link(tp)) {
1877 		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878 			    (tp->link_config.active_speed == SPEED_1000 ?
1879 			     1000 :
1880 			     (tp->link_config.active_speed == SPEED_100 ?
1881 			      100 : 10)),
1882 			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 			     "full" : "half"));
1884 
1885 		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886 			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887 			    "on" : "off",
1888 			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 			    "on" : "off");
1890 
1891 		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892 			netdev_info(tp->dev, "EEE is %s\n",
1893 				    tp->setlpicnt ? "enabled" : "disabled");
1894 
1895 		tg3_ump_link_report(tp);
1896 	}
1897 
1898 	tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900 
1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903 	u32 flowctrl = 0;
1904 
1905 	if (adv & ADVERTISE_PAUSE_CAP) {
1906 		flowctrl |= FLOW_CTRL_RX;
1907 		if (!(adv & ADVERTISE_PAUSE_ASYM))
1908 			flowctrl |= FLOW_CTRL_TX;
1909 	} else if (adv & ADVERTISE_PAUSE_ASYM)
1910 		flowctrl |= FLOW_CTRL_TX;
1911 
1912 	return flowctrl;
1913 }
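
/* Editor's note: decode table for the 1000BASE-T pause advertisement
 * bits handled above (per IEEE 802.3 annex 28B):
 *
 *   PAUSE_CAP  PAUSE_ASYM      decoded flow control
 *       0          0           none
 *       0          1           FLOW_CTRL_TX
 *       1          0           FLOW_CTRL_RX | FLOW_CTRL_TX
 *       1          1           FLOW_CTRL_RX
 */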
1914 
1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917 	u16 miireg;
1918 
1919 	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920 		miireg = ADVERTISE_1000XPAUSE;
1921 	else if (flow_ctrl & FLOW_CTRL_TX)
1922 		miireg = ADVERTISE_1000XPSE_ASYM;
1923 	else if (flow_ctrl & FLOW_CTRL_RX)
1924 		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925 	else
1926 		miireg = 0;
1927 
1928 	return miireg;
1929 }
1930 
1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933 	u32 flowctrl = 0;
1934 
1935 	if (adv & ADVERTISE_1000XPAUSE) {
1936 		flowctrl |= FLOW_CTRL_RX;
1937 		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938 			flowctrl |= FLOW_CTRL_TX;
1939 	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1940 		flowctrl |= FLOW_CTRL_TX;
1941 
1942 	return flowctrl;
1943 }
1944 
1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947 	u8 cap = 0;
1948 
1949 	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950 		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951 	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952 		if (lcladv & ADVERTISE_1000XPAUSE)
1953 			cap = FLOW_CTRL_RX;
1954 		if (rmtadv & ADVERTISE_1000XPAUSE)
1955 			cap = FLOW_CTRL_TX;
1956 	}
1957 
1958 	return cap;
1959 }
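
/* Editor's note: worked example for the 1000BASE-X resolution above.  If
 * the local side advertises only 1000XPSE_ASYM while the partner
 * advertises 1000XPAUSE | 1000XPSE_ASYM, the symmetric test fails, the
 * asymmetric test passes, and the partner's PAUSE bit yields
 * cap = FLOW_CTRL_TX: we transmit pause frames but do not act on
 * received ones, matching the IEEE 802.3 annex 28B resolution rules.
 */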
1960 
1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963 	u8 autoneg;
1964 	u8 flowctrl = 0;
1965 	u32 old_rx_mode = tp->rx_mode;
1966 	u32 old_tx_mode = tp->tx_mode;
1967 
1968 	if (tg3_flag(tp, USE_PHYLIB))
1969 		autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970 	else
1971 		autoneg = tp->link_config.autoneg;
1972 
1973 	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975 			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976 		else
1977 			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978 	} else
1979 		flowctrl = tp->link_config.flowctrl;
1980 
1981 	tp->link_config.active_flowctrl = flowctrl;
1982 
1983 	if (flowctrl & FLOW_CTRL_RX)
1984 		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985 	else
1986 		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987 
1988 	if (old_rx_mode != tp->rx_mode)
1989 		tw32_f(MAC_RX_MODE, tp->rx_mode);
1990 
1991 	if (flowctrl & FLOW_CTRL_TX)
1992 		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993 	else
1994 		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995 
1996 	if (old_tx_mode != tp->tx_mode)
1997 		tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999 
2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002 	u8 oldflowctrl, linkmesg = 0;
2003 	u32 mac_mode, lcl_adv, rmt_adv;
2004 	struct tg3 *tp = netdev_priv(dev);
2005 	struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006 
2007 	spin_lock_bh(&tp->lock);
2008 
2009 	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010 				    MAC_MODE_HALF_DUPLEX);
2011 
2012 	oldflowctrl = tp->link_config.active_flowctrl;
2013 
2014 	if (phydev->link) {
2015 		lcl_adv = 0;
2016 		rmt_adv = 0;
2017 
2018 		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2020 		else if (phydev->speed == SPEED_1000 ||
2021 			 tg3_asic_rev(tp) != ASIC_REV_5785)
2022 			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023 		else
2024 			mac_mode |= MAC_MODE_PORT_MODE_MII;
2025 
2026 		if (phydev->duplex == DUPLEX_HALF)
2027 			mac_mode |= MAC_MODE_HALF_DUPLEX;
2028 		else {
2029 			lcl_adv = mii_advertise_flowctrl(
2030 				  tp->link_config.flowctrl);
2031 
2032 			if (phydev->pause)
2033 				rmt_adv = LPA_PAUSE_CAP;
2034 			if (phydev->asym_pause)
2035 				rmt_adv |= LPA_PAUSE_ASYM;
2036 		}
2037 
2038 		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039 	} else
2040 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041 
2042 	if (mac_mode != tp->mac_mode) {
2043 		tp->mac_mode = mac_mode;
2044 		tw32_f(MAC_MODE, tp->mac_mode);
2045 		udelay(40);
2046 	}
2047 
2048 	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049 		if (phydev->speed == SPEED_10)
2050 			tw32(MAC_MI_STAT,
2051 			     MAC_MI_STAT_10MBPS_MODE |
2052 			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 		else
2054 			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 	}
2056 
2057 	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058 		tw32(MAC_TX_LENGTHS,
2059 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2061 		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062 	else
2063 		tw32(MAC_TX_LENGTHS,
2064 		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 		      (6 << TX_LENGTHS_IPG_SHIFT) |
2066 		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067 
2068 	if (phydev->link != tp->old_link ||
2069 	    phydev->speed != tp->link_config.active_speed ||
2070 	    phydev->duplex != tp->link_config.active_duplex ||
2071 	    oldflowctrl != tp->link_config.active_flowctrl)
2072 		linkmesg = 1;
2073 
2074 	tp->old_link = phydev->link;
2075 	tp->link_config.active_speed = phydev->speed;
2076 	tp->link_config.active_duplex = phydev->duplex;
2077 
2078 	spin_unlock_bh(&tp->lock);
2079 
2080 	if (linkmesg)
2081 		tg3_link_report(tp);
2082 }
2083 
2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086 	struct phy_device *phydev;
2087 
2088 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 		return 0;
2090 
2091 	/* Bring the PHY back to a known state. */
2092 	tg3_bmcr_reset(tp);
2093 
2094 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095 
2096 	/* Attach the MAC to the PHY. */
2097 	phydev = phy_connect(tp->dev, phydev_name(phydev),
2098 			     tg3_adjust_link, phydev->interface);
2099 	if (IS_ERR(phydev)) {
2100 		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101 		return PTR_ERR(phydev);
2102 	}
2103 
2104 	/* Mask with MAC supported features. */
2105 	switch (phydev->interface) {
2106 	case PHY_INTERFACE_MODE_GMII:
2107 	case PHY_INTERFACE_MODE_RGMII:
2108 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109 			phy_set_max_speed(phydev, SPEED_1000);
2110 			phy_support_asym_pause(phydev);
2111 			break;
2112 		}
2113 		fallthrough;
2114 	case PHY_INTERFACE_MODE_MII:
2115 		phy_set_max_speed(phydev, SPEED_100);
2116 		phy_support_asym_pause(phydev);
2117 		break;
2118 	default:
2119 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 		return -EINVAL;
2121 	}
2122 
2123 	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124 
2125 	phy_attached_info(phydev);
2126 
2127 	return 0;
2128 }
2129 
2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132 	struct phy_device *phydev;
2133 
2134 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 		return;
2136 
2137 	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138 
2139 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140 		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141 		phydev->speed = tp->link_config.speed;
2142 		phydev->duplex = tp->link_config.duplex;
2143 		phydev->autoneg = tp->link_config.autoneg;
2144 		ethtool_convert_legacy_u32_to_link_mode(
2145 			phydev->advertising, tp->link_config.advertising);
2146 	}
2147 
2148 	phy_start(phydev);
2149 
2150 	phy_start_aneg(phydev);
2151 }
2152 
2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 		return;
2157 
2158 	phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160 
2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163 	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164 		phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166 	}
2167 }
2168 
2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171 	int err;
2172 	u32 val;
2173 
2174 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 		return 0;
2176 
2177 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178 		/* Cannot do read-modify-write on 5401 */
2179 		err = tg3_phy_auxctl_write(tp,
2180 					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182 					   0x4c20);
2183 		goto done;
2184 	}
2185 
2186 	err = tg3_phy_auxctl_read(tp,
2187 				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188 	if (err)
2189 		return err;
2190 
2191 	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192 	err = tg3_phy_auxctl_write(tp,
2193 				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194 
2195 done:
2196 	return err;
2197 }
2198 
2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201 	u32 phytest;
2202 
2203 	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 		u32 phy;
2205 
2206 		tg3_writephy(tp, MII_TG3_FET_TEST,
2207 			     phytest | MII_TG3_FET_SHADOW_EN);
2208 		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209 			if (enable)
2210 				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 			else
2212 				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213 			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214 		}
2215 		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216 	}
2217 }
2218 
2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221 	u32 reg;
2222 
2223 	if (!tg3_flag(tp, 5705_PLUS) ||
2224 	    (tg3_flag(tp, 5717_PLUS) &&
2225 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 		return;
2227 
2228 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229 		tg3_phy_fet_toggle_apd(tp, enable);
2230 		return;
2231 	}
2232 
2233 	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234 	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239 
2240 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241 
2242 
2243 	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244 	if (enable)
2245 		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246 
2247 	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249 
2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252 	u32 phy;
2253 
2254 	if (!tg3_flag(tp, 5705_PLUS) ||
2255 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 		return;
2257 
2258 	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 		u32 ephy;
2260 
2261 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262 			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263 
2264 			tg3_writephy(tp, MII_TG3_FET_TEST,
2265 				     ephy | MII_TG3_FET_SHADOW_EN);
2266 			if (!tg3_readphy(tp, reg, &phy)) {
2267 				if (enable)
2268 					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 				else
2270 					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 				tg3_writephy(tp, reg, phy);
2272 			}
2273 			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274 		}
2275 	} else {
2276 		int ret;
2277 
2278 		ret = tg3_phy_auxctl_read(tp,
2279 					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280 		if (!ret) {
2281 			if (enable)
2282 				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 			else
2284 				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 			tg3_phy_auxctl_write(tp,
2286 					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287 		}
2288 	}
2289 }
2290 
2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293 	int ret;
2294 	u32 val;
2295 
2296 	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 		return;
2298 
2299 	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 	if (!ret)
2301 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302 				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304 
2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307 	u32 otp, phy;
2308 
2309 	if (!tp->phy_otp)
2310 		return;
2311 
2312 	otp = tp->phy_otp;
2313 
2314 	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 		return;
2316 
2317 	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318 	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319 	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320 
2321 	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322 	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324 
2325 	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326 	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327 	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328 
2329 	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331 
2332 	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334 
2335 	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336 	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337 	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338 
2339 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
2341 
2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2343 {
2344 	u32 val;
2345 	struct ethtool_keee *dest = &tp->eee;
2346 
2347 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 		return;
2349 
2350 	if (eee)
2351 		dest = eee;
2352 
2353 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 		return;
2355 
2356 	/* Pull eee_active */
2357 	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358 	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359 		dest->eee_active = 1;
2360 	} else
2361 		dest->eee_active = 0;
2362 
2363 	/* Pull lp advertised settings */
2364 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 		return;
2366 	mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2367 
2368 	/* Pull advertised and eee_enabled settings */
2369 	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 		return;
2371 	dest->eee_enabled = !!val;
2372 	mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2373 
2374 	/* Pull tx_lpi_enabled */
2375 	val = tr32(TG3_CPMU_EEE_MODE);
2376 	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377 
2378 	/* Pull lpi timer value */
2379 	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381 
2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384 	u32 val;
2385 
2386 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387 		return;
2388 
2389 	tp->setlpicnt = 0;
2390 
2391 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 	    current_link_up &&
2393 	    tp->link_config.active_duplex == DUPLEX_FULL &&
2394 	    (tp->link_config.active_speed == SPEED_100 ||
2395 	     tp->link_config.active_speed == SPEED_1000)) {
2396 		u32 eeectl;
2397 
2398 		if (tp->link_config.active_speed == SPEED_1000)
2399 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 		else
2401 			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402 
2403 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404 
2405 		tg3_eee_pull_config(tp, NULL);
2406 		if (tp->eee.eee_active)
2407 			tp->setlpicnt = 2;
2408 	}
2409 
2410 	if (!tp->setlpicnt) {
2411 		if (current_link_up &&
2412 		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 		}
2416 
2417 		val = tr32(TG3_CPMU_EEE_MODE);
2418 		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419 	}
2420 }
2421 
2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424 	u32 val;
2425 
2426 	if (tp->link_config.active_speed == SPEED_1000 &&
2427 	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 	     tg3_flag(tp, 57765_CLASS)) &&
2430 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 		val = MII_TG3_DSP_TAP26_ALNOKO |
2432 		      MII_TG3_DSP_TAP26_RMRXSTO;
2433 		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 	}
2436 
2437 	val = tr32(TG3_CPMU_EEE_MODE);
2438 	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440 
2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443 	int limit = 100;
2444 
2445 	while (limit--) {
2446 		u32 tmp32;
2447 
2448 		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 			if ((tmp32 & 0x1000) == 0)
2450 				break;
2451 		}
2452 	}
2453 	if (limit < 0)
2454 		return -EBUSY;
2455 
2456 	return 0;
2457 }
2458 
2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461 	static const u32 test_pat[4][6] = {
2462 	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466 	};
2467 	int chan;
2468 
2469 	for (chan = 0; chan < 4; chan++) {
2470 		int i;
2471 
2472 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 			     (chan * 0x2000) | 0x0200);
2474 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475 
2476 		for (i = 0; i < 6; i++)
2477 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 				     test_pat[chan][i]);
2479 
2480 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 		if (tg3_wait_macro_done(tp)) {
2482 			*resetp = 1;
2483 			return -EBUSY;
2484 		}
2485 
2486 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 			     (chan * 0x2000) | 0x0200);
2488 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 		if (tg3_wait_macro_done(tp)) {
2490 			*resetp = 1;
2491 			return -EBUSY;
2492 		}
2493 
2494 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 		if (tg3_wait_macro_done(tp)) {
2496 			*resetp = 1;
2497 			return -EBUSY;
2498 		}
2499 
2500 		for (i = 0; i < 6; i += 2) {
2501 			u32 low, high;
2502 
2503 			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 			    tg3_wait_macro_done(tp)) {
2506 				*resetp = 1;
2507 				return -EBUSY;
2508 			}
2509 			low &= 0x7fff;
2510 			high &= 0x000f;
2511 			if (low != test_pat[chan][i] ||
2512 			    high != test_pat[chan][i+1]) {
2513 				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516 
2517 				return -EBUSY;
2518 			}
2519 		}
2520 	}
2521 
2522 	return 0;
2523 }
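
/* Editor's note: each row of test_pat[] above appears to hold three
 * (low, high) word pairs per DSP channel; the readback masks
 * low &= 0x7fff and high &= 0x000f mirror that split, so each pair
 * round-trips as one 19-bit value through the DSP read/write port.
 */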
2524 
2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527 	int chan;
2528 
2529 	for (chan = 0; chan < 4; chan++) {
2530 		int i;
2531 
2532 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 			     (chan * 0x2000) | 0x0200);
2534 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 		for (i = 0; i < 6; i++)
2536 			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 		if (tg3_wait_macro_done(tp))
2539 			return -EBUSY;
2540 	}
2541 
2542 	return 0;
2543 }
2544 
2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547 	u32 reg32, phy9_orig;
2548 	int retries, do_phy_reset, err;
2549 
2550 	retries = 10;
2551 	do_phy_reset = 1;
2552 	do {
2553 		if (do_phy_reset) {
2554 			err = tg3_bmcr_reset(tp);
2555 			if (err)
2556 				return err;
2557 			do_phy_reset = 0;
2558 		}
2559 
2560 		/* Disable transmitter and interrupt.  */
2561 		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562 			continue;
2563 
2564 		reg32 |= 0x3000;
2565 		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566 
2567 		/* Set full-duplex, 1000 mbps.  */
2568 		tg3_writephy(tp, MII_BMCR,
2569 			     BMCR_FULLDPLX | BMCR_SPEED1000);
2570 
2571 		/* Set to master mode.  */
2572 		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 			continue;
2574 
2575 		tg3_writephy(tp, MII_CTRL1000,
2576 			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577 
2578 		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579 		if (err)
2580 			return err;
2581 
2582 		/* Block the PHY control access.  */
2583 		tg3_phydsp_write(tp, 0x8005, 0x0800);
2584 
2585 		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 		if (!err)
2587 			break;
2588 	} while (--retries);
2589 
2590 	err = tg3_phy_reset_chanpat(tp);
2591 	if (err)
2592 		return err;
2593 
2594 	tg3_phydsp_write(tp, 0x8005, 0x0000);
2595 
2596 	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598 
2599 	tg3_phy_toggle_auxctl_smdsp(tp, false);
2600 
2601 	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602 
2603 	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604 	if (err)
2605 		return err;
2606 
2607 	reg32 &= ~0x3000;
2608 	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609 
2610 	return 0;
2611 }
2612 
2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615 	netif_carrier_off(tp->dev);
2616 	tp->link_up = false;
2617 }
2618 
2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621 	if (tg3_flag(tp, ENABLE_ASF))
2622 		netdev_warn(tp->dev,
2623 			    "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625 
2626 /* This resets the tigon3 PHY and applies chip-specific workarounds;
2627  * the FORCE argument this comment once described no longer exists.
2628  */
2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631 	u32 val, cpmuctrl;
2632 	int err;
2633 
2634 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 		val = tr32(GRC_MISC_CFG);
2636 		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 		udelay(40);
2638 	}
2639 	err  = tg3_readphy(tp, MII_BMSR, &val);
2640 	err |= tg3_readphy(tp, MII_BMSR, &val);
2641 	if (err != 0)
2642 		return -EBUSY;
2643 
2644 	if (netif_running(tp->dev) && tp->link_up) {
2645 		netif_carrier_off(tp->dev);
2646 		tg3_link_report(tp);
2647 	}
2648 
2649 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 		err = tg3_phy_reset_5703_4_5(tp);
2653 		if (err)
2654 			return err;
2655 		goto out;
2656 	}
2657 
2658 	cpmuctrl = 0;
2659 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 		cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 			tw32(TG3_CPMU_CTRL,
2664 			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 	}
2666 
2667 	err = tg3_bmcr_reset(tp);
2668 	if (err)
2669 		return err;
2670 
2671 	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674 
2675 		tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 	}
2677 
2678 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 			udelay(40);
2685 			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686 		}
2687 	}
2688 
2689 	if (tg3_flag(tp, 5717_PLUS) &&
2690 	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 		return 0;
2692 
2693 	tg3_phy_apply_otp(tp);
2694 
2695 	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 		tg3_phy_toggle_apd(tp, true);
2697 	else
2698 		tg3_phy_toggle_apd(tp, false);
2699 
2700 out:
2701 	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 		tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 		tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 	}
2707 
2708 	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 	}
2712 
2713 	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 			tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 			tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 		}
2720 	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 				tg3_writephy(tp, MII_TG3_TEST1,
2726 					     MII_TG3_TEST1_TRIM_EN | 0x4);
2727 			} else
2728 				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729 
2730 			tg3_phy_toggle_auxctl_smdsp(tp, false);
2731 		}
2732 	}
2733 
2734 	/* Set Extended packet length bit (bit 14) on all chips
2735 	 * that support jumbo frames. */
2736 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 		/* Cannot do read-modify-write on 5401 */
2738 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 		/* Set bit 14 with read-modify-write to preserve other bits */
2741 		err = tg3_phy_auxctl_read(tp,
2742 					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 		if (!err)
2744 			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 	}
2747 
2748 	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2749 	 * jumbo frames transmission.
2750 	 * jumbo frame transmission.
2751 	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 	}
2756 
2757 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 		/* adjust output voltage */
2759 		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 	}
2761 
2762 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 		tg3_phydsp_write(tp, 0xffb, 0x4000);
2764 
2765 	tg3_phy_toggle_automdix(tp, true);
2766 	tg3_phy_set_wirespeed(tp);
2767 	return 0;
2768 }
2769 
2770 #define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2772 #define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2773 					  TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779 
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 	 (TG3_GPIO_MSG_NEED_VAUX << 12))
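
/* Editor's note: the GPIO message word carries one 4-bit nibble per PCI
 * function, so e.g. a function 2 that is present and needs Vaux
 * contributes (TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX) << 8 =
 * 0x300, which the ALL_*_MASK macros above test in aggregate.
 */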
2785 
2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788 	u32 status, shift;
2789 
2790 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2792 		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 	else
2794 		status = tr32(TG3_CPMU_DRV_STATUS);
2795 
2796 	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 	status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 	status |= (newstat << shift);
2799 
2800 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 	    tg3_asic_rev(tp) == ASIC_REV_5719)
2802 		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 	else
2804 		tw32(TG3_CPMU_DRV_STATUS, status);
2805 
2806 	return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
2808 
2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811 	if (!tg3_flag(tp, IS_NIC))
2812 		return 0;
2813 
2814 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 			return -EIO;
2819 
2820 		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821 
2822 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2824 
2825 		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 	} else {
2827 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 	}
2830 
2831 	return 0;
2832 }
2833 
2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836 	u32 grc_local_ctrl;
2837 
2838 	if (!tg3_flag(tp, IS_NIC) ||
2839 	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 	    tg3_asic_rev(tp) == ASIC_REV_5701)
2841 		return;
2842 
2843 	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844 
2845 	tw32_wait_f(GRC_LOCAL_CTRL,
2846 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 
2849 	tw32_wait_f(GRC_LOCAL_CTRL,
2850 		    grc_local_ctrl,
2851 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 
2853 	tw32_wait_f(GRC_LOCAL_CTRL,
2854 		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857 
2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860 	if (!tg3_flag(tp, IS_NIC))
2861 		return;
2862 
2863 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 			    (GRC_LCLCTRL_GPIO_OE0 |
2867 			     GRC_LCLCTRL_GPIO_OE1 |
2868 			     GRC_LCLCTRL_GPIO_OE2 |
2869 			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 			     GRC_LCLCTRL_GPIO_OUTPUT1),
2871 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 				     GRC_LCLCTRL_GPIO_OE1 |
2877 				     GRC_LCLCTRL_GPIO_OE2 |
2878 				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 				     tp->grc_local_ctrl;
2881 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2883 
2884 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2887 
2888 		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 	} else {
2892 		u32 no_gpio2;
2893 		u32 grc_local_ctrl = 0;
2894 
2895 		/* Workaround to prevent overdrawing Amps. */
2896 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 				    grc_local_ctrl,
2900 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 		}
2902 
2903 		/* On 5753 and variants, GPIO2 cannot be used. */
2904 		no_gpio2 = tp->nic_sram_data_cfg &
2905 			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2906 
2907 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 				  GRC_LCLCTRL_GPIO_OE1 |
2909 				  GRC_LCLCTRL_GPIO_OE2 |
2910 				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 				  GRC_LCLCTRL_GPIO_OUTPUT2;
2912 		if (no_gpio2) {
2913 			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 					    GRC_LCLCTRL_GPIO_OUTPUT2);
2915 		}
2916 		tw32_wait_f(GRC_LOCAL_CTRL,
2917 			    tp->grc_local_ctrl | grc_local_ctrl,
2918 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2919 
2920 		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921 
2922 		tw32_wait_f(GRC_LOCAL_CTRL,
2923 			    tp->grc_local_ctrl | grc_local_ctrl,
2924 			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 
2926 		if (!no_gpio2) {
2927 			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 			tw32_wait_f(GRC_LOCAL_CTRL,
2929 				    tp->grc_local_ctrl | grc_local_ctrl,
2930 				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 		}
2932 	}
2933 }
2934 
2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937 	u32 msg = 0;
2938 
2939 	/* Serialize power state transitions */
2940 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 		return;
2942 
2943 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 		msg = TG3_GPIO_MSG_NEED_VAUX;
2945 
2946 	msg = tg3_set_function_status(tp, msg);
2947 
2948 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 		goto done;
2950 
2951 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 		tg3_pwrsrc_switch_to_vaux(tp);
2953 	else
2954 		tg3_pwrsrc_die_with_vmain(tp);
2955 
2956 done:
2957 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959 
2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962 	bool need_vaux = false;
2963 
2964 	/* The GPIOs do something completely different on 57765. */
2965 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 		return;
2967 
2968 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 		tg3_frob_aux_power_5717(tp, include_wol ?
2972 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973 		return;
2974 	}
2975 
2976 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 		struct net_device *dev_peer;
2978 
2979 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2980 
2981 		/* remove_one() may have been run on the peer. */
2982 		if (dev_peer) {
2983 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2984 
2985 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 				return;
2987 
2988 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 			    tg3_flag(tp_peer, ENABLE_ASF))
2990 				need_vaux = true;
2991 		}
2992 	}
2993 
2994 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 	    tg3_flag(tp, ENABLE_ASF))
2996 		need_vaux = true;
2997 
2998 	if (need_vaux)
2999 		tg3_pwrsrc_switch_to_vaux(tp);
3000 	else
3001 		tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003 
3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 		return 1;
3008 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 		if (speed != SPEED_10)
3010 			return 1;
3011 	} else if (speed == SPEED_10)
3012 		return 1;
3013 
3014 	return 0;
3015 }
3016 
3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019 	switch (tg3_asic_rev(tp)) {
3020 	case ASIC_REV_5700:
3021 	case ASIC_REV_5704:
3022 		return true;
3023 	case ASIC_REV_5780:
3024 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025 			return true;
3026 		return false;
3027 	case ASIC_REV_5717:
3028 		if (!tp->pci_fn)
3029 			return true;
3030 		return false;
3031 	case ASIC_REV_5719:
3032 	case ASIC_REV_5720:
3033 		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034 		    !tp->pci_fn)
3035 			return true;
3036 		return false;
3037 	}
3038 
3039 	return false;
3040 }
3041 
3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044 	switch (tg3_asic_rev(tp)) {
3045 	case ASIC_REV_5719:
3046 	case ASIC_REV_5720:
3047 		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048 		    !tp->pci_fn)
3049 			return true;
3050 		return false;
3051 	}
3052 
3053 	return false;
3054 }
3055 
3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058 	u32 val;
3059 
3060 	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 		return;
3062 
3063 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067 
3068 			sg_dig_ctrl |=
3069 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072 		}
3073 		return;
3074 	}
3075 
3076 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 		tg3_bmcr_reset(tp);
3078 		val = tr32(GRC_MISC_CFG);
3079 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 		udelay(40);
3081 		return;
3082 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 		u32 phytest;
3084 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 			u32 phy;
3086 
3087 			tg3_writephy(tp, MII_ADVERTISE, 0);
3088 			tg3_writephy(tp, MII_BMCR,
3089 				     BMCR_ANENABLE | BMCR_ANRESTART);
3090 
3091 			tg3_writephy(tp, MII_TG3_FET_TEST,
3092 				     phytest | MII_TG3_FET_SHADOW_EN);
3093 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 				tg3_writephy(tp,
3096 					     MII_TG3_FET_SHDW_AUXMODE4,
3097 					     phy);
3098 			}
3099 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 		}
3101 		return;
3102 	} else if (do_low_power) {
3103 		if (!tg3_phy_led_bug(tp))
3104 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106 
3107 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 	}
3112 
3113 	/* The PHY should not be powered down on some chips because
3114 	 * of bugs.
3115 	 */
3116 	if (tg3_phy_power_bug(tp))
3117 		return;
3118 
3119 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 	}
3126 
3127 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129 
3130 /* tp->lock is held. */
3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133 	if (tg3_flag(tp, NVRAM)) {
3134 		int i;
3135 
3136 		if (tp->nvram_lock_cnt == 0) {
3137 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 			for (i = 0; i < 8000; i++) {
3139 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140 					break;
3141 				udelay(20);
3142 			}
3143 			if (i == 8000) {
3144 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145 				return -ENODEV;
3146 			}
3147 		}
3148 		tp->nvram_lock_cnt++;
3149 	}
3150 	return 0;
3151 }
3152 
3153 /* tp->lock is held. */
3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156 	if (tg3_flag(tp, NVRAM)) {
3157 		if (tp->nvram_lock_cnt > 0)
3158 			tp->nvram_lock_cnt--;
3159 		if (tp->nvram_lock_cnt == 0)
3160 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 	}
3162 }
3163 
3164 /* tp->lock is held. */
3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 		u32 nvaccess = tr32(NVRAM_ACCESS);
3169 
3170 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171 	}
3172 }
3173 
3174 /* tp->lock is held. */
3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 		u32 nvaccess = tr32(NVRAM_ACCESS);
3179 
3180 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181 	}
3182 }
3183 
3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 					u32 offset, u32 *val)
3186 {
3187 	u32 tmp;
3188 	int i;
3189 
3190 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 		return -EINVAL;
3192 
3193 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 					EEPROM_ADDR_DEVID_MASK |
3195 					EEPROM_ADDR_READ);
3196 	tw32(GRC_EEPROM_ADDR,
3197 	     tmp |
3198 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 	      EEPROM_ADDR_ADDR_MASK) |
3201 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202 
3203 	for (i = 0; i < 1000; i++) {
3204 		tmp = tr32(GRC_EEPROM_ADDR);
3205 
3206 		if (tmp & EEPROM_ADDR_COMPLETE)
3207 			break;
3208 		msleep(1);
3209 	}
3210 	if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 		return -EBUSY;
3212 
3213 	tmp = tr32(GRC_EEPROM_DATA);
3214 
3215 	/*
3216 	 * The data will always be opposite the native endian
3217 	 * format.  Perform a blind byteswap to compensate.
3218 	 */
3219 	*val = swab32(tmp);
3220 
3221 	return 0;
3222 }
3223 
3224 #define NVRAM_CMD_TIMEOUT 10000
3225 
3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228 	int i;
3229 
3230 	tw32(NVRAM_CMD, nvram_cmd);
3231 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 		usleep_range(10, 40);
3233 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 			udelay(10);
3235 			break;
3236 		}
3237 	}
3238 
3239 	if (i == NVRAM_CMD_TIMEOUT)
3240 		return -EBUSY;
3241 
3242 	return 0;
3243 }
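
/* Editor's note: with NVRAM_CMD_TIMEOUT = 10000 and usleep_range(10, 40)
 * per iteration, the poll above gives up only after roughly 100 msec to
 * 400 msec of accumulated sleep time.
 */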
3244 
3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247 	if (tg3_flag(tp, NVRAM) &&
3248 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3249 	    tg3_flag(tp, FLASH) &&
3250 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3252 
3253 		addr = ((addr / tp->nvram_pagesize) <<
3254 			ATMEL_AT45DB0X1B_PAGE_POS) +
3255 		       (addr % tp->nvram_pagesize);
3256 
3257 	return addr;
3258 }
3259 
3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262 	if (tg3_flag(tp, NVRAM) &&
3263 	    tg3_flag(tp, NVRAM_BUFFERED) &&
3264 	    tg3_flag(tp, FLASH) &&
3265 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3267 
3268 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 			tp->nvram_pagesize) +
3270 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271 
3272 	return addr;
3273 }
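
/* Editor's note: worked example for the two translations above, assuming
 * the Atmel AT45DB0X1B's 264-byte page size and
 * ATMEL_AT45DB0X1B_PAGE_POS = 9 (per tg3.h): linear offset 600 is
 * page 2, byte 72, so the physical form is (2 << 9) + 72 = 0x448;
 * tg3_nvram_logical_addr() inverts this as
 * (0x448 >> 9) * 264 + (0x448 & 0x1ff) = 528 + 72 = 600.
 */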
3274 
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276  * the byteswapping settings for all other register accesses.
3277  * tg3 devices are BE devices, so on a BE machine, the data
3278  * returned will be exactly as it is seen in NVRAM.  On a LE
3279  * machine, the 32-bit value will be byteswapped.
3280  */
3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283 	int ret;
3284 
3285 	if (!tg3_flag(tp, NVRAM))
3286 		return tg3_nvram_read_using_eeprom(tp, offset, val);
3287 
3288 	offset = tg3_nvram_phys_addr(tp, offset);
3289 
3290 	if (offset > NVRAM_ADDR_MSK)
3291 		return -EINVAL;
3292 
3293 	ret = tg3_nvram_lock(tp);
3294 	if (ret)
3295 		return ret;
3296 
3297 	tg3_enable_nvram_access(tp);
3298 
3299 	tw32(NVRAM_ADDR, offset);
3300 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302 
3303 	if (ret == 0)
3304 		*val = tr32(NVRAM_RDDATA);
3305 
3306 	tg3_disable_nvram_access(tp);
3307 
3308 	tg3_nvram_unlock(tp);
3309 
3310 	return ret;
3311 }
3312 
3313 /* Ensures NVRAM data is in bytestream format. */
3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316 	u32 v;
3317 	int res = tg3_nvram_read(tp, offset, &v);
3318 	if (!res)
3319 		*val = cpu_to_be32(v);
3320 	return res;
3321 }
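
/* Editor's note: concrete walk-through.  For the NVRAM bytes
 * aa bb cc dd, the u32 from tg3_nvram_read() is 0xaabbccdd as a numeric
 * value, which a little-endian host would store byteswapped
 * (dd cc bb aa) -- hence the NOTE above tg3_nvram_read().  cpu_to_be32()
 * stores the value most-significant byte first instead, so the __be32
 * result is the bytestream aa bb cc dd on either host.
 */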
3322 
3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 				    u32 offset, u32 len, u8 *buf)
3325 {
3326 	int i, j, rc = 0;
3327 	u32 val;
3328 
3329 	for (i = 0; i < len; i += 4) {
3330 		u32 addr;
3331 		__be32 data;
3332 
3333 		addr = offset + i;
3334 
3335 		memcpy(&data, buf + i, 4);
3336 
3337 		/*
3338 		 * The SEEPROM interface expects the data to always be opposite
3339 		 * the native endian format.  We accomplish this by reversing
3340 		 * all the operations that would have been performed on the
3341 		 * data from a call to tg3_nvram_read_be32().
3342 		 */
3343 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344 
3345 		val = tr32(GRC_EEPROM_ADDR);
3346 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347 
3348 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 			EEPROM_ADDR_READ);
3350 		tw32(GRC_EEPROM_ADDR, val |
3351 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 			(addr & EEPROM_ADDR_ADDR_MASK) |
3353 			EEPROM_ADDR_START |
3354 			EEPROM_ADDR_WRITE);
3355 
3356 		for (j = 0; j < 1000; j++) {
3357 			val = tr32(GRC_EEPROM_ADDR);
3358 
3359 			if (val & EEPROM_ADDR_COMPLETE)
3360 				break;
3361 			msleep(1);
3362 		}
3363 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3364 			rc = -EBUSY;
3365 			break;
3366 		}
3367 	}
3368 
3369 	return rc;
3370 }
3371 
3372 /* offset and length are dword aligned */
3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374 		u8 *buf)
3375 {
3376 	int ret = 0;
3377 	u32 pagesize = tp->nvram_pagesize;
3378 	u32 pagemask = pagesize - 1;
3379 	u32 nvram_cmd;
3380 	u8 *tmp;
3381 
3382 	tmp = kmalloc(pagesize, GFP_KERNEL);
3383 	if (tmp == NULL)
3384 		return -ENOMEM;
3385 
3386 	while (len) {
3387 		int j;
3388 		u32 phy_addr, page_off, size;
3389 
3390 		phy_addr = offset & ~pagemask;
3391 
3392 		for (j = 0; j < pagesize; j += 4) {
3393 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 						  (__be32 *) (tmp + j));
3395 			if (ret)
3396 				break;
3397 		}
3398 		if (ret)
3399 			break;
3400 
3401 		page_off = offset & pagemask;
3402 		size = pagesize;
3403 		if (len < size)
3404 			size = len;
3405 
3406 		len -= size;
3407 
3408 		memcpy(tmp + page_off, buf, size);
3409 
3410 		offset = offset + (pagesize - page_off);
3411 
3412 		tg3_enable_nvram_access(tp);
3413 
3414 		/*
3415 		 * Before we can erase the flash page, we need
3416 		 * to issue a special "write enable" command.
3417 		 */
3418 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419 
3420 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 			break;
3422 
3423 		/* Erase the target page */
3424 		tw32(NVRAM_ADDR, phy_addr);
3425 
3426 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428 
3429 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 			break;
3431 
3432 		/* Issue another write enable to start the write. */
3433 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434 
3435 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 			break;
3437 
3438 		for (j = 0; j < pagesize; j += 4) {
3439 			__be32 data;
3440 
3441 			data = *((__be32 *) (tmp + j));
3442 
3443 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444 
3445 			tw32(NVRAM_ADDR, phy_addr + j);
3446 
3447 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448 				NVRAM_CMD_WR;
3449 
3450 			if (j == 0)
3451 				nvram_cmd |= NVRAM_CMD_FIRST;
3452 			else if (j == (pagesize - 4))
3453 				nvram_cmd |= NVRAM_CMD_LAST;
3454 
3455 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 			if (ret)
3457 				break;
3458 		}
3459 		if (ret)
3460 			break;
3461 	}
3462 
3463 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3465 
3466 	kfree(tmp);
3467 
3468 	return ret;
3469 }
3470 
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473 		u8 *buf)
3474 {
3475 	int i, ret = 0;
3476 
3477 	for (i = 0; i < len; i += 4, offset += 4) {
3478 		u32 page_off, phy_addr, nvram_cmd;
3479 		__be32 data;
3480 
3481 		memcpy(&data, buf + i, 4);
3482 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483 
3484 		page_off = offset % tp->nvram_pagesize;
3485 
3486 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3487 
3488 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489 
3490 		if (page_off == 0 || i == 0)
3491 			nvram_cmd |= NVRAM_CMD_FIRST;
3492 		if (page_off == (tp->nvram_pagesize - 4))
3493 			nvram_cmd |= NVRAM_CMD_LAST;
3494 
3495 		if (i == (len - 4))
3496 			nvram_cmd |= NVRAM_CMD_LAST;
3497 
3498 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 		    !tg3_flag(tp, FLASH) ||
3500 		    !tg3_flag(tp, 57765_PLUS))
3501 			tw32(NVRAM_ADDR, phy_addr);
3502 
3503 		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 		    !tg3_flag(tp, 5755_PLUS) &&
3505 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3506 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 			u32 cmd;
3508 
3509 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 			ret = tg3_nvram_exec_cmd(tp, cmd);
3511 			if (ret)
3512 				break;
3513 		}
3514 		if (!tg3_flag(tp, FLASH)) {
3515 			/* We always do complete word writes to eeprom. */
3516 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 		}
3518 
3519 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520 		if (ret)
3521 			break;
3522 	}
3523 	return ret;
3524 }
3525 
3526 /* offset and length are dword aligned */
3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529 	int ret;
3530 
3531 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534 		udelay(40);
3535 	}
3536 
3537 	if (!tg3_flag(tp, NVRAM)) {
3538 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539 	} else {
3540 		u32 grc_mode;
3541 
3542 		ret = tg3_nvram_lock(tp);
3543 		if (ret)
3544 			return ret;
3545 
3546 		tg3_enable_nvram_access(tp);
3547 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 			tw32(NVRAM_WRITE1, 0x406);
3549 
3550 		grc_mode = tr32(GRC_MODE);
3551 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552 
3553 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 				buf);
3556 		} else {
3557 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558 				buf);
3559 		}
3560 
3561 		grc_mode = tr32(GRC_MODE);
3562 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563 
3564 		tg3_disable_nvram_access(tp);
3565 		tg3_nvram_unlock(tp);
3566 	}
3567 
3568 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570 		udelay(40);
3571 	}
3572 
3573 	return ret;
3574 }
3575 
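/* Illustrative sketch, not part of the driver: a hypothetical caller
 * of tg3_nvram_write_block().  Per the comments above, offset and
 * length must be dword aligned; write protect, NVRAM arbitration and
 * the buffered vs. unbuffered split are all handled inside the helper.
 */
static inline int tg3_example_write_nvram(struct tg3 *tp, u32 offset,
					  u8 *buf, u32 len)
{
	if ((offset & 3) || (len & 3))
		return -EINVAL;		/* must be dword aligned */

	return tg3_nvram_write_block(tp, offset, len, buf);
}
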
3576 #define RX_CPU_SCRATCH_BASE	0x30000
3577 #define RX_CPU_SCRATCH_SIZE	0x04000
3578 #define TX_CPU_SCRATCH_BASE	0x34000
3579 #define TX_CPU_SCRATCH_SIZE	0x04000
3580 
3581 /* tp->lock is held. */
3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584 	int i;
3585 	const int iters = 10000;
3586 
3587 	for (i = 0; i < iters; i++) {
3588 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3590 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 			break;
3592 		if (pci_channel_offline(tp->pdev))
3593 			return -EBUSY;
3594 	}
3595 
3596 	return (i == iters) ? -EBUSY : 0;
3597 }
3598 
3599 /* tp->lock is held. */
3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602 	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603 
3604 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3606 	udelay(10);
3607 
3608 	return rc;
3609 }
3610 
3611 /* tp->lock is held. */
3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614 	return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616 
3617 /* tp->lock is held. */
3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3622 }
3623 
3624 /* tp->lock is held. */
3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627 	tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629 
3630 /* tp->lock is held. */
3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 	int rc;
3634 
3635 	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636 
3637 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639 
3640 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 		return 0;
3642 	}
3643 	if (cpu_base == RX_CPU_BASE) {
3644 		rc = tg3_rxcpu_pause(tp);
3645 	} else {
3646 		/*
3647 		 * There is only an Rx CPU for the 5750 derivative in the
3648 		 * BCM4785.
3649 		 */
3650 		if (tg3_flag(tp, IS_SSB_CORE))
3651 			return 0;
3652 
3653 		rc = tg3_txcpu_pause(tp);
3654 	}
3655 
3656 	if (rc) {
3657 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659 		return -ENODEV;
3660 	}
3661 
3662 	/* Clear firmware's nvram arbitration. */
3663 	if (tg3_flag(tp, NVRAM))
3664 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665 	return 0;
3666 }
3667 
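/* Illustrative sketch, not part of the driver: halting both embedded
 * RISC CPUs before a firmware load.  The BUG_ON() above forbids
 * halting the TX CPU on 5705-and-later parts, and the BCM4785 SSB
 * core has no TX CPU at all, so only the RX CPU is halted in those
 * cases.  The helper name is hypothetical.
 */
static inline int tg3_example_halt_cpus(struct tg3 *tp)
{
	int err = tg3_halt_cpu(tp, RX_CPU_BASE);

	if (!err && !tg3_flag(tp, 5705_PLUS))
		err = tg3_halt_cpu(tp, TX_CPU_BASE);

	return err;
}
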
3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 			   const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671 	int fw_len;
3672 
3673 	/* Non-fragmented firmware has one firmware header followed by a
3674 	 * contiguous chunk of data to be written. The length field in that
3675 	 * header is not the length of the data to be written but the
3676 	 * complete length of the bss. The data length is derived from
3677 	 * tp->fw->size minus the header size.
3678 	 *
3679 	 * Fragmented firmware has a main header followed by multiple
3680 	 * fragments. Each fragment is identical to non-fragmented firmware:
3681 	 * a firmware header followed by a contiguous chunk of data. In
3682 	 * the main header, the length field is unused and set to 0xffffffff.
3683 	 * In each fragment header the length is the entire size of that
3684 	 * fragment, i.e. fragment data + header length. The data length is
3685 	 * therefore the header's length field minus TG3_FW_HDR_LEN.
3686 	 */
3687 	if (tp->fw_len == 0xffffffff)
3688 		fw_len = be32_to_cpu(fw_hdr->len);
3689 	else
3690 		fw_len = tp->fw->size;
3691 
3692 	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
3694 
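/* Illustrative sketch, not part of the driver: the per-fragment dword
 * count implied by the comment above.  For a fragment whose header
 * len field covers header plus payload, the payload is
 * (len - TG3_FW_HDR_LEN) bytes, i.e. this many dwords:
 */
static inline int tg3_example_frag_dwords(u32 frag_hdr_len)
{
	return (frag_hdr_len - TG3_FW_HDR_LEN) / sizeof(u32);
}
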
3695 /* tp->lock is held. */
3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 				 u32 cpu_scratch_base, int cpu_scratch_size,
3698 				 const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700 	int err, i;
3701 	void (*write_op)(struct tg3 *, u32, u32);
3702 	int total_len = tp->fw->size;
3703 
3704 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 		netdev_err(tp->dev,
3706 			   "%s: trying to load TX cpu firmware on a 5705 or later chip\n",
3707 			   __func__);
3708 		return -EINVAL;
3709 	}
3710 
3711 	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 		write_op = tg3_write_mem;
3713 	else
3714 		write_op = tg3_write_indirect_reg32;
3715 
3716 	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 		/* It is possible that bootcode is still loading at this point.
3718 		 * Get the nvram lock first before halting the cpu.
3719 		 */
3720 		int lock_err = tg3_nvram_lock(tp);
3721 		err = tg3_halt_cpu(tp, cpu_base);
3722 		if (!lock_err)
3723 			tg3_nvram_unlock(tp);
3724 		if (err)
3725 			goto out;
3726 
3727 		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 			write_op(tp, cpu_scratch_base + i, 0);
3729 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 		tw32(cpu_base + CPU_MODE,
3731 		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 	} else {
3733 		/* Subtract additional main header for fragmented firmware and
3734 		 * advance to the first fragment
3735 		 */
3736 		total_len -= TG3_FW_HDR_LEN;
3737 		fw_hdr++;
3738 	}
3739 
3740 	do {
3741 		__be32 *fw_data = (__be32 *)(fw_hdr + 1);
3742 		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 			write_op(tp, cpu_scratch_base +
3744 				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 				     (i * sizeof(u32)),
3746 				 be32_to_cpu(fw_data[i]));
3747 
3748 		total_len -= be32_to_cpu(fw_hdr->len);
3749 
3750 		/* Advance to next fragment */
3751 		fw_hdr = (struct tg3_firmware_hdr *)
3752 			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 	} while (total_len > 0);
3754 
3755 	err = 0;
3756 
3757 out:
3758 	return err;
3759 }
3760 
3761 /* tp->lock is held. */
3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764 	int i;
3765 	const int iters = 5;
3766 
3767 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 	tw32_f(cpu_base + CPU_PC, pc);
3769 
3770 	for (i = 0; i < iters; i++) {
3771 		if (tr32(cpu_base + CPU_PC) == pc)
3772 			break;
3773 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3775 		tw32_f(cpu_base + CPU_PC, pc);
3776 		udelay(1000);
3777 	}
3778 
3779 	return (i == iters) ? -EBUSY : 0;
3780 }
3781 
3782 /* tp->lock is held. */
3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785 	const struct tg3_firmware_hdr *fw_hdr;
3786 	int err;
3787 
3788 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789 
3790 	/* Firmware blob starts with version numbers, followed by
3791 	 * start address and length. We are setting complete length.
3792 	 * length = end_address_of_bss - start_address_of_text.
3793 	 * Remainder is the blob to be loaded contiguously
3794 	 * from start address. */
3795 
3796 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798 				    fw_hdr);
3799 	if (err)
3800 		return err;
3801 
3802 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804 				    fw_hdr);
3805 	if (err)
3806 		return err;
3807 
3808 	/* Now startup only the RX cpu. */
3809 	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 				       be32_to_cpu(fw_hdr->base_addr));
3811 	if (err) {
3812 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3813 			   "should be %08x\n", __func__,
3814 			   tr32(RX_CPU_BASE + CPU_PC),
3815 			   be32_to_cpu(fw_hdr->base_addr));
3816 		return -ENODEV;
3817 	}
3818 
3819 	tg3_rxcpu_resume(tp);
3820 
3821 	return 0;
3822 }
3823 
3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826 	const int iters = 1000;
3827 	int i;
3828 	u32 val;
3829 
3830 	/* Wait for the boot code to complete initialization and enter its
3831 	 * service loop. It is then safe to download service patches.
3832 	 */
3833 	for (i = 0; i < iters; i++) {
3834 		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835 			break;
3836 
3837 		udelay(10);
3838 	}
3839 
3840 	if (i == iters) {
3841 		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842 		return -EBUSY;
3843 	}
3844 
3845 	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 	if (val & 0xff) {
3847 		netdev_warn(tp->dev,
3848 			    "Other patches exist. Not downloading EEE patch\n");
3849 		return -EEXIST;
3850 	}
3851 
3852 	return 0;
3853 }
3854 
3855 /* tp->lock is held. */
3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858 	struct tg3_firmware_hdr *fw_hdr;
3859 
3860 	if (!tg3_flag(tp, NO_NVRAM))
3861 		return;
3862 
3863 	if (tg3_validate_rxcpu_state(tp))
3864 		return;
3865 
3866 	if (!tp->fw)
3867 		return;
3868 
3869 	/* This firmware blob uses a different format from older firmware
3870 	 * releases, as described below. The main difference is that the data
3871 	 * is fragmented and written to non-contiguous locations.
3872 	 *
3873 	 * The image begins with a firmware header identical to other
3874 	 * firmware, consisting of version, base addr and length. The length
3875 	 * here is unused and set to 0xffffffff.
3876 	 *
3877 	 * This is followed by a series of firmware fragments, each
3878 	 * individually identical to previous firmware, i.e. a firmware
3879 	 * header followed by the data for that fragment. The version
3880 	 * field of the individual fragment header is unused.
3881 	 */
3882 
3883 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 		return;
3886 
3887 	if (tg3_rxcpu_pause(tp))
3888 		return;
3889 
3890 	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892 
3893 	tg3_rxcpu_resume(tp);
3894 }
3895 
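/* Layout of the fragmented 57766 image described in
 * tg3_load_57766_firmware() above (illustrative; fragment sizes vary
 * per firmware release):
 *
 *	+--------------------------------+
 *	| main header (len = 0xffffffff) |  TG3_FW_HDR_LEN bytes
 *	+--------------------------------+
 *	| fragment 1 header              |  len = header + data size
 *	| fragment 1 data                |
 *	+--------------------------------+
 *	| fragment 2 header              |
 *	| fragment 2 data                |
 *	+--------------------------------+
 *	| ...                            |
 *	+--------------------------------+
 */
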
3896 /* tp->lock is held. */
3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899 	const struct tg3_firmware_hdr *fw_hdr;
3900 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 	int err;
3902 
3903 	if (!tg3_flag(tp, FW_TSO))
3904 		return 0;
3905 
3906 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907 
3908 	/* Firmware blob starts with version numbers, followed by
3909 	 * start address and length. We are setting complete length.
3910 	 * length = end_address_of_bss - start_address_of_text.
3911 	 * Remainder is the blob to be loaded contiguously
3912 	 * from start address. */
3913 
3914 	cpu_scratch_size = tp->fw_len;
3915 
3916 	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 		cpu_base = RX_CPU_BASE;
3918 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 	} else {
3920 		cpu_base = TX_CPU_BASE;
3921 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 	}
3924 
3925 	err = tg3_load_firmware_cpu(tp, cpu_base,
3926 				    cpu_scratch_base, cpu_scratch_size,
3927 				    fw_hdr);
3928 	if (err)
3929 		return err;
3930 
3931 	/* Now startup the cpu. */
3932 	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 				       be32_to_cpu(fw_hdr->base_addr));
3934 	if (err) {
3935 		netdev_err(tp->dev,
3936 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3937 			   __func__, tr32(cpu_base + CPU_PC),
3938 			   be32_to_cpu(fw_hdr->base_addr));
3939 		return -ENODEV;
3940 	}
3941 
3942 	tg3_resume_cpu(tp, cpu_base);
3943 	return 0;
3944 }
3945 
3946 /* tp->lock is held. */
3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 				   int index)
3949 {
3950 	u32 addr_high, addr_low;
3951 
3952 	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 		    (mac_addr[4] <<  8) | mac_addr[5]);
3955 
3956 	if (index < 4) {
3957 		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 	} else {
3960 		index -= 4;
3961 		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 	}
3964 }
3965 
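/* Worked example for the register packing above (illustrative): for
 * MAC address 00:10:18:aa:bb:cc,
 *
 *	addr_high = 0x00000010	(bytes 0-1)
 *	addr_low  = 0x18aabbcc	(bytes 2-5)
 *
 * Indices 0-3 select the MAC_ADDR_* slots, indices 4-15 the extended
 * MAC_EXTADDR_* slots.
 */
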
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 	u32 addr_high;
3970 	int i;
3971 
3972 	for (i = 0; i < 4; i++) {
3973 		if (i == 1 && skip_mac_1)
3974 			continue;
3975 		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 	}
3977 
3978 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 		for (i = 4; i < 16; i++)
3981 			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 	}
3983 
3984 	addr_high = (tp->dev->dev_addr[0] +
3985 		     tp->dev->dev_addr[1] +
3986 		     tp->dev->dev_addr[2] +
3987 		     tp->dev->dev_addr[3] +
3988 		     tp->dev->dev_addr[4] +
3989 		     tp->dev->dev_addr[5]) &
3990 		TX_BACKOFF_SEED_MASK;
3991 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
3993 
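/* Worked example for the backoff seed above (illustrative): for MAC
 * address 00:10:18:aa:bb:cc the byte sum is
 *
 *	0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259
 *
 * which is masked with TX_BACKOFF_SEED_MASK before being written to
 * MAC_TX_BACKOFF_SEED.
 */
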
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 	/*
3997 	 * Make sure register accesses (indirect or otherwise) will function
3998 	 * correctly.
3999 	 */
4000 	pci_write_config_dword(tp->pdev,
4001 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003 
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 	int err;
4007 
4008 	tg3_enable_register_access(tp);
4009 
4010 	err = pci_set_power_state(tp->pdev, PCI_D0);
4011 	if (!err) {
4012 		/* Switch out of Vaux if it is a NIC */
4013 		tg3_pwrsrc_switch_to_vmain(tp);
4014 	} else {
4015 		netdev_err(tp->dev, "Transition to D0 failed\n");
4016 	}
4017 
4018 	return err;
4019 }
4020 
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022 
4023 static void tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 	u32 misc_host_ctrl;
4026 	bool device_should_wake, do_low_power;
4027 
4028 	tg3_enable_register_access(tp);
4029 
4030 	/* Restore the CLKREQ setting. */
4031 	if (tg3_flag(tp, CLKREQ_BUG))
4032 		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 					 PCI_EXP_LNKCTL_CLKREQ_EN);
4034 
4035 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 	tw32(TG3PCI_MISC_HOST_CTRL,
4037 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038 
4039 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 			     tg3_flag(tp, WOL_ENABLE);
4041 
4042 	if (tg3_flag(tp, USE_PHYLIB)) {
4043 		do_low_power = false;
4044 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 			__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 			struct phy_device *phydev;
4048 			u32 phyid;
4049 
4050 			phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051 
4052 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053 
4054 			tp->link_config.speed = phydev->speed;
4055 			tp->link_config.duplex = phydev->duplex;
4056 			tp->link_config.autoneg = phydev->autoneg;
4057 			ethtool_convert_link_mode_to_legacy_u32(
4058 				&tp->link_config.advertising,
4059 				phydev->advertising);
4060 
4061 			linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 					 advertising);
4064 			linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 					 advertising);
4066 			linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 					 advertising);
4068 
4069 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 				if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 							 advertising);
4073 					linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 							 advertising);
4075 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 							 advertising);
4077 				} else {
4078 					linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 							 advertising);
4080 				}
4081 			}
4082 
4083 			linkmode_copy(phydev->advertising, advertising);
4084 			phy_start_aneg(phydev);
4085 
4086 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 			if (phyid != PHY_ID_BCMAC131) {
4088 				phyid &= PHY_BCM_OUI_MASK;
4089 				if (phyid == PHY_BCM_OUI_1 ||
4090 				    phyid == PHY_BCM_OUI_2 ||
4091 				    phyid == PHY_BCM_OUI_3)
4092 					do_low_power = true;
4093 			}
4094 		}
4095 	} else {
4096 		do_low_power = true;
4097 
4098 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100 
4101 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 			tg3_setup_phy(tp, false);
4103 	}
4104 
4105 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 		u32 val;
4107 
4108 		val = tr32(GRC_VCPU_EXT_CTRL);
4109 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 		int i;
4112 		u32 val;
4113 
4114 		for (i = 0; i < 200; i++) {
4115 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117 				break;
4118 			msleep(1);
4119 		}
4120 	}
4121 	if (tg3_flag(tp, WOL_CAP))
4122 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 						     WOL_DRV_STATE_SHUTDOWN |
4124 						     WOL_DRV_WOL |
4125 						     WOL_SET_MAGIC_PKT);
4126 
4127 	if (device_should_wake) {
4128 		u32 mac_mode;
4129 
4130 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 			if (do_low_power &&
4132 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 				tg3_phy_auxctl_write(tp,
4134 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 				udelay(40);
4139 			}
4140 
4141 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 				mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 			else if (tp->phy_flags &
4144 				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 				if (tp->link_config.active_speed == SPEED_1000)
4146 					mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 				else
4148 					mac_mode = MAC_MODE_PORT_MODE_MII;
4149 			} else
4150 				mac_mode = MAC_MODE_PORT_MODE_MII;
4151 
4152 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 					     SPEED_100 : SPEED_10;
4156 				if (tg3_5700_link_polarity(tp, speed))
4157 					mac_mode |= MAC_MODE_LINK_POLARITY;
4158 				else
4159 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 			}
4161 		} else {
4162 			mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 		}
4164 
4165 		if (!tg3_flag(tp, 5750_PLUS))
4166 			tw32(MAC_LED_CTRL, tp->led_ctrl);
4167 
4168 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172 
4173 		if (tg3_flag(tp, ENABLE_APE))
4174 			mac_mode |= MAC_MODE_APE_TX_EN |
4175 				    MAC_MODE_APE_RX_EN |
4176 				    MAC_MODE_TDE_ENABLE;
4177 
4178 		tw32_f(MAC_MODE, mac_mode);
4179 		udelay(100);
4180 
4181 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 		udelay(10);
4183 	}
4184 
4185 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 		u32 base_val;
4189 
4190 		base_val = tp->pci_clock_ctrl;
4191 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 			     CLOCK_CTRL_TXCLK_DISABLE);
4193 
4194 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 	} else if (tg3_flag(tp, 5780_CLASS) ||
4197 		   tg3_flag(tp, CPMU_PRESENT) ||
4198 		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 		/* do nothing */
4200 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 		u32 newbits1, newbits2;
4202 
4203 		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 				    CLOCK_CTRL_TXCLK_DISABLE |
4207 				    CLOCK_CTRL_ALTCLK);
4208 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 		} else if (tg3_flag(tp, 5705_PLUS)) {
4210 			newbits1 = CLOCK_CTRL_625_CORE;
4211 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 		} else {
4213 			newbits1 = CLOCK_CTRL_ALTCLK;
4214 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 		}
4216 
4217 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 			    40);
4219 
4220 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 			    40);
4222 
4223 		if (!tg3_flag(tp, 5705_PLUS)) {
4224 			u32 newbits3;
4225 
4226 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 					    CLOCK_CTRL_TXCLK_DISABLE |
4230 					    CLOCK_CTRL_44MHZ_CORE);
4231 			} else {
4232 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 			}
4234 
4235 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 				    tp->pci_clock_ctrl | newbits3, 40);
4237 		}
4238 	}
4239 
4240 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 		tg3_power_down_phy(tp, do_low_power);
4242 
4243 	tg3_frob_aux_power(tp, true);
4244 
4245 	/* Workaround for unstable PLL clock */
4246 	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 		u32 val = tr32(0x7d00);
4250 
4251 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 		tw32(0x7d00, val);
4253 		if (!tg3_flag(tp, ENABLE_ASF)) {
4254 			int err;
4255 
4256 			err = tg3_nvram_lock(tp);
4257 			tg3_halt_cpu(tp, RX_CPU_BASE);
4258 			if (!err)
4259 				tg3_nvram_unlock(tp);
4260 		}
4261 	}
4262 
4263 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264 
4265 	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266 
4267 	return;
4268 }
4269 
4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 	pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275 
4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 	case MII_TG3_AUX_STAT_10HALF:
4280 		*speed = SPEED_10;
4281 		*duplex = DUPLEX_HALF;
4282 		break;
4283 
4284 	case MII_TG3_AUX_STAT_10FULL:
4285 		*speed = SPEED_10;
4286 		*duplex = DUPLEX_FULL;
4287 		break;
4288 
4289 	case MII_TG3_AUX_STAT_100HALF:
4290 		*speed = SPEED_100;
4291 		*duplex = DUPLEX_HALF;
4292 		break;
4293 
4294 	case MII_TG3_AUX_STAT_100FULL:
4295 		*speed = SPEED_100;
4296 		*duplex = DUPLEX_FULL;
4297 		break;
4298 
4299 	case MII_TG3_AUX_STAT_1000HALF:
4300 		*speed = SPEED_1000;
4301 		*duplex = DUPLEX_HALF;
4302 		break;
4303 
4304 	case MII_TG3_AUX_STAT_1000FULL:
4305 		*speed = SPEED_1000;
4306 		*duplex = DUPLEX_FULL;
4307 		break;
4308 
4309 	default:
4310 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 				 SPEED_10;
4313 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314 				  DUPLEX_HALF;
4315 			break;
4316 		}
4317 		*speed = SPEED_UNKNOWN;
4318 		*duplex = DUPLEX_UNKNOWN;
4319 		break;
4320 	}
4321 }
4322 
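/* Illustrative usage, not part of the driver (the helper name is
 * hypothetical): MII_TG3_AUX_STAT_100FULL decodes to SPEED_100 /
 * DUPLEX_FULL, while an unrecognized value on a non-FET PHY falls
 * through to SPEED_UNKNOWN / DUPLEX_UNKNOWN.
 */
static inline bool tg3_example_link_is_100fdx(struct tg3 *tp, u32 aux_stat)
{
	u32 speed;
	u8 duplex;

	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);

	return speed == SPEED_100 && duplex == DUPLEX_FULL;
}
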
4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325 	int err = 0;
4326 	u32 val, new_adv;
4327 
4328 	new_adv = ADVERTISE_CSMA;
4329 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 	new_adv |= mii_advertise_flowctrl(flowctrl);
4331 
4332 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 	if (err)
4334 		goto done;
4335 
4336 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338 
4339 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342 
4343 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344 		if (err)
4345 			goto done;
4346 	}
4347 
4348 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 		goto done;
4350 
4351 	tw32(TG3_CPMU_EEE_MODE,
4352 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353 
4354 	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355 	if (!err) {
4356 		u32 err2;
4357 
4358 		if (!tp->eee.eee_enabled)
4359 			val = 0;
4360 		else
4361 			val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4362 
4363 		mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4364 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4365 		if (err)
4366 			val = 0;
4367 
4368 		switch (tg3_asic_rev(tp)) {
4369 		case ASIC_REV_5717:
4370 		case ASIC_REV_57765:
4371 		case ASIC_REV_57766:
4372 		case ASIC_REV_5719:
4373 			/* If we advertised any EEE link modes above... */
4374 			if (val)
4375 				val = MII_TG3_DSP_TAP26_ALNOKO |
4376 				      MII_TG3_DSP_TAP26_RMRXSTO |
4377 				      MII_TG3_DSP_TAP26_OPCSINPT;
4378 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4379 			fallthrough;
4380 		case ASIC_REV_5720:
4381 		case ASIC_REV_5762:
4382 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4383 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4384 						 MII_TG3_DSP_CH34TP2_HIBW01);
4385 		}
4386 
4387 		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4388 		if (!err)
4389 			err = err2;
4390 	}
4391 
4392 done:
4393 	return err;
4394 }
4395 
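/* Worked example for the encoding above (illustrative): with
 * advertise = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full and
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX, MII_ADVERTISE is written
 * with ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
 * ADVERTISE_PAUSE_CAP, while any gigabit bits go to MII_CTRL1000
 * separately.
 */
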
4396 static void tg3_phy_copper_begin(struct tg3 *tp)
4397 {
4398 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4399 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4400 		u32 adv, fc;
4401 
4402 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4403 		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4404 			adv = ADVERTISED_10baseT_Half |
4405 			      ADVERTISED_10baseT_Full;
4406 			if (tg3_flag(tp, WOL_SPEED_100MB))
4407 				adv |= ADVERTISED_100baseT_Half |
4408 				       ADVERTISED_100baseT_Full;
4409 			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4410 				if (!(tp->phy_flags &
4411 				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4412 					adv |= ADVERTISED_1000baseT_Half;
4413 				adv |= ADVERTISED_1000baseT_Full;
4414 			}
4415 
4416 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4417 		} else {
4418 			adv = tp->link_config.advertising;
4419 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4420 				adv &= ~(ADVERTISED_1000baseT_Half |
4421 					 ADVERTISED_1000baseT_Full);
4422 
4423 			fc = tp->link_config.flowctrl;
4424 		}
4425 
4426 		tg3_phy_autoneg_cfg(tp, adv, fc);
4427 
4428 		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4429 		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4430 			/* Normally during power down we want to autonegotiate
4431 			 * the lowest possible speed for WOL. However, to avoid
4432 			 * link flap, we leave it untouched.
4433 			 */
4434 			return;
4435 		}
4436 
4437 		tg3_writephy(tp, MII_BMCR,
4438 			     BMCR_ANENABLE | BMCR_ANRESTART);
4439 	} else {
4440 		int i;
4441 		u32 bmcr, orig_bmcr;
4442 
4443 		tp->link_config.active_speed = tp->link_config.speed;
4444 		tp->link_config.active_duplex = tp->link_config.duplex;
4445 
4446 		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4447 			/* With autoneg disabled, 5715 only links up when the
4448 			 * advertisement register has the configured speed
4449 			 * enabled.
4450 			 */
4451 			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4452 		}
4453 
4454 		bmcr = 0;
4455 		switch (tp->link_config.speed) {
4456 		default:
4457 		case SPEED_10:
4458 			break;
4459 
4460 		case SPEED_100:
4461 			bmcr |= BMCR_SPEED100;
4462 			break;
4463 
4464 		case SPEED_1000:
4465 			bmcr |= BMCR_SPEED1000;
4466 			break;
4467 		}
4468 
4469 		if (tp->link_config.duplex == DUPLEX_FULL)
4470 			bmcr |= BMCR_FULLDPLX;
4471 
4472 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4473 		    (bmcr != orig_bmcr)) {
4474 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4475 			for (i = 0; i < 1500; i++) {
4476 				u32 tmp;
4477 
4478 				udelay(10);
4479 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4480 				    tg3_readphy(tp, MII_BMSR, &tmp))
4481 					continue;
4482 				if (!(tmp & BMSR_LSTATUS)) {
4483 					udelay(40);
4484 					break;
4485 				}
4486 			}
4487 			tg3_writephy(tp, MII_BMCR, bmcr);
4488 			udelay(40);
4489 		}
4490 	}
4491 }
4492 
4493 static int tg3_phy_pull_config(struct tg3 *tp)
4494 {
4495 	int err;
4496 	u32 val;
4497 
4498 	err = tg3_readphy(tp, MII_BMCR, &val);
4499 	if (err)
4500 		goto done;
4501 
4502 	if (!(val & BMCR_ANENABLE)) {
4503 		tp->link_config.autoneg = AUTONEG_DISABLE;
4504 		tp->link_config.advertising = 0;
4505 		tg3_flag_clear(tp, PAUSE_AUTONEG);
4506 
4507 		err = -EIO;
4508 
4509 		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4510 		case 0:
4511 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 				goto done;
4513 
4514 			tp->link_config.speed = SPEED_10;
4515 			break;
4516 		case BMCR_SPEED100:
4517 			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4518 				goto done;
4519 
4520 			tp->link_config.speed = SPEED_100;
4521 			break;
4522 		case BMCR_SPEED1000:
4523 			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4524 				tp->link_config.speed = SPEED_1000;
4525 				break;
4526 			}
4527 			fallthrough;
4528 		default:
4529 			goto done;
4530 		}
4531 
4532 		if (val & BMCR_FULLDPLX)
4533 			tp->link_config.duplex = DUPLEX_FULL;
4534 		else
4535 			tp->link_config.duplex = DUPLEX_HALF;
4536 
4537 		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4538 
4539 		err = 0;
4540 		goto done;
4541 	}
4542 
4543 	tp->link_config.autoneg = AUTONEG_ENABLE;
4544 	tp->link_config.advertising = ADVERTISED_Autoneg;
4545 	tg3_flag_set(tp, PAUSE_AUTONEG);
4546 
4547 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4548 		u32 adv;
4549 
4550 		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4551 		if (err)
4552 			goto done;
4553 
4554 		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4555 		tp->link_config.advertising |= adv | ADVERTISED_TP;
4556 
4557 		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4558 	} else {
4559 		tp->link_config.advertising |= ADVERTISED_FIBRE;
4560 	}
4561 
4562 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4563 		u32 adv;
4564 
4565 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4566 			err = tg3_readphy(tp, MII_CTRL1000, &val);
4567 			if (err)
4568 				goto done;
4569 
4570 			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4571 		} else {
4572 			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4573 			if (err)
4574 				goto done;
4575 
4576 			adv = tg3_decode_flowctrl_1000X(val);
4577 			tp->link_config.flowctrl = adv;
4578 
4579 			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4580 			adv = mii_adv_to_ethtool_adv_x(val);
4581 		}
4582 
4583 		tp->link_config.advertising |= adv;
4584 	}
4585 
4586 done:
4587 	return err;
4588 }
4589 
4590 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4591 {
4592 	int err;
4593 
4594 	/* Turn off tap power management. */
4595 	/* Set Extended packet length bit */
4596 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4597 
4598 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4599 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4600 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4601 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4602 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4603 
4604 	udelay(40);
4605 
4606 	return err;
4607 }
4608 
4609 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4610 {
4611 	struct ethtool_keee eee = {};
4612 
4613 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4614 		return true;
4615 
4616 	tg3_eee_pull_config(tp, &eee);
4617 
4618 	if (tp->eee.eee_enabled) {
4619 		if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4620 		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4621 		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4622 			return false;
4623 	} else {
4624 		/* EEE is disabled but we're advertising */
4625 		if (!linkmode_empty(eee.advertised))
4626 			return false;
4627 	}
4628 
4629 	return true;
4630 }
4631 
4632 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4633 {
4634 	u32 advmsk, tgtadv, advertising;
4635 
4636 	advertising = tp->link_config.advertising;
4637 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4638 
4639 	advmsk = ADVERTISE_ALL;
4640 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4641 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4642 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4643 	}
4644 
4645 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4646 		return false;
4647 
4648 	if ((*lcladv & advmsk) != tgtadv)
4649 		return false;
4650 
4651 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4652 		u32 tg3_ctrl;
4653 
4654 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4655 
4656 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4657 			return false;
4658 
4659 		if (tgtadv &&
4660 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4661 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4662 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4663 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4664 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4665 		} else {
4666 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4667 		}
4668 
4669 		if (tg3_ctrl != tgtadv)
4670 			return false;
4671 	}
4672 
4673 	return true;
4674 }
4675 
4676 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4677 {
4678 	u32 lpeth = 0;
4679 
4680 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4681 		u32 val;
4682 
4683 		if (tg3_readphy(tp, MII_STAT1000, &val))
4684 			return false;
4685 
4686 		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4687 	}
4688 
4689 	if (tg3_readphy(tp, MII_LPA, rmtadv))
4690 		return false;
4691 
4692 	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4693 	tp->link_config.rmt_adv = lpeth;
4694 
4695 	return true;
4696 }
4697 
4698 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4699 {
4700 	if (curr_link_up != tp->link_up) {
4701 		if (curr_link_up) {
4702 			netif_carrier_on(tp->dev);
4703 		} else {
4704 			netif_carrier_off(tp->dev);
4705 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4706 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4707 		}
4708 
4709 		tg3_link_report(tp);
4710 		return true;
4711 	}
4712 
4713 	return false;
4714 }
4715 
4716 static void tg3_clear_mac_status(struct tg3 *tp)
4717 {
4718 	tw32(MAC_EVENT, 0);
4719 
4720 	tw32_f(MAC_STATUS,
4721 	       MAC_STATUS_SYNC_CHANGED |
4722 	       MAC_STATUS_CFG_CHANGED |
4723 	       MAC_STATUS_MI_COMPLETION |
4724 	       MAC_STATUS_LNKSTATE_CHANGED);
4725 	udelay(40);
4726 }
4727 
4728 static void tg3_setup_eee(struct tg3 *tp)
4729 {
4730 	u32 val;
4731 
4732 	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4733 	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4734 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4735 		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4736 
4737 	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4738 
4739 	tw32_f(TG3_CPMU_EEE_CTRL,
4740 	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4741 
4742 	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4743 	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4744 	      TG3_CPMU_EEEMD_LPI_IN_RX |
4745 	      TG3_CPMU_EEEMD_EEE_ENABLE;
4746 
4747 	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4748 		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4749 
4750 	if (tg3_flag(tp, ENABLE_APE))
4751 		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4752 
4753 	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4754 
4755 	tw32_f(TG3_CPMU_EEE_DBTMR1,
4756 	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4757 	       (tp->eee.tx_lpi_timer & 0xffff));
4758 
4759 	tw32_f(TG3_CPMU_EEE_DBTMR2,
4760 	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4761 	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4762 }
4763 
4764 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4765 {
4766 	bool current_link_up;
4767 	u32 bmsr, val;
4768 	u32 lcl_adv, rmt_adv;
4769 	u32 current_speed;
4770 	u8 current_duplex;
4771 	int i, err;
4772 
4773 	tg3_clear_mac_status(tp);
4774 
4775 	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4776 		tw32_f(MAC_MI_MODE,
4777 		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4778 		udelay(80);
4779 	}
4780 
4781 	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4782 
4783 	/* Some third-party PHYs need to be reset on link going
4784 	 * down.
4785 	 */
4786 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4787 	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4788 	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4789 	    tp->link_up) {
4790 		tg3_readphy(tp, MII_BMSR, &bmsr);
4791 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4792 		    !(bmsr & BMSR_LSTATUS))
4793 			force_reset = true;
4794 	}
4795 	if (force_reset)
4796 		tg3_phy_reset(tp);
4797 
4798 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4799 		tg3_readphy(tp, MII_BMSR, &bmsr);
4800 		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4801 		    !tg3_flag(tp, INIT_COMPLETE))
4802 			bmsr = 0;
4803 
4804 		if (!(bmsr & BMSR_LSTATUS)) {
4805 			err = tg3_init_5401phy_dsp(tp);
4806 			if (err)
4807 				return err;
4808 
4809 			tg3_readphy(tp, MII_BMSR, &bmsr);
4810 			for (i = 0; i < 1000; i++) {
4811 				udelay(10);
4812 				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4813 				    (bmsr & BMSR_LSTATUS)) {
4814 					udelay(40);
4815 					break;
4816 				}
4817 			}
4818 
4819 			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4820 			    TG3_PHY_REV_BCM5401_B0 &&
4821 			    !(bmsr & BMSR_LSTATUS) &&
4822 			    tp->link_config.active_speed == SPEED_1000) {
4823 				err = tg3_phy_reset(tp);
4824 				if (!err)
4825 					err = tg3_init_5401phy_dsp(tp);
4826 				if (err)
4827 					return err;
4828 			}
4829 		}
4830 	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4831 		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4832 		/* 5701 {A0,B0} CRC bug workaround */
4833 		tg3_writephy(tp, 0x15, 0x0a75);
4834 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4835 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4836 		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837 	}
4838 
4839 	/* Clear pending interrupts... */
4840 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841 	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4842 
4843 	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4844 		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4845 	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4846 		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4847 
4848 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4849 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4850 		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4851 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4852 				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4853 		else
4854 			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4855 	}
4856 
4857 	current_link_up = false;
4858 	current_speed = SPEED_UNKNOWN;
4859 	current_duplex = DUPLEX_UNKNOWN;
4860 	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4861 	tp->link_config.rmt_adv = 0;
4862 
4863 	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4864 		err = tg3_phy_auxctl_read(tp,
4865 					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4866 					  &val);
4867 		if (!err && !(val & (1 << 10))) {
4868 			tg3_phy_auxctl_write(tp,
4869 					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 					     val | (1 << 10));
4871 			goto relink;
4872 		}
4873 	}
4874 
4875 	bmsr = 0;
4876 	for (i = 0; i < 100; i++) {
4877 		tg3_readphy(tp, MII_BMSR, &bmsr);
4878 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4879 		    (bmsr & BMSR_LSTATUS))
4880 			break;
4881 		udelay(40);
4882 	}
4883 
4884 	if (bmsr & BMSR_LSTATUS) {
4885 		u32 aux_stat, bmcr;
4886 
4887 		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4888 		for (i = 0; i < 2000; i++) {
4889 			udelay(10);
4890 			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4891 			    aux_stat)
4892 				break;
4893 		}
4894 
4895 		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4896 					     &current_speed,
4897 					     &current_duplex);
4898 
4899 		bmcr = 0;
4900 		for (i = 0; i < 200; i++) {
4901 			tg3_readphy(tp, MII_BMCR, &bmcr);
4902 			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4903 				continue;
4904 			if (bmcr && bmcr != 0x7fff)
4905 				break;
4906 			udelay(10);
4907 		}
4908 
4909 		lcl_adv = 0;
4910 		rmt_adv = 0;
4911 
4912 		tp->link_config.active_speed = current_speed;
4913 		tp->link_config.active_duplex = current_duplex;
4914 
4915 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916 			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4917 
4918 			if ((bmcr & BMCR_ANENABLE) &&
4919 			    eee_config_ok &&
4920 			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4921 			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4922 				current_link_up = true;
4923 
4924 			/* Changes to EEE settings take effect only after a phy
4925 			 * reset.  If we have skipped a reset due to Link Flap
4926 			 * Avoidance being enabled, do it now.
4927 			 */
4928 			if (!eee_config_ok &&
4929 			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4930 			    !force_reset) {
4931 				tg3_setup_eee(tp);
4932 				tg3_phy_reset(tp);
4933 			}
4934 		} else {
4935 			if (!(bmcr & BMCR_ANENABLE) &&
4936 			    tp->link_config.speed == current_speed &&
4937 			    tp->link_config.duplex == current_duplex) {
4938 				current_link_up = true;
4939 			}
4940 		}
4941 
4942 		if (current_link_up &&
4943 		    tp->link_config.active_duplex == DUPLEX_FULL) {
4944 			u32 reg, bit;
4945 
4946 			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4947 				reg = MII_TG3_FET_GEN_STAT;
4948 				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4949 			} else {
4950 				reg = MII_TG3_EXT_STAT;
4951 				bit = MII_TG3_EXT_STAT_MDIX;
4952 			}
4953 
4954 			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4955 				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4956 
4957 			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4958 		}
4959 	}
4960 
4961 relink:
4962 	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4963 		tg3_phy_copper_begin(tp);
4964 
4965 		if (tg3_flag(tp, ROBOSWITCH)) {
4966 			current_link_up = true;
4967 			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4968 			current_speed = SPEED_1000;
4969 			current_duplex = DUPLEX_FULL;
4970 			tp->link_config.active_speed = current_speed;
4971 			tp->link_config.active_duplex = current_duplex;
4972 		}
4973 
4974 		tg3_readphy(tp, MII_BMSR, &bmsr);
4975 		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4976 		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4977 			current_link_up = true;
4978 	}
4979 
4980 	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4981 	if (current_link_up) {
4982 		if (tp->link_config.active_speed == SPEED_100 ||
4983 		    tp->link_config.active_speed == SPEED_10)
4984 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4985 		else
4986 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4987 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4988 		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 	else
4990 		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991 
4992 	/* In order for the 5750 core in the BCM4785 chip to work properly
4993 	 * in RGMII mode, the LED Control Register must be set up.
4994 	 */
4995 	if (tg3_flag(tp, RGMII_MODE)) {
4996 		u32 led_ctrl = tr32(MAC_LED_CTRL);
4997 		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4998 
4999 		if (tp->link_config.active_speed == SPEED_10)
5000 			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5001 		else if (tp->link_config.active_speed == SPEED_100)
5002 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 				     LED_CTRL_100MBPS_ON);
5004 		else if (tp->link_config.active_speed == SPEED_1000)
5005 			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5006 				     LED_CTRL_1000MBPS_ON);
5007 
5008 		tw32(MAC_LED_CTRL, led_ctrl);
5009 		udelay(40);
5010 	}
5011 
5012 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5013 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5014 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5015 
5016 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5017 		if (current_link_up &&
5018 		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5019 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5020 		else
5021 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5022 	}
5023 
5024 	/* ??? Without this setting Netgear GA302T PHY does not
5025 	 * ??? send/receive packets...
5026 	 */
5027 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5028 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5029 		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5030 		tw32_f(MAC_MI_MODE, tp->mi_mode);
5031 		udelay(80);
5032 	}
5033 
5034 	tw32_f(MAC_MODE, tp->mac_mode);
5035 	udelay(40);
5036 
5037 	tg3_phy_eee_adjust(tp, current_link_up);
5038 
5039 	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5040 		/* Polled via timer. */
5041 		tw32_f(MAC_EVENT, 0);
5042 	} else {
5043 		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5044 	}
5045 	udelay(40);
5046 
5047 	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5048 	    current_link_up &&
5049 	    tp->link_config.active_speed == SPEED_1000 &&
5050 	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5051 		udelay(120);
5052 		tw32_f(MAC_STATUS,
5053 		     (MAC_STATUS_SYNC_CHANGED |
5054 		      MAC_STATUS_CFG_CHANGED));
5055 		udelay(40);
5056 		tg3_write_mem(tp,
5057 			      NIC_SRAM_FIRMWARE_MBOX,
5058 			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5059 	}
5060 
5061 	/* Prevent send BD corruption. */
5062 	if (tg3_flag(tp, CLKREQ_BUG)) {
5063 		if (tp->link_config.active_speed == SPEED_100 ||
5064 		    tp->link_config.active_speed == SPEED_10)
5065 			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5066 						   PCI_EXP_LNKCTL_CLKREQ_EN);
5067 		else
5068 			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5069 						 PCI_EXP_LNKCTL_CLKREQ_EN);
5070 	}
5071 
5072 	tg3_test_and_report_link_chg(tp, current_link_up);
5073 
5074 	return 0;
5075 }
5076 
5077 struct tg3_fiber_aneginfo {
5078 	int state;
5079 #define ANEG_STATE_UNKNOWN		0
5080 #define ANEG_STATE_AN_ENABLE		1
5081 #define ANEG_STATE_RESTART_INIT		2
5082 #define ANEG_STATE_RESTART		3
5083 #define ANEG_STATE_DISABLE_LINK_OK	4
5084 #define ANEG_STATE_ABILITY_DETECT_INIT	5
5085 #define ANEG_STATE_ABILITY_DETECT	6
5086 #define ANEG_STATE_ACK_DETECT_INIT	7
5087 #define ANEG_STATE_ACK_DETECT		8
5088 #define ANEG_STATE_COMPLETE_ACK_INIT	9
5089 #define ANEG_STATE_COMPLETE_ACK		10
5090 #define ANEG_STATE_IDLE_DETECT_INIT	11
5091 #define ANEG_STATE_IDLE_DETECT		12
5092 #define ANEG_STATE_LINK_OK		13
5093 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5094 #define ANEG_STATE_NEXT_PAGE_WAIT	15
5095 
5096 	u32 flags;
5097 #define MR_AN_ENABLE		0x00000001
5098 #define MR_RESTART_AN		0x00000002
5099 #define MR_AN_COMPLETE		0x00000004
5100 #define MR_PAGE_RX		0x00000008
5101 #define MR_NP_LOADED		0x00000010
5102 #define MR_TOGGLE_TX		0x00000020
5103 #define MR_LP_ADV_FULL_DUPLEX	0x00000040
5104 #define MR_LP_ADV_HALF_DUPLEX	0x00000080
5105 #define MR_LP_ADV_SYM_PAUSE	0x00000100
5106 #define MR_LP_ADV_ASYM_PAUSE	0x00000200
5107 #define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5108 #define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5109 #define MR_LP_ADV_NEXT_PAGE	0x00001000
5110 #define MR_TOGGLE_RX		0x00002000
5111 #define MR_NP_RX		0x00004000
5112 
5113 #define MR_LINK_OK		0x80000000
5114 
5115 	unsigned long link_time, cur_time;
5116 
5117 	u32 ability_match_cfg;
5118 	int ability_match_count;
5119 
5120 	char ability_match, idle_match, ack_match;
5121 
5122 	u32 txconfig, rxconfig;
5123 #define ANEG_CFG_NP		0x00000080
5124 #define ANEG_CFG_ACK		0x00000040
5125 #define ANEG_CFG_RF2		0x00000020
5126 #define ANEG_CFG_RF1		0x00000010
5127 #define ANEG_CFG_PS2		0x00000001
5128 #define ANEG_CFG_PS1		0x00008000
5129 #define ANEG_CFG_HD		0x00004000
5130 #define ANEG_CFG_FD		0x00002000
5131 #define ANEG_CFG_INVAL		0x00001f06
5132 
5133 };
5134 #define ANEG_OK		0
5135 #define ANEG_DONE	1
5136 #define ANEG_TIMER_ENAB	2
5137 #define ANEG_FAILED	-1
5138 
5139 #define ANEG_STATE_SETTLE_TIME	10000
5140 
5141 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5142 				   struct tg3_fiber_aneginfo *ap)
5143 {
5144 	u16 flowctrl;
5145 	unsigned long delta;
5146 	u32 rx_cfg_reg;
5147 	int ret;
5148 
5149 	if (ap->state == ANEG_STATE_UNKNOWN) {
5150 		ap->rxconfig = 0;
5151 		ap->link_time = 0;
5152 		ap->cur_time = 0;
5153 		ap->ability_match_cfg = 0;
5154 		ap->ability_match_count = 0;
5155 		ap->ability_match = 0;
5156 		ap->idle_match = 0;
5157 		ap->ack_match = 0;
5158 	}
5159 	ap->cur_time++;
5160 
5161 	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5162 		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5163 
5164 		if (rx_cfg_reg != ap->ability_match_cfg) {
5165 			ap->ability_match_cfg = rx_cfg_reg;
5166 			ap->ability_match = 0;
5167 			ap->ability_match_count = 0;
5168 		} else {
5169 			if (++ap->ability_match_count > 1) {
5170 				ap->ability_match = 1;
5171 				ap->ability_match_cfg = rx_cfg_reg;
5172 			}
5173 		}
5174 		if (rx_cfg_reg & ANEG_CFG_ACK)
5175 			ap->ack_match = 1;
5176 		else
5177 			ap->ack_match = 0;
5178 
5179 		ap->idle_match = 0;
5180 	} else {
5181 		ap->idle_match = 1;
5182 		ap->ability_match_cfg = 0;
5183 		ap->ability_match_count = 0;
5184 		ap->ability_match = 0;
5185 		ap->ack_match = 0;
5186 
5187 		rx_cfg_reg = 0;
5188 	}
5189 
5190 	ap->rxconfig = rx_cfg_reg;
5191 	ret = ANEG_OK;
5192 
5193 	switch (ap->state) {
5194 	case ANEG_STATE_UNKNOWN:
5195 		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5196 			ap->state = ANEG_STATE_AN_ENABLE;
5197 
5198 		fallthrough;
5199 	case ANEG_STATE_AN_ENABLE:
5200 		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5201 		if (ap->flags & MR_AN_ENABLE) {
5202 			ap->link_time = 0;
5203 			ap->cur_time = 0;
5204 			ap->ability_match_cfg = 0;
5205 			ap->ability_match_count = 0;
5206 			ap->ability_match = 0;
5207 			ap->idle_match = 0;
5208 			ap->ack_match = 0;
5209 
5210 			ap->state = ANEG_STATE_RESTART_INIT;
5211 		} else {
5212 			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5213 		}
5214 		break;
5215 
5216 	case ANEG_STATE_RESTART_INIT:
5217 		ap->link_time = ap->cur_time;
5218 		ap->flags &= ~(MR_NP_LOADED);
5219 		ap->txconfig = 0;
5220 		tw32(MAC_TX_AUTO_NEG, 0);
5221 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5222 		tw32_f(MAC_MODE, tp->mac_mode);
5223 		udelay(40);
5224 
5225 		ret = ANEG_TIMER_ENAB;
5226 		ap->state = ANEG_STATE_RESTART;
5227 
5228 		fallthrough;
5229 	case ANEG_STATE_RESTART:
5230 		delta = ap->cur_time - ap->link_time;
5231 		if (delta > ANEG_STATE_SETTLE_TIME)
5232 			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5233 		else
5234 			ret = ANEG_TIMER_ENAB;
5235 		break;
5236 
5237 	case ANEG_STATE_DISABLE_LINK_OK:
5238 		ret = ANEG_DONE;
5239 		break;
5240 
5241 	case ANEG_STATE_ABILITY_DETECT_INIT:
5242 		ap->flags &= ~(MR_TOGGLE_TX);
5243 		ap->txconfig = ANEG_CFG_FD;
5244 		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5245 		if (flowctrl & ADVERTISE_1000XPAUSE)
5246 			ap->txconfig |= ANEG_CFG_PS1;
5247 		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5248 			ap->txconfig |= ANEG_CFG_PS2;
5249 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5250 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5251 		tw32_f(MAC_MODE, tp->mac_mode);
5252 		udelay(40);
5253 
5254 		ap->state = ANEG_STATE_ABILITY_DETECT;
5255 		break;
5256 
5257 	case ANEG_STATE_ABILITY_DETECT:
5258 		if (ap->ability_match != 0 && ap->rxconfig != 0)
5259 			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5260 		break;
5261 
5262 	case ANEG_STATE_ACK_DETECT_INIT:
5263 		ap->txconfig |= ANEG_CFG_ACK;
5264 		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5265 		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5266 		tw32_f(MAC_MODE, tp->mac_mode);
5267 		udelay(40);
5268 
5269 		ap->state = ANEG_STATE_ACK_DETECT;
5270 
5271 		fallthrough;
5272 	case ANEG_STATE_ACK_DETECT:
5273 		if (ap->ack_match != 0) {
5274 			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5275 			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5276 				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5277 			} else {
5278 				ap->state = ANEG_STATE_AN_ENABLE;
5279 			}
5280 		} else if (ap->ability_match != 0 &&
5281 			   ap->rxconfig == 0) {
5282 			ap->state = ANEG_STATE_AN_ENABLE;
5283 		}
5284 		break;
5285 
5286 	case ANEG_STATE_COMPLETE_ACK_INIT:
5287 		if (ap->rxconfig & ANEG_CFG_INVAL) {
5288 			ret = ANEG_FAILED;
5289 			break;
5290 		}
5291 		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5292 			       MR_LP_ADV_HALF_DUPLEX |
5293 			       MR_LP_ADV_SYM_PAUSE |
5294 			       MR_LP_ADV_ASYM_PAUSE |
5295 			       MR_LP_ADV_REMOTE_FAULT1 |
5296 			       MR_LP_ADV_REMOTE_FAULT2 |
5297 			       MR_LP_ADV_NEXT_PAGE |
5298 			       MR_TOGGLE_RX |
5299 			       MR_NP_RX);
5300 		if (ap->rxconfig & ANEG_CFG_FD)
5301 			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5302 		if (ap->rxconfig & ANEG_CFG_HD)
5303 			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5304 		if (ap->rxconfig & ANEG_CFG_PS1)
5305 			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5306 		if (ap->rxconfig & ANEG_CFG_PS2)
5307 			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5308 		if (ap->rxconfig & ANEG_CFG_RF1)
5309 			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5310 		if (ap->rxconfig & ANEG_CFG_RF2)
5311 			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5312 		if (ap->rxconfig & ANEG_CFG_NP)
5313 			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5314 
5315 		ap->link_time = ap->cur_time;
5316 
5317 		ap->flags ^= (MR_TOGGLE_TX);
5318 		if (ap->rxconfig & 0x0008)
5319 			ap->flags |= MR_TOGGLE_RX;
5320 		if (ap->rxconfig & ANEG_CFG_NP)
5321 			ap->flags |= MR_NP_RX;
5322 		ap->flags |= MR_PAGE_RX;
5323 
5324 		ap->state = ANEG_STATE_COMPLETE_ACK;
5325 		ret = ANEG_TIMER_ENAB;
5326 		break;
5327 
5328 	case ANEG_STATE_COMPLETE_ACK:
5329 		if (ap->ability_match != 0 &&
5330 		    ap->rxconfig == 0) {
5331 			ap->state = ANEG_STATE_AN_ENABLE;
5332 			break;
5333 		}
5334 		delta = ap->cur_time - ap->link_time;
5335 		if (delta > ANEG_STATE_SETTLE_TIME) {
5336 			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5337 				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5338 			} else {
5339 				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5340 				    !(ap->flags & MR_NP_RX)) {
5341 					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 				} else {
5343 					ret = ANEG_FAILED;
5344 				}
5345 			}
5346 		}
5347 		break;
5348 
5349 	case ANEG_STATE_IDLE_DETECT_INIT:
5350 		ap->link_time = ap->cur_time;
5351 		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5352 		tw32_f(MAC_MODE, tp->mac_mode);
5353 		udelay(40);
5354 
5355 		ap->state = ANEG_STATE_IDLE_DETECT;
5356 		ret = ANEG_TIMER_ENAB;
5357 		break;
5358 
5359 	case ANEG_STATE_IDLE_DETECT:
5360 		if (ap->ability_match != 0 &&
5361 		    ap->rxconfig == 0) {
5362 			ap->state = ANEG_STATE_AN_ENABLE;
5363 			break;
5364 		}
5365 		delta = ap->cur_time - ap->link_time;
5366 		if (delta > ANEG_STATE_SETTLE_TIME) {
5367 			/* XXX another gem from the Broadcom driver :( */
5368 			ap->state = ANEG_STATE_LINK_OK;
5369 		}
5370 		break;
5371 
5372 	case ANEG_STATE_LINK_OK:
5373 		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5374 		ret = ANEG_DONE;
5375 		break;
5376 
5377 	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5378 		/* ??? unimplemented */
5379 		break;
5380 
5381 	case ANEG_STATE_NEXT_PAGE_WAIT:
5382 		/* ??? unimplemented */
5383 		break;
5384 
5385 	default:
5386 		ret = ANEG_FAILED;
5387 		break;
5388 	}
5389 
5390 	return ret;
5391 }
5392 
5393 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5394 {
5395 	int res = 0;
5396 	struct tg3_fiber_aneginfo aninfo;
5397 	int status = ANEG_FAILED;
5398 	unsigned int tick;
5399 	u32 tmp;
5400 
5401 	tw32_f(MAC_TX_AUTO_NEG, 0);
5402 
5403 	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5404 	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5405 	udelay(40);
5406 
5407 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5408 	udelay(40);
5409 
5410 	memset(&aninfo, 0, sizeof(aninfo));
5411 	aninfo.flags |= MR_AN_ENABLE;
5412 	aninfo.state = ANEG_STATE_UNKNOWN;
5413 	aninfo.cur_time = 0;
5414 	tick = 0;
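	/* Each pass below advances the autoneg state machine by one ~1 us
	 * tick, so 195000 ticks bounds the whole exchange at roughly 195 ms.
	 */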
5415 	while (++tick < 195000) {
5416 		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5417 		if (status == ANEG_DONE || status == ANEG_FAILED)
5418 			break;
5419 
5420 		udelay(1);
5421 	}
5422 
5423 	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5424 	tw32_f(MAC_MODE, tp->mac_mode);
5425 	udelay(40);
5426 
5427 	*txflags = aninfo.txconfig;
5428 	*rxflags = aninfo.flags;
5429 
5430 	if (status == ANEG_DONE &&
5431 	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5432 			     MR_LP_ADV_FULL_DUPLEX)))
5433 		res = 1;
5434 
5435 	return res;
5436 }
5437 
5438 static void tg3_init_bcm8002(struct tg3 *tp)
5439 {
5440 	u32 mac_status = tr32(MAC_STATUS);
5441 	int i;
5442 
5443 	/* Reset when initializing for the first time or when we have a link. */
5444 	if (tg3_flag(tp, INIT_COMPLETE) &&
5445 	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5446 		return;
5447 
5448 	/* Set PLL lock range. */
5449 	tg3_writephy(tp, 0x16, 0x8007);
5450 
5451 	/* SW reset */
5452 	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5453 
5454 	/* Wait for reset to complete. */
5455 	/* XXX schedule_timeout() ... */
5456 	for (i = 0; i < 500; i++)
5457 		udelay(10);
5458 
5459 	/* Config mode; select PMA/Ch 1 regs. */
5460 	tg3_writephy(tp, 0x10, 0x8411);
5461 
5462 	/* Enable auto-lock and comdet, select txclk for tx. */
5463 	tg3_writephy(tp, 0x11, 0x0a10);
5464 
5465 	tg3_writephy(tp, 0x18, 0x00a0);
5466 	tg3_writephy(tp, 0x16, 0x41ff);
5467 
5468 	/* Assert and deassert POR. */
5469 	tg3_writephy(tp, 0x13, 0x0400);
5470 	udelay(40);
5471 	tg3_writephy(tp, 0x13, 0x0000);
5472 
5473 	tg3_writephy(tp, 0x11, 0x0a50);
5474 	udelay(40);
5475 	tg3_writephy(tp, 0x11, 0x0a10);
5476 
5477 	/* Wait for signal to stabilize */
5478 	/* XXX schedule_timeout() ... */
5479 	for (i = 0; i < 15000; i++)
5480 		udelay(10);
5481 
5482 	/* Deselect the channel register so we can read the PHYID
5483 	 * later.
5484 	 */
5485 	tg3_writephy(tp, 0x10, 0x8011);
5486 }
5487 
5488 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5489 {
5490 	u16 flowctrl;
5491 	bool current_link_up;
5492 	u32 sg_dig_ctrl, sg_dig_status;
5493 	u32 serdes_cfg, expected_sg_dig_ctrl;
5494 	int workaround, port_a;
5495 
5496 	serdes_cfg = 0;
5497 	workaround = 0;
5498 	port_a = 1;
5499 	current_link_up = false;
5500 
5501 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5502 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5503 		workaround = 1;
5504 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5505 			port_a = 0;
5506 
5507 		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5508 		/* preserve bits 20-23 for voltage regulator */
5509 		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5510 	}
5511 
5512 	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5513 
5514 	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5515 		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5516 			if (workaround) {
5517 				u32 val = serdes_cfg;
5518 
5519 				if (port_a)
5520 					val |= 0xc010000;
5521 				else
5522 					val |= 0x4010000;
5523 				tw32_f(MAC_SERDES_CFG, val);
5524 			}
5525 
5526 			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5527 		}
5528 		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5529 			tg3_setup_flow_control(tp, 0, 0);
5530 			current_link_up = true;
5531 		}
5532 		goto out;
5533 	}
5534 
5535 	/* Want auto-negotiation.  */
5536 	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5537 
5538 	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5539 	if (flowctrl & ADVERTISE_1000XPAUSE)
5540 		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5541 	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5542 		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5543 
5544 	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5545 		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5546 		    tp->serdes_counter &&
5547 		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5548 				    MAC_STATUS_RCVD_CFG)) ==
5549 		     MAC_STATUS_PCS_SYNCED)) {
5550 			tp->serdes_counter--;
5551 			current_link_up = true;
5552 			goto out;
5553 		}
5554 restart_autoneg:
5555 		if (workaround)
5556 			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5557 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5558 		udelay(5);
5559 		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5560 
5561 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5562 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5563 	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5564 				 MAC_STATUS_SIGNAL_DET)) {
5565 		sg_dig_status = tr32(SG_DIG_STATUS);
5566 		mac_status = tr32(MAC_STATUS);
5567 
5568 		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5569 		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5570 			u32 local_adv = 0, remote_adv = 0;
5571 
5572 			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5573 				local_adv |= ADVERTISE_1000XPAUSE;
5574 			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5575 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5576 
5577 			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5578 				remote_adv |= LPA_1000XPAUSE;
5579 			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5580 				remote_adv |= LPA_1000XPAUSE_ASYM;
5581 
5582 			tp->link_config.rmt_adv =
5583 					   mii_adv_to_ethtool_adv_x(remote_adv);
5584 
5585 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5586 			current_link_up = true;
5587 			tp->serdes_counter = 0;
5588 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5589 		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5590 			if (tp->serdes_counter)
5591 				tp->serdes_counter--;
5592 			else {
5593 				if (workaround) {
5594 					u32 val = serdes_cfg;
5595 
5596 					if (port_a)
5597 						val |= 0xc010000;
5598 					else
5599 						val |= 0x4010000;
5600 
5601 					tw32_f(MAC_SERDES_CFG, val);
5602 				}
5603 
5604 				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5605 				udelay(40);
5606 
5607 				/* Link parallel detection - link is up only
5608 				 * if we have PCS_SYNC and we are not
5609 				 * receiving config code words. */
5610 				mac_status = tr32(MAC_STATUS);
5611 				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5612 				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5613 					tg3_setup_flow_control(tp, 0, 0);
5614 					current_link_up = true;
5615 					tp->phy_flags |=
5616 						TG3_PHYFLG_PARALLEL_DETECT;
5617 					tp->serdes_counter =
5618 						SERDES_PARALLEL_DET_TIMEOUT;
5619 				} else
5620 					goto restart_autoneg;
5621 			}
5622 		}
5623 	} else {
5624 		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5625 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 	}
5627 
5628 out:
5629 	return current_link_up;
5630 }
5631 
5632 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5633 {
5634 	bool current_link_up = false;
5635 
5636 	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5637 		goto out;
5638 
5639 	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5640 		u32 txflags, rxflags;
5641 		int i;
5642 
5643 		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5644 			u32 local_adv = 0, remote_adv = 0;
5645 
5646 			if (txflags & ANEG_CFG_PS1)
5647 				local_adv |= ADVERTISE_1000XPAUSE;
5648 			if (txflags & ANEG_CFG_PS2)
5649 				local_adv |= ADVERTISE_1000XPSE_ASYM;
5650 
5651 			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5652 				remote_adv |= LPA_1000XPAUSE;
5653 			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5654 				remote_adv |= LPA_1000XPAUSE_ASYM;
5655 
5656 			tp->link_config.rmt_adv =
5657 					   mii_adv_to_ethtool_adv_x(remote_adv);
5658 
5659 			tg3_setup_flow_control(tp, local_adv, remote_adv);
5660 
5661 			current_link_up = true;
5662 		}
5663 		for (i = 0; i < 30; i++) {
5664 			udelay(20);
5665 			tw32_f(MAC_STATUS,
5666 			       (MAC_STATUS_SYNC_CHANGED |
5667 				MAC_STATUS_CFG_CHANGED));
5668 			udelay(40);
5669 			if ((tr32(MAC_STATUS) &
5670 			     (MAC_STATUS_SYNC_CHANGED |
5671 			      MAC_STATUS_CFG_CHANGED)) == 0)
5672 				break;
5673 		}
5674 
5675 		mac_status = tr32(MAC_STATUS);
5676 		if (!current_link_up &&
5677 		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5678 		    !(mac_status & MAC_STATUS_RCVD_CFG))
5679 			current_link_up = true;
5680 	} else {
5681 		tg3_setup_flow_control(tp, 0, 0);
5682 
5683 		/* Forcing 1000FD link up. */
5684 		current_link_up = true;
5685 
5686 		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5687 		udelay(40);
5688 
5689 		tw32_f(MAC_MODE, tp->mac_mode);
5690 		udelay(40);
5691 	}
5692 
5693 out:
5694 	return current_link_up;
5695 }
5696 
5697 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5698 {
5699 	u32 orig_pause_cfg;
5700 	u32 orig_active_speed;
5701 	u8 orig_active_duplex;
5702 	u32 mac_status;
5703 	bool current_link_up;
5704 	int i;
5705 
5706 	orig_pause_cfg = tp->link_config.active_flowctrl;
5707 	orig_active_speed = tp->link_config.active_speed;
5708 	orig_active_duplex = tp->link_config.active_duplex;
5709 
5710 	if (!tg3_flag(tp, HW_AUTONEG) &&
5711 	    tp->link_up &&
5712 	    tg3_flag(tp, INIT_COMPLETE)) {
5713 		mac_status = tr32(MAC_STATUS);
5714 		mac_status &= (MAC_STATUS_PCS_SYNCED |
5715 			       MAC_STATUS_SIGNAL_DET |
5716 			       MAC_STATUS_CFG_CHANGED |
5717 			       MAC_STATUS_RCVD_CFG);
5718 		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5719 				   MAC_STATUS_SIGNAL_DET)) {
5720 			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5721 					    MAC_STATUS_CFG_CHANGED));
5722 			return 0;
5723 		}
5724 	}
5725 
5726 	tw32_f(MAC_TX_AUTO_NEG, 0);
5727 
5728 	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5729 	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5730 	tw32_f(MAC_MODE, tp->mac_mode);
5731 	udelay(40);
5732 
5733 	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5734 		tg3_init_bcm8002(tp);
5735 
5736 	/* Enable link change event even when serdes polling.  */
5737 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5738 	udelay(40);
5739 
5740 	tp->link_config.rmt_adv = 0;
5741 	mac_status = tr32(MAC_STATUS);
5742 
5743 	if (tg3_flag(tp, HW_AUTONEG))
5744 		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5745 	else
5746 		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5747 
5748 	tp->napi[0].hw_status->status =
5749 		(SD_STATUS_UPDATED |
5750 		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5751 
5752 	for (i = 0; i < 100; i++) {
5753 		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5754 				    MAC_STATUS_CFG_CHANGED));
5755 		udelay(5);
5756 		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5757 					 MAC_STATUS_CFG_CHANGED |
5758 					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5759 			break;
5760 	}
5761 
5762 	mac_status = tr32(MAC_STATUS);
5763 	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5764 		current_link_up = false;
5765 		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5766 		    tp->serdes_counter == 0) {
5767 			tw32_f(MAC_MODE, (tp->mac_mode |
5768 					  MAC_MODE_SEND_CONFIGS));
5769 			udelay(1);
5770 			tw32_f(MAC_MODE, tp->mac_mode);
5771 		}
5772 	}
5773 
5774 	if (current_link_up) {
5775 		tp->link_config.active_speed = SPEED_1000;
5776 		tp->link_config.active_duplex = DUPLEX_FULL;
5777 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5778 				    LED_CTRL_LNKLED_OVERRIDE |
5779 				    LED_CTRL_1000MBPS_ON));
5780 	} else {
5781 		tp->link_config.active_speed = SPEED_UNKNOWN;
5782 		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5783 		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 				    LED_CTRL_LNKLED_OVERRIDE |
5785 				    LED_CTRL_TRAFFIC_OVERRIDE));
5786 	}
5787 
5788 	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5789 		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5790 		if (orig_pause_cfg != now_pause_cfg ||
5791 		    orig_active_speed != tp->link_config.active_speed ||
5792 		    orig_active_duplex != tp->link_config.active_duplex)
5793 			tg3_link_report(tp);
5794 	}
5795 
5796 	return 0;
5797 }
5798 
5799 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5800 {
5801 	int err = 0;
5802 	u32 bmsr, bmcr;
5803 	u32 current_speed = SPEED_UNKNOWN;
5804 	u8 current_duplex = DUPLEX_UNKNOWN;
5805 	bool current_link_up = false;
5806 	u32 local_adv, remote_adv, sgsr;
5807 
5808 	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5809 	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5810 	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5811 	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5812 
5813 		if (force_reset)
5814 			tg3_phy_reset(tp);
5815 
5816 		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5817 
5818 		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5819 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5820 		} else {
5821 			current_link_up = true;
5822 			if (sgsr & SERDES_TG3_SPEED_1000) {
5823 				current_speed = SPEED_1000;
5824 				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5825 			} else if (sgsr & SERDES_TG3_SPEED_100) {
5826 				current_speed = SPEED_100;
5827 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5828 			} else {
5829 				current_speed = SPEED_10;
5830 				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5831 			}
5832 
5833 			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5834 				current_duplex = DUPLEX_FULL;
5835 			else
5836 				current_duplex = DUPLEX_HALF;
5837 		}
5838 
5839 		tw32_f(MAC_MODE, tp->mac_mode);
5840 		udelay(40);
5841 
5842 		tg3_clear_mac_status(tp);
5843 
5844 		goto fiber_setup_done;
5845 	}
5846 
5847 	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5848 	tw32_f(MAC_MODE, tp->mac_mode);
5849 	udelay(40);
5850 
5851 	tg3_clear_mac_status(tp);
5852 
5853 	if (force_reset)
5854 		tg3_phy_reset(tp);
5855 
5856 	tp->link_config.rmt_adv = 0;
5857 
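	/* The BMSR latches link-down events; read it back to back so the
	 * second value reflects the current link state rather than a stale
	 * latched one.
	 */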
5858 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5860 	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5861 		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5862 			bmsr |= BMSR_LSTATUS;
5863 		else
5864 			bmsr &= ~BMSR_LSTATUS;
5865 	}
5866 
5867 	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5868 
5869 	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5870 	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5871 		/* do nothing, just check for link up at the end */
5872 	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5873 		u32 adv, newadv;
5874 
5875 		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5876 		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5877 				 ADVERTISE_1000XPAUSE |
5878 				 ADVERTISE_1000XPSE_ASYM |
5879 				 ADVERTISE_SLCT);
5880 
5881 		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5882 		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5883 
5884 		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5885 			tg3_writephy(tp, MII_ADVERTISE, newadv);
5886 			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5887 			tg3_writephy(tp, MII_BMCR, bmcr);
5888 
5889 			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5890 			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5891 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5892 
5893 			return err;
5894 		}
5895 	} else {
5896 		u32 new_bmcr;
5897 
5898 		bmcr &= ~BMCR_SPEED1000;
5899 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5900 
5901 		if (tp->link_config.duplex == DUPLEX_FULL)
5902 			new_bmcr |= BMCR_FULLDPLX;
5903 
5904 		if (new_bmcr != bmcr) {
5905 			/* BMCR_SPEED1000 is a reserved bit that needs
5906 			 * to be set on write.
5907 			 */
5908 			new_bmcr |= BMCR_SPEED1000;
5909 
5910 			/* Force a linkdown */
5911 			if (tp->link_up) {
5912 				u32 adv;
5913 
5914 				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5915 				adv &= ~(ADVERTISE_1000XFULL |
5916 					 ADVERTISE_1000XHALF |
5917 					 ADVERTISE_SLCT);
5918 				tg3_writephy(tp, MII_ADVERTISE, adv);
5919 				tg3_writephy(tp, MII_BMCR, bmcr |
5920 							   BMCR_ANRESTART |
5921 							   BMCR_ANENABLE);
5922 				udelay(10);
5923 				tg3_carrier_off(tp);
5924 			}
5925 			tg3_writephy(tp, MII_BMCR, new_bmcr);
5926 			bmcr = new_bmcr;
5927 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5929 			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5930 				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5931 					bmsr |= BMSR_LSTATUS;
5932 				else
5933 					bmsr &= ~BMSR_LSTATUS;
5934 			}
5935 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5936 		}
5937 	}
5938 
5939 	if (bmsr & BMSR_LSTATUS) {
5940 		current_speed = SPEED_1000;
5941 		current_link_up = true;
5942 		if (bmcr & BMCR_FULLDPLX)
5943 			current_duplex = DUPLEX_FULL;
5944 		else
5945 			current_duplex = DUPLEX_HALF;
5946 
5947 		local_adv = 0;
5948 		remote_adv = 0;
5949 
5950 		if (bmcr & BMCR_ANENABLE) {
5951 			u32 common;
5952 
5953 			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5954 			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5955 			common = local_adv & remote_adv;
5956 			if (common & (ADVERTISE_1000XHALF |
5957 				      ADVERTISE_1000XFULL)) {
5958 				if (common & ADVERTISE_1000XFULL)
5959 					current_duplex = DUPLEX_FULL;
5960 				else
5961 					current_duplex = DUPLEX_HALF;
5962 
5963 				tp->link_config.rmt_adv =
5964 					   mii_adv_to_ethtool_adv_x(remote_adv);
5965 			} else if (!tg3_flag(tp, 5780_CLASS)) {
5966 				/* Link is up via parallel detect */
5967 			} else {
5968 				current_link_up = false;
5969 			}
5970 		}
5971 	}
5972 
5973 fiber_setup_done:
5974 	if (current_link_up && current_duplex == DUPLEX_FULL)
5975 		tg3_setup_flow_control(tp, local_adv, remote_adv);
5976 
5977 	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5978 	if (tp->link_config.active_duplex == DUPLEX_HALF)
5979 		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5980 
5981 	tw32_f(MAC_MODE, tp->mac_mode);
5982 	udelay(40);
5983 
5984 	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5985 
5986 	tp->link_config.active_speed = current_speed;
5987 	tp->link_config.active_duplex = current_duplex;
5988 
5989 	tg3_test_and_report_link_chg(tp, current_link_up);
5990 	return err;
5991 }
5992 
5993 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5994 {
5995 	if (tp->serdes_counter) {
5996 		/* Give autoneg time to complete. */
5997 		tp->serdes_counter--;
5998 		return;
5999 	}
6000 
6001 	if (!tp->link_up &&
6002 	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6003 		u32 bmcr;
6004 
6005 		tg3_readphy(tp, MII_BMCR, &bmcr);
6006 		if (bmcr & BMCR_ANENABLE) {
6007 			u32 phy1, phy2;
6008 
6009 			/* Select shadow register 0x1f */
6010 			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6011 			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6012 
6013 			/* Select expansion interrupt status register */
6014 			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6015 					 MII_TG3_DSP_EXP1_INT_STAT);
6016 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017 			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6018 
6019 			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6020 				/* We have signal detect and not receiving
6021 				 * config code words, link is up by parallel
6022 				 * detection.
6023 				 */
6024 
6025 				bmcr &= ~BMCR_ANENABLE;
6026 				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6027 				tg3_writephy(tp, MII_BMCR, bmcr);
6028 				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6029 			}
6030 		}
6031 	} else if (tp->link_up &&
6032 		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6033 		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6034 		u32 phy2;
6035 
6036 		/* Select expansion interrupt status register */
6037 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6038 				 MII_TG3_DSP_EXP1_INT_STAT);
6039 		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6040 		if (phy2 & 0x20) {
6041 			u32 bmcr;
6042 
6043 			/* Config code words received, turn on autoneg. */
6044 			tg3_readphy(tp, MII_BMCR, &bmcr);
6045 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6046 
6047 			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6048 
6049 		}
6050 	}
6051 }
6052 
6053 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6054 {
6055 	u32 val;
6056 	int err;
6057 
6058 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6059 		err = tg3_setup_fiber_phy(tp, force_reset);
6060 	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6061 		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6062 	else
6063 		err = tg3_setup_copper_phy(tp, force_reset);
6064 
6065 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6066 		u32 scale;
6067 
6068 		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6069 		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6070 			scale = 65;
6071 		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6072 			scale = 6;
6073 		else
6074 			scale = 12;
6075 
6076 		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6077 		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6078 		tw32(GRC_MISC_CFG, val);
6079 	}
6080 
6081 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6082 	      (6 << TX_LENGTHS_IPG_SHIFT);
6083 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6084 	    tg3_asic_rev(tp) == ASIC_REV_5762)
6085 		val |= tr32(MAC_TX_LENGTHS) &
6086 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6087 			TX_LENGTHS_CNT_DWN_VAL_MSK);
6088 
6089 	if (tp->link_config.active_speed == SPEED_1000 &&
6090 	    tp->link_config.active_duplex == DUPLEX_HALF)
6091 		tw32(MAC_TX_LENGTHS, val |
6092 		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6093 	else
6094 		tw32(MAC_TX_LENGTHS, val |
6095 		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6096 
6097 	if (!tg3_flag(tp, 5705_PLUS)) {
6098 		if (tp->link_up) {
6099 			tw32(HOSTCC_STAT_COAL_TICKS,
6100 			     tp->coal.stats_block_coalesce_usecs);
6101 		} else {
6102 			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6103 		}
6104 	}
6105 
6106 	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6107 		val = tr32(PCIE_PWR_MGMT_THRESH);
6108 		if (!tp->link_up)
6109 			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6110 			      tp->pwrmgmt_thresh;
6111 		else
6112 			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6113 		tw32(PCIE_PWR_MGMT_THRESH, val);
6114 	}
6115 
6116 	return err;
6117 }
6118 
6119 /* tp->lock must be held */
6120 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6121 {
6122 	u64 stamp;
6123 
6124 	ptp_read_system_prets(sts);
6125 	stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6126 	ptp_read_system_postts(sts);
6127 	stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6128 
6129 	return stamp;
6130 }
6131 
6132 /* tp->lock must be held */
6133 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6134 {
6135 	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6136 
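	/* Stop the counter so the two 32-bit halves are loaded atomically
	 * with respect to the running clock, then resume from the new value.
	 */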
6137 	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6138 	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6139 	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6140 	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141 }
6142 
6143 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6144 static inline void tg3_full_unlock(struct tg3 *tp);
6145 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6146 {
6147 	struct tg3 *tp = netdev_priv(dev);
6148 
6149 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6150 
6151 	if (tg3_flag(tp, PTP_CAPABLE)) {
6152 		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6153 					SOF_TIMESTAMPING_RX_HARDWARE |
6154 					SOF_TIMESTAMPING_RAW_HARDWARE;
6155 	}
6156 
6157 	if (tp->ptp_clock)
6158 		info->phc_index = ptp_clock_index(tp->ptp_clock);
6159 
6160 	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6161 
6162 	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6163 			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6164 			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6165 			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6166 	return 0;
6167 }
6168 
6169 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6170 {
6171 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6172 	u64 correction;
6173 	bool neg_adj;
6174 
6175 	/* Frequency adjustment is performed using hardware with a 24 bit
6176 	 * accumulator and a programmable correction value. On each clk, the
6177 	 * correction value gets added to the accumulator and when it
6178 	 * overflows, the time counter is incremented/decremented.
6179 	 */
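	/* For illustration (not from the original source): scaled_ppm has a
	 * 16-bit binary fraction, so a request of +1 ppm arrives as
	 * scaled_ppm = 65536 and diff_by_scaled_ppm(1 << 24, ...) yields a
	 * correction of about (1 << 24) / 1000000 ~= 16, i.e. a fractional
	 * speed-up of 16 / 2^24, which is roughly 1 ppm.
	 */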
6180 	neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6181 
6182 	tg3_full_lock(tp, 0);
6183 
6184 	if (correction)
6185 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6186 		     TG3_EAV_REF_CLK_CORRECT_EN |
6187 		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6188 		     ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6189 	else
6190 		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6191 
6192 	tg3_full_unlock(tp);
6193 
6194 	return 0;
6195 }
6196 
6197 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6198 {
6199 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6200 
6201 	tg3_full_lock(tp, 0);
6202 	tp->ptp_adjust += delta;
6203 	tg3_full_unlock(tp);
6204 
6205 	return 0;
6206 }
6207 
6208 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6209 			    struct ptp_system_timestamp *sts)
6210 {
6211 	u64 ns;
6212 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213 
6214 	tg3_full_lock(tp, 0);
6215 	ns = tg3_refclk_read(tp, sts);
6216 	ns += tp->ptp_adjust;
6217 	tg3_full_unlock(tp);
6218 
6219 	*ts = ns_to_timespec64(ns);
6220 
6221 	return 0;
6222 }
6223 
6224 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6225 			   const struct timespec64 *ts)
6226 {
6227 	u64 ns;
6228 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6229 
6230 	ns = timespec64_to_ns(ts);
6231 
6232 	tg3_full_lock(tp, 0);
6233 	tg3_refclk_write(tp, ns);
6234 	tp->ptp_adjust = 0;
6235 	tg3_full_unlock(tp);
6236 
6237 	return 0;
6238 }
6239 
6240 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6241 			  struct ptp_clock_request *rq, int on)
6242 {
6243 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6244 	u32 clock_ctl;
6245 	int rval = 0;
6246 
6247 	switch (rq->type) {
6248 	case PTP_CLK_REQ_PEROUT:
6249 		/* Reject requests with unsupported flags */
6250 		if (rq->perout.flags)
6251 			return -EOPNOTSUPP;
6252 
6253 		if (rq->perout.index != 0)
6254 			return -EINVAL;
6255 
6256 		tg3_full_lock(tp, 0);
6257 		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6258 		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6259 
6260 		if (on) {
6261 			u64 nsec;
6262 
6263 			nsec = rq->perout.start.sec * 1000000000ULL +
6264 			       rq->perout.start.nsec;
6265 
6266 			if (rq->perout.period.sec || rq->perout.period.nsec) {
6267 				netdev_warn(tp->dev,
6268 					    "Device supports only a one-shot timesync output, period must be 0\n");
6269 				rval = -EINVAL;
6270 				goto err_out;
6271 			}
6272 
6273 			if (nsec & (1ULL << 63)) {
6274 				netdev_warn(tp->dev,
6275 					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6276 				rval = -EINVAL;
6277 				goto err_out;
6278 			}
6279 
6280 			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6281 			tw32(TG3_EAV_WATCHDOG0_MSB,
6282 			     TG3_EAV_WATCHDOG0_EN |
6283 			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6284 
6285 			tw32(TG3_EAV_REF_CLCK_CTL,
6286 			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6287 		} else {
6288 			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6289 			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6290 		}
6291 
6292 err_out:
6293 		tg3_full_unlock(tp);
6294 		return rval;
6295 
6296 	default:
6297 		break;
6298 	}
6299 
6300 	return -EOPNOTSUPP;
6301 }
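
/* Illustrative user-space usage (an assumption, not part of this file):
 * the one-shot output is armed through the PTP character device with a
 * zero period, e.g.:
 *
 *	struct ptp_perout_request req = { 0 };
 *
 *	req.index = 0;
 *	req.start.sec = when;                (when: hypothetical start time)
 *	req.period.sec = 0;                  (must stay 0: one-shot only)
 *	ioctl(ptp_fd, PTP_PEROUT_REQUEST, &req);
 */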
6302 
6303 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6304 				     struct skb_shared_hwtstamps *timestamp)
6305 {
6306 	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6307 	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6308 					   tp->ptp_adjust);
6309 }
6310 
6311 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6312 {
6313 	*hwclock = tr32(TG3_TX_TSTAMP_LSB);
6314 	*hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6315 }
6316 
6317 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6318 {
6319 	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6320 	struct skb_shared_hwtstamps timestamp;
6321 	u64 hwclock;
6322 
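	/* The TX timestamp can latch after the descriptor has already
	 * completed, so it is polled from the PTP aux worker: a positive
	 * return reschedules this callback after that many jiffies, -1
	 * stops it, and we give up after a few unsuccessful polls.
	 */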
6323 	if (tp->ptp_txts_retrycnt > 2)
6324 		goto done;
6325 
6326 	tg3_read_tx_tstamp(tp, &hwclock);
6327 
6328 	if (hwclock != tp->pre_tx_ts) {
6329 		tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6330 		skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6331 		goto done;
6332 	}
6333 	tp->ptp_txts_retrycnt++;
6334 	return HZ / 10;
6335 done:
6336 	dev_consume_skb_any(tp->tx_tstamp_skb);
6337 	tp->tx_tstamp_skb = NULL;
6338 	tp->ptp_txts_retrycnt = 0;
6339 	tp->pre_tx_ts = 0;
6340 	return -1;
6341 }
6342 
6343 static const struct ptp_clock_info tg3_ptp_caps = {
6344 	.owner		= THIS_MODULE,
6345 	.name		= "tg3 clock",
6346 	.max_adj	= 250000000,
6347 	.n_alarm	= 0,
6348 	.n_ext_ts	= 0,
6349 	.n_per_out	= 1,
6350 	.n_pins		= 0,
6351 	.pps		= 0,
6352 	.adjfine	= tg3_ptp_adjfine,
6353 	.adjtime	= tg3_ptp_adjtime,
6354 	.do_aux_work	= tg3_ptp_ts_aux_work,
6355 	.gettimex64	= tg3_ptp_gettimex,
6356 	.settime64	= tg3_ptp_settime,
6357 	.enable		= tg3_ptp_enable,
6358 };
6359 
6360 /* tp->lock must be held */
6361 static void tg3_ptp_init(struct tg3 *tp)
6362 {
6363 	if (!tg3_flag(tp, PTP_CAPABLE))
6364 		return;
6365 
6366 	/* Initialize the hardware clock to the system time. */
6367 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6368 	tp->ptp_adjust = 0;
6369 	tp->ptp_info = tg3_ptp_caps;
6370 }
6371 
6372 /* tp->lock must be held */
6373 static void tg3_ptp_resume(struct tg3 *tp)
6374 {
6375 	if (!tg3_flag(tp, PTP_CAPABLE))
6376 		return;
6377 
6378 	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6379 	tp->ptp_adjust = 0;
6380 }
6381 
6382 static void tg3_ptp_fini(struct tg3 *tp)
6383 {
6384 	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6385 		return;
6386 
6387 	ptp_clock_unregister(tp->ptp_clock);
6388 	tp->ptp_clock = NULL;
6389 	tp->ptp_adjust = 0;
6390 	dev_consume_skb_any(tp->tx_tstamp_skb);
6391 	tp->tx_tstamp_skb = NULL;
6392 }
6393 
6394 static inline int tg3_irq_sync(struct tg3 *tp)
6395 {
6396 	return tp->irq_sync;
6397 }
6398 
6399 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6400 {
6401 	int i;
6402 
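	/* Advance the destination pointer by the register offset so each
	 * value lands at dst[off / 4]; the dump buffer then mirrors the
	 * device's register layout.
	 */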
6403 	dst = (u32 *)((u8 *)dst + off);
6404 	for (i = 0; i < len; i += sizeof(u32))
6405 		*dst++ = tr32(off + i);
6406 }
6407 
6408 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6409 {
6410 	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6411 	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6412 	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6413 	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6414 	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6415 	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6416 	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6417 	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6418 	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6419 	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6420 	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6421 	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6422 	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6423 	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6424 	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6425 	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6426 	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6427 	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6428 	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6429 
6430 	if (tg3_flag(tp, SUPPORT_MSIX))
6431 		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6432 
6433 	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6434 	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6435 	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6436 	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6437 	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6438 	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6439 	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6440 	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6441 
6442 	if (!tg3_flag(tp, 5705_PLUS)) {
6443 		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6444 		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6445 		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6446 	}
6447 
6448 	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6449 	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6450 	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6451 	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6452 	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6453 
6454 	if (tg3_flag(tp, NVRAM))
6455 		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6456 }
6457 
6458 static void tg3_dump_state(struct tg3 *tp)
6459 {
6460 	int i;
6461 	u32 *regs;
6462 
6463 	/* If it is a PCI error, all register reads will return 0xffffffff;
6464 	 * don't dump them out, just report the error and return.
6465 	 */
6466 	if (tp->pdev->error_state != pci_channel_io_normal) {
6467 		netdev_err(tp->dev, "PCI channel ERROR!\n");
6468 		return;
6469 	}
6470 
6471 	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6472 	if (!regs)
6473 		return;
6474 
6475 	if (tg3_flag(tp, PCI_EXPRESS)) {
6476 		/* Read up to but not including private PCI registers */
6477 		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6478 			regs[i / sizeof(u32)] = tr32(i);
6479 	} else
6480 		tg3_dump_legacy_regs(tp, regs);
6481 
6482 	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6483 		if (!regs[i + 0] && !regs[i + 1] &&
6484 		    !regs[i + 2] && !regs[i + 3])
6485 			continue;
6486 
6487 		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6488 			   i * 4,
6489 			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6490 	}
6491 
6492 	kfree(regs);
6493 
6494 	for (i = 0; i < tp->irq_cnt; i++) {
6495 		struct tg3_napi *tnapi = &tp->napi[i];
6496 
6497 		/* SW status block */
6498 		netdev_err(tp->dev,
6499 			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6500 			   i,
6501 			   tnapi->hw_status->status,
6502 			   tnapi->hw_status->status_tag,
6503 			   tnapi->hw_status->rx_jumbo_consumer,
6504 			   tnapi->hw_status->rx_consumer,
6505 			   tnapi->hw_status->rx_mini_consumer,
6506 			   tnapi->hw_status->idx[0].rx_producer,
6507 			   tnapi->hw_status->idx[0].tx_consumer);
6508 
6509 		netdev_err(tp->dev,
6510 		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6511 			   i,
6512 			   tnapi->last_tag, tnapi->last_irq_tag,
6513 			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6514 			   tnapi->rx_rcb_ptr,
6515 			   tnapi->prodring.rx_std_prod_idx,
6516 			   tnapi->prodring.rx_std_cons_idx,
6517 			   tnapi->prodring.rx_jmb_prod_idx,
6518 			   tnapi->prodring.rx_jmb_cons_idx);
6519 	}
6520 }
6521 
6522 /* This is called whenever we suspect that the system chipset is re-
6523  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6524  * is bogus tx completions. We try to recover by setting the
6525  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6526  * in the workqueue.
6527  */
6528 static void tg3_tx_recover(struct tg3 *tp)
6529 {
6530 	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6531 	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6532 
6533 	netdev_warn(tp->dev,
6534 		    "The system may be re-ordering memory-mapped I/O "
6535 		    "cycles to the network device, attempting to recover. "
6536 		    "Please report the problem to the driver maintainer "
6537 		    "and include system chipset information.\n");
6538 
6539 	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6540 }
6541 
6542 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6543 {
6544 	/* Tell compiler to fetch tx indices from memory. */
6545 	barrier();
6546 	return tnapi->tx_pending -
6547 	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6548 }
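
/* For illustration (not from the original source): assuming
 * TG3_TX_RING_SIZE is 512, tx_prod = 10 and tx_cons = 500 give
 * (10 - 500) & 511 = 22 descriptors still in flight, leaving
 * tx_pending - 22 usable slots.  The mask arithmetic relies on the ring
 * size being a power of two.
 */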
6549 
6550 /* Tigon3 never reports partial packet sends.  So we do not
6551  * need special logic to handle SKBs that have not had all
6552  * of their frags sent yet, like SunGEM does.
6553  */
6554 static void tg3_tx(struct tg3_napi *tnapi)
6555 {
6556 	struct tg3 *tp = tnapi->tp;
6557 	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6558 	u32 sw_idx = tnapi->tx_cons;
6559 	struct netdev_queue *txq;
6560 	int index = tnapi - tp->napi;
6561 	unsigned int pkts_compl = 0, bytes_compl = 0;
6562 
6563 	if (tg3_flag(tp, ENABLE_TSS))
6564 		index--;
6565 
6566 	txq = netdev_get_tx_queue(tp->dev, index);
6567 
6568 	while (sw_idx != hw_idx) {
6569 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6570 		bool complete_skb_later = false;
6571 		struct sk_buff *skb = ri->skb;
6572 		int i, tx_bug = 0;
6573 
6574 		if (unlikely(skb == NULL)) {
6575 			tg3_tx_recover(tp);
6576 			return;
6577 		}
6578 
6579 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6580 			struct skb_shared_hwtstamps timestamp;
6581 			u64 hwclock;
6582 
6583 			tg3_read_tx_tstamp(tp, &hwclock);
6584 			if (hwclock != tp->pre_tx_ts) {
6585 				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6586 				skb_tstamp_tx(skb, &timestamp);
6587 				tp->pre_tx_ts = 0;
6588 			} else {
6589 				tp->tx_tstamp_skb = skb;
6590 				complete_skb_later = true;
6591 			}
6592 		}
6593 
6594 		dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6595 				 skb_headlen(skb), DMA_TO_DEVICE);
6596 
6597 		ri->skb = NULL;
6598 
6599 		while (ri->fragmented) {
6600 			ri->fragmented = false;
6601 			sw_idx = NEXT_TX(sw_idx);
6602 			ri = &tnapi->tx_buffers[sw_idx];
6603 		}
6604 
6605 		sw_idx = NEXT_TX(sw_idx);
6606 
6607 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6608 			ri = &tnapi->tx_buffers[sw_idx];
6609 			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6610 				tx_bug = 1;
6611 
6612 			dma_unmap_page(&tp->pdev->dev,
6613 				       dma_unmap_addr(ri, mapping),
6614 				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6615 				       DMA_TO_DEVICE);
6616 
6617 			while (ri->fragmented) {
6618 				ri->fragmented = false;
6619 				sw_idx = NEXT_TX(sw_idx);
6620 				ri = &tnapi->tx_buffers[sw_idx];
6621 			}
6622 
6623 			sw_idx = NEXT_TX(sw_idx);
6624 		}
6625 
6626 		pkts_compl++;
6627 		bytes_compl += skb->len;
6628 
6629 		if (!complete_skb_later)
6630 			dev_consume_skb_any(skb);
6631 		else
6632 			ptp_schedule_worker(tp->ptp_clock, 0);
6633 
6634 		if (unlikely(tx_bug)) {
6635 			tg3_tx_recover(tp);
6636 			return;
6637 		}
6638 	}
6639 
6640 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6641 
6642 	tnapi->tx_cons = sw_idx;
6643 
6644 	/* Need to make the tx_cons update visible to __tg3_start_xmit()
6645 	 * before checking for netif_queue_stopped().  Without the
6646 	 * memory barrier, there is a small possibility that __tg3_start_xmit()
6647 	 * will miss it and cause the queue to be stopped forever.
6648 	 */
6649 	smp_mb();
6650 
6651 	if (unlikely(netif_tx_queue_stopped(txq) &&
6652 		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6653 		__netif_tx_lock(txq, smp_processor_id());
6654 		if (netif_tx_queue_stopped(txq) &&
6655 		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6656 			netif_tx_wake_queue(txq);
6657 		__netif_tx_unlock(txq);
6658 	}
6659 }
6660 
6661 static void tg3_frag_free(bool is_frag, void *data)
6662 {
6663 	if (is_frag)
6664 		skb_free_frag(data);
6665 	else
6666 		kfree(data);
6667 }
6668 
6669 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6670 {
6671 	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6672 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6673 
6674 	if (!ri->data)
6675 		return;
6676 
6677 	dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6678 			 DMA_FROM_DEVICE);
6679 	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6680 	ri->data = NULL;
6681 }
6682 
6683 
6684 /* Returns size of skb allocated or < 0 on error.
6685  *
6686  * We only need to fill in the address because the other members
6687  * of the RX descriptor are invariant, see tg3_init_rings.
6688  *
6689  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6690  * posting buffers we only dirty the first cache line of the RX
6691  * descriptor (containing the address).  Whereas for the RX status
6692  * buffers the cpu only reads the last cacheline of the RX descriptor
6693  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6694  */
6695 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6696 			     u32 opaque_key, u32 dest_idx_unmasked,
6697 			     unsigned int *frag_size)
6698 {
6699 	struct tg3_rx_buffer_desc *desc;
6700 	struct ring_info *map;
6701 	u8 *data;
6702 	dma_addr_t mapping;
6703 	int skb_size, data_size, dest_idx;
6704 
6705 	switch (opaque_key) {
6706 	case RXD_OPAQUE_RING_STD:
6707 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6708 		desc = &tpr->rx_std[dest_idx];
6709 		map = &tpr->rx_std_buffers[dest_idx];
6710 		data_size = tp->rx_pkt_map_sz;
6711 		break;
6712 
6713 	case RXD_OPAQUE_RING_JUMBO:
6714 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6715 		desc = &tpr->rx_jmb[dest_idx].std;
6716 		map = &tpr->rx_jmb_buffers[dest_idx];
6717 		data_size = TG3_RX_JMB_MAP_SZ;
6718 		break;
6719 
6720 	default:
6721 		return -EINVAL;
6722 	}
6723 
6724 	/* Do not overwrite any of the map or rp information
6725 	 * until we are sure we can commit to a new buffer.
6726 	 *
6727 	 * Callers depend upon this behavior and assume that
6728 	 * we leave everything unchanged if we fail.
6729 	 */
6730 	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6731 		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
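	/* Buffers that fit within a page come from the per-CPU page-fragment
	 * allocator so the zero-copy build_skb() path can be used in
	 * tg3_rx(); anything larger falls back to kmalloc() and
	 * slab_build_skb().
	 */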
6732 	if (skb_size <= PAGE_SIZE) {
6733 		data = napi_alloc_frag(skb_size);
6734 		*frag_size = skb_size;
6735 	} else {
6736 		data = kmalloc(skb_size, GFP_ATOMIC);
6737 		*frag_size = 0;
6738 	}
6739 	if (!data)
6740 		return -ENOMEM;
6741 
6742 	mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6743 				 data_size, DMA_FROM_DEVICE);
6744 	if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6745 		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6746 		return -EIO;
6747 	}
6748 
6749 	map->data = data;
6750 	dma_unmap_addr_set(map, mapping, mapping);
6751 
6752 	desc->addr_hi = ((u64)mapping >> 32);
6753 	desc->addr_lo = ((u64)mapping & 0xffffffff);
6754 
6755 	return data_size;
6756 }
6757 
6758 /* We only need to move over in the address because the other
6759  * members of the RX descriptor are invariant.  See notes above
6760  * tg3_alloc_rx_data for full details.
6761  */
6762 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6763 			   struct tg3_rx_prodring_set *dpr,
6764 			   u32 opaque_key, int src_idx,
6765 			   u32 dest_idx_unmasked)
6766 {
6767 	struct tg3 *tp = tnapi->tp;
6768 	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6769 	struct ring_info *src_map, *dest_map;
6770 	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6771 	int dest_idx;
6772 
6773 	switch (opaque_key) {
6774 	case RXD_OPAQUE_RING_STD:
6775 		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6776 		dest_desc = &dpr->rx_std[dest_idx];
6777 		dest_map = &dpr->rx_std_buffers[dest_idx];
6778 		src_desc = &spr->rx_std[src_idx];
6779 		src_map = &spr->rx_std_buffers[src_idx];
6780 		break;
6781 
6782 	case RXD_OPAQUE_RING_JUMBO:
6783 		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6784 		dest_desc = &dpr->rx_jmb[dest_idx].std;
6785 		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6786 		src_desc = &spr->rx_jmb[src_idx].std;
6787 		src_map = &spr->rx_jmb_buffers[src_idx];
6788 		break;
6789 
6790 	default:
6791 		return;
6792 	}
6793 
6794 	dest_map->data = src_map->data;
6795 	dma_unmap_addr_set(dest_map, mapping,
6796 			   dma_unmap_addr(src_map, mapping));
6797 	dest_desc->addr_hi = src_desc->addr_hi;
6798 	dest_desc->addr_lo = src_desc->addr_lo;
6799 
6800 	/* Ensure that the update to the skb happens after the physical
6801 	 * addresses have been transferred to the new BD location.
6802 	 */
6803 	smp_wmb();
6804 
6805 	src_map->data = NULL;
6806 }
6807 
6808 /* The RX ring scheme is composed of multiple rings which post fresh
6809  * buffers to the chip, and one special ring the chip uses to report
6810  * status back to the host.
6811  *
6812  * The special ring reports the status of received packets to the
6813  * host.  The chip does not write into the original descriptor the
6814  * RX buffer was obtained from.  The chip simply takes the original
6815  * descriptor as provided by the host, updates the status and length
6816  * field, then writes this into the next status ring entry.
6817  *
6818  * Each ring the host uses to post buffers to the chip is described
6819  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6820  * it is first placed into the on-chip ram.  When the packet's length
6821  * is known, it walks down the TG3_BDINFO entries to select the ring.
6822  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6823  * which is within the range of the new packet's length is chosen.
6824  *
6825  * The "separate ring for rx status" scheme may sound queer, but it makes
6826  * sense from a cache coherency perspective.  If only the host writes
6827  * to the buffer post rings, and only the chip writes to the rx status
6828  * rings, then cache lines never move beyond shared-modified state.
6829  * If both the host and chip were to write into the same ring, cache line
6830  * eviction could occur since both entities want it in an exclusive state.
6831  */
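
/* A rough sketch of the scheme described above (illustrative):
 *
 *	host  --(std/jumbo producer rings)-->  chip
 *	chip  --(return/status ring)-------->  host
 *
 * Only the host writes the producer rings and only the chip writes the
 * return ring, so every ring cache line has a single writer.
 */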
6832 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6833 {
6834 	struct tg3 *tp = tnapi->tp;
6835 	u32 work_mask, rx_std_posted = 0;
6836 	u32 std_prod_idx, jmb_prod_idx;
6837 	u32 sw_idx = tnapi->rx_rcb_ptr;
6838 	u16 hw_idx;
6839 	int received;
6840 	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6841 
6842 	hw_idx = *(tnapi->rx_rcb_prod_idx);
6843 	/*
6844 	 * We need to order the read of hw_idx and the read of
6845 	 * the opaque cookie.
6846 	 */
6847 	rmb();
6848 	work_mask = 0;
6849 	received = 0;
6850 	std_prod_idx = tpr->rx_std_prod_idx;
6851 	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6852 	while (sw_idx != hw_idx && budget > 0) {
6853 		struct ring_info *ri;
6854 		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6855 		unsigned int len;
6856 		struct sk_buff *skb;
6857 		dma_addr_t dma_addr;
6858 		u32 opaque_key, desc_idx, *post_ptr;
6859 		u8 *data;
6860 		u64 tstamp = 0;
6861 
6862 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6863 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6864 		if (opaque_key == RXD_OPAQUE_RING_STD) {
6865 			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6866 			dma_addr = dma_unmap_addr(ri, mapping);
6867 			data = ri->data;
6868 			post_ptr = &std_prod_idx;
6869 			rx_std_posted++;
6870 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6871 			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6872 			dma_addr = dma_unmap_addr(ri, mapping);
6873 			data = ri->data;
6874 			post_ptr = &jmb_prod_idx;
6875 		} else
6876 			goto next_pkt_nopost;
6877 
6878 		work_mask |= opaque_key;
6879 
6880 		if (desc->err_vlan & RXD_ERR_MASK) {
6881 		drop_it:
6882 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6883 				       desc_idx, *post_ptr);
6884 		drop_it_no_recycle:
6885 			/* Other statistics kept track of by card. */
6886 			tnapi->rx_dropped++;
6887 			goto next_pkt;
6888 		}
6889 
6890 		prefetch(data + TG3_RX_OFFSET(tp));
6891 		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6892 		      ETH_FCS_LEN;
6893 
6894 		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6895 		     RXD_FLAG_PTPSTAT_PTPV1 ||
6896 		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6897 		     RXD_FLAG_PTPSTAT_PTPV2) {
6898 			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6899 			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6900 		}
6901 
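		/* Two receive paths: a packet above the copy threshold
		 * donates its DMA buffer to the stack via build_skb() and a
		 * fresh buffer is posted in its place; a small packet is
		 * copied into a new skb and its buffer recycled as-is.
		 */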
6902 		if (len > TG3_RX_COPY_THRESH(tp)) {
6903 			int skb_size;
6904 			unsigned int frag_size;
6905 
6906 			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6907 						    *post_ptr, &frag_size);
6908 			if (skb_size < 0)
6909 				goto drop_it;
6910 
6911 			dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6912 					 DMA_FROM_DEVICE);
6913 
6914 			/* Ensure that the update to the data happens
6915 			 * after the usage of the old DMA mapping.
6916 			 */
6917 			smp_wmb();
6918 
6919 			ri->data = NULL;
6920 
6921 			if (frag_size)
6922 				skb = build_skb(data, frag_size);
6923 			else
6924 				skb = slab_build_skb(data);
6925 			if (!skb) {
6926 				tg3_frag_free(frag_size != 0, data);
6927 				goto drop_it_no_recycle;
6928 			}
6929 			skb_reserve(skb, TG3_RX_OFFSET(tp));
6930 		} else {
6931 			tg3_recycle_rx(tnapi, tpr, opaque_key,
6932 				       desc_idx, *post_ptr);
6933 
6934 			skb = netdev_alloc_skb(tp->dev,
6935 					       len + TG3_RAW_IP_ALIGN);
6936 			if (skb == NULL)
6937 				goto drop_it_no_recycle;
6938 
6939 			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6940 			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6941 						DMA_FROM_DEVICE);
6942 			memcpy(skb->data,
6943 			       data + TG3_RX_OFFSET(tp),
6944 			       len);
6945 			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6946 						   len, DMA_FROM_DEVICE);
6947 		}
6948 
6949 		skb_put(skb, len);
6950 		if (tstamp)
6951 			tg3_hwclock_to_timestamp(tp, tstamp,
6952 						 skb_hwtstamps(skb));
6953 
6954 		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6955 		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6956 		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6957 		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6958 			skb->ip_summed = CHECKSUM_UNNECESSARY;
6959 		else
6960 			skb_checksum_none_assert(skb);
6961 
6962 		skb->protocol = eth_type_trans(skb, tp->dev);
6963 
6964 		if (len > (tp->dev->mtu + ETH_HLEN) &&
6965 		    skb->protocol != htons(ETH_P_8021Q) &&
6966 		    skb->protocol != htons(ETH_P_8021AD)) {
6967 			dev_kfree_skb_any(skb);
6968 			goto drop_it_no_recycle;
6969 		}
6970 
6971 		if (desc->type_flags & RXD_FLAG_VLAN &&
6972 		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6973 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6974 					       desc->err_vlan & RXD_VLAN_MASK);
6975 
6976 		napi_gro_receive(&tnapi->napi, skb);
6977 
6978 		received++;
6979 		budget--;
6980 
6981 next_pkt:
6982 		(*post_ptr)++;
6983 
6984 		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6985 			tpr->rx_std_prod_idx = std_prod_idx &
6986 					       tp->rx_std_ring_mask;
6987 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6988 				     tpr->rx_std_prod_idx);
6989 			work_mask &= ~RXD_OPAQUE_RING_STD;
6990 			rx_std_posted = 0;
6991 		}
6992 next_pkt_nopost:
6993 		sw_idx++;
6994 		sw_idx &= tp->rx_ret_ring_mask;
6995 
6996 		/* Refresh hw_idx to see if there is new work */
6997 		if (sw_idx == hw_idx) {
6998 			hw_idx = *(tnapi->rx_rcb_prod_idx);
6999 			rmb();
7000 		}
7001 	}
7002 
7003 	/* ACK the status ring. */
7004 	tnapi->rx_rcb_ptr = sw_idx;
7005 	tw32_rx_mbox(tnapi->consmbox, sw_idx);
7006 
7007 	/* Refill RX ring(s). */
7008 	if (!tg3_flag(tp, ENABLE_RSS)) {
7009 		/* Sync BD data before updating mailbox */
7010 		wmb();
7011 
7012 		if (work_mask & RXD_OPAQUE_RING_STD) {
7013 			tpr->rx_std_prod_idx = std_prod_idx &
7014 					       tp->rx_std_ring_mask;
7015 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7016 				     tpr->rx_std_prod_idx);
7017 		}
7018 		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7019 			tpr->rx_jmb_prod_idx = jmb_prod_idx &
7020 					       tp->rx_jmb_ring_mask;
7021 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7022 				     tpr->rx_jmb_prod_idx);
7023 		}
7024 	} else if (work_mask) {
7025 		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7026 		 * updated before the producer indices can be updated.
7027 		 */
7028 		smp_wmb();
7029 
7030 		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7031 		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7032 
7033 		if (tnapi != &tp->napi[1]) {
7034 			tp->rx_refill = true;
7035 			napi_schedule(&tp->napi[1].napi);
7036 		}
7037 	}
7038 
7039 	return received;
7040 }
7041 
7042 static void tg3_poll_link(struct tg3 *tp)
7043 {
7044 	/* handle link change and other phy events */
7045 	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7046 		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7047 
7048 		if (sblk->status & SD_STATUS_LINK_CHG) {
7049 			sblk->status = SD_STATUS_UPDATED |
7050 				       (sblk->status & ~SD_STATUS_LINK_CHG);
7051 			spin_lock(&tp->lock);
7052 			if (tg3_flag(tp, USE_PHYLIB)) {
7053 				tw32_f(MAC_STATUS,
7054 				     (MAC_STATUS_SYNC_CHANGED |
7055 				      MAC_STATUS_CFG_CHANGED |
7056 				      MAC_STATUS_MI_COMPLETION |
7057 				      MAC_STATUS_LNKSTATE_CHANGED));
7058 				udelay(40);
7059 			} else
7060 				tg3_setup_phy(tp, false);
7061 			spin_unlock(&tp->lock);
7062 		}
7063 	}
7064 }
7065 
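/* With RSS, each per-vector NAPI context recycles rx buffers onto its own
 * shadow producer ring.  This helper drains one shadow ring (spr) back into
 * the real producer ring (dpr, tp->napi[0].prodring), standard entries
 * first and then jumbo, returning -ENOSPC if it runs into a destination
 * slot that is still occupied.
 */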
7066 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7067 				struct tg3_rx_prodring_set *dpr,
7068 				struct tg3_rx_prodring_set *spr)
7069 {
7070 	u32 si, di, cpycnt, src_prod_idx;
7071 	int i, err = 0;
7072 
7073 	while (1) {
7074 		src_prod_idx = spr->rx_std_prod_idx;
7075 
7076 		/* Make sure updates to the rx_std_buffers[] entries and the
7077 		 * standard producer index are seen in the correct order.
7078 		 */
7079 		smp_rmb();
7080 
7081 		if (spr->rx_std_cons_idx == src_prod_idx)
7082 			break;
7083 
7084 		if (spr->rx_std_cons_idx < src_prod_idx)
7085 			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7086 		else
7087 			cpycnt = tp->rx_std_ring_mask + 1 -
7088 				 spr->rx_std_cons_idx;
7089 
7090 		cpycnt = min(cpycnt,
7091 			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7092 
7093 		si = spr->rx_std_cons_idx;
7094 		di = dpr->rx_std_prod_idx;
7095 
7096 		for (i = di; i < di + cpycnt; i++) {
7097 			if (dpr->rx_std_buffers[i].data) {
7098 				cpycnt = i - di;
7099 				err = -ENOSPC;
7100 				break;
7101 			}
7102 		}
7103 
7104 		if (!cpycnt)
7105 			break;
7106 
7107 		/* Ensure that updates to the rx_std_buffers ring and the
7108 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7109 		 * ordered correctly WRT the skb check above.
7110 		 */
7111 		smp_rmb();
7112 
7113 		memcpy(&dpr->rx_std_buffers[di],
7114 		       &spr->rx_std_buffers[si],
7115 		       cpycnt * sizeof(struct ring_info));
7116 
7117 		for (i = 0; i < cpycnt; i++, di++, si++) {
7118 			struct tg3_rx_buffer_desc *sbd, *dbd;
7119 			sbd = &spr->rx_std[si];
7120 			dbd = &dpr->rx_std[di];
7121 			dbd->addr_hi = sbd->addr_hi;
7122 			dbd->addr_lo = sbd->addr_lo;
7123 		}
7124 
7125 		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7126 				       tp->rx_std_ring_mask;
7127 		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7128 				       tp->rx_std_ring_mask;
7129 	}
7130 
7131 	while (1) {
7132 		src_prod_idx = spr->rx_jmb_prod_idx;
7133 
7134 		/* Make sure updates to the rx_jmb_buffers[] entries and
7135 		 * the jumbo producer index are seen in the correct order.
7136 		 */
7137 		smp_rmb();
7138 
7139 		if (spr->rx_jmb_cons_idx == src_prod_idx)
7140 			break;
7141 
7142 		if (spr->rx_jmb_cons_idx < src_prod_idx)
7143 			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7144 		else
7145 			cpycnt = tp->rx_jmb_ring_mask + 1 -
7146 				 spr->rx_jmb_cons_idx;
7147 
7148 		cpycnt = min(cpycnt,
7149 			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7150 
7151 		si = spr->rx_jmb_cons_idx;
7152 		di = dpr->rx_jmb_prod_idx;
7153 
7154 		for (i = di; i < di + cpycnt; i++) {
7155 			if (dpr->rx_jmb_buffers[i].data) {
7156 				cpycnt = i - di;
7157 				err = -ENOSPC;
7158 				break;
7159 			}
7160 		}
7161 
7162 		if (!cpycnt)
7163 			break;
7164 
7165 		/* Ensure that updates to the rx_jmb_buffers ring and the
7166 		 * shadowed hardware producer ring from tg3_recycle_skb() are
7167 		 * ordered correctly WRT the skb check above.
7168 		 */
7169 		smp_rmb();
7170 
7171 		memcpy(&dpr->rx_jmb_buffers[di],
7172 		       &spr->rx_jmb_buffers[si],
7173 		       cpycnt * sizeof(struct ring_info));
7174 
7175 		for (i = 0; i < cpycnt; i++, di++, si++) {
7176 			struct tg3_rx_buffer_desc *sbd, *dbd;
7177 			sbd = &spr->rx_jmb[si].std;
7178 			dbd = &dpr->rx_jmb[di].std;
7179 			dbd->addr_hi = sbd->addr_hi;
7180 			dbd->addr_lo = sbd->addr_lo;
7181 		}
7182 
7183 		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7184 				       tp->rx_jmb_ring_mask;
7185 		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7186 				       tp->rx_jmb_ring_mask;
7187 	}
7188 
7189 	return err;
7190 }
7191 
7192 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7193 {
7194 	struct tg3 *tp = tnapi->tp;
7195 
7196 	/* run TX completion thread */
7197 	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7198 		tg3_tx(tnapi);
7199 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7200 			return work_done;
7201 	}
7202 
7203 	if (!tnapi->rx_rcb_prod_idx)
7204 		return work_done;
7205 
7206 	/* run RX thread, within the bounds set by NAPI.
7207 	 * All RX "locking" is done by ensuring outside
7208 	 * code synchronizes with tg3->napi.poll()
7209 	 */
7210 	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7211 		work_done += tg3_rx(tnapi, budget - work_done);
7212 
7213 	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
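		/* With RSS, only vector 1 replenishes the chip: it drains
		 * the buffers every rx vector recycled into its own staging
		 * ring and posts them to the real producer ring owned by
		 * napi[0], with one mailbox update per ring type.
		 */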
7214 		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7215 		int i, err = 0;
7216 		u32 std_prod_idx = dpr->rx_std_prod_idx;
7217 		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7218 
7219 		tp->rx_refill = false;
7220 		for (i = 1; i <= tp->rxq_cnt; i++)
7221 			err |= tg3_rx_prodring_xfer(tp, dpr,
7222 						    &tp->napi[i].prodring);
7223 
7224 		wmb();
7225 
7226 		if (std_prod_idx != dpr->rx_std_prod_idx)
7227 			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7228 				     dpr->rx_std_prod_idx);
7229 
7230 		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7231 			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7232 				     dpr->rx_jmb_prod_idx);
7233 
7234 		if (err)
7235 			tw32_f(HOSTCC_MODE, tp->coal_now);
7236 	}
7237 
7238 	return work_done;
7239 }
7240 
7241 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7242 {
7243 	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7244 		schedule_work(&tp->reset_task);
7245 }
7246 
7247 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7248 {
7249 	if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7250 		cancel_work_sync(&tp->reset_task);
7251 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7252 }
7253 
7254 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7255 {
7256 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7257 	struct tg3 *tp = tnapi->tp;
7258 	int work_done = 0;
7259 	struct tg3_hw_status *sblk = tnapi->hw_status;
7260 
7261 	while (1) {
7262 		work_done = tg3_poll_work(tnapi, work_done, budget);
7263 
7264 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7265 			goto tx_recovery;
7266 
7267 		if (unlikely(work_done >= budget))
7268 			break;
7269 
7270 		/* tnapi->last_tag is written to the interrupt mailbox below
7271 		 * to tell the hw how much work has been processed,
7272 		 * so we must read it before checking for more work.
7273 		 */
7274 		tnapi->last_tag = sblk->status_tag;
7275 		tnapi->last_irq_tag = tnapi->last_tag;
7276 		rmb();
7277 
7278 		/* check for RX/TX work to do */
7279 		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7280 			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7281 
7282 			/* This test here is not race free, but will reduce
7283 			/* This test here is not race-free, but will reduce
7284 			 */
7285 			if (tnapi == &tp->napi[1] && tp->rx_refill)
7286 				continue;
7287 
7288 			napi_complete_done(napi, work_done);
7289 			/* Reenable interrupts. */
7290 			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7291 
7292 			/* This test here is synchronized by napi_schedule()
7293 			 * and napi_complete() to close the race condition.
7294 			 */
7295 			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7296 				tw32(HOSTCC_MODE, tp->coalesce_mode |
7297 						  HOSTCC_MODE_ENABLE |
7298 						  tnapi->coal_now);
7299 			}
7300 			break;
7301 		}
7302 	}
7303 
7304 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7305 	return work_done;
7306 
7307 tx_recovery:
7308 	/* work_done is guaranteed to be less than budget. */
7309 	napi_complete(napi);
7310 	tg3_reset_task_schedule(tp);
7311 	return work_done;
7312 }
7313 
7314 static void tg3_process_error(struct tg3 *tp)
7315 {
7316 	u32 val;
7317 	bool real_error = false;
7318 
7319 	if (tg3_flag(tp, ERROR_PROCESSED))
7320 		return;
7321 
7322 	/* Check Flow Attention register */
7323 	val = tr32(HOSTCC_FLOW_ATTN);
7324 	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7325 		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7326 		real_error = true;
7327 	}
7328 
7329 	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7330 		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7331 		real_error = true;
7332 	}
7333 
7334 	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7335 		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7336 		real_error = true;
7337 	}
7338 
7339 	if (!real_error)
7340 		return;
7341 
7342 	tg3_dump_state(tp);
7343 
7344 	tg3_flag_set(tp, ERROR_PROCESSED);
7345 	tg3_reset_task_schedule(tp);
7346 }
7347 
7348 static int tg3_poll(struct napi_struct *napi, int budget)
7349 {
7350 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7351 	struct tg3 *tp = tnapi->tp;
7352 	int work_done = 0;
7353 	struct tg3_hw_status *sblk = tnapi->hw_status;
7354 
7355 	while (1) {
7356 		if (sblk->status & SD_STATUS_ERROR)
7357 			tg3_process_error(tp);
7358 
7359 		tg3_poll_link(tp);
7360 
7361 		work_done = tg3_poll_work(tnapi, work_done, budget);
7362 
7363 		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7364 			goto tx_recovery;
7365 
7366 		if (unlikely(work_done >= budget))
7367 			break;
7368 
7369 		if (tg3_flag(tp, TAGGED_STATUS)) {
7370 			/* tnapi->last_tag is used in tg3_int_reenable() below
7371 			 * to tell the hw how much work has been processed,
7372 			 * so we must read it before checking for more work.
7373 			 */
7374 			tnapi->last_tag = sblk->status_tag;
7375 			tnapi->last_irq_tag = tnapi->last_tag;
7376 			rmb();
7377 		} else
7378 			sblk->status &= ~SD_STATUS_UPDATED;
7379 
7380 		if (likely(!tg3_has_work(tnapi))) {
7381 			napi_complete_done(napi, work_done);
7382 			tg3_int_reenable(tnapi);
7383 			break;
7384 		}
7385 	}
7386 
7387 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7388 	return work_done;
7389 
7390 tx_recovery:
7391 	/* work_done is guaranteed to be less than budget. */
7392 	napi_complete(napi);
7393 	tg3_reset_task_schedule(tp);
7394 	return work_done;
7395 }
7396 
7397 static void tg3_napi_disable(struct tg3 *tp)
7398 {
7399 	int txq_idx = tp->txq_cnt - 1;
7400 	int rxq_idx = tp->rxq_cnt - 1;
7401 	struct tg3_napi *tnapi;
7402 	int i;
7403 
7404 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
7405 		tnapi = &tp->napi[i];
7406 		if (tnapi->tx_buffers) {
7407 			netif_queue_set_napi(tp->dev, txq_idx,
7408 					     NETDEV_QUEUE_TYPE_TX, NULL);
7409 			txq_idx--;
7410 		}
7411 		if (tnapi->rx_rcb) {
7412 			netif_queue_set_napi(tp->dev, rxq_idx,
7413 					     NETDEV_QUEUE_TYPE_RX, NULL);
7414 			rxq_idx--;
7415 		}
7416 		napi_disable(&tnapi->napi);
7417 	}
7418 }
7419 
7420 static void tg3_napi_enable(struct tg3 *tp)
7421 {
7422 	int txq_idx = 0, rxq_idx = 0;
7423 	struct tg3_napi *tnapi;
7424 	int i;
7425 
7426 	for (i = 0; i < tp->irq_cnt; i++) {
7427 		tnapi = &tp->napi[i];
7428 		napi_enable_locked(&tnapi->napi);
7429 		if (tnapi->tx_buffers) {
7430 			netif_queue_set_napi(tp->dev, txq_idx,
7431 					     NETDEV_QUEUE_TYPE_TX,
7432 					     &tnapi->napi);
7433 			txq_idx++;
7434 		}
7435 		if (tnapi->rx_rcb) {
7436 			netif_queue_set_napi(tp->dev, rxq_idx,
7437 					     NETDEV_QUEUE_TYPE_RX,
7438 					     &tnapi->napi);
7439 			rxq_idx++;
7440 		}
7441 	}
7442 }
7443 
7444 static void tg3_napi_init(struct tg3 *tp)
7445 {
7446 	int i;
7447 
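	/* Vector 0 polls with tg3_poll(), which also handles link changes
	 * and error processing; any additional MSI-X vectors use the
	 * leaner tg3_poll_msix().
	 */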
7448 	for (i = 0; i < tp->irq_cnt; i++) {
7449 		netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
7450 				      i ? tg3_poll_msix : tg3_poll);
7451 		netif_napi_set_irq_locked(&tp->napi[i].napi,
7452 					  tp->napi[i].irq_vec);
7453 	}
7454 }
7455 
7456 static void tg3_napi_fini(struct tg3 *tp)
7457 {
7458 	int i;
7459 
7460 	for (i = 0; i < tp->irq_cnt; i++)
7461 		netif_napi_del(&tp->napi[i].napi);
7462 }
7463 
7464 static inline void tg3_netif_stop(struct tg3 *tp)
7465 {
7466 	netif_trans_update(tp->dev);	/* prevent tx timeout */
7467 	tg3_napi_disable(tp);
7468 	netif_carrier_off(tp->dev);
7469 	netif_tx_disable(tp->dev);
7470 }
7471 
7472 /* tp->lock must be held */
7473 static inline void tg3_netif_start(struct tg3 *tp)
7474 {
7475 	tg3_ptp_resume(tp);
7476 
7477 	/* NOTE: unconditional netif_tx_wake_all_queues is only
7478 	 * appropriate so long as all callers are assured to
7479 	 * have free tx slots (such as after tg3_init_hw)
7480 	 */
7481 	netif_tx_wake_all_queues(tp->dev);
7482 
7483 	if (tp->link_up)
7484 		netif_carrier_on(tp->dev);
7485 
7486 	tg3_napi_enable(tp);
7487 	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7488 	tg3_enable_ints(tp);
7489 }
7490 
7491 static void tg3_irq_quiesce(struct tg3 *tp)
7492 	__releases(tp->lock)
7493 	__acquires(tp->lock)
7494 {
7495 	int i;
7496 
7497 	BUG_ON(tp->irq_sync);
7498 
7499 	tp->irq_sync = 1;
7500 	smp_mb();
7501 
7502 	spin_unlock_bh(&tp->lock);
7503 
7504 	for (i = 0; i < tp->irq_cnt; i++)
7505 		synchronize_irq(tp->napi[i].irq_vec);
7506 
7507 	spin_lock_bh(&tp->lock);
7508 }
7509 
7510 /* Fully shut down all tg3 driver activity elsewhere in the system.
7511  * If irq_sync is non-zero, the IRQ handlers must be synchronized
7512  * as well.  Most of the time this is not necessary, except when
7513  * shutting down the device.
7514  */
7515 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7516 {
7517 	spin_lock_bh(&tp->lock);
7518 	if (irq_sync)
7519 		tg3_irq_quiesce(tp);
7520 }
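
/* Typical usage, e.g. around a device shutdown or reset:
 *
 *	tg3_full_lock(tp, 1);
 *	... reconfigure or halt the hardware ...
 *	tg3_full_unlock(tp);
 */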
7521 
7522 static inline void tg3_full_unlock(struct tg3 *tp)
7523 {
7524 	spin_unlock_bh(&tp->lock);
7525 }
7526 
7527 /* One-shot MSI handler - the chip automatically disables the interrupt
7528  * after sending the MSI, so the driver doesn't have to do it.
7529  */
7530 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7531 {
7532 	struct tg3_napi *tnapi = dev_id;
7533 	struct tg3 *tp = tnapi->tp;
7534 
7535 	prefetch(tnapi->hw_status);
7536 	if (tnapi->rx_rcb)
7537 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7538 
7539 	if (likely(!tg3_irq_sync(tp)))
7540 		napi_schedule(&tnapi->napi);
7541 
7542 	return IRQ_HANDLED;
7543 }
7544 
7545 /* MSI ISR - No need to check for interrupt sharing and no need to
7546  * flush the status block and interrupt mailbox. PCI ordering rules
7547  * guarantee that the MSI will arrive after the status block.
7548  */
7549 static irqreturn_t tg3_msi(int irq, void *dev_id)
7550 {
7551 	struct tg3_napi *tnapi = dev_id;
7552 	struct tg3 *tp = tnapi->tp;
7553 
7554 	prefetch(tnapi->hw_status);
7555 	if (tnapi->rx_rcb)
7556 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7557 	/*
7558 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7559 	 * chip-internal interrupt pending events.
7560 	 * Writing non-zero to intr-mbox-0 additionally tells the
7561 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7562 	 * event coalescing.
7563 	 */
7564 	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7565 	if (likely(!tg3_irq_sync(tp)))
7566 		napi_schedule(&tnapi->napi);
7567 
7568 	return IRQ_RETVAL(1);
7569 }
7570 
7571 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7572 {
7573 	struct tg3_napi *tnapi = dev_id;
7574 	struct tg3 *tp = tnapi->tp;
7575 	struct tg3_hw_status *sblk = tnapi->hw_status;
7576 	unsigned int handled = 1;
7577 
7578 	/* In INTx mode, it is possible for the interrupt to arrive at
7579 	 * the CPU before the status block posted prior to it is visible.
7580 	 * Reading the PCI State register will confirm whether the
7581 	 * interrupt is ours and will flush the status block.
7582 	 */
7583 	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7584 		if (tg3_flag(tp, CHIP_RESETTING) ||
7585 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7586 			handled = 0;
7587 			goto out;
7588 		}
7589 	}
7590 
7591 	/*
7592 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7593 	 * chip-internal interrupt pending events.
7594 	 * Writing non-zero to intr-mbox-0 additionally tells the
7595 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7596 	 * event coalescing.
7597 	 *
7598 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7599 	 * spurious interrupts.  The flush impacts performance but
7600 	 * excessive spurious interrupts can be worse in some cases.
7601 	 */
7602 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7603 	if (tg3_irq_sync(tp))
7604 		goto out;
7605 	sblk->status &= ~SD_STATUS_UPDATED;
7606 	if (likely(tg3_has_work(tnapi))) {
7607 		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7608 		napi_schedule(&tnapi->napi);
7609 	} else {
7610 		/* No work, shared interrupt perhaps?  re-enable
7611 		 * interrupts, and flush that PCI write
7612 		 */
7613 		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7614 			       0x00000000);
7615 	}
7616 out:
7617 	return IRQ_RETVAL(handled);
7618 }
7619 
7620 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7621 {
7622 	struct tg3_napi *tnapi = dev_id;
7623 	struct tg3 *tp = tnapi->tp;
7624 	struct tg3_hw_status *sblk = tnapi->hw_status;
7625 	unsigned int handled = 1;
7626 
7627 	/* In INTx mode, it is possible for the interrupt to arrive at
7628 	 * the CPU before the status block posted prior to it is visible.
7629 	 * Reading the PCI State register will confirm whether the
7630 	 * interrupt is ours and will flush the status block.
7631 	 */
7632 	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7633 		if (tg3_flag(tp, CHIP_RESETTING) ||
7634 		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7635 			handled = 0;
7636 			goto out;
7637 		}
7638 	}
7639 
7640 	/*
7641 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7642 	 * chip-internal interrupt pending events.
7643 	 * Writing non-zero to intr-mbox-0 additionally tells the
7644 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7645 	 * event coalescing.
7646 	 *
7647 	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7648 	 * spurious interrupts.  The flush impacts performance but
7649 	 * excessive spurious interrupts can be worse in some cases.
7650 	 */
7651 	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7652 
7653 	/*
7654 	 * In a shared interrupt configuration, sometimes other devices'
7655 	 * interrupts will scream.  We record the current status tag here
7656 	 * so that the above check can report that the screaming interrupts
7657 	 * are unhandled.  Eventually they will be silenced.
7658 	 */
7659 	tnapi->last_irq_tag = sblk->status_tag;
7660 
7661 	if (tg3_irq_sync(tp))
7662 		goto out;
7663 
7664 	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7665 
7666 	napi_schedule(&tnapi->napi);
7667 
7668 out:
7669 	return IRQ_RETVAL(handled);
7670 }
7671 
7672 /* ISR for interrupt test */
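/* Returns IRQ_HANDLED only when the chip really asserted the line
 * (status block updated or INTA# still active), so the interrupt
 * self-test can tell whether delivery actually works.
 */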
7673 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7674 {
7675 	struct tg3_napi *tnapi = dev_id;
7676 	struct tg3 *tp = tnapi->tp;
7677 	struct tg3_hw_status *sblk = tnapi->hw_status;
7678 
7679 	if ((sblk->status & SD_STATUS_UPDATED) ||
7680 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7681 		tg3_disable_ints(tp);
7682 		return IRQ_RETVAL(1);
7683 	}
7684 	return IRQ_RETVAL(0);
7685 }
7686 
7687 #ifdef CONFIG_NET_POLL_CONTROLLER
7688 static void tg3_poll_controller(struct net_device *dev)
7689 {
7690 	int i;
7691 	struct tg3 *tp = netdev_priv(dev);
7692 
7693 	if (tg3_irq_sync(tp))
7694 		return;
7695 
7696 	for (i = 0; i < tp->irq_cnt; i++)
7697 		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7698 }
7699 #endif
7700 
7701 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7702 {
7703 	struct tg3 *tp = netdev_priv(dev);
7704 
7705 	if (netif_msg_tx_err(tp)) {
7706 		netdev_err(dev, "transmit timed out, resetting\n");
7707 		tg3_dump_state(tp);
7708 	}
7709 
7710 	tg3_reset_task_schedule(tp);
7711 }
7712 
7713 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7714 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7715 {
7716 	u32 base = (u32) mapping & 0xffffffff;
7717 
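	/* The low 32 bits wrap around iff the buffer, padded with 8 bytes
	 * of slack (presumably covering extra bytes the DMA engine may
	 * touch), crosses a 4 GB boundary.  Worked example: base =
	 * 0xfffff000, len = 0x2000 gives 0xfffff000 + 0x2000 + 8 =
	 * 0x1008 after truncation, which is < base, so it is flagged.
	 */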
7718 	return base + len + 8 < base;
7719 }
7720 
7721 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7722  * of any 4GB boundaries: 4G, 8G, etc
7723  */
7724 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7725 					   u32 len, u32 mss)
7726 {
7727 	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7728 		u32 base = (u32) mapping & 0xffffffff;
7729 
7730 		return ((base + len + (mss & 0x3fff)) < base);
7731 	}
7732 	return 0;
7733 }
7734 
7735 /* Test for DMA addresses > 40-bit */
7736 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7737 					  int len)
7738 {
7739 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7740 	if (tg3_flag(tp, 40BIT_DMA_BUG))
7741 		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7742 	return 0;
7743 #else
7744 	return 0;
7745 #endif
7746 }
7747 
7748 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7749 				 dma_addr_t mapping, u32 len, u32 flags,
7750 				 u32 mss, u32 vlan)
7751 {
7752 	txbd->addr_hi = ((u64) mapping >> 32);
7753 	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7754 	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7755 	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7756 }
7757 
7758 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7759 			    dma_addr_t map, u32 len, u32 flags,
7760 			    u32 mss, u32 vlan)
7761 {
7762 	struct tg3 *tp = tnapi->tp;
7763 	bool hwbug = false;
7764 
7765 	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7766 		hwbug = true;
7767 
7768 	if (tg3_4g_overflow_test(map, len))
7769 		hwbug = true;
7770 
7771 	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7772 		hwbug = true;
7773 
7774 	if (tg3_40bit_overflow_test(tp, map, len))
7775 		hwbug = true;
7776 
7777 	if (tp->dma_limit) {
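		/* Chips with a DMA length limit get the buffer chopped into
		 * dma_limit-sized descriptors.  If the tail would shrink to
		 * 8 bytes or less, the current chunk is halved instead, so
		 * that no descriptor trips the short-DMA erratum above.
		 */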
7778 		u32 prvidx = *entry;
7779 		u32 tmp_flag = flags & ~TXD_FLAG_END;
7780 		while (len > tp->dma_limit && *budget) {
7781 			u32 frag_len = tp->dma_limit;
7782 			len -= tp->dma_limit;
7783 
7784 			/* Avoid the 8-byte DMA problem */
7785 			if (len <= 8) {
7786 				len += tp->dma_limit / 2;
7787 				frag_len = tp->dma_limit / 2;
7788 			}
7789 
7790 			tnapi->tx_buffers[*entry].fragmented = true;
7791 
7792 			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7793 				      frag_len, tmp_flag, mss, vlan);
7794 			*budget -= 1;
7795 			prvidx = *entry;
7796 			*entry = NEXT_TX(*entry);
7797 
7798 			map += frag_len;
7799 		}
7800 
7801 		if (len) {
7802 			if (*budget) {
7803 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7804 					      len, flags, mss, vlan);
7805 				*budget -= 1;
7806 				*entry = NEXT_TX(*entry);
7807 			} else {
7808 				hwbug = true;
7809 				tnapi->tx_buffers[prvidx].fragmented = false;
7810 			}
7811 		}
7812 	} else {
7813 		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7814 			      len, flags, mss, vlan);
7815 		*entry = NEXT_TX(*entry);
7816 	}
7817 
7818 	return hwbug;
7819 }
7820 
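/* Unmap one transmitted skb: first the linear head, then every page
 * fragment, skipping over the placeholder descriptors that
 * tg3_tx_frag_set() marked as "fragmented" when it split a mapping.
 */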
7821 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7822 {
7823 	int i;
7824 	struct sk_buff *skb;
7825 	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7826 
7827 	skb = txb->skb;
7828 	txb->skb = NULL;
7829 
7830 	dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7831 			 skb_headlen(skb), DMA_TO_DEVICE);
7832 
7833 	while (txb->fragmented) {
7834 		txb->fragmented = false;
7835 		entry = NEXT_TX(entry);
7836 		txb = &tnapi->tx_buffers[entry];
7837 	}
7838 
7839 	for (i = 0; i <= last; i++) {
7840 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7841 
7842 		entry = NEXT_TX(entry);
7843 		txb = &tnapi->tx_buffers[entry];
7844 
7845 		dma_unmap_page(&tnapi->tp->pdev->dev,
7846 			       dma_unmap_addr(txb, mapping),
7847 			       skb_frag_size(frag), DMA_TO_DEVICE);
7848 
7849 		while (txb->fragmented) {
7850 			txb->fragmented = false;
7851 			entry = NEXT_TX(entry);
7852 			txb = &tnapi->tx_buffers[entry];
7853 		}
7854 	}
7855 }
7856 
7857 /* Workaround 4GB and 40-bit hardware DMA bugs. */
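/* The packet is copied into a freshly allocated linear skb on the
 * assumption that the new DMA mapping will not straddle the offending
 * boundary; on the 5701 the data is also realigned to a 4-byte
 * boundary first.
 */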
7858 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7859 				       struct sk_buff **pskb,
7860 				       u32 *entry, u32 *budget,
7861 				       u32 base_flags, u32 mss, u32 vlan)
7862 {
7863 	struct tg3 *tp = tnapi->tp;
7864 	struct sk_buff *new_skb, *skb = *pskb;
7865 	dma_addr_t new_addr = 0;
7866 	int ret = 0;
7867 
7868 	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7869 		new_skb = skb_copy(skb, GFP_ATOMIC);
7870 	else {
7871 		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7872 
7873 		new_skb = skb_copy_expand(skb,
7874 					  skb_headroom(skb) + more_headroom,
7875 					  skb_tailroom(skb), GFP_ATOMIC);
7876 	}
7877 
7878 	if (!new_skb) {
7879 		ret = -1;
7880 	} else {
7881 		/* New SKB is guaranteed to be linear. */
7882 		new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7883 					  new_skb->len, DMA_TO_DEVICE);
7884 		/* Make sure the mapping succeeded */
7885 		if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7886 			dev_kfree_skb_any(new_skb);
7887 			ret = -1;
7888 		} else {
7889 			u32 save_entry = *entry;
7890 
7891 			base_flags |= TXD_FLAG_END;
7892 
7893 			tnapi->tx_buffers[*entry].skb = new_skb;
7894 			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7895 					   mapping, new_addr);
7896 
7897 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7898 					    new_skb->len, base_flags,
7899 					    mss, vlan)) {
7900 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7901 				dev_kfree_skb_any(new_skb);
7902 				ret = -1;
7903 			}
7904 		}
7905 	}
7906 
7907 	dev_consume_skb_any(skb);
7908 	*pskb = new_skb;
7909 	return ret;
7910 }
7911 
7912 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7913 {
7914 	/* Check if we will never have enough descriptors,
7915 	 * as gso_segs can exceed the current ring size
7916 	 */
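	/* Each segment is budgeted at three descriptors here, matching
	 * the frag_cnt_est = gso_segs * 3 worst-case estimate used in
	 * tg3_tso_bug().
	 */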
7917 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7918 }
7919 
7920 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7921 
7922 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7923  * indicated in tg3_tx_frag_set()
7924  */
7925 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7926 		       struct netdev_queue *txq, struct sk_buff *skb)
7927 {
7928 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7929 	struct sk_buff *segs, *seg, *next;
7930 
7931 	/* Estimate the number of fragments in the worst case */
7932 	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7933 		netif_tx_stop_queue(txq);
7934 
7935 		/* netif_tx_stop_queue() must be done before checking
7936 		 * tx index in tg3_tx_avail() below, because in
7937 		 * tg3_tx(), we update tx index before checking for
7938 		 * netif_tx_queue_stopped().
7939 		 */
7940 		smp_mb();
7941 		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7942 			return NETDEV_TX_BUSY;
7943 
7944 		netif_tx_wake_queue(txq);
7945 	}
7946 
7947 	segs = skb_gso_segment(skb, tp->dev->features &
7948 				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7949 	if (IS_ERR(segs) || !segs) {
7950 		tnapi->tx_dropped++;
7951 		goto tg3_tso_bug_end;
7952 	}
7953 
7954 	skb_list_walk_safe(segs, seg, next) {
7955 		skb_mark_not_on_list(seg);
7956 		__tg3_start_xmit(seg, tp->dev);
7957 	}
7958 
7959 tg3_tso_bug_end:
7960 	dev_consume_skb_any(skb);
7961 
7962 	return NETDEV_TX_OK;
7963 }
7964 
7965 /* hard_start_xmit for all devices */
7966 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7967 {
7968 	struct tg3 *tp = netdev_priv(dev);
7969 	u32 len, entry, base_flags, mss, vlan = 0;
7970 	u32 budget;
7971 	int i = -1, would_hit_hwbug;
7972 	dma_addr_t mapping;
7973 	struct tg3_napi *tnapi;
7974 	struct netdev_queue *txq;
7975 	unsigned int last;
7976 	struct iphdr *iph = NULL;
7977 	struct tcphdr *tcph = NULL;
7978 	__sum16 tcp_csum = 0, ip_csum = 0;
7979 	__be16 ip_tot_len = 0;
7980 
7981 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7982 	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7983 	if (tg3_flag(tp, ENABLE_TSS))
7984 		tnapi++;
7985 
7986 	budget = tg3_tx_avail(tnapi);
7987 
7988 	/* We are running in BH disabled context with netif_tx_lock
7989 	 * and TX reclaim runs via tp->napi.poll inside of a software
7990 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7991 	 * no IRQ context deadlocks to worry about either.  Rejoice!
7992 	 */
7993 	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7994 		if (!netif_tx_queue_stopped(txq)) {
7995 			netif_tx_stop_queue(txq);
7996 
7997 			/* This is a hard error, log it. */
7998 			netdev_err(dev,
7999 				   "BUG! Tx Ring full when queue awake!\n");
8000 		}
8001 		return NETDEV_TX_BUSY;
8002 	}
8003 
8004 	entry = tnapi->tx_prod;
8005 	base_flags = 0;
8006 
8007 	mss = skb_shinfo(skb)->gso_size;
8008 	if (mss) {
8009 		u32 tcp_opt_len, hdr_len;
8010 
8011 		if (skb_cow_head(skb, 0))
8012 			goto drop;
8013 
8014 		iph = ip_hdr(skb);
8015 		tcp_opt_len = tcp_optlen(skb);
8016 
8017 		hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
8018 
8019 		/* HW/FW cannot correctly segment packets that have been
8020 		 * VLAN encapsulated.
8021 		 */
8022 		if (skb->protocol == htons(ETH_P_8021Q) ||
8023 		    skb->protocol == htons(ETH_P_8021AD)) {
8024 			if (tg3_tso_bug_gso_check(tnapi, skb))
8025 				return tg3_tso_bug(tp, tnapi, txq, skb);
8026 			goto drop;
8027 		}
8028 
8029 		if (!skb_is_gso_v6(skb)) {
8030 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8031 			    tg3_flag(tp, TSO_BUG)) {
8032 				if (tg3_tso_bug_gso_check(tnapi, skb))
8033 					return tg3_tso_bug(tp, tnapi, txq, skb);
8034 				goto drop;
8035 			}
8036 			ip_csum = iph->check;
8037 			ip_tot_len = iph->tot_len;
8038 			iph->check = 0;
8039 			iph->tot_len = htons(mss + hdr_len);
8040 		}
8041 
8042 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8043 			       TXD_FLAG_CPU_POST_DMA);
8044 
8045 		tcph = tcp_hdr(skb);
8046 		tcp_csum = tcph->check;
8047 
8048 		if (tg3_flag(tp, HW_TSO_1) ||
8049 		    tg3_flag(tp, HW_TSO_2) ||
8050 		    tg3_flag(tp, HW_TSO_3)) {
8051 			tcph->check = 0;
8052 			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8053 		} else {
8054 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8055 							 0, IPPROTO_TCP, 0);
8056 		}
8057 
8058 		if (tg3_flag(tp, HW_TSO_3)) {
8059 			mss |= (hdr_len & 0xc) << 12;
8060 			if (hdr_len & 0x10)
8061 				base_flags |= 0x00000010;
8062 			base_flags |= (hdr_len & 0x3e0) << 5;
8063 		} else if (tg3_flag(tp, HW_TSO_2))
8064 			mss |= hdr_len << 9;
8065 		else if (tg3_flag(tp, HW_TSO_1) ||
8066 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
8067 			if (tcp_opt_len || iph->ihl > 5) {
8068 				int tsflags;
8069 
8070 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8071 				mss |= (tsflags << 11);
8072 			}
8073 		} else {
8074 			if (tcp_opt_len || iph->ihl > 5) {
8075 				int tsflags;
8076 
8077 				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8078 				base_flags |= tsflags << 12;
8079 			}
8080 		}
8081 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8082 		/* HW/FW cannot correctly checksum packets that have been
8083 		 * VLAN encapsulated.
8084 		 */
8085 		if (skb->protocol == htons(ETH_P_8021Q) ||
8086 		    skb->protocol == htons(ETH_P_8021AD)) {
8087 			if (skb_checksum_help(skb))
8088 				goto drop;
8089 		} else  {
8090 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8091 		}
8092 	}
8093 
8094 	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8095 	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8096 		base_flags |= TXD_FLAG_JMB_PKT;
8097 
8098 	if (skb_vlan_tag_present(skb)) {
8099 		base_flags |= TXD_FLAG_VLAN;
8100 		vlan = skb_vlan_tag_get(skb);
8101 	}
8102 
8103 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8104 	    tg3_flag(tp, TX_TSTAMP_EN)) {
8105 		tg3_full_lock(tp, 0);
8106 		if (!tp->pre_tx_ts) {
8107 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8108 			base_flags |= TXD_FLAG_HWTSTAMP;
8109 			tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8110 		}
8111 		tg3_full_unlock(tp);
8112 	}
8113 
8114 	len = skb_headlen(skb);
8115 
8116 	mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8117 				 DMA_TO_DEVICE);
8118 	if (dma_mapping_error(&tp->pdev->dev, mapping))
8119 		goto drop;
8120 
8121 
8122 	tnapi->tx_buffers[entry].skb = skb;
8123 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8124 
8125 	would_hit_hwbug = 0;
8126 
8127 	if (tg3_flag(tp, 5701_DMA_BUG))
8128 		would_hit_hwbug = 1;
8129 
8130 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8131 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8132 			    mss, vlan)) {
8133 		would_hit_hwbug = 1;
8134 	} else if (skb_shinfo(skb)->nr_frags > 0) {
8135 		u32 tmp_mss = mss;
8136 
8137 		if (!tg3_flag(tp, HW_TSO_1) &&
8138 		    !tg3_flag(tp, HW_TSO_2) &&
8139 		    !tg3_flag(tp, HW_TSO_3))
8140 			tmp_mss = 0;
8141 
8142 		/* Now loop through additional data
8143 		 * fragments, and queue them.
8144 		 */
8145 		last = skb_shinfo(skb)->nr_frags - 1;
8146 		for (i = 0; i <= last; i++) {
8147 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8148 
8149 			len = skb_frag_size(frag);
8150 			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8151 						   len, DMA_TO_DEVICE);
8152 
8153 			tnapi->tx_buffers[entry].skb = NULL;
8154 			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8155 					   mapping);
8156 			if (dma_mapping_error(&tp->pdev->dev, mapping))
8157 				goto dma_error;
8158 
8159 			if (!budget ||
8160 			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8161 					    len, base_flags |
8162 					    ((i == last) ? TXD_FLAG_END : 0),
8163 					    tmp_mss, vlan)) {
8164 				would_hit_hwbug = 1;
8165 				break;
8166 			}
8167 		}
8168 	}
8169 
8170 	if (would_hit_hwbug) {
8171 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8172 
8173 		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8174 			/* If it's a TSO packet, do GSO instead of
8175 			 * allocating and copying to a large linear SKB
8176 			 */
8177 			if (ip_tot_len) {
8178 				iph->check = ip_csum;
8179 				iph->tot_len = ip_tot_len;
8180 			}
8181 			tcph->check = tcp_csum;
8182 			return tg3_tso_bug(tp, tnapi, txq, skb);
8183 		}
8184 
8185 		/* If the workaround fails due to memory/mapping
8186 		 * failure, silently drop this packet.
8187 		 */
8188 		entry = tnapi->tx_prod;
8189 		budget = tg3_tx_avail(tnapi);
8190 		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8191 						base_flags, mss, vlan))
8192 			goto drop_nofree;
8193 	}
8194 
8195 	skb_tx_timestamp(skb);
8196 	netdev_tx_sent_queue(txq, skb->len);
8197 
8198 	/* Sync BD data before updating mailbox */
8199 	wmb();
8200 
8201 	tnapi->tx_prod = entry;
8202 	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8203 		netif_tx_stop_queue(txq);
8204 
8205 		/* netif_tx_stop_queue() must be done before checking
8206 		 * tx index in tg3_tx_avail() below, because in
8207 		 * tg3_tx(), we update tx index before checking for
8208 		 * netif_tx_queue_stopped().
8209 		 */
8210 		smp_mb();
8211 		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8212 			netif_tx_wake_queue(txq);
8213 	}
8214 
8215 	return NETDEV_TX_OK;
8216 
8217 dma_error:
8218 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8219 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8220 drop:
8221 	dev_kfree_skb_any(skb);
8222 drop_nofree:
8223 	tnapi->tx_dropped++;
8224 	return NETDEV_TX_OK;
8225 }
8226 
8227 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8228 {
8229 	struct netdev_queue *txq;
8230 	u16 skb_queue_mapping;
8231 	netdev_tx_t ret;
8232 
8233 	skb_queue_mapping = skb_get_queue_mapping(skb);
8234 	txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8235 
8236 	ret = __tg3_start_xmit(skb, dev);
8237 
8238 	/* Notify the hardware that packets are ready by updating the TX ring
8239 	 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8240 	 * the hardware for every packet. To guarantee forward progress the TX
8241 	 * ring must be drained when it is full as indicated by
8242 	 * netif_xmit_stopped(). This needs to happen even when the current
8243 	 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8244 	 * queued by previous __tg3_start_xmit() calls might get stuck in
8245 	 * the queue forever.
8246 	 */
8247 	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8248 		struct tg3_napi *tnapi;
8249 		struct tg3 *tp;
8250 
8251 		tp = netdev_priv(dev);
8252 		tnapi = &tp->napi[skb_queue_mapping];
8253 
8254 		if (tg3_flag(tp, ENABLE_TSS))
8255 			tnapi++;
8256 
8257 		tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8258 	}
8259 
8260 	return ret;
8261 }
8262 
8263 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8264 {
8265 	if (enable) {
8266 		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8267 				  MAC_MODE_PORT_MODE_MASK);
8268 
8269 		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8270 
8271 		if (!tg3_flag(tp, 5705_PLUS))
8272 			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8273 
8274 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8275 			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8276 		else
8277 			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8278 	} else {
8279 		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8280 
8281 		if (tg3_flag(tp, 5705_PLUS) ||
8282 		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8283 		    tg3_asic_rev(tp) == ASIC_REV_5700)
8284 			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8285 	}
8286 
8287 	tw32(MAC_MODE, tp->mac_mode);
8288 	udelay(40);
8289 }
8290 
8291 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8292 {
8293 	u32 val, bmcr, mac_mode, ptest = 0;
8294 
8295 	tg3_phy_toggle_apd(tp, false);
8296 	tg3_phy_toggle_automdix(tp, false);
8297 
8298 	if (extlpbk && tg3_phy_set_extloopbk(tp))
8299 		return -EIO;
8300 
8301 	bmcr = BMCR_FULLDPLX;
8302 	switch (speed) {
8303 	case SPEED_10:
8304 		break;
8305 	case SPEED_100:
8306 		bmcr |= BMCR_SPEED100;
8307 		break;
8308 	case SPEED_1000:
8309 	default:
8310 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8311 			speed = SPEED_100;
8312 			bmcr |= BMCR_SPEED100;
8313 		} else {
8314 			speed = SPEED_1000;
8315 			bmcr |= BMCR_SPEED1000;
8316 		}
8317 	}
8318 
8319 	if (extlpbk) {
8320 		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8321 			tg3_readphy(tp, MII_CTRL1000, &val);
8322 			val |= CTL1000_AS_MASTER |
8323 			       CTL1000_ENABLE_MASTER;
8324 			tg3_writephy(tp, MII_CTRL1000, val);
8325 		} else {
8326 			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8327 				MII_TG3_FET_PTEST_TRIM_2;
8328 			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8329 		}
8330 	} else
8331 		bmcr |= BMCR_LOOPBACK;
8332 
8333 	tg3_writephy(tp, MII_BMCR, bmcr);
8334 
8335 	/* The write needs to be flushed for the FETs */
8336 	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8337 		tg3_readphy(tp, MII_BMCR, &bmcr);
8338 
8339 	udelay(40);
8340 
8341 	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8342 	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8343 		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8344 			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8345 			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8346 
8347 		/* The write needs to be flushed for the AC131 */
8348 		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8349 	}
8350 
8351 	/* Reset to prevent losing 1st rx packet intermittently */
8352 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8353 	    tg3_flag(tp, 5780_CLASS)) {
8354 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8355 		udelay(10);
8356 		tw32_f(MAC_RX_MODE, tp->rx_mode);
8357 	}
8358 
8359 	mac_mode = tp->mac_mode &
8360 		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8361 	if (speed == SPEED_1000)
8362 		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8363 	else
8364 		mac_mode |= MAC_MODE_PORT_MODE_MII;
8365 
8366 	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8367 		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8368 
8369 		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8370 			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8371 		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8372 			mac_mode |= MAC_MODE_LINK_POLARITY;
8373 
8374 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8375 			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8376 	}
8377 
8378 	tw32(MAC_MODE, mac_mode);
8379 	udelay(40);
8380 
8381 	return 0;
8382 }
8383 
8384 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8385 {
8386 	struct tg3 *tp = netdev_priv(dev);
8387 
8388 	if (features & NETIF_F_LOOPBACK) {
8389 		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8390 			return;
8391 
8392 		spin_lock_bh(&tp->lock);
8393 		tg3_mac_loopback(tp, true);
8394 		netif_carrier_on(tp->dev);
8395 		spin_unlock_bh(&tp->lock);
8396 		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8397 	} else {
8398 		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8399 			return;
8400 
8401 		spin_lock_bh(&tp->lock);
8402 		tg3_mac_loopback(tp, false);
8403 		/* Force link status check */
8404 		tg3_setup_phy(tp, true);
8405 		spin_unlock_bh(&tp->lock);
8406 		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8407 	}
8408 }
8409 
8410 static netdev_features_t tg3_fix_features(struct net_device *dev,
8411 	netdev_features_t features)
8412 {
8413 	struct tg3 *tp = netdev_priv(dev);
8414 
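	/* The 5780 family cannot do TSO and jumbo frames at the same
	 * time, so TSO is masked out whenever the MTU exceeds the
	 * standard Ethernet payload.
	 */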
8415 	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8416 		features &= ~NETIF_F_ALL_TSO;
8417 
8418 	return features;
8419 }
8420 
8421 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8422 {
8423 	netdev_features_t changed = dev->features ^ features;
8424 
8425 	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8426 		tg3_set_loopback(dev, features);
8427 
8428 	return 0;
8429 }
8430 
8431 static void tg3_rx_prodring_free(struct tg3 *tp,
8432 				 struct tg3_rx_prodring_set *tpr)
8433 {
8434 	int i;
8435 
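	/* Per-vector staging rings only own the buffers sitting between
	 * their consumer and producer indices; everything else was handed
	 * off to napi[0]'s real ring, which is freed slot-by-slot below.
	 */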
8436 	if (tpr != &tp->napi[0].prodring) {
8437 		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8438 		     i = (i + 1) & tp->rx_std_ring_mask)
8439 			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8440 					tp->rx_pkt_map_sz);
8441 
8442 		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8443 			for (i = tpr->rx_jmb_cons_idx;
8444 			     i != tpr->rx_jmb_prod_idx;
8445 			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8446 				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8447 						TG3_RX_JMB_MAP_SZ);
8448 			}
8449 		}
8450 
8451 		return;
8452 	}
8453 
8454 	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8455 		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8456 				tp->rx_pkt_map_sz);
8457 
8458 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8459 		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8460 			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8461 					TG3_RX_JMB_MAP_SZ);
8462 	}
8463 }
8464 
8465 /* Initialize rx rings for packet processing.
8466  *
8467  * The chip has been shut down and the driver detached from
8468  * the networking stack, so no interrupts or new tx packets will
8469  * end up in the driver.  tp->{tx,}lock are held and thus
8470  * we may not sleep.
8471  */
8472 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8473 				 struct tg3_rx_prodring_set *tpr)
8474 {
8475 	u32 i, rx_pkt_dma_sz;
8476 
8477 	tpr->rx_std_cons_idx = 0;
8478 	tpr->rx_std_prod_idx = 0;
8479 	tpr->rx_jmb_cons_idx = 0;
8480 	tpr->rx_jmb_prod_idx = 0;
8481 
8482 	if (tpr != &tp->napi[0].prodring) {
8483 		memset(&tpr->rx_std_buffers[0], 0,
8484 		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8485 		if (tpr->rx_jmb_buffers)
8486 			memset(&tpr->rx_jmb_buffers[0], 0,
8487 			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8488 		goto done;
8489 	}
8490 
8491 	/* Zero out all descriptors. */
8492 	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8493 
8494 	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8495 	if (tg3_flag(tp, 5780_CLASS) &&
8496 	    tp->dev->mtu > ETH_DATA_LEN)
8497 		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8498 	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8499 
8500 	/* Initialize invariants of the rings; we only set this
8501 	 * stuff once.  This works because the card does not
8502 	 * write into the rx buffer posting rings.
8503 	 */
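	/* The opaque field is echoed back by the chip in rx return ring
	 * entries; the rx path decodes the ring tag and slot index from
	 * it to locate the buffer that completed.
	 */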
8504 	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8505 		struct tg3_rx_buffer_desc *rxd;
8506 
8507 		rxd = &tpr->rx_std[i];
8508 		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8509 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8510 		rxd->opaque = (RXD_OPAQUE_RING_STD |
8511 			       (i << RXD_OPAQUE_INDEX_SHIFT));
8512 	}
8513 
8514 	/* Now allocate fresh SKBs for each rx ring. */
8515 	for (i = 0; i < tp->rx_pending; i++) {
8516 		unsigned int frag_size;
8517 
8518 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8519 				      &frag_size) < 0) {
8520 			netdev_warn(tp->dev,
8521 				    "Using a smaller RX standard ring. Only "
8522 				    "%d out of %d buffers were allocated "
8523 				    "successfully\n", i, tp->rx_pending);
8524 			if (i == 0)
8525 				goto initfail;
8526 			tp->rx_pending = i;
8527 			break;
8528 		}
8529 	}
8530 
8531 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8532 		goto done;
8533 
8534 	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8535 
8536 	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8537 		goto done;
8538 
8539 	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8540 		struct tg3_rx_buffer_desc *rxd;
8541 
8542 		rxd = &tpr->rx_jmb[i].std;
8543 		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8544 		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8545 				  RXD_FLAG_JUMBO;
8546 		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8547 		       (i << RXD_OPAQUE_INDEX_SHIFT));
8548 	}
8549 
8550 	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8551 		unsigned int frag_size;
8552 
8553 		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8554 				      &frag_size) < 0) {
8555 			netdev_warn(tp->dev,
8556 				    "Using a smaller RX jumbo ring. Only %d "
8557 				    "out of %d buffers were allocated "
8558 				    "successfully\n", i, tp->rx_jumbo_pending);
8559 			if (i == 0)
8560 				goto initfail;
8561 			tp->rx_jumbo_pending = i;
8562 			break;
8563 		}
8564 	}
8565 
8566 done:
8567 	return 0;
8568 
8569 initfail:
8570 	tg3_rx_prodring_free(tp, tpr);
8571 	return -ENOMEM;
8572 }
8573 
8574 static void tg3_rx_prodring_fini(struct tg3 *tp,
8575 				 struct tg3_rx_prodring_set *tpr)
8576 {
8577 	kfree(tpr->rx_std_buffers);
8578 	tpr->rx_std_buffers = NULL;
8579 	kfree(tpr->rx_jmb_buffers);
8580 	tpr->rx_jmb_buffers = NULL;
8581 	if (tpr->rx_std) {
8582 		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8583 				  tpr->rx_std, tpr->rx_std_mapping);
8584 		tpr->rx_std = NULL;
8585 	}
8586 	if (tpr->rx_jmb) {
8587 		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8588 				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8589 		tpr->rx_jmb = NULL;
8590 	}
8591 }
8592 
8593 static int tg3_rx_prodring_init(struct tg3 *tp,
8594 				struct tg3_rx_prodring_set *tpr)
8595 {
8596 	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8597 				      GFP_KERNEL);
8598 	if (!tpr->rx_std_buffers)
8599 		return -ENOMEM;
8600 
8601 	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8602 					 TG3_RX_STD_RING_BYTES(tp),
8603 					 &tpr->rx_std_mapping,
8604 					 GFP_KERNEL);
8605 	if (!tpr->rx_std)
8606 		goto err_out;
8607 
8608 	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8609 		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8610 					      GFP_KERNEL);
8611 		if (!tpr->rx_jmb_buffers)
8612 			goto err_out;
8613 
8614 		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8615 						 TG3_RX_JMB_RING_BYTES(tp),
8616 						 &tpr->rx_jmb_mapping,
8617 						 GFP_KERNEL);
8618 		if (!tpr->rx_jmb)
8619 			goto err_out;
8620 	}
8621 
8622 	return 0;
8623 
8624 err_out:
8625 	tg3_rx_prodring_fini(tp, tpr);
8626 	return -ENOMEM;
8627 }
8628 
8629 /* Free up pending packets in all rx/tx rings.
8630  *
8631  * The chip has been shut down and the driver detached from
8632  * the networking, so no interrupts or new tx packets will
8633  * end up in the driver.  tp->{tx,}lock is not held and we are not
8634  * in an interrupt context and thus may sleep.
8635  */
8636 static void tg3_free_rings(struct tg3 *tp)
8637 {
8638 	int i, j;
8639 
8640 	for (j = 0; j < tp->irq_cnt; j++) {
8641 		struct tg3_napi *tnapi = &tp->napi[j];
8642 
8643 		tg3_rx_prodring_free(tp, &tnapi->prodring);
8644 
8645 		if (!tnapi->tx_buffers)
8646 			continue;
8647 
8648 		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8649 			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8650 
8651 			if (!skb)
8652 				continue;
8653 
8654 			tg3_tx_skb_unmap(tnapi, i,
8655 					 skb_shinfo(skb)->nr_frags - 1);
8656 
8657 			dev_consume_skb_any(skb);
8658 		}
8659 		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8660 	}
8661 }
8662 
8663 /* Initialize tx/rx rings for packet processing.
8664  *
8665  * The chip has been shut down and the driver detached from
8666  * the networking, so no interrupts or new tx packets will
8667  * end up in the driver.  tp->{tx,}lock are held and thus
8668  * we may not sleep.
8669  */
8670 static int tg3_init_rings(struct tg3 *tp)
8671 {
8672 	int i;
8673 
8674 	/* Free up all the SKBs. */
8675 	tg3_free_rings(tp);
8676 
8677 	for (i = 0; i < tp->irq_cnt; i++) {
8678 		struct tg3_napi *tnapi = &tp->napi[i];
8679 
8680 		tnapi->last_tag = 0;
8681 		tnapi->last_irq_tag = 0;
8682 		tnapi->hw_status->status = 0;
8683 		tnapi->hw_status->status_tag = 0;
8684 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8685 
8686 		tnapi->tx_prod = 0;
8687 		tnapi->tx_cons = 0;
8688 		if (tnapi->tx_ring)
8689 			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8690 
8691 		tnapi->rx_rcb_ptr = 0;
8692 		if (tnapi->rx_rcb)
8693 			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8694 
8695 		if (tnapi->prodring.rx_std &&
8696 		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8697 			tg3_free_rings(tp);
8698 			return -ENOMEM;
8699 		}
8700 	}
8701 
8702 	return 0;
8703 }
8704 
8705 static void tg3_mem_tx_release(struct tg3 *tp)
8706 {
8707 	int i;
8708 
8709 	for (i = 0; i < tp->irq_max; i++) {
8710 		struct tg3_napi *tnapi = &tp->napi[i];
8711 
8712 		if (tnapi->tx_ring) {
8713 			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8714 				tnapi->tx_ring, tnapi->tx_desc_mapping);
8715 			tnapi->tx_ring = NULL;
8716 		}
8717 
8718 		kfree(tnapi->tx_buffers);
8719 		tnapi->tx_buffers = NULL;
8720 	}
8721 }
8722 
8723 static int tg3_mem_tx_acquire(struct tg3 *tp)
8724 {
8725 	int i;
8726 	struct tg3_napi *tnapi = &tp->napi[0];
8727 
8728 	/* If multivector TSS is enabled, vector 0 does not handle
8729 	 * tx interrupts.  Don't allocate any resources for it.
8730 	 */
8731 	if (tg3_flag(tp, ENABLE_TSS))
8732 		tnapi++;
8733 
8734 	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8735 		tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8736 					    sizeof(struct tg3_tx_ring_info),
8737 					    GFP_KERNEL);
8738 		if (!tnapi->tx_buffers)
8739 			goto err_out;
8740 
8741 		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8742 						    TG3_TX_RING_BYTES,
8743 						    &tnapi->tx_desc_mapping,
8744 						    GFP_KERNEL);
8745 		if (!tnapi->tx_ring)
8746 			goto err_out;
8747 	}
8748 
8749 	return 0;
8750 
8751 err_out:
8752 	tg3_mem_tx_release(tp);
8753 	return -ENOMEM;
8754 }
8755 
8756 static void tg3_mem_rx_release(struct tg3 *tp)
8757 {
8758 	int i;
8759 
8760 	for (i = 0; i < tp->irq_max; i++) {
8761 		struct tg3_napi *tnapi = &tp->napi[i];
8762 
8763 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8764 
8765 		if (!tnapi->rx_rcb)
8766 			continue;
8767 
8768 		dma_free_coherent(&tp->pdev->dev,
8769 				  TG3_RX_RCB_RING_BYTES(tp),
8770 				  tnapi->rx_rcb,
8771 				  tnapi->rx_rcb_mapping);
8772 		tnapi->rx_rcb = NULL;
8773 	}
8774 }
8775 
8776 static int tg3_mem_rx_acquire(struct tg3 *tp)
8777 {
8778 	unsigned int i, limit;
8779 
8780 	limit = tp->rxq_cnt;
8781 
8782 	/* If RSS is enabled, we need a (dummy) producer ring
8783 	 * set on vector zero.  This is the true hw prodring.
8784 	 */
8785 	if (tg3_flag(tp, ENABLE_RSS))
8786 		limit++;
8787 
8788 	for (i = 0; i < limit; i++) {
8789 		struct tg3_napi *tnapi = &tp->napi[i];
8790 
8791 		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8792 			goto err_out;
8793 
8794 		/* If multivector RSS is enabled, vector 0
8795 		 * does not handle rx or tx interrupts.
8796 		 * Don't allocate any resources for it.
8797 		 */
8798 		if (!i && tg3_flag(tp, ENABLE_RSS))
8799 			continue;
8800 
8801 		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8802 						   TG3_RX_RCB_RING_BYTES(tp),
8803 						   &tnapi->rx_rcb_mapping,
8804 						   GFP_KERNEL);
8805 		if (!tnapi->rx_rcb)
8806 			goto err_out;
8807 	}
8808 
8809 	return 0;
8810 
8811 err_out:
8812 	tg3_mem_rx_release(tp);
8813 	return -ENOMEM;
8814 }
8815 
8816 /*
8817  * Must not be invoked with interrupt sources disabled and
8818  * the hardware shut down.
8819  */
8820 static void tg3_free_consistent(struct tg3 *tp)
8821 {
8822 	int i;
8823 
8824 	for (i = 0; i < tp->irq_cnt; i++) {
8825 		struct tg3_napi *tnapi = &tp->napi[i];
8826 
8827 		if (tnapi->hw_status) {
8828 			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8829 					  tnapi->hw_status,
8830 					  tnapi->status_mapping);
8831 			tnapi->hw_status = NULL;
8832 		}
8833 	}
8834 
8835 	tg3_mem_rx_release(tp);
8836 	tg3_mem_tx_release(tp);
8837 
8838 	/* tp->hw_stats can be referenced safely:
8839 	 *     1. under rtnl_lock
8840 	 *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8841 	 */
8842 	if (tp->hw_stats) {
8843 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8844 				  tp->hw_stats, tp->stats_mapping);
8845 		tp->hw_stats = NULL;
8846 	}
8847 }
8848 
8849 /*
8850  * Must not be invoked with interrupt sources disabled and
8851  * the hardware shut down.  Can sleep.
8852  */
8853 static int tg3_alloc_consistent(struct tg3 *tp)
8854 {
8855 	int i;
8856 
8857 	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8858 					  sizeof(struct tg3_hw_stats),
8859 					  &tp->stats_mapping, GFP_KERNEL);
8860 	if (!tp->hw_stats)
8861 		goto err_out;
8862 
8863 	for (i = 0; i < tp->irq_cnt; i++) {
8864 		struct tg3_napi *tnapi = &tp->napi[i];
8865 		struct tg3_hw_status *sblk;
8866 
8867 		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8868 						      TG3_HW_STATUS_SIZE,
8869 						      &tnapi->status_mapping,
8870 						      GFP_KERNEL);
8871 		if (!tnapi->hw_status)
8872 			goto err_out;
8873 
8874 		sblk = tnapi->hw_status;
8875 
8876 		if (tg3_flag(tp, ENABLE_RSS)) {
8877 			u16 *prodptr = NULL;
8878 
8879 			/*
8880 			 * When RSS is enabled, the status block format changes
8881 			 * slightly.  The "rx_jumbo_consumer", "reserved",
8882 			 * and "rx_mini_consumer" members get mapped to the
8883 			 * other three rx return ring producer indexes.
8884 			 */
8885 			switch (i) {
8886 			case 1:
8887 				prodptr = &sblk->idx[0].rx_producer;
8888 				break;
8889 			case 2:
8890 				prodptr = &sblk->rx_jumbo_consumer;
8891 				break;
8892 			case 3:
8893 				prodptr = &sblk->reserved;
8894 				break;
8895 			case 4:
8896 				prodptr = &sblk->rx_mini_consumer;
8897 				break;
8898 			}
8899 			tnapi->rx_rcb_prod_idx = prodptr;
8900 		} else {
8901 			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8902 		}
8903 	}
8904 
8905 	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8906 		goto err_out;
8907 
8908 	return 0;
8909 
8910 err_out:
8911 	tg3_free_consistent(tp);
8912 	return -ENOMEM;
8913 }
8914 
8915 #define MAX_WAIT_CNT 1000
8916 
8917 /* To stop a block, clear the enable bit and poll till it
8918  * clears.  tp->lock is held.
8919  */
8920 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8921 {
8922 	unsigned int i;
8923 	u32 val;
8924 
8925 	if (tg3_flag(tp, 5705_PLUS)) {
8926 		switch (ofs) {
8927 		case RCVLSC_MODE:
8928 		case DMAC_MODE:
8929 		case MBFREE_MODE:
8930 		case BUFMGR_MODE:
8931 		case MEMARB_MODE:
8932 			/* We can't enable/disable these bits of the
8933 			 * 5705/5750; just say success.
8934 			 */
8935 			return 0;
8936 
8937 		default:
8938 			break;
8939 		}
8940 	}
8941 
8942 	val = tr32(ofs);
8943 	val &= ~enable_bit;
8944 	tw32_f(ofs, val);
8945 
8946 	for (i = 0; i < MAX_WAIT_CNT; i++) {
8947 		if (pci_channel_offline(tp->pdev)) {
8948 			dev_err(&tp->pdev->dev,
8949 				"tg3_stop_block device offline, "
8950 				"ofs=%lx enable_bit=%x\n",
8951 				ofs, enable_bit);
8952 			return -ENODEV;
8953 		}
8954 
8955 		udelay(100);
8956 		val = tr32(ofs);
8957 		if ((val & enable_bit) == 0)
8958 			break;
8959 	}
8960 
8961 	if (i == MAX_WAIT_CNT && !silent) {
8962 		dev_err(&tp->pdev->dev,
8963 			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8964 			ofs, enable_bit);
8965 		return -ENODEV;
8966 	}
8967 
8968 	return 0;
8969 }
8970 
8971 /* tp->lock is held. */
8972 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8973 {
8974 	int i, err;
8975 
8976 	tg3_disable_ints(tp);
8977 
8978 	if (pci_channel_offline(tp->pdev)) {
8979 		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8980 		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8981 		err = -ENODEV;
8982 		goto err_no_dev;
8983 	}
8984 
8985 	tp->rx_mode &= ~RX_MODE_ENABLE;
8986 	tw32_f(MAC_RX_MODE, tp->rx_mode);
8987 	udelay(10);
8988 
8989 	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8990 	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8991 	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8992 	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8993 	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8994 	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8995 
8996 	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8997 	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8998 	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8999 	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
9000 	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
9001 	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
9002 	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
9003 
9004 	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
9005 	tw32_f(MAC_MODE, tp->mac_mode);
9006 	udelay(40);
9007 
9008 	tp->tx_mode &= ~TX_MODE_ENABLE;
9009 	tw32_f(MAC_TX_MODE, tp->tx_mode);
9010 
9011 	for (i = 0; i < MAX_WAIT_CNT; i++) {
9012 		udelay(100);
9013 		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
9014 			break;
9015 	}
9016 	if (i >= MAX_WAIT_CNT) {
9017 		dev_err(&tp->pdev->dev,
9018 			"%s timed out, TX_MODE_ENABLE will not clear "
9019 			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9020 		err |= -ENODEV;
9021 	}
9022 
9023 	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9024 	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9025 	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9026 
9027 	tw32(FTQ_RESET, 0xffffffff);
9028 	tw32(FTQ_RESET, 0x00000000);
9029 
9030 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9031 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9032 
9033 err_no_dev:
9034 	for (i = 0; i < tp->irq_cnt; i++) {
9035 		struct tg3_napi *tnapi = &tp->napi[i];
9036 		if (tnapi->hw_status)
9037 			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9038 	}
9039 
9040 	return err;
9041 }
9042 
9043 /* Save PCI command register before chip reset */
9044 static void tg3_save_pci_state(struct tg3 *tp)
9045 {
9046 	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9047 }
9048 
9049 /* Restore PCI state after chip reset */
9050 static void tg3_restore_pci_state(struct tg3 *tp)
9051 {
9052 	u32 val;
9053 
9054 	/* Re-enable indirect register accesses. */
9055 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9056 			       tp->misc_host_ctrl);
9057 
9058 	/* Set MAX PCI retry to zero. */
9059 	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9060 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9061 	    tg3_flag(tp, PCIX_MODE))
9062 		val |= PCISTATE_RETRY_SAME_DMA;
9063 	/* Allow reads and writes to the APE register and memory space. */
9064 	if (tg3_flag(tp, ENABLE_APE))
9065 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9066 		       PCISTATE_ALLOW_APE_SHMEM_WR |
9067 		       PCISTATE_ALLOW_APE_PSPACE_WR;
9068 	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9069 
9070 	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9071 
9072 	if (!tg3_flag(tp, PCI_EXPRESS)) {
9073 		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9074 				      tp->pci_cacheline_sz);
9075 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9076 				      tp->pci_lat_timer);
9077 	}
9078 
9079 	/* Make sure PCI-X relaxed ordering bit is clear. */
9080 	if (tg3_flag(tp, PCIX_MODE)) {
9081 		u16 pcix_cmd;
9082 
9083 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9084 				     &pcix_cmd);
9085 		pcix_cmd &= ~PCI_X_CMD_ERO;
9086 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9087 				      pcix_cmd);
9088 	}
9089 
9090 	if (tg3_flag(tp, 5780_CLASS)) {
9091 
9092 		/* Chip reset on 5780 will reset the MSI enable bit,
9093 		 * so we need to restore it.
9094 		 */
9095 		if (tg3_flag(tp, USING_MSI)) {
9096 			u16 ctrl;
9097 
9098 			pci_read_config_word(tp->pdev,
9099 					     tp->msi_cap + PCI_MSI_FLAGS,
9100 					     &ctrl);
9101 			pci_write_config_word(tp->pdev,
9102 					      tp->msi_cap + PCI_MSI_FLAGS,
9103 					      ctrl | PCI_MSI_FLAGS_ENABLE);
9104 			val = tr32(MSGINT_MODE);
9105 			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9106 		}
9107 	}
9108 }
9109 
9110 static void tg3_override_clk(struct tg3 *tp)
9111 {
9112 	u32 val;
9113 
9114 	switch (tg3_asic_rev(tp)) {
9115 	case ASIC_REV_5717:
9116 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9117 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9118 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9119 		break;
9120 
9121 	case ASIC_REV_5719:
9122 	case ASIC_REV_5720:
9123 		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9124 		break;
9125 
9126 	default:
9127 		return;
9128 	}
9129 }
9130 
9131 static void tg3_restore_clk(struct tg3 *tp)
9132 {
9133 	u32 val;
9134 
9135 	switch (tg3_asic_rev(tp)) {
9136 	case ASIC_REV_5717:
9137 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9138 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9139 		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9140 		break;
9141 
9142 	case ASIC_REV_5719:
9143 	case ASIC_REV_5720:
9144 		val = tr32(TG3_CPMU_CLCK_ORIDE);
9145 		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9146 		break;
9147 
9148 	default:
9149 		return;
9150 	}
9151 }
9152 
9153 /* tp->lock is held. */
9154 static int tg3_chip_reset(struct tg3 *tp)
9155 	__releases(tp->lock)
9156 	__acquires(tp->lock)
9157 {
9158 	u32 val;
9159 	void (*write_op)(struct tg3 *, u32, u32);
9160 	int i, err;
9161 
9162 	if (!pci_device_is_present(tp->pdev))
9163 		return -ENODEV;
9164 
9165 	tg3_nvram_lock(tp);
9166 
9167 	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9168 
9169 	/* No matching tg3_nvram_unlock() after this because
9170 	 * the chip reset below will undo the nvram lock.
9171 	 */
9172 	tp->nvram_lock_cnt = 0;
9173 
9174 	/* GRC_MISC_CFG core clock reset will clear the memory
9175 	 * enable bit in PCI register 4 and the MSI enable bit
9176 	 * on some chips, so we save relevant registers here.
9177 	 */
9178 	tg3_save_pci_state(tp);
9179 
9180 	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9181 	    tg3_flag(tp, 5755_PLUS))
9182 		tw32(GRC_FASTBOOT_PC, 0);
9183 
9184 	/*
9185 	 * We must avoid the readl() that normally takes place.
9186 	 * It locks machines, causes machine checks, and other
9187 	 * fun things.  So, temporarily disable the 5701
9188 	 * hardware workaround while we do the reset.
9189 	 */
9190 	write_op = tp->write32;
9191 	if (write_op == tg3_write_flush_reg32)
9192 		tp->write32 = tg3_write32;
9193 
9194 	/* Prevent the irq handler from reading or writing PCI registers
9195 	 * during chip reset when the memory enable bit in the PCI command
9196 	 * register may be cleared.  The chip does not generate interrupt
9197 	 * at this time, but the irq handler may still be called due to irq
9198 	 * sharing or irqpoll.
9199 	 */
9200 	tg3_flag_set(tp, CHIP_RESETTING);
9201 	for (i = 0; i < tp->irq_cnt; i++) {
9202 		struct tg3_napi *tnapi = &tp->napi[i];
9203 		if (tnapi->hw_status) {
9204 			tnapi->hw_status->status = 0;
9205 			tnapi->hw_status->status_tag = 0;
9206 		}
9207 		tnapi->last_tag = 0;
9208 		tnapi->last_irq_tag = 0;
9209 	}
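	/* Make the cleared status and tag fields visible to any CPU still
	 * running the IRQ handler before synchronizing below.
	 */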
9210 	smp_mb();
9211 
9212 	tg3_full_unlock(tp);
9213 
9214 	for (i = 0; i < tp->irq_cnt; i++)
9215 		synchronize_irq(tp->napi[i].irq_vec);
9216 
9217 	tg3_full_lock(tp, 0);
9218 
9219 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9220 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9221 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9222 	}
9223 
9224 	/* do the reset */
9225 	val = GRC_MISC_CFG_CORECLK_RESET;
9226 
9227 	if (tg3_flag(tp, PCI_EXPRESS)) {
9228 		/* Force PCIe 1.0a mode */
9229 		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9230 		    !tg3_flag(tp, 57765_PLUS) &&
9231 		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9232 		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9233 			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9234 
9235 		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9236 			tw32(GRC_MISC_CFG, (1 << 29));
9237 			val |= (1 << 29);
9238 		}
9239 	}
9240 
9241 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9242 		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9243 		tw32(GRC_VCPU_EXT_CTRL,
9244 		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9245 	}
9246 
9247 	/* Set the clock to the highest frequency to avoid timeouts. With
9248 	 * link-aware mode the clock speed could be slow and the bootcode may
9249 	 * not complete within the expected time. Override the clock to let
9250 	 * the bootcode finish sooner, then restore it.
9251 	 */
9252 	tg3_override_clk(tp);
9253 
9254 	/* Manage gphy power for all CPMU absent PCIe devices. */
9255 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9256 		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9257 
9258 	tw32(GRC_MISC_CFG, val);
9259 
9260 	/* restore 5701 hardware bug workaround write method */
9261 	tp->write32 = write_op;
9262 
9263 	/* Unfortunately, we have to delay before the PCI read back.
9264 	 * Some 575X chips will not even respond to a PCI cfg access
9265 	 * when the reset command is given to the chip.
9266 	 *
9267 	 * How do these hardware designers expect things to work
9268 	 * properly if the PCI write is posted for a long period
9269 	 * of time?  It is always necessary to have some method by
9270 	 * which a register read back can occur to push the write
9271 	 * out which does the reset.
9272 	 *
9273 	 * For most tg3 variants the trick below was working.
9274 	 * Ho hum...
9275 	 */
9276 	udelay(120);
9277 
9278 	/* Flush PCI posted writes.  The normal MMIO registers
9279 	 * are inaccessible at this time so this is the only
9280 	 * way to do this reliably (actually, this is no longer
9281 	 * the case, see above).  I tried to use indirect
9282 	 * register read/write but this upset some 5701 variants.
9283 	 */
9284 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9285 
9286 	udelay(120);
9287 
9288 	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9289 		u16 val16;
9290 
9291 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9292 			int j;
9293 			u32 cfg_val;
9294 
9295 			/* Wait for link training to complete.  */
9296 			for (j = 0; j < 5000; j++)
9297 				udelay(100);
9298 
9299 			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9300 			pci_write_config_dword(tp->pdev, 0xc4,
9301 					       cfg_val | (1 << 15));
9302 		}
9303 
9304 		/* Clear the "no snoop" and "relaxed ordering" bits. */
9305 		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9306 		/*
9307 		 * Older PCIe devices only support the 128 byte
9308 		 * MPS setting.  Enforce the restriction.
9309 		 */
9310 		if (!tg3_flag(tp, CPMU_PRESENT))
9311 			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9312 		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9313 
9314 		/* Clear error status */
9315 		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9316 				      PCI_EXP_DEVSTA_CED |
9317 				      PCI_EXP_DEVSTA_NFED |
9318 				      PCI_EXP_DEVSTA_FED |
9319 				      PCI_EXP_DEVSTA_URD);
9320 	}
9321 
9322 	tg3_restore_pci_state(tp);
9323 
9324 	tg3_flag_clear(tp, CHIP_RESETTING);
9325 	tg3_flag_clear(tp, ERROR_PROCESSED);
9326 
9327 	val = 0;
9328 	if (tg3_flag(tp, 5780_CLASS))
9329 		val = tr32(MEMARB_MODE);
9330 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9331 
9332 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9333 		tg3_stop_fw(tp);
9334 		tw32(0x5000, 0x400);
9335 	}
9336 
9337 	if (tg3_flag(tp, IS_SSB_CORE)) {
9338 		/*
9339 		 * BCM4785: Stop the Rx RISC CPU, which is not required,
9340 		 * to avoid repercussions from using the potentially
9341 		 * defective internal ROM.
9342 		 */
9343 		tg3_stop_fw(tp);
9344 		tg3_halt_cpu(tp, RX_CPU_BASE);
9345 	}
9346 
9347 	err = tg3_poll_fw(tp);
9348 	if (err)
9349 		return err;
9350 
9351 	tw32(GRC_MODE, tp->grc_mode);
9352 
9353 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9354 		val = tr32(0xc4);
9355 
9356 		tw32(0xc4, val | (1 << 15));
9357 	}
9358 
9359 	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9360 	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9361 		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9362 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9363 			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9364 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9365 	}
9366 
9367 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9368 		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9369 		val = tp->mac_mode;
9370 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9371 		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9372 		val = tp->mac_mode;
9373 	} else
9374 		val = 0;
9375 
9376 	tw32_f(MAC_MODE, val);
9377 	udelay(40);
9378 
9379 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9380 
9381 	tg3_mdio_start(tp);
9382 
9383 	if (tg3_flag(tp, PCI_EXPRESS) &&
9384 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9385 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9386 	    !tg3_flag(tp, 57765_PLUS)) {
9387 		val = tr32(0x7c00);
9388 
9389 		tw32(0x7c00, val | (1 << 25));
9390 	}
9391 
9392 	tg3_restore_clk(tp);
9393 
9394 	/* Increase the core clock speed to fix tx timeout issue for 5762
9395 	 * with 100Mbps link speed.
9396 	 */
9397 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9398 		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9399 		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9400 		     TG3_CPMU_MAC_ORIDE_ENABLE);
9401 	}
9402 
9403 	/* Reprobe ASF enable state.  */
9404 	tg3_flag_clear(tp, ENABLE_ASF);
9405 	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9406 			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9407 
9408 	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9409 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9410 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9411 		u32 nic_cfg;
9412 
9413 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9414 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9415 			tg3_flag_set(tp, ENABLE_ASF);
9416 			tp->last_event_jiffies = jiffies;
9417 			if (tg3_flag(tp, 5750_PLUS))
9418 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9419 
9420 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9421 			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9422 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9423 			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9424 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9425 		}
9426 	}
9427 
9428 	return 0;
9429 }
9430 
9431 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9432 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9433 static void __tg3_set_rx_mode(struct net_device *);
9434 
9435 /* tp->lock is held. */
9436 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9437 {
9438 	int err, i;
9439 
9440 	tg3_stop_fw(tp);
9441 
9442 	tg3_write_sig_pre_reset(tp, kind);
9443 
9444 	tg3_abort_hw(tp, silent);
9445 	err = tg3_chip_reset(tp);
9446 
9447 	__tg3_set_mac_addr(tp, false);
9448 
9449 	tg3_write_sig_legacy(tp, kind);
9450 	tg3_write_sig_post_reset(tp, kind);
9451 
9452 	if (tp->hw_stats) {
9453 		/* Save the stats across chip resets... */
9454 		tg3_get_nstats(tp, &tp->net_stats_prev);
9455 		tg3_get_estats(tp, &tp->estats_prev);
9456 
9457 		/* And make sure the next sample is new data */
9458 		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9459 
9460 		for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9461 			struct tg3_napi *tnapi = &tp->napi[i];
9462 
9463 			tnapi->rx_dropped = 0;
9464 			tnapi->tx_dropped = 0;
9465 		}
9466 	}
9467 
9468 	return err;
9469 }
9470 
9471 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9472 {
9473 	struct tg3 *tp = netdev_priv(dev);
9474 	struct sockaddr *addr = p;
9475 	int err = 0;
9476 	bool skip_mac_1 = false;
9477 
9478 	if (!is_valid_ether_addr(addr->sa_data))
9479 		return -EADDRNOTAVAIL;
9480 
9481 	eth_hw_addr_set(dev, addr->sa_data);
9482 
9483 	if (!netif_running(dev))
9484 		return 0;
9485 
9486 	if (tg3_flag(tp, ENABLE_ASF)) {
9487 		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9488 
9489 		addr0_high = tr32(MAC_ADDR_0_HIGH);
9490 		addr0_low = tr32(MAC_ADDR_0_LOW);
9491 		addr1_high = tr32(MAC_ADDR_1_HIGH);
9492 		addr1_low = tr32(MAC_ADDR_1_LOW);
9493 
9494 		/* Skip MAC addr 1 if ASF is using it. */
9495 		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9496 		    !(addr1_high == 0 && addr1_low == 0))
9497 			skip_mac_1 = true;
9498 	}
9499 	spin_lock_bh(&tp->lock);
9500 	__tg3_set_mac_addr(tp, skip_mac_1);
9501 	__tg3_set_rx_mode(dev);
9502 	spin_unlock_bh(&tp->lock);
9503 
9504 	return err;
9505 }
9506 
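/* Write one TG3_BDINFO ring control block into NIC SRAM: the 64-bit host
 * DMA address of the ring, the maxlen/flags word, and (only on pre-5705
 * chips) the ring's NIC-local SRAM address.
 */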
9507 /* tp->lock is held. */
9508 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9509 			   dma_addr_t mapping, u32 maxlen_flags,
9510 			   u32 nic_addr)
9511 {
9512 	tg3_write_mem(tp,
9513 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9514 		      ((u64) mapping >> 32));
9515 	tg3_write_mem(tp,
9516 		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9517 		      ((u64) mapping & 0xffffffff));
9518 	tg3_write_mem(tp,
9519 		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9520 		       maxlen_flags);
9521 
9522 	if (!tg3_flag(tp, 5705_PLUS))
9523 		tg3_write_mem(tp,
9524 			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9525 			      nic_addr);
9526 }
9527 
9528 
9529 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9530 {
9531 	int i = 0;
9532 
9533 	if (!tg3_flag(tp, ENABLE_TSS)) {
9534 		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9535 		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9536 		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9537 	} else {
9538 		tw32(HOSTCC_TXCOL_TICKS, 0);
9539 		tw32(HOSTCC_TXMAX_FRAMES, 0);
9540 		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9541 
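		/* Per-vector coalescing registers are spaced 0x18 bytes
		 * apart, starting at the VEC1 offsets.
		 */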
9542 		for (; i < tp->txq_cnt; i++) {
9543 			u32 reg;
9544 
9545 			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9546 			tw32(reg, ec->tx_coalesce_usecs);
9547 			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9548 			tw32(reg, ec->tx_max_coalesced_frames);
9549 			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9550 			tw32(reg, ec->tx_max_coalesced_frames_irq);
9551 		}
9552 	}
9553 
9554 	for (; i < tp->irq_max - 1; i++) {
9555 		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9556 		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9557 		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9558 	}
9559 }
9560 
9561 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9562 {
9563 	int i = 0;
9564 	u32 limit = tp->rxq_cnt;
9565 
9566 	if (!tg3_flag(tp, ENABLE_RSS)) {
9567 		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9568 		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9569 		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9570 		limit--;
9571 	} else {
9572 		tw32(HOSTCC_RXCOL_TICKS, 0);
9573 		tw32(HOSTCC_RXMAX_FRAMES, 0);
9574 		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9575 	}
9576 
9577 	for (; i < limit; i++) {
9578 		u32 reg;
9579 
9580 		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9581 		tw32(reg, ec->rx_coalesce_usecs);
9582 		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9583 		tw32(reg, ec->rx_max_coalesced_frames);
9584 		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9585 		tw32(reg, ec->rx_max_coalesced_frames_irq);
9586 	}
9587 
9588 	for (; i < tp->irq_max - 1; i++) {
9589 		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9590 		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9591 		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9592 	}
9593 }
9594 
9595 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9596 {
9597 	tg3_coal_tx_init(tp, ec);
9598 	tg3_coal_rx_init(tp, ec);
9599 
9600 	if (!tg3_flag(tp, 5705_PLUS)) {
9601 		u32 val = ec->stats_block_coalesce_usecs;
9602 
9603 		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9604 		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9605 
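		/* Writing 0 below stops the periodic stats block updates
		 * while the link is down.
		 */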
9606 		if (!tp->link_up)
9607 			val = 0;
9608 
9609 		tw32(HOSTCC_STAT_COAL_TICKS, val);
9610 	}
9611 }
9612 
9613 /* tp->lock is held. */
9614 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9615 {
9616 	u32 txrcb, limit;
9617 
9618 	/* Disable all transmit rings but the first. */
9619 	if (!tg3_flag(tp, 5705_PLUS))
9620 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9621 	else if (tg3_flag(tp, 5717_PLUS))
9622 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9623 	else if (tg3_flag(tp, 57765_CLASS) ||
9624 		 tg3_asic_rev(tp) == ASIC_REV_5762)
9625 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9626 	else
9627 		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9628 
9629 	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9630 	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9631 		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9632 			      BDINFO_FLAGS_DISABLED);
9633 }
9634 
9635 /* tp->lock is held. */
9636 static void tg3_tx_rcbs_init(struct tg3 *tp)
9637 {
9638 	int i = 0;
9639 	u32 txrcb = NIC_SRAM_SEND_RCB;
9640 
9641 	if (tg3_flag(tp, ENABLE_TSS))
9642 		i++;
9643 
9644 	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9645 		struct tg3_napi *tnapi = &tp->napi[i];
9646 
9647 		if (!tnapi->tx_ring)
9648 			continue;
9649 
9650 		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9651 			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9652 			       NIC_SRAM_TX_BUFFER_DESC);
9653 	}
9654 }
9655 
9656 /* tp->lock is held. */
9657 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9658 {
9659 	u32 rxrcb, limit;
9660 
9661 	/* Disable all receive return rings but the first. */
9662 	if (tg3_flag(tp, 5717_PLUS))
9663 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9664 	else if (!tg3_flag(tp, 5705_PLUS))
9665 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9666 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9667 		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9668 		 tg3_flag(tp, 57765_CLASS))
9669 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9670 	else
9671 		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9672 
9673 	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9674 	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9675 		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9676 			      BDINFO_FLAGS_DISABLED);
9677 }
9678 
9679 /* tp->lock is held. */
9680 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9681 {
9682 	int i = 0;
9683 	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9684 
9685 	if (tg3_flag(tp, ENABLE_RSS))
9686 		i++;
9687 
9688 	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9689 		struct tg3_napi *tnapi = &tp->napi[i];
9690 
9691 		if (!tnapi->rx_rcb)
9692 			continue;
9693 
9694 		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9695 			       (tp->rx_ret_ring_mask + 1) <<
9696 				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9697 	}
9698 }
9699 
9700 /* tp->lock is held. */
9701 static void tg3_rings_reset(struct tg3 *tp)
9702 {
9703 	int i;
9704 	u32 stblk;
9705 	struct tg3_napi *tnapi = &tp->napi[0];
9706 
9707 	tg3_tx_rcbs_disable(tp);
9708 
9709 	tg3_rx_ret_rcbs_disable(tp);
9710 
9711 	/* Disable interrupts */
9712 	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9713 	tp->napi[0].chk_msi_cnt = 0;
9714 	tp->napi[0].last_rx_cons = 0;
9715 	tp->napi[0].last_tx_cons = 0;
9716 
9717 	/* Zero mailbox registers. */
9718 	if (tg3_flag(tp, SUPPORT_MSIX)) {
9719 		for (i = 1; i < tp->irq_max; i++) {
9720 			tp->napi[i].tx_prod = 0;
9721 			tp->napi[i].tx_cons = 0;
9722 			if (tg3_flag(tp, ENABLE_TSS))
9723 				tw32_mailbox(tp->napi[i].prodmbox, 0);
9724 			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9725 			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9726 			tp->napi[i].chk_msi_cnt = 0;
9727 			tp->napi[i].last_rx_cons = 0;
9728 			tp->napi[i].last_tx_cons = 0;
9729 		}
9730 		if (!tg3_flag(tp, ENABLE_TSS))
9731 			tw32_mailbox(tp->napi[0].prodmbox, 0);
9732 	} else {
9733 		tp->napi[0].tx_prod = 0;
9734 		tp->napi[0].tx_cons = 0;
9735 		tw32_mailbox(tp->napi[0].prodmbox, 0);
9736 		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9737 	}
9738 
9739 	/* Make sure the NIC-based send BD rings are disabled. */
9740 	if (!tg3_flag(tp, 5705_PLUS)) {
9741 		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9742 		for (i = 0; i < 16; i++)
9743 			tw32_tx_mbox(mbox + i * 8, 0);
9744 	}
9745 
9746 	/* Clear status block in ram. */
9747 	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9748 
9749 	/* Set status block DMA address */
9750 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9751 	     ((u64) tnapi->status_mapping >> 32));
9752 	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9753 	     ((u64) tnapi->status_mapping & 0xffffffff));
9754 
9755 	stblk = HOSTCC_STATBLCK_RING1;
9756 
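	/* Program the remaining vectors' status block addresses; each
	 * high/low register pair sits 8 bytes above the previous one.
	 */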
9757 	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9758 		u64 mapping = (u64)tnapi->status_mapping;
9759 		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9760 		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9761 		stblk += 8;
9762 
9763 		/* Clear status block in ram. */
9764 		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9765 	}
9766 
9767 	tg3_tx_rcbs_init(tp);
9768 	tg3_rx_ret_rcbs_init(tp);
9769 }
9770 
9771 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9772 {
9773 	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9774 
9775 	if (!tg3_flag(tp, 5750_PLUS) ||
9776 	    tg3_flag(tp, 5780_CLASS) ||
9777 	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9778 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9779 	    tg3_flag(tp, 57765_PLUS))
9780 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9781 	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9782 		 tg3_asic_rev(tp) == ASIC_REV_5787)
9783 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9784 	else
9785 		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9786 
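	/* The replenish threshold is the smallest of half the NIC's BD
	 * cache, the max post count, and an eighth of the host ring
	 * (at least one entry).
	 */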
9787 	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9788 	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9789 
9790 	val = min(nic_rep_thresh, host_rep_thresh);
9791 	tw32(RCVBDI_STD_THRESH, val);
9792 
9793 	if (tg3_flag(tp, 57765_PLUS))
9794 		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9795 
9796 	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9797 		return;
9798 
9799 	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9800 
9801 	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9802 
9803 	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9804 	tw32(RCVBDI_JUMBO_THRESH, val);
9805 
9806 	if (tg3_flag(tp, 57765_PLUS))
9807 		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9808 }
9809 
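/* CRC-32 with an all-ones seed and a final inversion.  __tg3_set_rx_mode()
 * below derives the multicast hash register index from the inverted low
 * bits of this value.
 */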
9810 static inline u32 calc_crc(unsigned char *buf, int len)
9811 {
9812 	return ~crc32(~0, buf, len);
9813 }
9814 
9815 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9816 {
9817 	/* accept or reject all multicast frames */
9818 	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9819 	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9820 	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9821 	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9822 }
9823 
9824 static void __tg3_set_rx_mode(struct net_device *dev)
9825 {
9826 	struct tg3 *tp = netdev_priv(dev);
9827 	u32 rx_mode;
9828 
9829 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9830 				  RX_MODE_KEEP_VLAN_TAG);
9831 
9832 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9833 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9834 	 * flag clear.
9835 	 */
9836 	if (!tg3_flag(tp, ENABLE_ASF))
9837 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9838 #endif
9839 
9840 	if (dev->flags & IFF_PROMISC) {
9841 		/* Promiscuous mode. */
9842 		rx_mode |= RX_MODE_PROMISC;
9843 	} else if (dev->flags & IFF_ALLMULTI) {
9844 		/* Accept all multicast. */
9845 		tg3_set_multi(tp, 1);
9846 	} else if (netdev_mc_empty(dev)) {
9847 		/* Reject all multicast. */
9848 		tg3_set_multi(tp, 0);
9849 	} else {
9850 		/* Accept one or more multicast addresses. */
9851 		struct netdev_hw_addr *ha;
9852 		u32 mc_filter[4] = { 0, };
9853 		u32 regidx;
9854 		u32 bit;
9855 		u32 crc;
9856 
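		/* Hash each address into the 128-bit table: bits 6:5 of
		 * the inverted CRC select one of the four hash registers,
		 * bits 4:0 select the bit within it.
		 */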
9857 		netdev_for_each_mc_addr(ha, dev) {
9858 			crc = calc_crc(ha->addr, ETH_ALEN);
9859 			bit = ~crc & 0x7f;
9860 			regidx = (bit & 0x60) >> 5;
9861 			bit &= 0x1f;
9862 			mc_filter[regidx] |= (1 << bit);
9863 		}
9864 
9865 		tw32(MAC_HASH_REG_0, mc_filter[0]);
9866 		tw32(MAC_HASH_REG_1, mc_filter[1]);
9867 		tw32(MAC_HASH_REG_2, mc_filter[2]);
9868 		tw32(MAC_HASH_REG_3, mc_filter[3]);
9869 	}
9870 
9871 	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9872 		rx_mode |= RX_MODE_PROMISC;
9873 	} else if (!(dev->flags & IFF_PROMISC)) {
9874 		/* Add all entries to the MAC addr filter list */
9875 		int i = 0;
9876 		struct netdev_hw_addr *ha;
9877 
9878 		netdev_for_each_uc_addr(ha, dev) {
9879 			__tg3_set_one_mac_addr(tp, ha->addr,
9880 					       i + TG3_UCAST_ADDR_IDX(tp));
9881 			i++;
9882 		}
9883 	}
9884 
9885 	if (rx_mode != tp->rx_mode) {
9886 		tp->rx_mode = rx_mode;
9887 		tw32_f(MAC_RX_MODE, rx_mode);
9888 		udelay(10);
9889 	}
9890 }
9891 
9892 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9893 {
9894 	int i;
9895 
9896 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9897 		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9898 }
9899 
9900 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9901 {
9902 	int i;
9903 
9904 	if (!tg3_flag(tp, SUPPORT_MSIX))
9905 		return;
9906 
9907 	if (tp->rxq_cnt == 1) {
9908 		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9909 		return;
9910 	}
9911 
9912 	/* Validate table against current IRQ count */
9913 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9914 		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9915 			break;
9916 	}
9917 
9918 	if (i != TG3_RSS_INDIR_TBL_SIZE)
9919 		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9920 }
9921 
9922 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9923 {
9924 	int i = 0;
9925 	u32 reg = MAC_RSS_INDIR_TBL_0;
9926 
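	/* Pack eight 4-bit table entries into each 32-bit register, with
	 * the first entry in the most significant nibble.
	 */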
9927 	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9928 		u32 val = tp->rss_ind_tbl[i];
9929 		i++;
9930 		for (; i % 8; i++) {
9931 			val <<= 4;
9932 			val |= tp->rss_ind_tbl[i];
9933 		}
9934 		tw32(reg, val);
9935 		reg += 4;
9936 	}
9937 }
9938 
9939 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9940 {
9941 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9942 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9943 	else
9944 		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9945 }
9946 
9947 /* tp->lock is held. */
9948 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9949 {
9950 	u32 val, rdmac_mode;
9951 	int i, err, limit;
9952 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9953 
9954 	tg3_disable_ints(tp);
9955 
9956 	tg3_stop_fw(tp);
9957 
9958 	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9959 
9960 	if (tg3_flag(tp, INIT_COMPLETE))
9961 		tg3_abort_hw(tp, true);
9962 
9963 	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9964 	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9965 		tg3_phy_pull_config(tp);
9966 		tg3_eee_pull_config(tp, NULL);
9967 		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9968 	}
9969 
9970 	/* Enable MAC control of LPI */
9971 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9972 		tg3_setup_eee(tp);
9973 
9974 	if (reset_phy)
9975 		tg3_phy_reset(tp);
9976 
9977 	err = tg3_chip_reset(tp);
9978 	if (err)
9979 		return err;
9980 
9981 	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9982 
9983 	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9984 		val = tr32(TG3_CPMU_CTRL);
9985 		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9986 		tw32(TG3_CPMU_CTRL, val);
9987 
9988 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9989 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9990 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9991 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9992 
9993 		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9994 		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9995 		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9996 		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9997 
9998 		val = tr32(TG3_CPMU_HST_ACC);
9999 		val &= ~CPMU_HST_ACC_MACCLK_MASK;
10000 		val |= CPMU_HST_ACC_MACCLK_6_25;
10001 		tw32(TG3_CPMU_HST_ACC, val);
10002 	}
10003 
10004 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10005 		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10006 		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10007 		       PCIE_PWR_MGMT_L1_THRESH_4MS;
10008 		tw32(PCIE_PWR_MGMT_THRESH, val);
10009 
10010 		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10011 		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10012 
10013 		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10014 
10015 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10016 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10017 	}
10018 
10019 	if (tg3_flag(tp, L1PLLPD_EN)) {
10020 		u32 grc_mode = tr32(GRC_MODE);
10021 
10022 		/* Access the lower 1K of PL PCIE block registers. */
10023 		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10024 		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10025 
10026 		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10027 		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10028 		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10029 
10030 		tw32(GRC_MODE, grc_mode);
10031 	}
10032 
10033 	if (tg3_flag(tp, 57765_CLASS)) {
10034 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10035 			u32 grc_mode = tr32(GRC_MODE);
10036 
10037 			/* Access the lower 1K of PL PCIE block registers. */
10038 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10039 			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10040 
10041 			val = tr32(TG3_PCIE_TLDLPL_PORT +
10042 				   TG3_PCIE_PL_LO_PHYCTL5);
10043 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10044 			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10045 
10046 			tw32(GRC_MODE, grc_mode);
10047 		}
10048 
10049 		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10050 			u32 grc_mode;
10051 
10052 			/* Fix transmit hangs */
10053 			val = tr32(TG3_CPMU_PADRNG_CTL);
10054 			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10055 			tw32(TG3_CPMU_PADRNG_CTL, val);
10056 
10057 			grc_mode = tr32(GRC_MODE);
10058 
10059 			/* Access the lower 1K of DL PCIE block registers. */
10060 			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10061 			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10062 
10063 			val = tr32(TG3_PCIE_TLDLPL_PORT +
10064 				   TG3_PCIE_DL_LO_FTSMAX);
10065 			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10066 			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10067 			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10068 
10069 			tw32(GRC_MODE, grc_mode);
10070 		}
10071 
10072 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10073 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10074 		val |= CPMU_LSPD_10MB_MACCLK_6_25;
10075 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10076 	}
10077 
10078 	/* This works around an issue with Athlon chipsets on
10079 	 * B3 tigon3 silicon.  This bit has no effect on any
10080 	 * other revision.  But do not set this on PCI Express
10081 	 * chips and don't even touch the clocks if the CPMU is present.
10082 	 */
10083 	if (!tg3_flag(tp, CPMU_PRESENT)) {
10084 		if (!tg3_flag(tp, PCI_EXPRESS))
10085 			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10086 		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10087 	}
10088 
10089 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10090 	    tg3_flag(tp, PCIX_MODE)) {
10091 		val = tr32(TG3PCI_PCISTATE);
10092 		val |= PCISTATE_RETRY_SAME_DMA;
10093 		tw32(TG3PCI_PCISTATE, val);
10094 	}
10095 
10096 	if (tg3_flag(tp, ENABLE_APE)) {
10097 		/* Allow reads and writes to the
10098 		 * APE register and memory space.
10099 		 */
10100 		val = tr32(TG3PCI_PCISTATE);
10101 		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10102 		       PCISTATE_ALLOW_APE_SHMEM_WR |
10103 		       PCISTATE_ALLOW_APE_PSPACE_WR;
10104 		tw32(TG3PCI_PCISTATE, val);
10105 	}
10106 
10107 	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10108 		/* Enable some hw fixes.  */
10109 		val = tr32(TG3PCI_MSI_DATA);
10110 		val |= (1 << 26) | (1 << 28) | (1 << 29);
10111 		tw32(TG3PCI_MSI_DATA, val);
10112 	}
10113 
10114 	/* Descriptor ring init may make accesses to the
10115 	 * NIC SRAM area to setup the TX descriptors, so we
10116 	 * can only do this after the hardware has been
10117 	 * successfully reset.
10118 	 */
10119 	err = tg3_init_rings(tp);
10120 	if (err)
10121 		return err;
10122 
10123 	if (tg3_flag(tp, 57765_PLUS)) {
10124 		val = tr32(TG3PCI_DMA_RW_CTRL) &
10125 		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10126 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10127 			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10128 		if (!tg3_flag(tp, 57765_CLASS) &&
10129 		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10130 		    tg3_asic_rev(tp) != ASIC_REV_5762)
10131 			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10132 		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10133 	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10134 		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10135 		/* This value is determined during the probe time DMA
10136 		 * engine test, tg3_test_dma.
10137 		 */
10138 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10139 	}
10140 
10141 	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10142 			  GRC_MODE_4X_NIC_SEND_RINGS |
10143 			  GRC_MODE_NO_TX_PHDR_CSUM |
10144 			  GRC_MODE_NO_RX_PHDR_CSUM);
10145 	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10146 
10147 	/* Pseudo-header checksum is done by hardware logic and not
10148 	 * the offload processors, so make the chip do the pseudo-
10149 	 * header checksums on receive.  For transmit it is more
10150 	 * convenient to do the pseudo-header checksum in software
10151 	 * as Linux does that on transmit for us in all cases.
10152 	 */
10153 	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10154 
10155 	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10156 	if (tp->rxptpctl)
10157 		tw32(TG3_RX_PTP_CTL,
10158 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10159 
10160 	if (tg3_flag(tp, PTP_CAPABLE))
10161 		val |= GRC_MODE_TIME_SYNC_ENABLE;
10162 
10163 	tw32(GRC_MODE, tp->grc_mode | val);
10164 
10165 	/* On one AMD platform, MRRS is restricted to 4000 because of a
10166 	 * south bridge limitation. As a workaround, the driver sets MRRS
10167 	 * to 2048 instead of the default 4096.
10168 	 */
10169 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10170 	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10171 		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10172 		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10173 	}
10174 
10175 	/* Set up the timer prescaler register.  The clock is always 66 MHz. */
10176 	val = tr32(GRC_MISC_CFG);
10177 	val &= ~0xff;
10178 	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10179 	tw32(GRC_MISC_CFG, val);
10180 
10181 	/* Initialize MBUF/DESC pool. */
10182 	if (tg3_flag(tp, 5750_PLUS)) {
10183 		/* Do nothing.  */
10184 	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10185 		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10186 		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10187 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10188 		else
10189 			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10190 		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10191 		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10192 	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10193 		int fw_len;
10194 
10195 		fw_len = tp->fw_len;
10196 		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10197 		tw32(BUFMGR_MB_POOL_ADDR,
10198 		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10199 		tw32(BUFMGR_MB_POOL_SIZE,
10200 		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10201 	}
10202 
10203 	if (tp->dev->mtu <= ETH_DATA_LEN) {
10204 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10205 		     tp->bufmgr_config.mbuf_read_dma_low_water);
10206 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10207 		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10208 		tw32(BUFMGR_MB_HIGH_WATER,
10209 		     tp->bufmgr_config.mbuf_high_water);
10210 	} else {
10211 		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10212 		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10213 		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10214 		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10215 		tw32(BUFMGR_MB_HIGH_WATER,
10216 		     tp->bufmgr_config.mbuf_high_water_jumbo);
10217 	}
10218 	tw32(BUFMGR_DMA_LOW_WATER,
10219 	     tp->bufmgr_config.dma_low_water);
10220 	tw32(BUFMGR_DMA_HIGH_WATER,
10221 	     tp->bufmgr_config.dma_high_water);
10222 
10223 	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10224 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10225 		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10226 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10227 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10228 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10229 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10230 		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10231 	tw32(BUFMGR_MODE, val);
10232 	for (i = 0; i < 2000; i++) {
10233 		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10234 			break;
10235 		udelay(10);
10236 	}
10237 	if (i >= 2000) {
10238 		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10239 		return -ENODEV;
10240 	}
10241 
10242 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10243 		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10244 
10245 	tg3_setup_rxbd_thresholds(tp);
10246 
10247 	/* Initialize TG3_BDINFO's at:
10248 	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10249 	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10250 	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10251 	 *
10252 	 * like so:
10253 	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10254 	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10255 	 *                              ring attribute flags
10256 	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10257 	 *
10258 	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10259 	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10260 	 *
10261 	 * The size of each ring is fixed in the firmware, but the location is
10262 	 * configurable.
10263 	 */
10264 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10265 	     ((u64) tpr->rx_std_mapping >> 32));
10266 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10267 	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10268 	if (!tg3_flag(tp, 5717_PLUS))
10269 		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10270 		     NIC_SRAM_RX_BUFFER_DESC);
10271 
10272 	/* Disable the mini ring */
10273 	if (!tg3_flag(tp, 5705_PLUS))
10274 		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10275 		     BDINFO_FLAGS_DISABLED);
10276 
10277 	/* Program the jumbo buffer descriptor ring control
10278 	 * blocks on those devices that have them.
10279 	 */
10280 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10281 	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10282 
10283 		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10284 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10285 			     ((u64) tpr->rx_jmb_mapping >> 32));
10286 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10287 			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10288 			val = TG3_RX_JMB_RING_SIZE(tp) <<
10289 			      BDINFO_FLAGS_MAXLEN_SHIFT;
10290 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10291 			     val | BDINFO_FLAGS_USE_EXT_RECV);
10292 			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10293 			    tg3_flag(tp, 57765_CLASS) ||
10294 			    tg3_asic_rev(tp) == ASIC_REV_5762)
10295 				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10296 				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10297 		} else {
10298 			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10299 			     BDINFO_FLAGS_DISABLED);
10300 		}
10301 
10302 		if (tg3_flag(tp, 57765_PLUS)) {
10303 			val = TG3_RX_STD_RING_SIZE(tp);
10304 			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10305 			val |= (TG3_RX_STD_DMA_SZ << 2);
10306 		} else
10307 			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10308 	} else
10309 		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10310 
10311 	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10312 
10313 	tpr->rx_std_prod_idx = tp->rx_pending;
10314 	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10315 
10316 	tpr->rx_jmb_prod_idx =
10317 		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10318 	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10319 
10320 	tg3_rings_reset(tp);
10321 
10322 	/* Initialize MAC address and backoff seed. */
10323 	__tg3_set_mac_addr(tp, false);
10324 
10325 	/* MTU + ethernet header + FCS + optional VLAN tag */
10326 	tw32(MAC_RX_MTU_SIZE,
10327 	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10328 
10329 	/* The slot time is changed by tg3_setup_phy if we
10330 	 * run at gigabit with half duplex.
10331 	 */
10332 	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10333 	      (6 << TX_LENGTHS_IPG_SHIFT) |
10334 	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10335 
10336 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10337 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10338 		val |= tr32(MAC_TX_LENGTHS) &
10339 		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10340 			TX_LENGTHS_CNT_DWN_VAL_MSK);
10341 
10342 	tw32(MAC_TX_LENGTHS, val);
10343 
10344 	/* Receive rules. */
10345 	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10346 	tw32(RCVLPC_CONFIG, 0x0181);
10347 
10348 	/* Calculate RDMAC_MODE setting early, we need it to determine
10349 	 * the RCVLPC_STATE_ENABLE mask.
10350 	 */
10351 	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10352 		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10353 		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10354 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10355 		      RDMAC_MODE_LNGREAD_ENAB);
10356 
10357 	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10358 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10359 
10360 	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10361 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10362 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10363 		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10364 			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10365 			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10366 
10367 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10368 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10369 		if (tg3_flag(tp, TSO_CAPABLE)) {
10370 			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10371 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10372 			   !tg3_flag(tp, IS_5788)) {
10373 			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10374 		}
10375 	}
10376 
10377 	if (tg3_flag(tp, PCI_EXPRESS))
10378 		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10379 
10380 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10381 		tp->dma_limit = 0;
10382 		if (tp->dev->mtu <= ETH_DATA_LEN) {
10383 			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10384 			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10385 		}
10386 	}
10387 
10388 	if (tg3_flag(tp, HW_TSO_1) ||
10389 	    tg3_flag(tp, HW_TSO_2) ||
10390 	    tg3_flag(tp, HW_TSO_3))
10391 		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10392 
10393 	if (tg3_flag(tp, 57765_PLUS) ||
10394 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10395 	    tg3_asic_rev(tp) == ASIC_REV_57780)
10396 		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10397 
10398 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10399 	    tg3_asic_rev(tp) == ASIC_REV_5762)
10400 		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10401 
10402 	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10403 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10404 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10405 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10406 	    tg3_flag(tp, 57765_PLUS)) {
10407 		u32 tgtreg;
10408 
10409 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10410 			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10411 		else
10412 			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10413 
10414 		val = tr32(tgtreg);
10415 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10416 		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10417 			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10418 				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10419 				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10420 			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10421 			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10422 			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10423 		}
10424 		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10425 	}
10426 
10427 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10428 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10429 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10430 		u32 tgtreg;
10431 
10432 		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10433 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10434 		else
10435 			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10436 
10437 		val = tr32(tgtreg);
10438 		tw32(tgtreg, val |
10439 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10440 		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10441 	}
10442 
10443 	/* Receive/send statistics. */
10444 	if (tg3_flag(tp, 5750_PLUS)) {
10445 		val = tr32(RCVLPC_STATS_ENABLE);
10446 		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10447 		tw32(RCVLPC_STATS_ENABLE, val);
10448 	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10449 		   tg3_flag(tp, TSO_CAPABLE)) {
10450 		val = tr32(RCVLPC_STATS_ENABLE);
10451 		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10452 		tw32(RCVLPC_STATS_ENABLE, val);
10453 	} else {
10454 		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10455 	}
10456 	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10457 	tw32(SNDDATAI_STATSENAB, 0xffffff);
10458 	tw32(SNDDATAI_STATSCTRL,
10459 	     (SNDDATAI_SCTRL_ENABLE |
10460 	      SNDDATAI_SCTRL_FASTUPD));
10461 
10462 	/* Setup host coalescing engine. */
10463 	tw32(HOSTCC_MODE, 0);
10464 	for (i = 0; i < 2000; i++) {
10465 		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10466 			break;
10467 		udelay(10);
10468 	}
10469 
10470 	__tg3_set_coalesce(tp, &tp->coal);
10471 
10472 	if (!tg3_flag(tp, 5705_PLUS)) {
10473 		/* Status/statistics block address.  See tg3_timer,
10474 		 * the tg3_periodic_fetch_stats call there, and
10475 		 * tg3_get_stats to see how this works for 5705/5750 chips.
10476 		 */
10477 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10478 		     ((u64) tp->stats_mapping >> 32));
10479 		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10480 		     ((u64) tp->stats_mapping & 0xffffffff));
10481 		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10482 
10483 		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10484 
10485 		/* Clear statistics and status block memory areas */
10486 		for (i = NIC_SRAM_STATS_BLK;
10487 		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10488 		     i += sizeof(u32)) {
10489 			tg3_write_mem(tp, i, 0);
10490 			udelay(40);
10491 		}
10492 	}
10493 
10494 	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10495 
10496 	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10497 	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10498 	if (!tg3_flag(tp, 5705_PLUS))
10499 		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10500 
10501 	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10502 		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10503 		/* reset to prevent losing 1st rx packet intermittently */
10504 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10505 		udelay(10);
10506 	}
10507 
10508 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10509 			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10510 			MAC_MODE_FHDE_ENABLE;
10511 	if (tg3_flag(tp, ENABLE_APE))
10512 		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10513 	if (!tg3_flag(tp, 5705_PLUS) &&
10514 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10515 	    tg3_asic_rev(tp) != ASIC_REV_5700)
10516 		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10517 	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10518 	udelay(40);
10519 
10520 	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10521 	 * If TG3_FLAG_IS_NIC is zero, we should read the
10522 	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10523 	 * whether used as inputs or outputs, are set by boot code after
10524 	 * reset.
10525 	 */
10526 	if (!tg3_flag(tp, IS_NIC)) {
10527 		u32 gpio_mask;
10528 
10529 		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10530 			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10531 			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10532 
10533 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10534 			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10535 				     GRC_LCLCTRL_GPIO_OUTPUT3;
10536 
10537 		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10538 			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10539 
10540 		tp->grc_local_ctrl &= ~gpio_mask;
10541 		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10542 
10543 		/* GPIO1 must be driven high for eeprom write protect */
10544 		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10545 			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10546 					       GRC_LCLCTRL_GPIO_OUTPUT1);
10547 	}
10548 	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10549 	udelay(100);
10550 
10551 	if (tg3_flag(tp, USING_MSIX)) {
10552 		val = tr32(MSGINT_MODE);
10553 		val |= MSGINT_MODE_ENABLE;
10554 		if (tp->irq_cnt > 1)
10555 			val |= MSGINT_MODE_MULTIVEC_EN;
10556 		if (!tg3_flag(tp, 1SHOT_MSI))
10557 			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10558 		tw32(MSGINT_MODE, val);
10559 	}
10560 
10561 	if (!tg3_flag(tp, 5705_PLUS)) {
10562 		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10563 		udelay(40);
10564 	}
10565 
10566 	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10567 	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10568 	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10569 	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10570 	       WDMAC_MODE_LNGREAD_ENAB);
10571 
10572 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10573 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10574 		if (tg3_flag(tp, TSO_CAPABLE) &&
10575 		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10576 		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10577 			/* nothing */
10578 		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10579 			   !tg3_flag(tp, IS_5788)) {
10580 			val |= WDMAC_MODE_RX_ACCEL;
10581 		}
10582 	}
10583 
10584 	/* Enable host coalescing bug fix */
10585 	if (tg3_flag(tp, 5755_PLUS))
10586 		val |= WDMAC_MODE_STATUS_TAG_FIX;
10587 
10588 	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10589 		val |= WDMAC_MODE_BURST_ALL_DATA;
10590 
10591 	tw32_f(WDMAC_MODE, val);
10592 	udelay(40);
10593 
10594 	if (tg3_flag(tp, PCIX_MODE)) {
10595 		u16 pcix_cmd;
10596 
10597 		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10598 				     &pcix_cmd);
10599 		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10600 			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10601 			pcix_cmd |= PCI_X_CMD_READ_2K;
10602 		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10603 			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10604 			pcix_cmd |= PCI_X_CMD_READ_2K;
10605 		}
10606 		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10607 				      pcix_cmd);
10608 	}
10609 
10610 	tw32_f(RDMAC_MODE, rdmac_mode);
10611 	udelay(40);
10612 
10613 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10614 	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10615 		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10616 			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10617 				break;
10618 		}
10619 		if (i < TG3_NUM_RDMA_CHANNELS) {
10620 			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10621 			val |= tg3_lso_rd_dma_workaround_bit(tp);
10622 			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10623 			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10624 		}
10625 	}
10626 
10627 	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10628 	if (!tg3_flag(tp, 5705_PLUS))
10629 		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10630 
10631 	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10632 		tw32(SNDDATAC_MODE,
10633 		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10634 	else
10635 		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10636 
10637 	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10638 	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10639 	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10640 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10641 		val |= RCVDBDI_MODE_LRG_RING_SZ;
10642 	tw32(RCVDBDI_MODE, val);
10643 	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10644 	if (tg3_flag(tp, HW_TSO_1) ||
10645 	    tg3_flag(tp, HW_TSO_2) ||
10646 	    tg3_flag(tp, HW_TSO_3))
10647 		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10648 	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10649 	if (tg3_flag(tp, ENABLE_TSS))
10650 		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10651 	tw32(SNDBDI_MODE, val);
10652 	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10653 
10654 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10655 		err = tg3_load_5701_a0_firmware_fix(tp);
10656 		if (err)
10657 			return err;
10658 	}
10659 
10660 	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10661 		/* Ignore any errors for the firmware download. If download
10662 		 * fails, the device will operate with EEE disabled.
10663 		 */
10664 		tg3_load_57766_firmware(tp);
10665 	}
10666 
10667 	if (tg3_flag(tp, TSO_CAPABLE)) {
10668 		err = tg3_load_tso_firmware(tp);
10669 		if (err)
10670 			return err;
10671 	}
10672 
10673 	tp->tx_mode = TX_MODE_ENABLE;
10674 
10675 	if (tg3_flag(tp, 5755_PLUS) ||
10676 	    tg3_asic_rev(tp) == ASIC_REV_5906)
10677 		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10678 
10679 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10680 	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10681 		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10682 		tp->tx_mode &= ~val;
10683 		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10684 	}
10685 
10686 	tw32_f(MAC_TX_MODE, tp->tx_mode);
10687 	udelay(100);
10688 
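	/* Program the RSS indirection table, then load the kernel's
	 * random RSS hash key (40 bytes) into the ten 32-bit MAC
	 * hash-key registers.
	 */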
10689 	if (tg3_flag(tp, ENABLE_RSS)) {
10690 		u32 rss_key[10];
10691 
10692 		tg3_rss_write_indir_tbl(tp);
10693 
10694 		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10695 
10696 		for (i = 0; i < 10; i++)
10697 			tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10698 	}
10699 
10700 	tp->rx_mode = RX_MODE_ENABLE;
10701 	if (tg3_flag(tp, 5755_PLUS))
10702 		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10703 
10704 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10705 		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10706 
10707 	if (tg3_flag(tp, ENABLE_RSS))
10708 		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10709 			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10710 			       RX_MODE_RSS_IPV6_HASH_EN |
10711 			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10712 			       RX_MODE_RSS_IPV4_HASH_EN |
10713 			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10714 
10715 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10716 	udelay(10);
10717 
10718 	tw32(MAC_LED_CTRL, tp->led_ctrl);
10719 
10720 	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10721 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10722 		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10723 		udelay(10);
10724 	}
10725 	tw32_f(MAC_RX_MODE, tp->rx_mode);
10726 	udelay(10);
10727 
10728 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10729 		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10730 		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10731 			/* Set drive transmission level to 1.2V  */
10732 			/* only if the signal pre-emphasis bit is not set  */
10733 			val = tr32(MAC_SERDES_CFG);
10734 			val &= 0xfffff000;
10735 			val |= 0x880;
10736 			tw32(MAC_SERDES_CFG, val);
10737 		}
10738 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10739 			tw32(MAC_SERDES_CFG, 0x616000);
10740 	}
10741 
10742 	/* Prevent chip from dropping frames when flow control
10743 	 * is enabled.
10744 	 */
10745 	if (tg3_flag(tp, 57765_CLASS))
10746 		val = 1;
10747 	else
10748 		val = 2;
10749 	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10750 
10751 	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10752 	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10753 		/* Use hardware link auto-negotiation */
10754 		tg3_flag_set(tp, HW_AUTONEG);
10755 	}
10756 
10757 	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10758 	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10759 		u32 tmp;
10760 
10761 		tmp = tr32(SERDES_RX_CTRL);
10762 		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10763 		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10764 		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10765 		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10766 	}
10767 
10768 	if (!tg3_flag(tp, USE_PHYLIB)) {
10769 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10770 			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10771 
10772 		err = tg3_setup_phy(tp, false);
10773 		if (err)
10774 			return err;
10775 
10776 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10777 		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10778 			u32 tmp;
10779 
10780 			/* Clear CRC stats. */
10781 			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10782 				tg3_writephy(tp, MII_TG3_TEST1,
10783 					     tmp | MII_TG3_TEST1_CRC_EN);
10784 				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10785 			}
10786 		}
10787 	}
10788 
10789 	__tg3_set_rx_mode(tp->dev);
10790 
10791 	/* Initialize receive rules. */
10792 	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10793 	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10794 	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10795 	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10796 
10797 	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10798 		limit = 8;
10799 	else
10800 		limit = 16;
10801 	if (tg3_flag(tp, ENABLE_ASF))
10802 		limit -= 4;
10803 	switch (limit) {
10804 	case 16:
10805 		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10806 		fallthrough;
10807 	case 15:
10808 		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10809 		fallthrough;
10810 	case 14:
10811 		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10812 		fallthrough;
10813 	case 13:
10814 		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10815 		fallthrough;
10816 	case 12:
10817 		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10818 		fallthrough;
10819 	case 11:
10820 		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10821 		fallthrough;
10822 	case 10:
10823 		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10824 		fallthrough;
10825 	case 9:
10826 		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10827 		fallthrough;
10828 	case 8:
10829 		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10830 		fallthrough;
10831 	case 7:
10832 		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10833 		fallthrough;
10834 	case 6:
10835 		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10836 		fallthrough;
10837 	case 5:
10838 		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10839 		fallthrough;
10840 	case 4:
10841 		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10842 	case 3:
10843 		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10844 	case 2:
10845 	case 1:
10846 
10847 	default:
10848 		break;
10849 	}
10850 
10851 	if (tg3_flag(tp, ENABLE_APE))
10852 		/* Write our heartbeat update interval to APE. */
10853 		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10854 				APE_HOST_HEARTBEAT_INT_5SEC);
10855 
10856 	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10857 
10858 	return 0;
10859 }
10860 
10861 /* Called at device open time to get the chip ready for
10862  * packet processing.  Invoked with tp->lock held.
10863  */
10864 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10865 {
10866 	/* Chip may have been just powered on. If so, the boot code may still
10867 	 * be running initialization. Wait for it to finish to avoid races in
10868 	 * accessing the hardware.
10869 	 */
10870 	tg3_enable_register_access(tp);
10871 	tg3_poll_fw(tp);
10872 
10873 	tg3_switch_clocks(tp);
10874 
10875 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10876 
10877 	return tg3_reset_hw(tp, reset_phy);
10878 }
10879 
10880 #ifdef CONFIG_TIGON3_HWMON
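/* Read the TG3_SD_NUM_RECS sensor-data records from the APE scratchpad.
 * Records with a bad signature or without the ACTIVE flag are zeroed so
 * that callers can simply skip them.
 */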
10881 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10882 {
10883 	u32 off, len = TG3_OCIR_LEN;
10884 	int i;
10885 
10886 	for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10887 		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10888 
10889 		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10890 		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10891 			memset(ocir, 0, len);
10892 	}
10893 }
10894 
10895 /* sysfs attributes for hwmon */
10896 static ssize_t tg3_show_temp(struct device *dev,
10897 			     struct device_attribute *devattr, char *buf)
10898 {
10899 	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10900 	struct tg3 *tp = dev_get_drvdata(dev);
10901 	u32 temperature;
10902 
10903 	spin_lock_bh(&tp->lock);
10904 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10905 				sizeof(temperature));
10906 	spin_unlock_bh(&tp->lock);
10907 	return sprintf(buf, "%u\n", temperature * 1000);
10908 }
10909 
10910 
10911 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10912 			  TG3_TEMP_SENSOR_OFFSET);
10913 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10914 			  TG3_TEMP_CAUTION_OFFSET);
10915 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10916 			  TG3_TEMP_MAX_OFFSET);
10917 
10918 static struct attribute *tg3_attrs[] = {
10919 	&sensor_dev_attr_temp1_input.dev_attr.attr,
10920 	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10921 	&sensor_dev_attr_temp1_max.dev_attr.attr,
10922 	NULL
10923 };
10924 ATTRIBUTE_GROUPS(tg3);
10925 
10926 static void tg3_hwmon_close(struct tg3 *tp)
10927 {
10928 	if (tp->hwmon_dev) {
10929 		hwmon_device_unregister(tp->hwmon_dev);
10930 		tp->hwmon_dev = NULL;
10931 	}
10932 }
10933 
10934 static void tg3_hwmon_open(struct tg3 *tp)
10935 {
10936 	int i;
10937 	u32 size = 0;
10938 	struct pci_dev *pdev = tp->pdev;
10939 	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10940 
10941 	tg3_sd_scan_scratchpad(tp, ocirs);
10942 
10943 	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10944 		if (!ocirs[i].src_data_length)
10945 			continue;
10946 
10947 		size += ocirs[i].src_hdr_length;
10948 		size += ocirs[i].src_data_length;
10949 	}
10950 
10951 	if (!size)
10952 		return;
10953 
10954 	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10955 							  tp, tg3_groups);
10956 	if (IS_ERR(tp->hwmon_dev)) {
10957 		tp->hwmon_dev = NULL;
10958 		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10959 	}
10960 }
10961 #else
10962 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10963 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10964 #endif /* CONFIG_TIGON3_HWMON */
10965 
10966 
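/* Fold a 32-bit hardware counter into a 64-bit (high/low) software
 * counter.  If the low-word addition wraps around, the sum is smaller
 * than the value just added, so propagate a carry into the high word.
 */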
10967 #define TG3_STAT_ADD32(PSTAT, REG) \
10968 do {	u32 __val = tr32(REG); \
10969 	(PSTAT)->low += __val; \
10970 	if ((PSTAT)->low < __val) \
10971 		(PSTAT)->high += 1; \
10972 } while (0)
10973 
10974 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10975 {
10976 	struct tg3_hw_stats *sp = tp->hw_stats;
10977 
10978 	if (!tp->link_up)
10979 		return;
10980 
10981 	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10982 	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10983 	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10984 	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10985 	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10986 	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10987 	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10988 	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10989 	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10990 	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10991 	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10992 	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10993 	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10994 	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10995 		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10996 		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10997 		u32 val;
10998 
10999 		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
11000 		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
11001 		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
11002 		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11003 	}
11004 
11005 	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11006 	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11007 	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11008 	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11009 	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11010 	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11011 	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11012 	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11013 	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11014 	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11015 	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11016 	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11017 	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11018 	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11019 
11020 	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11021 	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11022 	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
11023 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11024 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11025 		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11026 	} else {
11027 		u32 val = tr32(HOSTCC_FLOW_ATTN);
11028 		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11029 		if (val) {
11030 			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11031 			sp->rx_discards.low += val;
11032 			if (sp->rx_discards.low < val)
11033 				sp->rx_discards.high += 1;
11034 		}
11035 		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11036 	}
11037 	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11038 }
11039 
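/* Work around chips that can occasionally drop an MSI: if a NAPI
 * context has reported pending work with unchanged rx/tx consumer
 * indices for two timer ticks in a row, assume the interrupt was lost
 * and invoke the MSI handler directly.
 */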
11040 static void tg3_chk_missed_msi(struct tg3 *tp)
11041 {
11042 	u32 i;
11043 
11044 	for (i = 0; i < tp->irq_cnt; i++) {
11045 		struct tg3_napi *tnapi = &tp->napi[i];
11046 
11047 		if (tg3_has_work(tnapi)) {
11048 			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11049 			    tnapi->last_tx_cons == tnapi->tx_cons) {
11050 				if (tnapi->chk_msi_cnt < 1) {
11051 					tnapi->chk_msi_cnt++;
11052 					return;
11053 				}
11054 				tg3_msi(0, tnapi);
11055 			}
11056 		}
11057 		tnapi->chk_msi_cnt = 0;
11058 		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11059 		tnapi->last_tx_cons = tnapi->tx_cons;
11060 	}
11061 }
11062 
11063 static void tg3_timer(struct timer_list *t)
11064 {
11065 	struct tg3 *tp = timer_container_of(tp, t, timer);
11066 
11067 	spin_lock(&tp->lock);
11068 
11069 	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11070 		spin_unlock(&tp->lock);
11071 		goto restart_timer;
11072 	}
11073 
11074 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11075 	    tg3_flag(tp, 57765_CLASS))
11076 		tg3_chk_missed_msi(tp);
11077 
11078 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11079 		/* BCM4785: Flush posted writes from GbE to host memory. */
11080 		tr32(HOSTCC_MODE);
11081 	}
11082 
11083 	if (!tg3_flag(tp, TAGGED_STATUS)) {
11084 		/* All of this is necessary because, when using non-tagged
11085 		 * IRQ status, the mailbox/status_block protocol the chip
11086 		 * uses with the CPU is race prone.
11087 		 */
11088 		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11089 			tw32(GRC_LOCAL_CTRL,
11090 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11091 		} else {
11092 			tw32(HOSTCC_MODE, tp->coalesce_mode |
11093 			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11094 		}
11095 
11096 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11097 			spin_unlock(&tp->lock);
11098 			tg3_reset_task_schedule(tp);
11099 			goto restart_timer;
11100 		}
11101 	}
11102 
11103 	/* This part only runs once per second. */
11104 	if (!--tp->timer_counter) {
11105 		if (tg3_flag(tp, 5705_PLUS))
11106 			tg3_periodic_fetch_stats(tp);
11107 
11108 		if (tp->setlpicnt && !--tp->setlpicnt)
11109 			tg3_phy_eee_enable(tp);
11110 
11111 		if (tg3_flag(tp, USE_LINKCHG_REG)) {
11112 			u32 mac_stat;
11113 			int phy_event;
11114 
11115 			mac_stat = tr32(MAC_STATUS);
11116 
11117 			phy_event = 0;
11118 			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11119 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11120 					phy_event = 1;
11121 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11122 				phy_event = 1;
11123 
11124 			if (phy_event)
11125 				tg3_setup_phy(tp, false);
11126 		} else if (tg3_flag(tp, POLL_SERDES)) {
11127 			u32 mac_stat = tr32(MAC_STATUS);
11128 			int need_setup = 0;
11129 
11130 			if (tp->link_up &&
11131 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11132 				need_setup = 1;
11133 			}
11134 			if (!tp->link_up &&
11135 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
11136 					 MAC_STATUS_SIGNAL_DET))) {
11137 				need_setup = 1;
11138 			}
11139 			if (need_setup) {
11140 				if (!tp->serdes_counter) {
11141 					tw32_f(MAC_MODE,
11142 					     (tp->mac_mode &
11143 					      ~MAC_MODE_PORT_MODE_MASK));
11144 					udelay(40);
11145 					tw32_f(MAC_MODE, tp->mac_mode);
11146 					udelay(40);
11147 				}
11148 				tg3_setup_phy(tp, false);
11149 			}
11150 		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11151 			   tg3_flag(tp, 5780_CLASS)) {
11152 			tg3_serdes_parallel_detect(tp);
11153 		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11154 			u32 cpmu = tr32(TG3_CPMU_STATUS);
11155 			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11156 					 TG3_CPMU_STATUS_LINK_MASK);
11157 
11158 			if (link_up != tp->link_up)
11159 				tg3_setup_phy(tp, false);
11160 		}
11161 
11162 		tp->timer_counter = tp->timer_multiplier;
11163 	}
11164 
11165 	/* Heartbeat is only sent once every 2 seconds.
11166 	 *
11167 	 * The heartbeat is to tell the ASF firmware that the host
11168 	 * driver is still alive.  In the event that the OS crashes,
11169 	 * ASF needs to reset the hardware to free up the FIFO space
11170 	 * that may be filled with rx packets destined for the host.
11171 	 * If the FIFO is full, ASF will no longer function properly.
11172 	 *
11173 	 * Unintended resets have been reported on real-time kernels
11174 	 * where the timer doesn't run on time.  Netpoll will also have
11175 	 * the same problem.
11176 	 *
11177 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11178 	 * to check the ring condition when the heartbeat is expiring
11179 	 * before doing the reset.  This will prevent most unintended
11180 	 * resets.
11181 	 */
11182 	if (!--tp->asf_counter) {
11183 		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11184 			tg3_wait_for_event_ack(tp);
11185 
11186 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11187 				      FWCMD_NICDRV_ALIVE3);
11188 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11189 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11190 				      TG3_FW_UPDATE_TIMEOUT_SEC);
11191 
11192 			tg3_generate_fw_event(tp);
11193 		}
11194 		tp->asf_counter = tp->asf_multiplier;
11195 	}
11196 
11197 	/* Update the APE heartbeat every 5 seconds. */
11198 	tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11199 
11200 	spin_unlock(&tp->lock);
11201 
11202 restart_timer:
11203 	tp->timer.expires = jiffies + tp->timer_offset;
11204 	add_timer(&tp->timer);
11205 }
11206 
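/* Chips with tagged status (other than the 5717 and 57765-class parts,
 * which need the missed-MSI check above) only require a 1 second
 * service timer; everything else is polled at 10 Hz.
 */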
11207 static void tg3_timer_init(struct tg3 *tp)
11208 {
11209 	if (tg3_flag(tp, TAGGED_STATUS) &&
11210 	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11211 	    !tg3_flag(tp, 57765_CLASS))
11212 		tp->timer_offset = HZ;
11213 	else
11214 		tp->timer_offset = HZ / 10;
11215 
11216 	BUG_ON(tp->timer_offset > HZ);
11217 
11218 	tp->timer_multiplier = (HZ / tp->timer_offset);
11219 	tp->asf_multiplier = (HZ / tp->timer_offset) *
11220 			     TG3_FW_UPDATE_FREQ_SEC;
11221 
11222 	timer_setup(&tp->timer, tg3_timer, 0);
11223 }
11224 
11225 static void tg3_timer_start(struct tg3 *tp)
11226 {
11227 	tp->asf_counter   = tp->asf_multiplier;
11228 	tp->timer_counter = tp->timer_multiplier;
11229 
11230 	tp->timer.expires = jiffies + tp->timer_offset;
11231 	add_timer(&tp->timer);
11232 }
11233 
11234 static void tg3_timer_stop(struct tg3 *tp)
11235 {
11236 	timer_delete_sync(&tp->timer);
11237 }
11238 
11239 /* Restart hardware after configuration changes, self-test, etc.
11240  * Invoked with tp->lock held.
11241  */
11242 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11243 	__releases(tp->lock)
11244 	__acquires(tp->lock)
11245 	__releases(tp->dev->lock)
11246 	__acquires(tp->dev->lock)
11247 {
11248 	int err;
11249 
11250 	err = tg3_init_hw(tp, reset_phy);
11251 	if (err) {
11252 		netdev_err(tp->dev,
11253 			   "Failed to re-initialize device, aborting\n");
11254 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11255 		tg3_full_unlock(tp);
11256 		tg3_timer_stop(tp);
11257 		tp->irq_sync = 0;
11258 		tg3_napi_enable(tp);
11259 		netdev_unlock(tp->dev);
11260 		dev_close(tp->dev);
11261 		netdev_lock(tp->dev);
11262 		tg3_full_lock(tp, 0);
11263 	}
11264 	return err;
11265 }
11266 
11267 static void tg3_reset_task(struct work_struct *work)
11268 {
11269 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11270 	int err;
11271 
11272 	rtnl_lock();
11273 	tg3_full_lock(tp, 0);
11274 
11275 	if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11276 	    tp->pdev->error_state != pci_channel_io_normal) {
11277 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11278 		tg3_full_unlock(tp);
11279 		rtnl_unlock();
11280 		return;
11281 	}
11282 
11283 	tg3_full_unlock(tp);
11284 
11285 	tg3_phy_stop(tp);
11286 
11287 	tg3_netif_stop(tp);
11288 
11289 	netdev_lock(tp->dev);
11290 	tg3_full_lock(tp, 1);
11291 
11292 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11293 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11294 		tp->write32_rx_mbox = tg3_write_flush_reg32;
11295 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11296 		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11297 	}
11298 
11299 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11300 	err = tg3_init_hw(tp, true);
11301 	if (err) {
11302 		tg3_full_unlock(tp);
11303 		tp->irq_sync = 0;
11304 		tg3_napi_enable(tp);
11305 		/* Clear this flag so that tg3_reset_task_cancel() will not
11306 		 * call cancel_work_sync() and wait forever.
11307 		 */
11308 		tg3_flag_clear(tp, RESET_TASK_PENDING);
11309 		netdev_unlock(tp->dev);
11310 		dev_close(tp->dev);
11311 		goto out;
11312 	}
11313 
11314 	tg3_netif_start(tp);
11315 	tg3_full_unlock(tp);
11316 	netdev_unlock(tp->dev);
11317 	tg3_phy_start(tp);
11318 	tg3_flag_clear(tp, RESET_TASK_PENDING);
11319 out:
11320 	rtnl_unlock();
11321 }
11322 
11323 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11324 {
11325 	irq_handler_t fn;
11326 	unsigned long flags;
11327 	char *name;
11328 	struct tg3_napi *tnapi = &tp->napi[irq_num];
11329 
11330 	if (tp->irq_cnt == 1)
11331 		name = tp->dev->name;
11332 	else {
11333 		name = &tnapi->irq_lbl[0];
11334 		if (tnapi->tx_buffers && tnapi->rx_rcb)
11335 			snprintf(name, sizeof(tnapi->irq_lbl),
11336 				 "%s-txrx-%d", tp->dev->name, irq_num);
11337 		else if (tnapi->tx_buffers)
11338 			snprintf(name, sizeof(tnapi->irq_lbl),
11339 				 "%s-tx-%d", tp->dev->name, irq_num);
11340 		else if (tnapi->rx_rcb)
11341 			snprintf(name, sizeof(tnapi->irq_lbl),
11342 				 "%s-rx-%d", tp->dev->name, irq_num);
11343 		else
11344 			snprintf(name, sizeof(tnapi->irq_lbl),
11345 				 "%s-%d", tp->dev->name, irq_num);
11346 	}
11347 
11348 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11349 		fn = tg3_msi;
11350 		if (tg3_flag(tp, 1SHOT_MSI))
11351 			fn = tg3_msi_1shot;
11352 		flags = 0;
11353 	} else {
11354 		fn = tg3_interrupt;
11355 		if (tg3_flag(tp, TAGGED_STATUS))
11356 			fn = tg3_interrupt_tagged;
11357 		flags = IRQF_SHARED;
11358 	}
11359 
11360 	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11361 }
11362 
11363 static int tg3_test_interrupt(struct tg3 *tp)
11364 {
11365 	struct tg3_napi *tnapi = &tp->napi[0];
11366 	struct net_device *dev = tp->dev;
11367 	int err, i, intr_ok = 0;
11368 	u32 val;
11369 
11370 	if (!netif_running(dev))
11371 		return -ENODEV;
11372 
11373 	tg3_disable_ints(tp);
11374 
11375 	free_irq(tnapi->irq_vec, tnapi);
11376 
11377 	/*
11378 	 * Turn off MSI one-shot mode.  Otherwise this test has no
11379 	 * way to observe whether the interrupt was delivered.
11380 	 */
11381 	if (tg3_flag(tp, 57765_PLUS)) {
11382 		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11383 		tw32(MSGINT_MODE, val);
11384 	}
11385 
11386 	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11387 			  IRQF_SHARED, dev->name, tnapi);
11388 	if (err)
11389 		return err;
11390 
11391 	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11392 	tg3_enable_ints(tp);
11393 
11394 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11395 	       tnapi->coal_now);
11396 
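	/* Poll for up to ~50ms (5 x 10ms) for evidence that the test
	 * interrupt fired: either a nonzero interrupt mailbox or the
	 * PCI interrupt mask bit set in MISC_HOST_CTRL.
	 */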
11397 	for (i = 0; i < 5; i++) {
11398 		u32 int_mbox, misc_host_ctrl;
11399 
11400 		int_mbox = tr32_mailbox(tnapi->int_mbox);
11401 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11402 
11403 		if ((int_mbox != 0) ||
11404 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11405 			intr_ok = 1;
11406 			break;
11407 		}
11408 
11409 		if (tg3_flag(tp, 57765_PLUS) &&
11410 		    tnapi->hw_status->status_tag != tnapi->last_tag)
11411 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11412 
11413 		msleep(10);
11414 	}
11415 
11416 	tg3_disable_ints(tp);
11417 
11418 	free_irq(tnapi->irq_vec, tnapi);
11419 
11420 	err = tg3_request_irq(tp, 0);
11421 
11422 	if (err)
11423 		return err;
11424 
11425 	if (intr_ok) {
11426 		/* Reenable MSI one shot mode. */
11427 		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11428 			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11429 			tw32(MSGINT_MODE, val);
11430 		}
11431 		return 0;
11432 	}
11433 
11434 	return -EIO;
11435 }
11436 
11437 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11438  * INTx mode is successfully restored.
11439  */
11440 static int tg3_test_msi(struct tg3 *tp)
11441 {
11442 	int err;
11443 	u16 pci_cmd;
11444 
11445 	if (!tg3_flag(tp, USING_MSI))
11446 		return 0;
11447 
11448 	/* Turn off SERR reporting in case MSI terminates with Master
11449 	 * Abort.
11450 	 */
11451 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11452 	pci_write_config_word(tp->pdev, PCI_COMMAND,
11453 			      pci_cmd & ~PCI_COMMAND_SERR);
11454 
11455 	err = tg3_test_interrupt(tp);
11456 
11457 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11458 
11459 	if (!err)
11460 		return 0;
11461 
11462 	/* other failures */
11463 	if (err != -EIO)
11464 		return err;
11465 
11466 	/* MSI test failed, go back to INTx mode */
11467 	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11468 		    "to INTx mode. Please report this failure to the PCI "
11469 		    "maintainer and include system chipset information\n");
11470 
11471 	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11472 
11473 	pci_disable_msi(tp->pdev);
11474 
11475 	tg3_flag_clear(tp, USING_MSI);
11476 	tp->napi[0].irq_vec = tp->pdev->irq;
11477 
11478 	err = tg3_request_irq(tp, 0);
11479 	if (err)
11480 		return err;
11481 
11482 	/* Need to reset the chip because the MSI cycle may have terminated
11483 	 * with Master Abort.
11484 	 */
11485 	tg3_full_lock(tp, 1);
11486 
11487 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11488 	err = tg3_init_hw(tp, true);
11489 
11490 	tg3_full_unlock(tp);
11491 
11492 	if (err)
11493 		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11494 
11495 	return err;
11496 }
11497 
11498 static int tg3_request_firmware(struct tg3 *tp)
11499 {
11500 	const struct tg3_firmware_hdr *fw_hdr;
11501 
11502 	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11503 		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11504 			   tp->fw_needed);
11505 		return -ENOENT;
11506 	}
11507 
11508 	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11509 
11510 	/* Firmware blob starts with version numbers, followed by
11511 	 * start address and _full_ length including BSS sections
11512 	 * (which must be longer than the actual data, of course).
11513 	 */
11514 
11515 	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11516 	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11517 		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11518 			   tp->fw_len, tp->fw_needed);
11519 		release_firmware(tp->fw);
11520 		tp->fw = NULL;
11521 		return -EINVAL;
11522 	}
11523 
11524 	/* We no longer need firmware; we have it. */
11525 	tp->fw_needed = NULL;
11526 	return 0;
11527 }
11528 
11529 static u32 tg3_irq_count(struct tg3 *tp)
11530 {
11531 	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11532 
11533 	if (irq_cnt > 1) {
11534 		/* We want as many rx rings enabled as there are cpus.
11535 		 * In multiqueue MSI-X mode, the first MSI-X vector
11536 		 * only deals with link interrupts, etc, so we add
11537 		 * one to the number of vectors we are requesting.
11538 		 */
11539 		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11540 	}
11541 
11542 	return irq_cnt;
11543 }
11544 
11545 static bool tg3_enable_msix(struct tg3 *tp)
11546 {
11547 	int i, rc;
11548 	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11549 
11550 	tp->txq_cnt = tp->txq_req;
11551 	tp->rxq_cnt = tp->rxq_req;
11552 	if (!tp->rxq_cnt)
11553 		tp->rxq_cnt = netif_get_num_default_rss_queues();
11554 	if (tp->rxq_cnt > tp->rxq_max)
11555 		tp->rxq_cnt = tp->rxq_max;
11556 
11557 	/* Disable multiple TX rings by default.  Simple round-robin hardware
11558 	 * scheduling of the TX rings can cause starvation of rings with
11559 	 * small packets when other rings have TSO or jumbo packets.
11560 	 */
11561 	if (!tp->txq_req)
11562 		tp->txq_cnt = 1;
11563 
11564 	tp->irq_cnt = tg3_irq_count(tp);
11565 
11566 	for (i = 0; i < tp->irq_max; i++) {
11567 		msix_ent[i].entry  = i;
11568 		msix_ent[i].vector = 0;
11569 	}
11570 
11571 	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11572 	if (rc < 0) {
11573 		return false;
11574 	} else if (rc < tp->irq_cnt) {
11575 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11576 			      tp->irq_cnt, rc);
11577 		tp->irq_cnt = rc;
11578 		tp->rxq_cnt = max(rc - 1, 1);
11579 		if (tp->txq_cnt)
11580 			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11581 	}
11582 
11583 	for (i = 0; i < tp->irq_max; i++)
11584 		tp->napi[i].irq_vec = msix_ent[i].vector;
11585 
11586 	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11587 		pci_disable_msix(tp->pdev);
11588 		return false;
11589 	}
11590 
11591 	if (tp->irq_cnt == 1)
11592 		return true;
11593 
11594 	tg3_flag_set(tp, ENABLE_RSS);
11595 
11596 	if (tp->txq_cnt > 1)
11597 		tg3_flag_set(tp, ENABLE_TSS);
11598 
11599 	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11600 
11601 	return true;
11602 }
11603 
11604 static void tg3_ints_init(struct tg3 *tp)
11605 {
11606 	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11607 	    !tg3_flag(tp, TAGGED_STATUS)) {
11608 		/* All MSI-supporting chips should support tagged
11609 		 * status.  Warn and fall back to INTx if that is not the case.
11610 		 */
11611 		netdev_warn(tp->dev,
11612 			    "MSI without TAGGED_STATUS? Not using MSI\n");
11613 		goto defcfg;
11614 	}
11615 
11616 	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11617 		tg3_flag_set(tp, USING_MSIX);
11618 	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11619 		tg3_flag_set(tp, USING_MSI);
11620 
11621 	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11622 		u32 msi_mode = tr32(MSGINT_MODE);
11623 		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11624 			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11625 		if (!tg3_flag(tp, 1SHOT_MSI))
11626 			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11627 		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11628 	}
11629 defcfg:
11630 	if (!tg3_flag(tp, USING_MSIX)) {
11631 		tp->irq_cnt = 1;
11632 		tp->napi[0].irq_vec = tp->pdev->irq;
11633 	}
11634 
11635 	if (tp->irq_cnt == 1) {
11636 		tp->txq_cnt = 1;
11637 		tp->rxq_cnt = 1;
11638 		netif_set_real_num_tx_queues(tp->dev, 1);
11639 		netif_set_real_num_rx_queues(tp->dev, 1);
11640 	}
11641 }
11642 
11643 static void tg3_ints_fini(struct tg3 *tp)
11644 {
11645 	if (tg3_flag(tp, USING_MSIX))
11646 		pci_disable_msix(tp->pdev);
11647 	else if (tg3_flag(tp, USING_MSI))
11648 		pci_disable_msi(tp->pdev);
11649 	tg3_flag_clear(tp, USING_MSI);
11650 	tg3_flag_clear(tp, USING_MSIX);
11651 	tg3_flag_clear(tp, ENABLE_RSS);
11652 	tg3_flag_clear(tp, ENABLE_TSS);
11653 }
11654 
11655 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11656 		     bool init)
11657 {
11658 	struct net_device *dev = tp->dev;
11659 	int i, err;
11660 
11661 	/*
11662 	 * Setup interrupts first so we know how
11663 	 * many NAPI resources to allocate
11664 	 */
11665 	tg3_ints_init(tp);
11666 
11667 	tg3_rss_check_indir_tbl(tp);
11668 
11669 	/* The placement of this call is tied
11670 	 * to the setup and use of Host TX descriptors.
11671 	 */
11672 	err = tg3_alloc_consistent(tp);
11673 	if (err)
11674 		goto out_ints_fini;
11675 
11676 	netdev_lock(dev);
11677 	tg3_napi_init(tp);
11678 
11679 	tg3_napi_enable(tp);
11680 	netdev_unlock(dev);
11681 
11682 	for (i = 0; i < tp->irq_cnt; i++) {
11683 		err = tg3_request_irq(tp, i);
11684 		if (err) {
11685 			for (i--; i >= 0; i--) {
11686 				struct tg3_napi *tnapi = &tp->napi[i];
11687 
11688 				free_irq(tnapi->irq_vec, tnapi);
11689 			}
11690 			goto out_napi_fini;
11691 		}
11692 	}
11693 
11694 	tg3_full_lock(tp, 0);
11695 
11696 	if (init)
11697 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11698 
11699 	err = tg3_init_hw(tp, reset_phy);
11700 	if (err) {
11701 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11702 		tg3_free_rings(tp);
11703 	}
11704 
11705 	tg3_full_unlock(tp);
11706 
11707 	if (err)
11708 		goto out_free_irq;
11709 
11710 	if (test_irq && tg3_flag(tp, USING_MSI)) {
11711 		err = tg3_test_msi(tp);
11712 
11713 		if (err) {
11714 			tg3_full_lock(tp, 0);
11715 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11716 			tg3_free_rings(tp);
11717 			tg3_full_unlock(tp);
11718 
11719 			goto out_napi_fini;
11720 		}
11721 
11722 		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11723 			u32 val = tr32(PCIE_TRANSACTION_CFG);
11724 
11725 			tw32(PCIE_TRANSACTION_CFG,
11726 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11727 		}
11728 	}
11729 
11730 	tg3_phy_start(tp);
11731 
11732 	tg3_hwmon_open(tp);
11733 
11734 	tg3_full_lock(tp, 0);
11735 
11736 	tg3_timer_start(tp);
11737 	tg3_flag_set(tp, INIT_COMPLETE);
11738 	tg3_enable_ints(tp);
11739 
11740 	tg3_ptp_resume(tp);
11741 
11742 	tg3_full_unlock(tp);
11743 
11744 	netif_tx_start_all_queues(dev);
11745 
11746 	/*
11747 	 * Reset the loopback feature if it was turned on while the device was
11748 	 * down; make sure that it is restored properly now.
11749 	 */
11750 	if (dev->features & NETIF_F_LOOPBACK)
11751 		tg3_set_loopback(dev, dev->features);
11752 
11753 	return 0;
11754 
11755 out_free_irq:
11756 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11757 		struct tg3_napi *tnapi = &tp->napi[i];
11758 		free_irq(tnapi->irq_vec, tnapi);
11759 	}
11760 
11761 out_napi_fini:
11762 	tg3_napi_disable(tp);
11763 	tg3_napi_fini(tp);
11764 	tg3_free_consistent(tp);
11765 
11766 out_ints_fini:
11767 	tg3_ints_fini(tp);
11768 
11769 	return err;
11770 }
11771 
11772 static void tg3_stop(struct tg3 *tp)
11773 {
11774 	int i;
11775 
11776 	tg3_reset_task_cancel(tp);
11777 	tg3_netif_stop(tp);
11778 
11779 	tg3_timer_stop(tp);
11780 
11781 	tg3_hwmon_close(tp);
11782 
11783 	tg3_phy_stop(tp);
11784 
11785 	tg3_full_lock(tp, 1);
11786 
11787 	tg3_disable_ints(tp);
11788 
11789 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11790 	tg3_free_rings(tp);
11791 	tg3_flag_clear(tp, INIT_COMPLETE);
11792 
11793 	tg3_full_unlock(tp);
11794 
11795 	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11796 		struct tg3_napi *tnapi = &tp->napi[i];
11797 		free_irq(tnapi->irq_vec, tnapi);
11798 	}
11799 
11800 	tg3_ints_fini(tp);
11801 
11802 	tg3_napi_fini(tp);
11803 
11804 	tg3_free_consistent(tp);
11805 }
11806 
11807 static int tg3_open(struct net_device *dev)
11808 {
11809 	struct tg3 *tp = netdev_priv(dev);
11810 	int err;
11811 
11812 	if (tp->pcierr_recovery) {
11813 		netdev_err(dev, "Failed to open device. PCI error recovery "
11814 			   "in progress\n");
11815 		return -EAGAIN;
11816 	}
11817 
11818 	if (tp->fw_needed) {
11819 		err = tg3_request_firmware(tp);
11820 		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11821 			if (err) {
11822 				netdev_warn(tp->dev, "EEE capability disabled\n");
11823 				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11824 			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11825 				netdev_warn(tp->dev, "EEE capability restored\n");
11826 				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11827 			}
11828 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11829 			if (err)
11830 				return err;
11831 		} else if (err) {
11832 			netdev_warn(tp->dev, "TSO capability disabled\n");
11833 			tg3_flag_clear(tp, TSO_CAPABLE);
11834 		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11835 			netdev_notice(tp->dev, "TSO capability restored\n");
11836 			tg3_flag_set(tp, TSO_CAPABLE);
11837 		}
11838 	}
11839 
11840 	tg3_carrier_off(tp);
11841 
11842 	err = tg3_power_up(tp);
11843 	if (err)
11844 		return err;
11845 
11846 	tg3_full_lock(tp, 0);
11847 
11848 	tg3_disable_ints(tp);
11849 	tg3_flag_clear(tp, INIT_COMPLETE);
11850 
11851 	tg3_full_unlock(tp);
11852 
11853 	err = tg3_start(tp,
11854 			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11855 			true, true);
11856 	if (err) {
11857 		tg3_frob_aux_power(tp, false);
11858 		pci_set_power_state(tp->pdev, PCI_D3hot);
11859 	}
11860 
11861 	return err;
11862 }
11863 
11864 static int tg3_close(struct net_device *dev)
11865 {
11866 	struct tg3 *tp = netdev_priv(dev);
11867 
11868 	if (tp->pcierr_recovery) {
11869 		netdev_err(dev, "Failed to close device. PCI error recovery "
11870 			   "in progress\n");
11871 		return -EAGAIN;
11872 	}
11873 
11874 	tg3_stop(tp);
11875 
11876 	if (pci_device_is_present(tp->pdev)) {
11877 		tg3_power_down_prepare(tp);
11878 
11879 		tg3_carrier_off(tp);
11880 	}
11881 	return 0;
11882 }
11883 
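/* Combine the high and low 32-bit halves of a hardware statistic
 * into a u64.
 */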
11884 static inline u64 get_stat64(tg3_stat64_t *val)
11885 {
11886 	return ((u64)val->high << 32) | ((u64)val->low);
11887 }
11888 
11889 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11890 {
11891 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11892 
11893 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11894 	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11895 	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11896 		u32 val;
11897 
11898 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11899 			tg3_writephy(tp, MII_TG3_TEST1,
11900 				     val | MII_TG3_TEST1_CRC_EN);
11901 			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11902 		} else
11903 			val = 0;
11904 
11905 		tp->phy_crc_errors += val;
11906 
11907 		return tp->phy_crc_errors;
11908 	}
11909 
11910 	return get_stat64(&hw_stats->rx_fcs_errors);
11911 }
11912 
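/* Ethtool stats survive chip resets: each counter is the previously
 * accumulated value (saved across resets) plus the live hardware count.
 */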
11913 #define ESTAT_ADD(member) \
11914 	estats->member =	old_estats->member + \
11915 				get_stat64(&hw_stats->member)
11916 
11917 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11918 {
11919 	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11920 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11921 
11922 	ESTAT_ADD(rx_octets);
11923 	ESTAT_ADD(rx_fragments);
11924 	ESTAT_ADD(rx_ucast_packets);
11925 	ESTAT_ADD(rx_mcast_packets);
11926 	ESTAT_ADD(rx_bcast_packets);
11927 	ESTAT_ADD(rx_fcs_errors);
11928 	ESTAT_ADD(rx_align_errors);
11929 	ESTAT_ADD(rx_xon_pause_rcvd);
11930 	ESTAT_ADD(rx_xoff_pause_rcvd);
11931 	ESTAT_ADD(rx_mac_ctrl_rcvd);
11932 	ESTAT_ADD(rx_xoff_entered);
11933 	ESTAT_ADD(rx_frame_too_long_errors);
11934 	ESTAT_ADD(rx_jabbers);
11935 	ESTAT_ADD(rx_undersize_packets);
11936 	ESTAT_ADD(rx_in_length_errors);
11937 	ESTAT_ADD(rx_out_length_errors);
11938 	ESTAT_ADD(rx_64_or_less_octet_packets);
11939 	ESTAT_ADD(rx_65_to_127_octet_packets);
11940 	ESTAT_ADD(rx_128_to_255_octet_packets);
11941 	ESTAT_ADD(rx_256_to_511_octet_packets);
11942 	ESTAT_ADD(rx_512_to_1023_octet_packets);
11943 	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11944 	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11945 	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11946 	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11947 	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11948 
11949 	ESTAT_ADD(tx_octets);
11950 	ESTAT_ADD(tx_collisions);
11951 	ESTAT_ADD(tx_xon_sent);
11952 	ESTAT_ADD(tx_xoff_sent);
11953 	ESTAT_ADD(tx_flow_control);
11954 	ESTAT_ADD(tx_mac_errors);
11955 	ESTAT_ADD(tx_single_collisions);
11956 	ESTAT_ADD(tx_mult_collisions);
11957 	ESTAT_ADD(tx_deferred);
11958 	ESTAT_ADD(tx_excessive_collisions);
11959 	ESTAT_ADD(tx_late_collisions);
11960 	ESTAT_ADD(tx_collide_2times);
11961 	ESTAT_ADD(tx_collide_3times);
11962 	ESTAT_ADD(tx_collide_4times);
11963 	ESTAT_ADD(tx_collide_5times);
11964 	ESTAT_ADD(tx_collide_6times);
11965 	ESTAT_ADD(tx_collide_7times);
11966 	ESTAT_ADD(tx_collide_8times);
11967 	ESTAT_ADD(tx_collide_9times);
11968 	ESTAT_ADD(tx_collide_10times);
11969 	ESTAT_ADD(tx_collide_11times);
11970 	ESTAT_ADD(tx_collide_12times);
11971 	ESTAT_ADD(tx_collide_13times);
11972 	ESTAT_ADD(tx_collide_14times);
11973 	ESTAT_ADD(tx_collide_15times);
11974 	ESTAT_ADD(tx_ucast_packets);
11975 	ESTAT_ADD(tx_mcast_packets);
11976 	ESTAT_ADD(tx_bcast_packets);
11977 	ESTAT_ADD(tx_carrier_sense_errors);
11978 	ESTAT_ADD(tx_discards);
11979 	ESTAT_ADD(tx_errors);
11980 
11981 	ESTAT_ADD(dma_writeq_full);
11982 	ESTAT_ADD(dma_write_prioq_full);
11983 	ESTAT_ADD(rxbds_empty);
11984 	ESTAT_ADD(rx_discards);
11985 	ESTAT_ADD(rx_errors);
11986 	ESTAT_ADD(rx_threshold_hit);
11987 
11988 	ESTAT_ADD(dma_readq_full);
11989 	ESTAT_ADD(dma_read_prioq_full);
11990 	ESTAT_ADD(tx_comp_queue_full);
11991 
11992 	ESTAT_ADD(ring_set_send_prod_index);
11993 	ESTAT_ADD(ring_status_update);
11994 	ESTAT_ADD(nic_irqs);
11995 	ESTAT_ADD(nic_avoided_irqs);
11996 	ESTAT_ADD(nic_tx_threshold_hit);
11997 
11998 	ESTAT_ADD(mbuf_lwm_thresh_hit);
11999 }
12000 
12001 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
12002 {
12003 	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12004 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
12005 	unsigned long rx_dropped;
12006 	unsigned long tx_dropped;
12007 	int i;
12008 
12009 	stats->rx_packets = old_stats->rx_packets +
12010 		get_stat64(&hw_stats->rx_ucast_packets) +
12011 		get_stat64(&hw_stats->rx_mcast_packets) +
12012 		get_stat64(&hw_stats->rx_bcast_packets);
12013 
12014 	stats->tx_packets = old_stats->tx_packets +
12015 		get_stat64(&hw_stats->tx_ucast_packets) +
12016 		get_stat64(&hw_stats->tx_mcast_packets) +
12017 		get_stat64(&hw_stats->tx_bcast_packets);
12018 
12019 	stats->rx_bytes = old_stats->rx_bytes +
12020 		get_stat64(&hw_stats->rx_octets);
12021 	stats->tx_bytes = old_stats->tx_bytes +
12022 		get_stat64(&hw_stats->tx_octets);
12023 
12024 	stats->rx_errors = old_stats->rx_errors +
12025 		get_stat64(&hw_stats->rx_errors);
12026 	stats->tx_errors = old_stats->tx_errors +
12027 		get_stat64(&hw_stats->tx_errors) +
12028 		get_stat64(&hw_stats->tx_mac_errors) +
12029 		get_stat64(&hw_stats->tx_carrier_sense_errors) +
12030 		get_stat64(&hw_stats->tx_discards);
12031 
12032 	stats->multicast = old_stats->multicast +
12033 		get_stat64(&hw_stats->rx_mcast_packets);
12034 	stats->collisions = old_stats->collisions +
12035 		get_stat64(&hw_stats->tx_collisions);
12036 
12037 	stats->rx_length_errors = old_stats->rx_length_errors +
12038 		get_stat64(&hw_stats->rx_frame_too_long_errors) +
12039 		get_stat64(&hw_stats->rx_undersize_packets);
12040 
12041 	stats->rx_frame_errors = old_stats->rx_frame_errors +
12042 		get_stat64(&hw_stats->rx_align_errors);
12043 	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12044 		get_stat64(&hw_stats->tx_discards);
12045 	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12046 		get_stat64(&hw_stats->tx_carrier_sense_errors);
12047 
12048 	stats->rx_crc_errors = old_stats->rx_crc_errors +
12049 		tg3_calc_crc_errors(tp);
12050 
12051 	stats->rx_missed_errors = old_stats->rx_missed_errors +
12052 		get_stat64(&hw_stats->rx_discards);
12053 
12054 	/* Aggregate per-queue counters. The per-queue counters are updated
12055 	 * by a single writer, race-free. The result computed by this loop
12056 	 * might not be 100% accurate (counters can be updated in the middle of
12057 	 * the loop) but the next tg3_get_nstats() will recompute the current
12058 	 * value so it is acceptable.
12059 	 *
12060 	 * Note that these counters wrap around at 4G on 32bit machines.
12061 	 */
12062 	rx_dropped = (unsigned long)(old_stats->rx_dropped);
12063 	tx_dropped = (unsigned long)(old_stats->tx_dropped);
12064 
12065 	for (i = 0; i < tp->irq_cnt; i++) {
12066 		struct tg3_napi *tnapi = &tp->napi[i];
12067 
12068 		rx_dropped += tnapi->rx_dropped;
12069 		tx_dropped += tnapi->tx_dropped;
12070 	}
12071 
12072 	stats->rx_dropped = rx_dropped;
12073 	stats->tx_dropped = tx_dropped;
12074 }
12075 
12076 static int tg3_get_regs_len(struct net_device *dev)
12077 {
12078 	return TG3_REG_BLK_SIZE;
12079 }
12080 
12081 static void tg3_get_regs(struct net_device *dev,
12082 		struct ethtool_regs *regs, void *_p)
12083 {
12084 	struct tg3 *tp = netdev_priv(dev);
12085 
12086 	regs->version = 0;
12087 
12088 	memset(_p, 0, TG3_REG_BLK_SIZE);
12089 
12090 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12091 		return;
12092 
12093 	tg3_full_lock(tp, 0);
12094 
12095 	tg3_dump_legacy_regs(tp, (u32 *)_p);
12096 
12097 	tg3_full_unlock(tp);
12098 }
12099 
12100 static int tg3_get_eeprom_len(struct net_device *dev)
12101 {
12102 	struct tg3 *tp = netdev_priv(dev);
12103 
12104 	return tp->nvram_size;
12105 }
12106 
12107 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12108 {
12109 	struct tg3 *tp = netdev_priv(dev);
12110 	int ret, cpmu_restore = 0;
12111 	u8  *pd;
12112 	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12113 	__be32 val;
12114 
12115 	if (tg3_flag(tp, NO_NVRAM))
12116 		return -EINVAL;
12117 
12118 	offset = eeprom->offset;
12119 	len = eeprom->len;
12120 	eeprom->len = 0;
12121 
12122 	eeprom->magic = TG3_EEPROM_MAGIC;
12123 
12124 	/* Override clock, link aware and link idle modes */
12125 	if (tg3_flag(tp, CPMU_PRESENT)) {
12126 		cpmu_val = tr32(TG3_CPMU_CTRL);
12127 		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12128 				CPMU_CTRL_LINK_IDLE_MODE)) {
12129 			tw32(TG3_CPMU_CTRL, cpmu_val &
12130 					    ~(CPMU_CTRL_LINK_AWARE_MODE |
12131 					     CPMU_CTRL_LINK_IDLE_MODE));
12132 			cpmu_restore = 1;
12133 		}
12134 	}
12135 	tg3_override_clk(tp);
12136 
12137 	if (offset & 3) {
12138 		/* adjustments to start on required 4 byte boundary */
12139 		b_offset = offset & 3;
12140 		b_count = 4 - b_offset;
12141 		if (b_count > len) {
12142 			/* i.e. offset=1 len=2 */
12143 			b_count = len;
12144 		}
12145 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12146 		if (ret)
12147 			goto eeprom_done;
12148 		memcpy(data, ((char *)&val) + b_offset, b_count);
12149 		len -= b_count;
12150 		offset += b_count;
12151 		eeprom->len += b_count;
12152 	}
12153 
12154 	/* read bytes up to the last 4 byte boundary */
12155 	pd = &data[eeprom->len];
12156 	for (i = 0; i < (len - (len & 3)); i += 4) {
12157 		ret = tg3_nvram_read_be32(tp, offset + i, &val);
12158 		if (ret) {
12159 			if (i)
12160 				i -= 4;
12161 			eeprom->len += i;
12162 			goto eeprom_done;
12163 		}
12164 		memcpy(pd + i, &val, 4);
12165 		if (need_resched()) {
12166 			if (signal_pending(current)) {
12167 				eeprom->len += i;
12168 				ret = -EINTR;
12169 				goto eeprom_done;
12170 			}
12171 			cond_resched();
12172 		}
12173 	}
12174 	eeprom->len += i;
12175 
12176 	if (len & 3) {
12177 		/* read last bytes not ending on 4 byte boundary */
12178 		pd = &data[eeprom->len];
12179 		b_count = len & 3;
12180 		b_offset = offset + len - b_count;
12181 		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12182 		if (ret)
12183 			goto eeprom_done;
12184 		memcpy(pd, &val, b_count);
12185 		eeprom->len += b_count;
12186 	}
12187 	ret = 0;
12188 
12189 eeprom_done:
12190 	/* Restore clock, link aware and link idle modes */
12191 	tg3_restore_clk(tp);
12192 	if (cpmu_restore)
12193 		tw32(TG3_CPMU_CTRL, cpmu_val);
12194 
12195 	return ret;
12196 }
12197 
12198 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12199 {
12200 	struct tg3 *tp = netdev_priv(dev);
12201 	int ret;
12202 	u32 offset, len, b_offset, odd_len;
12203 	u8 *buf;
12204 	__be32 start = 0, end;
12205 
12206 	if (tg3_flag(tp, NO_NVRAM) ||
12207 	    eeprom->magic != TG3_EEPROM_MAGIC)
12208 		return -EINVAL;
12209 
12210 	offset = eeprom->offset;
12211 	len = eeprom->len;
12212 
12213 	if ((b_offset = (offset & 3))) {
12214 		/* adjustments to start on required 4 byte boundary */
12215 		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12216 		if (ret)
12217 			return ret;
12218 		len += b_offset;
12219 		offset &= ~3;
12220 		if (len < 4)
12221 			len = 4;
12222 	}
12223 
12224 	odd_len = 0;
12225 	if (len & 3) {
12226 		/* adjustments to end on required 4 byte boundary */
12227 		odd_len = 1;
12228 		len = (len + 3) & ~3;
12229 		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12230 		if (ret)
12231 			return ret;
12232 	}
12233 
12234 	buf = data;
12235 	if (b_offset || odd_len) {
12236 		buf = kmalloc(len, GFP_KERNEL);
12237 		if (!buf)
12238 			return -ENOMEM;
12239 		if (b_offset)
12240 			memcpy(buf, &start, 4);
12241 		if (odd_len)
12242 			memcpy(buf+len-4, &end, 4);
12243 		memcpy(buf + b_offset, data, eeprom->len);
12244 	}
12245 
12246 	ret = tg3_nvram_write_block(tp, offset, len, buf);
12247 
12248 	if (buf != data)
12249 		kfree(buf);
12250 
12251 	return ret;
12252 }
12253 
12254 static int tg3_get_link_ksettings(struct net_device *dev,
12255 				  struct ethtool_link_ksettings *cmd)
12256 {
12257 	struct tg3 *tp = netdev_priv(dev);
12258 	u32 supported, advertising;
12259 
12260 	if (tg3_flag(tp, USE_PHYLIB)) {
12261 		struct phy_device *phydev;
12262 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12263 			return -EAGAIN;
12264 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12265 		phy_ethtool_ksettings_get(phydev, cmd);
12266 
12267 		return 0;
12268 	}
12269 
12270 	supported = (SUPPORTED_Autoneg);
12271 
12272 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12273 		supported |= (SUPPORTED_1000baseT_Half |
12274 			      SUPPORTED_1000baseT_Full);
12275 
12276 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12277 		supported |= (SUPPORTED_100baseT_Half |
12278 			      SUPPORTED_100baseT_Full |
12279 			      SUPPORTED_10baseT_Half |
12280 			      SUPPORTED_10baseT_Full |
12281 			      SUPPORTED_TP);
12282 		cmd->base.port = PORT_TP;
12283 	} else {
12284 		supported |= SUPPORTED_FIBRE;
12285 		cmd->base.port = PORT_FIBRE;
12286 	}
12287 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12288 						supported);
12289 
12290 	advertising = tp->link_config.advertising;
12291 	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12292 		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12293 			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12294 				advertising |= ADVERTISED_Pause;
12295 			} else {
12296 				advertising |= ADVERTISED_Pause |
12297 					ADVERTISED_Asym_Pause;
12298 			}
12299 		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12300 			advertising |= ADVERTISED_Asym_Pause;
12301 		}
12302 	}
12303 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12304 						advertising);
12305 
12306 	if (netif_running(dev) && tp->link_up) {
12307 		cmd->base.speed = tp->link_config.active_speed;
12308 		cmd->base.duplex = tp->link_config.active_duplex;
12309 		ethtool_convert_legacy_u32_to_link_mode(
12310 			cmd->link_modes.lp_advertising,
12311 			tp->link_config.rmt_adv);
12312 
12313 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12314 			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12315 				cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12316 			else
12317 				cmd->base.eth_tp_mdix = ETH_TP_MDI;
12318 		}
12319 	} else {
12320 		cmd->base.speed = SPEED_UNKNOWN;
12321 		cmd->base.duplex = DUPLEX_UNKNOWN;
12322 		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12323 	}
12324 	cmd->base.phy_address = tp->phy_addr;
12325 	cmd->base.autoneg = tp->link_config.autoneg;
12326 	return 0;
12327 }
12328 
12329 static int tg3_set_link_ksettings(struct net_device *dev,
12330 				  const struct ethtool_link_ksettings *cmd)
12331 {
12332 	struct tg3 *tp = netdev_priv(dev);
12333 	u32 speed = cmd->base.speed;
12334 	u32 advertising;
12335 
12336 	if (tg3_flag(tp, USE_PHYLIB)) {
12337 		struct phy_device *phydev;
12338 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12339 			return -EAGAIN;
12340 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12341 		return phy_ethtool_ksettings_set(phydev, cmd);
12342 	}
12343 
12344 	if (cmd->base.autoneg != AUTONEG_ENABLE &&
12345 	    cmd->base.autoneg != AUTONEG_DISABLE)
12346 		return -EINVAL;
12347 
12348 	if (cmd->base.autoneg == AUTONEG_DISABLE &&
12349 	    cmd->base.duplex != DUPLEX_FULL &&
12350 	    cmd->base.duplex != DUPLEX_HALF)
12351 		return -EINVAL;
12352 
12353 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
12354 						cmd->link_modes.advertising);
12355 
12356 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12357 		u32 mask = ADVERTISED_Autoneg |
12358 			   ADVERTISED_Pause |
12359 			   ADVERTISED_Asym_Pause;
12360 
12361 		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12362 			mask |= ADVERTISED_1000baseT_Half |
12363 				ADVERTISED_1000baseT_Full;
12364 
12365 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12366 			mask |= ADVERTISED_100baseT_Half |
12367 				ADVERTISED_100baseT_Full |
12368 				ADVERTISED_10baseT_Half |
12369 				ADVERTISED_10baseT_Full |
12370 				ADVERTISED_TP;
12371 		else
12372 			mask |= ADVERTISED_FIBRE;
12373 
12374 		if (advertising & ~mask)
12375 			return -EINVAL;
12376 
12377 		mask &= (ADVERTISED_1000baseT_Half |
12378 			 ADVERTISED_1000baseT_Full |
12379 			 ADVERTISED_100baseT_Half |
12380 			 ADVERTISED_100baseT_Full |
12381 			 ADVERTISED_10baseT_Half |
12382 			 ADVERTISED_10baseT_Full);
12383 
12384 		advertising &= mask;
12385 	} else {
12386 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12387 			if (speed != SPEED_1000)
12388 				return -EINVAL;
12389 
12390 			if (cmd->base.duplex != DUPLEX_FULL)
12391 				return -EINVAL;
12392 		} else {
12393 			if (speed != SPEED_100 &&
12394 			    speed != SPEED_10)
12395 				return -EINVAL;
12396 		}
12397 	}
12398 
12399 	tg3_full_lock(tp, 0);
12400 
12401 	tp->link_config.autoneg = cmd->base.autoneg;
12402 	if (cmd->base.autoneg == AUTONEG_ENABLE) {
12403 		tp->link_config.advertising = (advertising |
12404 					      ADVERTISED_Autoneg);
12405 		tp->link_config.speed = SPEED_UNKNOWN;
12406 		tp->link_config.duplex = DUPLEX_UNKNOWN;
12407 	} else {
12408 		tp->link_config.advertising = 0;
12409 		tp->link_config.speed = speed;
12410 		tp->link_config.duplex = cmd->base.duplex;
12411 	}
12412 
12413 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12414 
12415 	tg3_warn_mgmt_link_flap(tp);
12416 
12417 	if (netif_running(dev))
12418 		tg3_setup_phy(tp, true);
12419 
12420 	tg3_full_unlock(tp);
12421 
12422 	return 0;
12423 }
12424 
12425 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12426 {
12427 	struct tg3 *tp = netdev_priv(dev);
12428 
12429 	strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12430 	strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12431 	strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12432 }
12433 
12434 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12435 {
12436 	struct tg3 *tp = netdev_priv(dev);
12437 
12438 	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12439 		wol->supported = WAKE_MAGIC;
12440 	else
12441 		wol->supported = 0;
12442 	wol->wolopts = 0;
12443 	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12444 		wol->wolopts = WAKE_MAGIC;
12445 	memset(&wol->sopass, 0, sizeof(wol->sopass));
12446 }
12447 
12448 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12449 {
12450 	struct tg3 *tp = netdev_priv(dev);
12451 	struct device *dp = &tp->pdev->dev;
12452 
12453 	if (wol->wolopts & ~WAKE_MAGIC)
12454 		return -EINVAL;
12455 	if ((wol->wolopts & WAKE_MAGIC) &&
12456 	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12457 		return -EINVAL;
12458 
12459 	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12460 
12461 	if (device_may_wakeup(dp))
12462 		tg3_flag_set(tp, WOL_ENABLE);
12463 	else
12464 		tg3_flag_clear(tp, WOL_ENABLE);
12465 
12466 	return 0;
12467 }
12468 
12469 static u32 tg3_get_msglevel(struct net_device *dev)
12470 {
12471 	struct tg3 *tp = netdev_priv(dev);
12472 	return tp->msg_enable;
12473 }
12474 
12475 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12476 {
12477 	struct tg3 *tp = netdev_priv(dev);
12478 	tp->msg_enable = value;
12479 }
12480 
12481 static int tg3_nway_reset(struct net_device *dev)
12482 {
12483 	struct tg3 *tp = netdev_priv(dev);
12484 	int r;
12485 
12486 	if (!netif_running(dev))
12487 		return -EAGAIN;
12488 
12489 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12490 		return -EINVAL;
12491 
12492 	tg3_warn_mgmt_link_flap(tp);
12493 
12494 	if (tg3_flag(tp, USE_PHYLIB)) {
12495 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12496 			return -EAGAIN;
12497 		r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12498 	} else {
12499 		u32 bmcr;
12500 
12501 		spin_lock_bh(&tp->lock);
12502 		r = -EINVAL;
12503 		tg3_readphy(tp, MII_BMCR, &bmcr);
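		/* BMCR is deliberately read twice below; the first read is a
		 * dummy read (assumed here to flush a stale value), and only
		 * the second, error-checked read supplies the value used.
		 */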
12504 		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12505 		    ((bmcr & BMCR_ANENABLE) ||
12506 		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12507 			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12508 						   BMCR_ANENABLE);
12509 			r = 0;
12510 		}
12511 		spin_unlock_bh(&tp->lock);
12512 	}
12513 
12514 	return r;
12515 }
12516 
12517 static void tg3_get_ringparam(struct net_device *dev,
12518 			      struct ethtool_ringparam *ering,
12519 			      struct kernel_ethtool_ringparam *kernel_ering,
12520 			      struct netlink_ext_ack *extack)
12521 {
12522 	struct tg3 *tp = netdev_priv(dev);
12523 
12524 	ering->rx_max_pending = tp->rx_std_ring_mask;
12525 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12526 		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12527 	else
12528 		ering->rx_jumbo_max_pending = 0;
12529 
12530 	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12531 
12532 	ering->rx_pending = tp->rx_pending;
12533 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12534 		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12535 	else
12536 		ering->rx_jumbo_pending = 0;
12537 
12538 	ering->tx_pending = tp->napi[0].tx_pending;
12539 }
12540 
12541 static int tg3_set_ringparam(struct net_device *dev,
12542 			     struct ethtool_ringparam *ering,
12543 			     struct kernel_ethtool_ringparam *kernel_ering,
12544 			     struct netlink_ext_ack *extack)
12545 {
12546 	struct tg3 *tp = netdev_priv(dev);
12547 	int i, irq_sync = 0, err = 0;
12548 	bool reset_phy = false;
12549 
12550 	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12551 	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12552 	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12553 	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12554 	    (tg3_flag(tp, TSO_BUG) &&
12555 	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12556 		return -EINVAL;
12557 
12558 	if (netif_running(dev)) {
12559 		tg3_phy_stop(tp);
12560 		tg3_netif_stop(tp);
12561 		irq_sync = 1;
12562 	}
12563 
12564 	netdev_lock(dev);
12565 	tg3_full_lock(tp, irq_sync);
12566 
12567 	tp->rx_pending = ering->rx_pending;
12568 
12569 	if (tg3_flag(tp, MAX_RXPEND_64) &&
12570 	    tp->rx_pending > 63)
12571 		tp->rx_pending = 63;
12572 
12573 	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12574 		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12575 
12576 	for (i = 0; i < tp->irq_max; i++)
12577 		tp->napi[i].tx_pending = ering->tx_pending;
12578 
12579 	if (netif_running(dev)) {
12580 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12581 		/* Reset PHY to avoid PHY lock up */
12582 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12583 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12584 		    tg3_asic_rev(tp) == ASIC_REV_5720)
12585 			reset_phy = true;
12586 
12587 		err = tg3_restart_hw(tp, reset_phy);
12588 		if (!err)
12589 			tg3_netif_start(tp);
12590 	}
12591 
12592 	tg3_full_unlock(tp);
12593 	netdev_unlock(dev);
12594 
12595 	if (irq_sync && !err)
12596 		tg3_phy_start(tp);
12597 
12598 	return err;
12599 }
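
/* Editorial note, not part of the driver: the ring handlers above map to
 * ethtool -g/-G.  With a placeholder device "eth0":
 *
 *	ethtool -g eth0                # query ring sizes (tg3_get_ringparam)
 *	ethtool -G eth0 rx 511 tx 511  # resize rings (tg3_set_ringparam)
 *
 * tx must stay above MAX_SKB_FRAGS (three times that on TSO_BUG parts) or
 * tg3_set_ringparam() returns -EINVAL, per the checks above.
 */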
12600 
12601 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12602 {
12603 	struct tg3 *tp = netdev_priv(dev);
12604 
12605 	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12606 
12607 	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12608 		epause->rx_pause = 1;
12609 	else
12610 		epause->rx_pause = 0;
12611 
12612 	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12613 		epause->tx_pause = 1;
12614 	else
12615 		epause->tx_pause = 0;
12616 }
12617 
12618 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12619 {
12620 	struct tg3 *tp = netdev_priv(dev);
12621 	int err = 0;
12622 	bool reset_phy = false;
12623 
12624 	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12625 		tg3_warn_mgmt_link_flap(tp);
12626 
12627 	if (tg3_flag(tp, USE_PHYLIB)) {
12628 		struct phy_device *phydev;
12629 
12630 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12631 
12632 		if (!phy_validate_pause(phydev, epause))
12633 			return -EINVAL;
12634 
12635 		tp->link_config.flowctrl = 0;
12636 		phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12637 		if (epause->rx_pause) {
12638 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12639 
12640 			if (epause->tx_pause) {
12641 				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12642 			}
12643 		} else if (epause->tx_pause) {
12644 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12645 		}
12646 
12647 		if (epause->autoneg)
12648 			tg3_flag_set(tp, PAUSE_AUTONEG);
12649 		else
12650 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12651 
12652 		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12653 			if (phydev->autoneg) {
12654 				/* phy_set_asym_pause() will
12655 				 * renegotiate the link to inform our
12656 				 * link partner of our flow control
12657 				 * settings, even if the flow control
12658 				 * is forced.  Let tg3_adjust_link()
12659 				 * do the final flow control setup.
12660 				 */
12661 				return 0;
12662 			}
12663 
12664 			if (!epause->autoneg)
12665 				tg3_setup_flow_control(tp, 0, 0);
12666 		}
12667 	} else {
12668 		int irq_sync = 0;
12669 
12670 		if (netif_running(dev)) {
12671 			tg3_netif_stop(tp);
12672 			irq_sync = 1;
12673 		}
12674 
12675 		netdev_lock(dev);
12676 		tg3_full_lock(tp, irq_sync);
12677 
12678 		if (epause->autoneg)
12679 			tg3_flag_set(tp, PAUSE_AUTONEG);
12680 		else
12681 			tg3_flag_clear(tp, PAUSE_AUTONEG);
12682 		if (epause->rx_pause)
12683 			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12684 		else
12685 			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12686 		if (epause->tx_pause)
12687 			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12688 		else
12689 			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12690 
12691 		if (netif_running(dev)) {
12692 			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12693 			/* Reset PHY to avoid PHY lock up */
12694 			if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12695 			    tg3_asic_rev(tp) == ASIC_REV_5719 ||
12696 			    tg3_asic_rev(tp) == ASIC_REV_5720)
12697 				reset_phy = true;
12698 
12699 			err = tg3_restart_hw(tp, reset_phy);
12700 			if (!err)
12701 				tg3_netif_start(tp);
12702 		}
12703 
12704 		tg3_full_unlock(tp);
12705 		netdev_unlock(dev);
12706 	}
12707 
12708 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12709 
12710 	return err;
12711 }
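
/* Editorial note, not part of the driver: pause settings map to ethtool's
 * -a/-A options.  With a placeholder device "eth0":
 *
 *	ethtool -a eth0                    # tg3_get_pauseparam()
 *	ethtool -A eth0 autoneg off rx on  # tg3_set_pauseparam()
 */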
12712 
12713 static int tg3_get_sset_count(struct net_device *dev, int sset)
12714 {
12715 	switch (sset) {
12716 	case ETH_SS_TEST:
12717 		return TG3_NUM_TEST;
12718 	case ETH_SS_STATS:
12719 		return TG3_NUM_STATS;
12720 	default:
12721 		return -EOPNOTSUPP;
12722 	}
12723 }
12724 
12725 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12726 			 u32 *rules __always_unused)
12727 {
12728 	struct tg3 *tp = netdev_priv(dev);
12729 
12730 	if (!tg3_flag(tp, SUPPORT_MSIX))
12731 		return -EOPNOTSUPP;
12732 
12733 	switch (info->cmd) {
12734 	case ETHTOOL_GRXRINGS:
12735 		if (netif_running(tp->dev))
12736 			info->data = tp->rxq_cnt;
12737 		else {
12738 			info->data = num_online_cpus();
12739 			if (info->data > TG3_RSS_MAX_NUM_QS)
12740 				info->data = TG3_RSS_MAX_NUM_QS;
12741 		}
12742 
12743 		return 0;
12744 
12745 	default:
12746 		return -EOPNOTSUPP;
12747 	}
12748 }
12749 
12750 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12751 {
12752 	u32 size = 0;
12753 	struct tg3 *tp = netdev_priv(dev);
12754 
12755 	if (tg3_flag(tp, SUPPORT_MSIX))
12756 		size = TG3_RSS_INDIR_TBL_SIZE;
12757 
12758 	return size;
12759 }
12760 
12761 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12762 {
12763 	struct tg3 *tp = netdev_priv(dev);
12764 	int i;
12765 
12766 	rxfh->hfunc = ETH_RSS_HASH_TOP;
12767 	if (!rxfh->indir)
12768 		return 0;
12769 
12770 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12771 		rxfh->indir[i] = tp->rss_ind_tbl[i];
12772 
12773 	return 0;
12774 }
12775 
12776 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12777 			struct netlink_ext_ack *extack)
12778 {
12779 	struct tg3 *tp = netdev_priv(dev);
12780 	size_t i;
12781 
12782 	/* We require at least one supported parameter to be changed and no
12783 	 * change in any of the unsupported parameters
12784 	 */
12785 	if (rxfh->key ||
12786 	    (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12787 	     rxfh->hfunc != ETH_RSS_HASH_TOP))
12788 		return -EOPNOTSUPP;
12789 
12790 	if (!rxfh->indir)
12791 		return 0;
12792 
12793 	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12794 		tp->rss_ind_tbl[i] = rxfh->indir[i];
12795 
12796 	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12797 		return 0;
12798 
12799 	/* It is legal to write the indirection
12800 	 * table while the device is running.
12801 	 */
12802 	tg3_full_lock(tp, 0);
12803 	tg3_rss_write_indir_tbl(tp);
12804 	tg3_full_unlock(tp);
12805 
12806 	return 0;
12807 }
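
/* Editorial note, not part of the driver: the RSS indirection table above
 * is exposed through ethtool -x/-X.  With a placeholder device "eth0":
 *
 *	ethtool -x eth0          # dump the table (tg3_get_rxfh)
 *	ethtool -X eth0 equal 4  # spread flows over 4 rx queues (tg3_set_rxfh)
 */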
12808 
12809 static void tg3_get_channels(struct net_device *dev,
12810 			     struct ethtool_channels *channel)
12811 {
12812 	struct tg3 *tp = netdev_priv(dev);
12813 	u32 deflt_qs = netif_get_num_default_rss_queues();
12814 
12815 	channel->max_rx = tp->rxq_max;
12816 	channel->max_tx = tp->txq_max;
12817 
12818 	if (netif_running(dev)) {
12819 		channel->rx_count = tp->rxq_cnt;
12820 		channel->tx_count = tp->txq_cnt;
12821 	} else {
12822 		if (tp->rxq_req)
12823 			channel->rx_count = tp->rxq_req;
12824 		else
12825 			channel->rx_count = min(deflt_qs, tp->rxq_max);
12826 
12827 		if (tp->txq_req)
12828 			channel->tx_count = tp->txq_req;
12829 		else
12830 			channel->tx_count = min(deflt_qs, tp->txq_max);
12831 	}
12832 }
12833 
12834 static int tg3_set_channels(struct net_device *dev,
12835 			    struct ethtool_channels *channel)
12836 {
12837 	struct tg3 *tp = netdev_priv(dev);
12838 
12839 	if (!tg3_flag(tp, SUPPORT_MSIX))
12840 		return -EOPNOTSUPP;
12841 
12842 	if (channel->rx_count > tp->rxq_max ||
12843 	    channel->tx_count > tp->txq_max)
12844 		return -EINVAL;
12845 
12846 	tp->rxq_req = channel->rx_count;
12847 	tp->txq_req = channel->tx_count;
12848 
12849 	if (!netif_running(dev))
12850 		return 0;
12851 
12852 	tg3_stop(tp);
12853 
12854 	tg3_carrier_off(tp);
12855 
12856 	tg3_start(tp, true, false, false);
12857 
12858 	return 0;
12859 }
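
/* Editorial note, not part of the driver: channel counts map to ethtool
 * -l/-L.  With a placeholder device "eth0":
 *
 *	ethtool -l eth0            # tg3_get_channels()
 *	ethtool -L eth0 rx 4 tx 4  # tg3_set_channels(); note this restarts
 *	                           # the NIC if the interface is running
 */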
12860 
12861 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12862 {
12863 	switch (stringset) {
12864 	case ETH_SS_STATS:
12865 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12866 		break;
12867 	case ETH_SS_TEST:
12868 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12869 		break;
12870 	default:
12871 		WARN_ON(1);	/* unknown stringset - should never be reached */
12872 		break;
12873 	}
12874 }
12875 
12876 static int tg3_set_phys_id(struct net_device *dev,
12877 			    enum ethtool_phys_id_state state)
12878 {
12879 	struct tg3 *tp = netdev_priv(dev);
12880 
12881 	switch (state) {
12882 	case ETHTOOL_ID_ACTIVE:
12883 		return 1;	/* cycle on/off once per second */
12884 
12885 	case ETHTOOL_ID_ON:
12886 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12887 		     LED_CTRL_1000MBPS_ON |
12888 		     LED_CTRL_100MBPS_ON |
12889 		     LED_CTRL_10MBPS_ON |
12890 		     LED_CTRL_TRAFFIC_OVERRIDE |
12891 		     LED_CTRL_TRAFFIC_BLINK |
12892 		     LED_CTRL_TRAFFIC_LED);
12893 		break;
12894 
12895 	case ETHTOOL_ID_OFF:
12896 		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12897 		     LED_CTRL_TRAFFIC_OVERRIDE);
12898 		break;
12899 
12900 	case ETHTOOL_ID_INACTIVE:
12901 		tw32(MAC_LED_CTRL, tp->led_ctrl);
12902 		break;
12903 	}
12904 
12905 	return 0;
12906 }
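
/* Editorial note, not part of the driver: port identification is driven by
 * ethtool -p; e.g. "ethtool -p eth0 5" (with "eth0" a placeholder name)
 * blinks the LEDs for five seconds via the ETHTOOL_ID_ON/OFF cases above.
 */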
12907 
12908 static void tg3_get_ethtool_stats(struct net_device *dev,
12909 				   struct ethtool_stats *estats, u64 *tmp_stats)
12910 {
12911 	struct tg3 *tp = netdev_priv(dev);
12912 
12913 	if (tp->hw_stats)
12914 		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12915 	else
12916 		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12917 }
12918 
12919 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12920 {
12921 	int i;
12922 	__be32 *buf;
12923 	u32 offset = 0, len = 0;
12924 	u32 magic, val;
12925 
12926 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12927 		return NULL;
12928 
12929 	if (magic == TG3_EEPROM_MAGIC) {
12930 		for (offset = TG3_NVM_DIR_START;
12931 		     offset < TG3_NVM_DIR_END;
12932 		     offset += TG3_NVM_DIRENT_SIZE) {
12933 			if (tg3_nvram_read(tp, offset, &val))
12934 				return NULL;
12935 
12936 			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12937 			    TG3_NVM_DIRTYPE_EXTVPD)
12938 				break;
12939 		}
12940 
12941 		if (offset != TG3_NVM_DIR_END) {
12942 			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12943 			if (tg3_nvram_read(tp, offset + 4, &offset))
12944 				return NULL;
12945 
12946 			offset = tg3_nvram_logical_addr(tp, offset);
12947 		}
12948 
12949 		if (!offset || !len) {
12950 			offset = TG3_NVM_VPD_OFF;
12951 			len = TG3_NVM_VPD_LEN;
12952 		}
12953 
12954 		buf = kmalloc(len, GFP_KERNEL);
12955 		if (!buf)
12956 			return NULL;
12957 
12958 		for (i = 0; i < len; i += 4) {
12959 			/* The data is in little-endian format in NVRAM.
12960 			 * Use the big-endian read routines to preserve
12961 			 * the byte order as it exists in NVRAM.
12962 			 */
12963 			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12964 				goto error;
12965 		}
12966 		*vpdlen = len;
12967 	} else {
12968 		buf = pci_vpd_alloc(tp->pdev, vpdlen);
12969 		if (IS_ERR(buf))
12970 			return NULL;
12971 	}
12972 
12973 	return buf;
12974 
12975 error:
12976 	kfree(buf);
12977 	return NULL;
12978 }
12979 
12980 #define NVRAM_TEST_SIZE 0x100
12981 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12982 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12983 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12984 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12985 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12986 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12987 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12988 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12989 
12990 static int tg3_test_nvram(struct tg3 *tp)
12991 {
12992 	u32 csum, magic;
12993 	__be32 *buf;
12994 	int i, j, k, err = 0, size;
12995 	unsigned int len;
12996 
12997 	if (tg3_flag(tp, NO_NVRAM))
12998 		return 0;
12999 
13000 	if (tg3_nvram_read(tp, 0, &magic) != 0)
13001 		return -EIO;
13002 
13003 	if (magic == TG3_EEPROM_MAGIC)
13004 		size = NVRAM_TEST_SIZE;
13005 	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
13006 		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
13007 		    TG3_EEPROM_SB_FORMAT_1) {
13008 			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
13009 			case TG3_EEPROM_SB_REVISION_0:
13010 				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
13011 				break;
13012 			case TG3_EEPROM_SB_REVISION_2:
13013 				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13014 				break;
13015 			case TG3_EEPROM_SB_REVISION_3:
13016 				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13017 				break;
13018 			case TG3_EEPROM_SB_REVISION_4:
13019 				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13020 				break;
13021 			case TG3_EEPROM_SB_REVISION_5:
13022 				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13023 				break;
13024 			case TG3_EEPROM_SB_REVISION_6:
13025 				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13026 				break;
13027 			default:
13028 				return -EIO;
13029 			}
13030 		} else
13031 			return 0;
13032 	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13033 		size = NVRAM_SELFBOOT_HW_SIZE;
13034 	else
13035 		return -EIO;
13036 
13037 	buf = kmalloc(size, GFP_KERNEL);
13038 	if (buf == NULL)
13039 		return -ENOMEM;
13040 
13041 	err = -EIO;
13042 	for (i = 0, j = 0; i < size; i += 4, j++) {
13043 		err = tg3_nvram_read_be32(tp, i, &buf[j]);
13044 		if (err)
13045 			break;
13046 	}
13047 	if (i < size)
13048 		goto out;
13049 
13050 	/* Selfboot format */
13051 	magic = be32_to_cpu(buf[0]);
13052 	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13053 	    TG3_EEPROM_MAGIC_FW) {
13054 		u8 *buf8 = (u8 *) buf, csum8 = 0;
13055 
13056 		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13057 		    TG3_EEPROM_SB_REVISION_2) {
13058 			/* For rev 2, the csum doesn't include the MBA. */
13059 			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13060 				csum8 += buf8[i];
13061 			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13062 				csum8 += buf8[i];
13063 		} else {
13064 			for (i = 0; i < size; i++)
13065 				csum8 += buf8[i];
13066 		}
13067 
13068 		if (csum8 == 0) {
13069 			err = 0;
13070 			goto out;
13071 		}
13072 
13073 		err = -EIO;
13074 		goto out;
13075 	}
13076 
13077 	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13078 	    TG3_EEPROM_MAGIC_HW) {
13079 		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13080 		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13081 		u8 *buf8 = (u8 *) buf;
13082 
13083 		/* Separate the parity bits and the data bytes.  */
13084 		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13085 			if ((i == 0) || (i == 8)) {
13086 				int l;
13087 				u8 msk;
13088 
13089 				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13090 					parity[k++] = buf8[i] & msk;
13091 				i++;
13092 			} else if (i == 16) {
13093 				int l;
13094 				u8 msk;
13095 
13096 				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13097 					parity[k++] = buf8[i] & msk;
13098 				i++;
13099 
13100 				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13101 					parity[k++] = buf8[i] & msk;
13102 				i++;
13103 			}
13104 			data[j++] = buf8[i];
13105 		}
13106 
13107 		err = -EIO;
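		/* Odd-parity check: a data byte with an odd popcount must
		 * have its parity bit clear, an even one must have it set,
		 * so every byte+parity pair carries odd overall parity.
		 */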
13108 		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13109 			u8 hw8 = hweight8(data[i]);
13110 
13111 			if ((hw8 & 0x1) && parity[i])
13112 				goto out;
13113 			else if (!(hw8 & 0x1) && !parity[i])
13114 				goto out;
13115 		}
13116 		err = 0;
13117 		goto out;
13118 	}
13119 
13120 	err = -EIO;
13121 
13122 	/* Bootstrap checksum at offset 0x10 */
13123 	csum = calc_crc((unsigned char *) buf, 0x10);
13124 
13125 	/* The type of buf is __be32 *, but this value is __le32 */
13126 	if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13127 		goto out;
13128 
13129 	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13130 	csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13131 
13132 	/* The type of buf is __be32 *, but this value is __le32 */
13133 	if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13134 		goto out;
13135 
13136 	kfree(buf);
13137 
13138 	buf = tg3_vpd_readblock(tp, &len);
13139 	if (!buf)
13140 		return -ENOMEM;
13141 
13142 	err = pci_vpd_check_csum(buf, len);
13143 	/* go on if no checksum found */
13144 	if (err == 1)
13145 		err = 0;
13146 out:
13147 	kfree(buf);
13148 	return err;
13149 }
13150 
13151 #define TG3_SERDES_TIMEOUT_SEC	2
13152 #define TG3_COPPER_TIMEOUT_SEC	6
13153 
13154 static int tg3_test_link(struct tg3 *tp)
13155 {
13156 	int i, max;
13157 
13158 	if (!netif_running(tp->dev))
13159 		return -ENODEV;
13160 
13161 	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13162 		max = TG3_SERDES_TIMEOUT_SEC;
13163 	else
13164 		max = TG3_COPPER_TIMEOUT_SEC;
13165 
13166 	for (i = 0; i < max; i++) {
13167 		if (tp->link_up)
13168 			return 0;
13169 
13170 		if (msleep_interruptible(1000))
13171 			break;
13172 	}
13173 
13174 	return -EIO;
13175 }
13176 
13177 /* Only test the commonly used registers */
13178 static int tg3_test_registers(struct tg3 *tp)
13179 {
13180 	int i, is_5705, is_5750;
13181 	u32 offset, read_mask, write_mask, val, save_val, read_val;
13182 	static struct {
13183 		u16 offset;
13184 		u16 flags;
13185 #define TG3_FL_5705	0x1
13186 #define TG3_FL_NOT_5705	0x2
13187 #define TG3_FL_NOT_5788	0x4
13188 #define TG3_FL_NOT_5750	0x8
13189 		u32 read_mask;
13190 		u32 write_mask;
13191 	} reg_tbl[] = {
13192 		/* MAC Control Registers */
13193 		{ MAC_MODE, TG3_FL_NOT_5705,
13194 			0x00000000, 0x00ef6f8c },
13195 		{ MAC_MODE, TG3_FL_5705,
13196 			0x00000000, 0x01ef6b8c },
13197 		{ MAC_STATUS, TG3_FL_NOT_5705,
13198 			0x03800107, 0x00000000 },
13199 		{ MAC_STATUS, TG3_FL_5705,
13200 			0x03800100, 0x00000000 },
13201 		{ MAC_ADDR_0_HIGH, 0x0000,
13202 			0x00000000, 0x0000ffff },
13203 		{ MAC_ADDR_0_LOW, 0x0000,
13204 			0x00000000, 0xffffffff },
13205 		{ MAC_RX_MTU_SIZE, 0x0000,
13206 			0x00000000, 0x0000ffff },
13207 		{ MAC_TX_MODE, 0x0000,
13208 			0x00000000, 0x00000070 },
13209 		{ MAC_TX_LENGTHS, 0x0000,
13210 			0x00000000, 0x00003fff },
13211 		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13212 			0x00000000, 0x000007fc },
13213 		{ MAC_RX_MODE, TG3_FL_5705,
13214 			0x00000000, 0x000007dc },
13215 		{ MAC_HASH_REG_0, 0x0000,
13216 			0x00000000, 0xffffffff },
13217 		{ MAC_HASH_REG_1, 0x0000,
13218 			0x00000000, 0xffffffff },
13219 		{ MAC_HASH_REG_2, 0x0000,
13220 			0x00000000, 0xffffffff },
13221 		{ MAC_HASH_REG_3, 0x0000,
13222 			0x00000000, 0xffffffff },
13223 
13224 		/* Receive Data and Receive BD Initiator Control Registers. */
13225 		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13226 			0x00000000, 0xffffffff },
13227 		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13228 			0x00000000, 0xffffffff },
13229 		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13230 			0x00000000, 0x00000003 },
13231 		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13232 			0x00000000, 0xffffffff },
13233 		{ RCVDBDI_STD_BD+0, 0x0000,
13234 			0x00000000, 0xffffffff },
13235 		{ RCVDBDI_STD_BD+4, 0x0000,
13236 			0x00000000, 0xffffffff },
13237 		{ RCVDBDI_STD_BD+8, 0x0000,
13238 			0x00000000, 0xffff0002 },
13239 		{ RCVDBDI_STD_BD+0xc, 0x0000,
13240 			0x00000000, 0xffffffff },
13241 
13242 		/* Receive BD Initiator Control Registers. */
13243 		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13244 			0x00000000, 0xffffffff },
13245 		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13246 			0x00000000, 0x000003ff },
13247 		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13248 			0x00000000, 0xffffffff },
13249 
13250 		/* Host Coalescing Control Registers. */
13251 		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13252 			0x00000000, 0x00000004 },
13253 		{ HOSTCC_MODE, TG3_FL_5705,
13254 			0x00000000, 0x000000f6 },
13255 		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13256 			0x00000000, 0xffffffff },
13257 		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13258 			0x00000000, 0x000003ff },
13259 		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13260 			0x00000000, 0xffffffff },
13261 		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13262 			0x00000000, 0x000003ff },
13263 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13264 			0x00000000, 0xffffffff },
13265 		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13266 			0x00000000, 0x000000ff },
13267 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13268 			0x00000000, 0xffffffff },
13269 		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13270 			0x00000000, 0x000000ff },
13271 		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13272 			0x00000000, 0xffffffff },
13273 		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13274 			0x00000000, 0xffffffff },
13275 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13276 			0x00000000, 0xffffffff },
13277 		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13278 			0x00000000, 0x000000ff },
13279 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13280 			0x00000000, 0xffffffff },
13281 		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13282 			0x00000000, 0x000000ff },
13283 		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13284 			0x00000000, 0xffffffff },
13285 		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13286 			0x00000000, 0xffffffff },
13287 		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13288 			0x00000000, 0xffffffff },
13289 		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13290 			0x00000000, 0xffffffff },
13291 		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13292 			0x00000000, 0xffffffff },
13293 		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13294 			0xffffffff, 0x00000000 },
13295 		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13296 			0xffffffff, 0x00000000 },
13297 
13298 		/* Buffer Manager Control Registers. */
13299 		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13300 			0x00000000, 0x007fff80 },
13301 		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13302 			0x00000000, 0x007fffff },
13303 		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13304 			0x00000000, 0x0000003f },
13305 		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13306 			0x00000000, 0x000001ff },
13307 		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13308 			0x00000000, 0x000001ff },
13309 		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13310 			0xffffffff, 0x00000000 },
13311 		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13312 			0xffffffff, 0x00000000 },
13313 
13314 		/* Mailbox Registers */
13315 		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13316 			0x00000000, 0x000001ff },
13317 		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13318 			0x00000000, 0x000001ff },
13319 		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13320 			0x00000000, 0x000007ff },
13321 		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13322 			0x00000000, 0x000001ff },
13323 
13324 		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13325 	};
13326 
13327 	is_5705 = is_5750 = 0;
13328 	if (tg3_flag(tp, 5705_PLUS)) {
13329 		is_5705 = 1;
13330 		if (tg3_flag(tp, 5750_PLUS))
13331 			is_5750 = 1;
13332 	}
13333 
13334 	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13335 		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13336 			continue;
13337 
13338 		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13339 			continue;
13340 
13341 		if (tg3_flag(tp, IS_5788) &&
13342 		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13343 			continue;
13344 
13345 		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13346 			continue;
13347 
13348 		offset = (u32) reg_tbl[i].offset;
13349 		read_mask = reg_tbl[i].read_mask;
13350 		write_mask = reg_tbl[i].write_mask;
13351 
13352 		/* Save the original register content */
13353 		save_val = tr32(offset);
13354 
13355 		/* Determine the read-only value. */
13356 		read_val = save_val & read_mask;
13357 
13358 		/* Write zero to the register, then make sure the read-only bits
13359 		 * are not changed and the read/write bits are all zeros.
13360 		 */
13361 		tw32(offset, 0);
13362 
13363 		val = tr32(offset);
13364 
13365 		/* Test the read-only and read/write bits. */
13366 		if (((val & read_mask) != read_val) || (val & write_mask))
13367 			goto out;
13368 
13369 		/* Write ones to all the bits defined by RdMask and WrMask, then
13370 		 * make sure the read-only bits are not changed and the
13371 		 * read/write bits are all ones.
13372 		 */
13373 		tw32(offset, read_mask | write_mask);
13374 
13375 		val = tr32(offset);
13376 
13377 		/* Test the read-only bits. */
13378 		if ((val & read_mask) != read_val)
13379 			goto out;
13380 
13381 		/* Test the read/write bits. */
13382 		if ((val & write_mask) != write_mask)
13383 			goto out;
13384 
13385 		tw32(offset, save_val);
13386 	}
13387 
13388 	return 0;
13389 
13390 out:
13391 	if (netif_msg_hw(tp))
13392 		netdev_err(tp->dev,
13393 			   "Register test failed at offset %x\n", offset);
13394 	tw32(offset, save_val);
13395 	return -EIO;
13396 }
13397 
13398 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13399 {
13400 	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13401 	int i;
13402 	u32 j;
13403 
13404 	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13405 		for (j = 0; j < len; j += 4) {
13406 			u32 val;
13407 
13408 			tg3_write_mem(tp, offset + j, test_pattern[i]);
13409 			tg3_read_mem(tp, offset + j, &val);
13410 			if (val != test_pattern[i])
13411 				return -EIO;
13412 		}
13413 	}
13414 	return 0;
13415 }
13416 
13417 static int tg3_test_memory(struct tg3 *tp)
13418 {
13419 	static struct mem_entry {
13420 		u32 offset;
13421 		u32 len;
13422 	} mem_tbl_570x[] = {
13423 		{ 0x00000000, 0x00b50},
13424 		{ 0x00002000, 0x1c000},
13425 		{ 0xffffffff, 0x00000}
13426 	}, mem_tbl_5705[] = {
13427 		{ 0x00000100, 0x0000c},
13428 		{ 0x00000200, 0x00008},
13429 		{ 0x00004000, 0x00800},
13430 		{ 0x00006000, 0x01000},
13431 		{ 0x00008000, 0x02000},
13432 		{ 0x00010000, 0x0e000},
13433 		{ 0xffffffff, 0x00000}
13434 	}, mem_tbl_5755[] = {
13435 		{ 0x00000200, 0x00008},
13436 		{ 0x00004000, 0x00800},
13437 		{ 0x00006000, 0x00800},
13438 		{ 0x00008000, 0x02000},
13439 		{ 0x00010000, 0x0c000},
13440 		{ 0xffffffff, 0x00000}
13441 	}, mem_tbl_5906[] = {
13442 		{ 0x00000200, 0x00008},
13443 		{ 0x00004000, 0x00400},
13444 		{ 0x00006000, 0x00400},
13445 		{ 0x00008000, 0x01000},
13446 		{ 0x00010000, 0x01000},
13447 		{ 0xffffffff, 0x00000}
13448 	}, mem_tbl_5717[] = {
13449 		{ 0x00000200, 0x00008},
13450 		{ 0x00010000, 0x0a000},
13451 		{ 0x00020000, 0x13c00},
13452 		{ 0xffffffff, 0x00000}
13453 	}, mem_tbl_57765[] = {
13454 		{ 0x00000200, 0x00008},
13455 		{ 0x00004000, 0x00800},
13456 		{ 0x00006000, 0x09800},
13457 		{ 0x00010000, 0x0a000},
13458 		{ 0xffffffff, 0x00000}
13459 	};
13460 	struct mem_entry *mem_tbl;
13461 	int err = 0;
13462 	int i;
13463 
13464 	if (tg3_flag(tp, 5717_PLUS))
13465 		mem_tbl = mem_tbl_5717;
13466 	else if (tg3_flag(tp, 57765_CLASS) ||
13467 		 tg3_asic_rev(tp) == ASIC_REV_5762)
13468 		mem_tbl = mem_tbl_57765;
13469 	else if (tg3_flag(tp, 5755_PLUS))
13470 		mem_tbl = mem_tbl_5755;
13471 	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13472 		mem_tbl = mem_tbl_5906;
13473 	else if (tg3_flag(tp, 5705_PLUS))
13474 		mem_tbl = mem_tbl_5705;
13475 	else
13476 		mem_tbl = mem_tbl_570x;
13477 
13478 	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13479 		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13480 		if (err)
13481 			break;
13482 	}
13483 
13484 	return err;
13485 }
13486 
13487 #define TG3_TSO_MSS		500
13488 
13489 #define TG3_TSO_IP_HDR_LEN	20
13490 #define TG3_TSO_TCP_HDR_LEN	20
13491 #define TG3_TSO_TCP_OPT_LEN	12
13492 
13493 static const u8 tg3_tso_header[] = {
13494 0x08, 0x00,
13495 0x45, 0x00, 0x00, 0x00,
13496 0x00, 0x00, 0x40, 0x00,
13497 0x40, 0x06, 0x00, 0x00,
13498 0x0a, 0x00, 0x00, 0x01,
13499 0x0a, 0x00, 0x00, 0x02,
13500 0x0d, 0x00, 0xe0, 0x00,
13501 0x00, 0x00, 0x01, 0x00,
13502 0x00, 0x00, 0x02, 0x00,
13503 0x80, 0x10, 0x10, 0x00,
13504 0x14, 0x09, 0x00, 0x00,
13505 0x01, 0x01, 0x08, 0x0a,
13506 0x11, 0x11, 0x11, 0x11,
13507 0x11, 0x11, 0x11, 0x11,
13508 };
13509 
13510 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13511 {
13512 	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13513 	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13514 	u32 budget;
13515 	struct sk_buff *skb;
13516 	u8 *tx_data, *rx_data;
13517 	dma_addr_t map;
13518 	int num_pkts, tx_len, rx_len, i, err;
13519 	struct tg3_rx_buffer_desc *desc;
13520 	struct tg3_napi *tnapi, *rnapi;
13521 	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13522 
13523 	tnapi = &tp->napi[0];
13524 	rnapi = &tp->napi[0];
13525 	if (tp->irq_cnt > 1) {
13526 		if (tg3_flag(tp, ENABLE_RSS))
13527 			rnapi = &tp->napi[1];
13528 		if (tg3_flag(tp, ENABLE_TSS))
13529 			tnapi = &tp->napi[1];
13530 	}
13531 	coal_now = tnapi->coal_now | rnapi->coal_now;
13532 
13533 	err = -EIO;
13534 
13535 	tx_len = pktsz;
13536 	skb = netdev_alloc_skb(tp->dev, tx_len);
13537 	if (!skb)
13538 		return -ENOMEM;
13539 
13540 	tx_data = skb_put(skb, tx_len);
13541 	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13542 	memset(tx_data + ETH_ALEN, 0x0, 8);
13543 
13544 	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13545 
13546 	if (tso_loopback) {
13547 		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13548 
13549 		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13550 			      TG3_TSO_TCP_OPT_LEN;
13551 
13552 		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13553 		       sizeof(tg3_tso_header));
13554 		mss = TG3_TSO_MSS;
13555 
13556 		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13557 		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13558 
13559 		/* Set the total length field in the IP header */
13560 		iph->tot_len = htons((u16)(mss + hdr_len));
13561 
13562 		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13563 			      TXD_FLAG_CPU_POST_DMA);
13564 
13565 		if (tg3_flag(tp, HW_TSO_1) ||
13566 		    tg3_flag(tp, HW_TSO_2) ||
13567 		    tg3_flag(tp, HW_TSO_3)) {
13568 			struct tcphdr *th;
13569 			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13570 			th = (struct tcphdr *)&tx_data[val];
13571 			th->check = 0;
13572 		} else
13573 			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13574 
13575 		if (tg3_flag(tp, HW_TSO_3)) {
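		/* Fold the LSO header length into the mss word and base_flags
		 * in the format each HW TSO generation expects, mirroring the
		 * encoding used on the normal transmit path.
		 */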
13576 			mss |= (hdr_len & 0xc) << 12;
13577 			if (hdr_len & 0x10)
13578 				base_flags |= 0x00000010;
13579 			base_flags |= (hdr_len & 0x3e0) << 5;
13580 		} else if (tg3_flag(tp, HW_TSO_2))
13581 			mss |= hdr_len << 9;
13582 		else if (tg3_flag(tp, HW_TSO_1) ||
13583 			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13584 			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13585 		} else {
13586 			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13587 		}
13588 
13589 		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13590 	} else {
13591 		num_pkts = 1;
13592 		data_off = ETH_HLEN;
13593 
13594 		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13595 		    tx_len > VLAN_ETH_FRAME_LEN)
13596 			base_flags |= TXD_FLAG_JMB_PKT;
13597 	}
13598 
13599 	for (i = data_off; i < tx_len; i++)
13600 		tx_data[i] = (u8) (i & 0xff);
13601 
13602 	map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13603 	if (dma_mapping_error(&tp->pdev->dev, map)) {
13604 		dev_kfree_skb(skb);
13605 		return -EIO;
13606 	}
13607 
13608 	val = tnapi->tx_prod;
13609 	tnapi->tx_buffers[val].skb = skb;
13610 	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13611 
13612 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13613 	       rnapi->coal_now);
13614 
13615 	udelay(10);
13616 
13617 	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13618 
13619 	budget = tg3_tx_avail(tnapi);
13620 	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13621 			    base_flags | TXD_FLAG_END, mss, 0)) {
13622 		tnapi->tx_buffers[val].skb = NULL;
13623 		dev_kfree_skb(skb);
13624 		return -EIO;
13625 	}
13626 
13627 	tnapi->tx_prod++;
13628 
13629 	/* Sync BD data before updating mailbox */
13630 	wmb();
13631 
13632 	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13633 	tr32_mailbox(tnapi->prodmbox);
13634 
13635 	udelay(10);
13636 
13637 	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13638 	for (i = 0; i < 35; i++) {
13639 		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13640 		       coal_now);
13641 
13642 		udelay(10);
13643 
13644 		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13645 		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13646 		if ((tx_idx == tnapi->tx_prod) &&
13647 		    (rx_idx == (rx_start_idx + num_pkts)))
13648 			break;
13649 	}
13650 
13651 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13652 	dev_kfree_skb(skb);
13653 
13654 	if (tx_idx != tnapi->tx_prod)
13655 		goto out;
13656 
13657 	if (rx_idx != rx_start_idx + num_pkts)
13658 		goto out;
13659 
13660 	val = data_off;
13661 	while (rx_idx != rx_start_idx) {
13662 		desc = &rnapi->rx_rcb[rx_start_idx++];
13663 		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13664 		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13665 
13666 		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13667 		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13668 			goto out;
13669 
13670 		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13671 			 - ETH_FCS_LEN;
13672 
13673 		if (!tso_loopback) {
13674 			if (rx_len != tx_len)
13675 				goto out;
13676 
13677 			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13678 				if (opaque_key != RXD_OPAQUE_RING_STD)
13679 					goto out;
13680 			} else {
13681 				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13682 					goto out;
13683 			}
13684 		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13685 			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13686 			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13687 			goto out;
13688 		}
13689 
13690 		if (opaque_key == RXD_OPAQUE_RING_STD) {
13691 			rx_data = tpr->rx_std_buffers[desc_idx].data;
13692 			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13693 					     mapping);
13694 		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13695 			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13696 			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13697 					     mapping);
13698 		} else
13699 			goto out;
13700 
13701 		dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13702 					DMA_FROM_DEVICE);
13703 
13704 		rx_data += TG3_RX_OFFSET(tp);
13705 		for (i = data_off; i < rx_len; i++, val++) {
13706 			if (*(rx_data + i) != (u8) (val & 0xff))
13707 				goto out;
13708 		}
13709 	}
13710 
13711 	err = 0;
13712 
13713 	/* tg3_free_rings will unmap and free the rx_data */
13714 out:
13715 	return err;
13716 }
13717 
13718 #define TG3_STD_LOOPBACK_FAILED		1
13719 #define TG3_JMB_LOOPBACK_FAILED		2
13720 #define TG3_TSO_LOOPBACK_FAILED		4
13721 #define TG3_LOOPBACK_FAILED \
13722 	(TG3_STD_LOOPBACK_FAILED | \
13723 	 TG3_JMB_LOOPBACK_FAILED | \
13724 	 TG3_TSO_LOOPBACK_FAILED)
13725 
13726 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13727 {
13728 	int err = -EIO;
13729 	u32 eee_cap;
13730 	u32 jmb_pkt_sz = 9000;
13731 
13732 	if (tp->dma_limit)
13733 		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13734 
13735 	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13736 	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13737 
13738 	if (!netif_running(tp->dev)) {
13739 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13740 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13741 		if (do_extlpbk)
13742 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13743 		goto done;
13744 	}
13745 
13746 	err = tg3_reset_hw(tp, true);
13747 	if (err) {
13748 		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13749 		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13750 		if (do_extlpbk)
13751 			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13752 		goto done;
13753 	}
13754 
13755 	if (tg3_flag(tp, ENABLE_RSS)) {
13756 		int i;
13757 
13758 		/* Reroute all rx packets to the 1st queue */
13759 		for (i = MAC_RSS_INDIR_TBL_0;
13760 		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13761 			tw32(i, 0x0);
13762 	}
13763 
13764 	/* HW errata - mac loopback fails in some cases on 5780.
13765 	 * Normal traffic and PHY loopback are not affected by
13766 	 * errata.  Also, the MAC loopback test is deprecated for
13767 	 * all newer ASIC revisions.
13768 	 */
13769 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13770 	    !tg3_flag(tp, CPMU_PRESENT)) {
13771 		tg3_mac_loopback(tp, true);
13772 
13773 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13774 			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13775 
13776 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13777 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13778 			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13779 
13780 		tg3_mac_loopback(tp, false);
13781 	}
13782 
13783 	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13784 	    !tg3_flag(tp, USE_PHYLIB)) {
13785 		int i;
13786 
13787 		tg3_phy_lpbk_set(tp, 0, false);
13788 
13789 		/* Wait for link */
13790 		for (i = 0; i < 100; i++) {
13791 			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13792 				break;
13793 			mdelay(1);
13794 		}
13795 
13796 		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13797 			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13798 		if (tg3_flag(tp, TSO_CAPABLE) &&
13799 		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13800 			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13801 		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13802 		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13803 			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13804 
13805 		if (do_extlpbk) {
13806 			tg3_phy_lpbk_set(tp, 0, true);
13807 
13808 			/* All link indications report up, but the hardware
13809 			 * isn't really ready for about 20 msec.  Double it
13810 			 * to be sure.
13811 			 */
13812 			mdelay(40);
13813 
13814 			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13815 				data[TG3_EXT_LOOPB_TEST] |=
13816 							TG3_STD_LOOPBACK_FAILED;
13817 			if (tg3_flag(tp, TSO_CAPABLE) &&
13818 			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13819 				data[TG3_EXT_LOOPB_TEST] |=
13820 							TG3_TSO_LOOPBACK_FAILED;
13821 			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13822 			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13823 				data[TG3_EXT_LOOPB_TEST] |=
13824 							TG3_JMB_LOOPBACK_FAILED;
13825 		}
13826 
13827 		/* Re-enable gphy autopowerdown. */
13828 		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13829 			tg3_phy_toggle_apd(tp, true);
13830 	}
13831 
13832 	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13833 	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13834 
13835 done:
13836 	tp->phy_flags |= eee_cap;
13837 
13838 	return err;
13839 }
13840 
13841 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13842 			  u64 *data)
13843 {
13844 	struct tg3 *tp = netdev_priv(dev);
13845 	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13846 
13847 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13848 		if (tg3_power_up(tp)) {
13849 			etest->flags |= ETH_TEST_FL_FAILED;
13850 			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13851 			return;
13852 		}
13853 		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13854 	}
13855 
13856 	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13857 
13858 	if (tg3_test_nvram(tp) != 0) {
13859 		etest->flags |= ETH_TEST_FL_FAILED;
13860 		data[TG3_NVRAM_TEST] = 1;
13861 	}
13862 	if (!doextlpbk && tg3_test_link(tp)) {
13863 		etest->flags |= ETH_TEST_FL_FAILED;
13864 		data[TG3_LINK_TEST] = 1;
13865 	}
13866 	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13867 		int err, err2 = 0, irq_sync = 0;
13868 
13869 		if (netif_running(dev)) {
13870 			tg3_phy_stop(tp);
13871 			tg3_netif_stop(tp);
13872 			irq_sync = 1;
13873 		}
13874 
13875 		tg3_full_lock(tp, irq_sync);
13876 		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13877 		err = tg3_nvram_lock(tp);
13878 		tg3_halt_cpu(tp, RX_CPU_BASE);
13879 		if (!tg3_flag(tp, 5705_PLUS))
13880 			tg3_halt_cpu(tp, TX_CPU_BASE);
13881 		if (!err)
13882 			tg3_nvram_unlock(tp);
13883 
13884 		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13885 			tg3_phy_reset(tp);
13886 
13887 		if (tg3_test_registers(tp) != 0) {
13888 			etest->flags |= ETH_TEST_FL_FAILED;
13889 			data[TG3_REGISTER_TEST] = 1;
13890 		}
13891 
13892 		if (tg3_test_memory(tp) != 0) {
13893 			etest->flags |= ETH_TEST_FL_FAILED;
13894 			data[TG3_MEMORY_TEST] = 1;
13895 		}
13896 
13897 		if (doextlpbk)
13898 			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13899 
13900 		if (tg3_test_loopback(tp, data, doextlpbk))
13901 			etest->flags |= ETH_TEST_FL_FAILED;
13902 
13903 		tg3_full_unlock(tp);
13904 
13905 		if (tg3_test_interrupt(tp) != 0) {
13906 			etest->flags |= ETH_TEST_FL_FAILED;
13907 			data[TG3_INTERRUPT_TEST] = 1;
13908 		}
13909 
13910 		netdev_lock(dev);
13911 		tg3_full_lock(tp, 0);
13912 
13913 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13914 		if (netif_running(dev)) {
13915 			tg3_flag_set(tp, INIT_COMPLETE);
13916 			err2 = tg3_restart_hw(tp, true);
13917 			if (!err2)
13918 				tg3_netif_start(tp);
13919 		}
13920 
13921 		tg3_full_unlock(tp);
13922 		netdev_unlock(dev);
13923 
13924 		if (irq_sync && !err2)
13925 			tg3_phy_start(tp);
13926 	}
13927 	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13928 		tg3_power_down_prepare(tp);
13929 
13930 }
13931 
13932 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13933 {
13934 	struct tg3 *tp = netdev_priv(dev);
13935 	struct hwtstamp_config stmpconf;
13936 
13937 	if (!tg3_flag(tp, PTP_CAPABLE))
13938 		return -EOPNOTSUPP;
13939 
13940 	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13941 		return -EFAULT;
13942 
13943 	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13944 	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13945 		return -ERANGE;
13946 
13947 	switch (stmpconf.rx_filter) {
13948 	case HWTSTAMP_FILTER_NONE:
13949 		tp->rxptpctl = 0;
13950 		break;
13951 	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13952 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13953 			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13954 		break;
13955 	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13956 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13957 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13958 		break;
13959 	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13960 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13961 			       TG3_RX_PTP_CTL_DELAY_REQ;
13962 		break;
13963 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13964 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13965 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13966 		break;
13967 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13968 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13969 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13970 		break;
13971 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13972 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13973 			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13974 		break;
13975 	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13976 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13977 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13978 		break;
13979 	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13980 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13981 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13982 		break;
13983 	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13984 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13985 			       TG3_RX_PTP_CTL_SYNC_EVNT;
13986 		break;
13987 	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13988 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13989 			       TG3_RX_PTP_CTL_DELAY_REQ;
13990 		break;
13991 	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13992 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13993 			       TG3_RX_PTP_CTL_DELAY_REQ;
13994 		break;
13995 	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13996 		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13997 			       TG3_RX_PTP_CTL_DELAY_REQ;
13998 		break;
13999 	default:
14000 		return -ERANGE;
14001 	}
14002 
14003 	if (netif_running(dev) && tp->rxptpctl)
14004 		tw32(TG3_RX_PTP_CTL,
14005 		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14006 
14007 	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
14008 		tg3_flag_set(tp, TX_TSTAMP_EN);
14009 	else
14010 		tg3_flag_clear(tp, TX_TSTAMP_EN);
14011 
14012 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14013 		-EFAULT : 0;
14014 }
14015 
14016 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
14017 {
14018 	struct tg3 *tp = netdev_priv(dev);
14019 	struct hwtstamp_config stmpconf;
14020 
14021 	if (!tg3_flag(tp, PTP_CAPABLE))
14022 		return -EOPNOTSUPP;
14023 
14024 	stmpconf.flags = 0;
14025 	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14026 			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
14027 
14028 	switch (tp->rxptpctl) {
14029 	case 0:
14030 		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14031 		break;
14032 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14033 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14034 		break;
14035 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14036 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14037 		break;
14038 	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14039 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14040 		break;
14041 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14042 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14043 		break;
14044 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14045 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14046 		break;
14047 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14048 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14049 		break;
14050 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14051 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14052 		break;
14053 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14054 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14055 		break;
14056 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14057 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14058 		break;
14059 	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14060 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14061 		break;
14062 	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14063 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14064 		break;
14065 	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14066 		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14067 		break;
14068 	default:
14069 		WARN_ON_ONCE(1);
14070 		return -ERANGE;
14071 	}
14072 
14073 	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14074 		-EFAULT : 0;
14075 }
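
/* Editorial sketch, not part of the driver: the two handlers above service
 * the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls.  From userspace, assuming an open
 * datagram socket fd and a placeholder interface "eth0":
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */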
14076 
14077 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14078 {
14079 	struct mii_ioctl_data *data = if_mii(ifr);
14080 	struct tg3 *tp = netdev_priv(dev);
14081 	int err;
14082 
14083 	if (tg3_flag(tp, USE_PHYLIB)) {
14084 		struct phy_device *phydev;
14085 		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14086 			return -EAGAIN;
14087 		phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14088 		return phy_mii_ioctl(phydev, ifr, cmd);
14089 	}
14090 
14091 	switch (cmd) {
14092 	case SIOCGMIIPHY:
14093 		data->phy_id = tp->phy_addr;
14094 
14095 		fallthrough;
14096 	case SIOCGMIIREG: {
14097 		u32 mii_regval;
14098 
14099 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14100 			break;			/* We have no PHY */
14101 
14102 		if (!netif_running(dev))
14103 			return -EAGAIN;
14104 
14105 		spin_lock_bh(&tp->lock);
14106 		err = __tg3_readphy(tp, data->phy_id & 0x1f,
14107 				    data->reg_num & 0x1f, &mii_regval);
14108 		spin_unlock_bh(&tp->lock);
14109 
14110 		data->val_out = mii_regval;
14111 
14112 		return err;
14113 	}
14114 
14115 	case SIOCSMIIREG:
14116 		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14117 			break;			/* We have no PHY */
14118 
14119 		if (!netif_running(dev))
14120 			return -EAGAIN;
14121 
14122 		spin_lock_bh(&tp->lock);
14123 		err = __tg3_writephy(tp, data->phy_id & 0x1f,
14124 				     data->reg_num & 0x1f, data->val_in);
14125 		spin_unlock_bh(&tp->lock);
14126 
14127 		return err;
14128 
14129 	case SIOCSHWTSTAMP:
14130 		return tg3_hwtstamp_set(dev, ifr);
14131 
14132 	case SIOCGHWTSTAMP:
14133 		return tg3_hwtstamp_get(dev, ifr);
14134 
14135 	default:
14136 		/* do nothing */
14137 		break;
14138 	}
14139 	return -EOPNOTSUPP;
14140 }
14141 
14142 static int tg3_get_coalesce(struct net_device *dev,
14143 			    struct ethtool_coalesce *ec,
14144 			    struct kernel_ethtool_coalesce *kernel_coal,
14145 			    struct netlink_ext_ack *extack)
14146 {
14147 	struct tg3 *tp = netdev_priv(dev);
14148 
14149 	memcpy(ec, &tp->coal, sizeof(*ec));
14150 	return 0;
14151 }
14152 
14153 static int tg3_set_coalesce(struct net_device *dev,
14154 			    struct ethtool_coalesce *ec,
14155 			    struct kernel_ethtool_coalesce *kernel_coal,
14156 			    struct netlink_ext_ack *extack)
14157 {
14158 	struct tg3 *tp = netdev_priv(dev);
14159 	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14160 	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14161 
14162 	if (!tg3_flag(tp, 5705_PLUS)) {
14163 		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14164 		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14165 		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14166 		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14167 	}
14168 
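	/* On 5705-and-newer parts the per-IRQ and stats-block maxima set
	 * above stay 0, so any nonzero *_irq or stats_block request below
	 * is rejected.  For illustration, a request such as
	 * "ethtool -C eth0 rx-usecs 20 rx-frames 5" arrives here via the
	 * ethtool core's set_coalesce hook.
	 */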
14169 	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14170 	    (!ec->rx_coalesce_usecs) ||
14171 	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14172 	    (!ec->tx_coalesce_usecs) ||
14173 	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14174 	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14175 	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14176 	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14177 	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14178 	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14179 	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14180 	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14181 		return -EINVAL;
14182 
14183 	/* Only copy relevant parameters, ignore all others. */
14184 	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14185 	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14186 	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14187 	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14188 	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14189 	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14190 	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14191 	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14192 	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14193 
14194 	if (netif_running(dev)) {
14195 		tg3_full_lock(tp, 0);
14196 		__tg3_set_coalesce(tp, &tp->coal);
14197 		tg3_full_unlock(tp);
14198 	}
14199 	return 0;
14200 }
14201 
14202 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14203 {
14204 	struct tg3 *tp = netdev_priv(dev);
14205 
14206 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14207 		netdev_warn(tp->dev, "Board does not support EEE!\n");
14208 		return -EOPNOTSUPP;
14209 	}
14210 
14211 	if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14212 		netdev_warn(tp->dev,
14213 			    "Direct manipulation of EEE advertisement is not supported\n");
14214 		return -EINVAL;
14215 	}
14216 
14217 	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14218 		netdev_warn(tp->dev,
14219 			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14220 			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14221 		return -EINVAL;
14222 	}
14223 
14224 	tp->eee.eee_enabled = edata->eee_enabled;
14225 	tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14226 	tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14227 
14228 	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14229 	tg3_warn_mgmt_link_flap(tp);
14230 
14231 	if (netif_running(tp->dev)) {
14232 		tg3_full_lock(tp, 0);
14233 		tg3_setup_eee(tp);
14234 		tg3_phy_reset(tp);
14235 		tg3_full_unlock(tp);
14236 	}
14237 
14238 	return 0;
14239 }
14240 
14241 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14242 {
14243 	struct tg3 *tp = netdev_priv(dev);
14244 
14245 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14246 		netdev_warn(tp->dev,
14247 			    "Board does not support EEE!\n");
14248 		return -EOPNOTSUPP;
14249 	}
14250 
14251 	*edata = tp->eee;
14252 	return 0;
14253 }
14254 
14255 static const struct ethtool_ops tg3_ethtool_ops = {
14256 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14257 				     ETHTOOL_COALESCE_MAX_FRAMES |
14258 				     ETHTOOL_COALESCE_USECS_IRQ |
14259 				     ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14260 				     ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14261 	.get_drvinfo		= tg3_get_drvinfo,
14262 	.get_regs_len		= tg3_get_regs_len,
14263 	.get_regs		= tg3_get_regs,
14264 	.get_wol		= tg3_get_wol,
14265 	.set_wol		= tg3_set_wol,
14266 	.get_msglevel		= tg3_get_msglevel,
14267 	.set_msglevel		= tg3_set_msglevel,
14268 	.nway_reset		= tg3_nway_reset,
14269 	.get_link		= ethtool_op_get_link,
14270 	.get_eeprom_len		= tg3_get_eeprom_len,
14271 	.get_eeprom		= tg3_get_eeprom,
14272 	.set_eeprom		= tg3_set_eeprom,
14273 	.get_ringparam		= tg3_get_ringparam,
14274 	.set_ringparam		= tg3_set_ringparam,
14275 	.get_pauseparam		= tg3_get_pauseparam,
14276 	.set_pauseparam		= tg3_set_pauseparam,
14277 	.self_test		= tg3_self_test,
14278 	.get_strings		= tg3_get_strings,
14279 	.set_phys_id		= tg3_set_phys_id,
14280 	.get_ethtool_stats	= tg3_get_ethtool_stats,
14281 	.get_coalesce		= tg3_get_coalesce,
14282 	.set_coalesce		= tg3_set_coalesce,
14283 	.get_sset_count		= tg3_get_sset_count,
14284 	.get_rxnfc		= tg3_get_rxnfc,
14285 	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14286 	.get_rxfh		= tg3_get_rxfh,
14287 	.set_rxfh		= tg3_set_rxfh,
14288 	.get_channels		= tg3_get_channels,
14289 	.set_channels		= tg3_set_channels,
14290 	.get_ts_info		= tg3_get_ts_info,
14291 	.get_eee		= tg3_get_eee,
14292 	.set_eee		= tg3_set_eee,
14293 	.get_link_ksettings	= tg3_get_link_ksettings,
14294 	.set_link_ksettings	= tg3_set_link_ksettings,
14295 };
14296 
14297 static void tg3_get_stats64(struct net_device *dev,
14298 			    struct rtnl_link_stats64 *stats)
14299 {
14300 	struct tg3 *tp = netdev_priv(dev);
14301 
14302 	spin_lock_bh(&tp->lock);
14303 	if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14304 		*stats = tp->net_stats_prev;
14305 		spin_unlock_bh(&tp->lock);
14306 		return;
14307 	}
14308 
14309 	tg3_get_nstats(tp, stats);
14310 	spin_unlock_bh(&tp->lock);
14311 }
14312 
14313 static void tg3_set_rx_mode(struct net_device *dev)
14314 {
14315 	struct tg3 *tp = netdev_priv(dev);
14316 
14317 	if (!netif_running(dev))
14318 		return;
14319 
14320 	tg3_full_lock(tp, 0);
14321 	__tg3_set_rx_mode(dev);
14322 	tg3_full_unlock(tp);
14323 }
14324 
14325 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14326 			       int new_mtu)
14327 {
14328 	WRITE_ONCE(dev->mtu, new_mtu);
14329 
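	/* On 5780-class chips TSO is dropped for jumbo MTUs and restored
	 * for standard MTUs, so TSO_CAPABLE is toggled around
	 * netdev_update_features() to keep the advertised features in
	 * step with the new MTU.  Other chips simply switch the jumbo
	 * RX ring on or off.
	 */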
14330 	if (new_mtu > ETH_DATA_LEN) {
14331 		if (tg3_flag(tp, 5780_CLASS)) {
14332 			netdev_update_features(dev);
14333 			tg3_flag_clear(tp, TSO_CAPABLE);
14334 		} else {
14335 			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14336 		}
14337 	} else {
14338 		if (tg3_flag(tp, 5780_CLASS)) {
14339 			tg3_flag_set(tp, TSO_CAPABLE);
14340 			netdev_update_features(dev);
14341 		}
14342 		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14343 	}
14344 }
14345 
14346 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14347 {
14348 	struct tg3 *tp = netdev_priv(dev);
14349 	int err;
14350 	bool reset_phy = false;
14351 
14352 	if (!netif_running(dev)) {
14353 		/* We'll just catch it later when the
14354 		 * device is brought up.
14355 		 */
14356 		tg3_set_mtu(dev, tp, new_mtu);
14357 		return 0;
14358 	}
14359 
14360 	tg3_phy_stop(tp);
14361 
14362 	tg3_netif_stop(tp);
14363 
14364 	tg3_set_mtu(dev, tp, new_mtu);
14365 
14366 	netdev_lock(dev);
14367 	tg3_full_lock(tp, 1);
14368 
14369 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14370 
14371 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14372 	 * breaks all requests to 256 bytes.
14373 	 */
14374 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14375 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
14376 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
14377 	    tg3_asic_rev(tp) == ASIC_REV_5720)
14378 		reset_phy = true;
14379 
14380 	err = tg3_restart_hw(tp, reset_phy);
14381 
14382 	if (!err)
14383 		tg3_netif_start(tp);
14384 
14385 	tg3_full_unlock(tp);
14386 	netdev_unlock(dev);
14387 
14388 	if (!err)
14389 		tg3_phy_start(tp);
14390 
14391 	return err;
14392 }
14393 
14394 static const struct net_device_ops tg3_netdev_ops = {
14395 	.ndo_open		= tg3_open,
14396 	.ndo_stop		= tg3_close,
14397 	.ndo_start_xmit		= tg3_start_xmit,
14398 	.ndo_get_stats64	= tg3_get_stats64,
14399 	.ndo_validate_addr	= eth_validate_addr,
14400 	.ndo_set_rx_mode	= tg3_set_rx_mode,
14401 	.ndo_set_mac_address	= tg3_set_mac_addr,
14402 	.ndo_eth_ioctl		= tg3_ioctl,
14403 	.ndo_tx_timeout		= tg3_tx_timeout,
14404 	.ndo_change_mtu		= tg3_change_mtu,
14405 	.ndo_fix_features	= tg3_fix_features,
14406 	.ndo_set_features	= tg3_set_features,
14407 #ifdef CONFIG_NET_POLL_CONTROLLER
14408 	.ndo_poll_controller	= tg3_poll_controller,
14409 #endif
14410 };
14411 
14412 static void tg3_get_eeprom_size(struct tg3 *tp)
14413 {
14414 	u32 cursize, val, magic;
14415 
14416 	tp->nvram_size = EEPROM_CHIP_SIZE;
14417 
14418 	if (tg3_nvram_read(tp, 0, &magic) != 0)
14419 		return;
14420 
14421 	if ((magic != TG3_EEPROM_MAGIC) &&
14422 	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14423 	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14424 		return;
14425 
14426 	/*
14427 	 * Size the chip by reading offsets at increasing powers of two.
14428 	 * When we encounter our validation signature, we know the addressing
14429 	 * has wrapped around, and thus have our chip size.
14430 	 */
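	/* Illustrative example (assuming the part ignores address bits
	 * above its capacity): once cursize reaches the device's real
	 * size, the read below wraps around to offset 0 and returns the
	 * magic value, so the loop exits with cursize == chip size.
	 */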
14431 	cursize = 0x10;
14432 
14433 	while (cursize < tp->nvram_size) {
14434 		if (tg3_nvram_read(tp, cursize, &val) != 0)
14435 			return;
14436 
14437 		if (val == magic)
14438 			break;
14439 
14440 		cursize <<= 1;
14441 	}
14442 
14443 	tp->nvram_size = cursize;
14444 }
14445 
14446 static void tg3_get_nvram_size(struct tg3 *tp)
14447 {
14448 	u32 val;
14449 
14450 	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14451 		return;
14452 
14453 	/* Selfboot format */
14454 	if (val != TG3_EEPROM_MAGIC) {
14455 		tg3_get_eeprom_size(tp);
14456 		return;
14457 	}
14458 
14459 	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14460 		if (val != 0) {
14461 			/* This is confusing.  We want to operate on the
14462 			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14463 			 * call will read from NVRAM and byteswap the data
14464 			 * according to the byteswapping settings for all
14465 			 * other register accesses.  This ensures the data we
14466 			 * want will always reside in the lower 16-bits.
14467 			 * However, the data in NVRAM is in LE format, which
14468 			 * means the data from the NVRAM read will always be
14469 			 * opposite the endianness of the CPU.  The 16-bit
14470 			 * byteswap then brings the data to CPU endianness.
14471 			 */
14472 			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14473 			return;
14474 		}
14475 	}
14476 	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14477 }
14478 
14479 static void tg3_get_nvram_info(struct tg3 *tp)
14480 {
14481 	u32 nvcfg1;
14482 
14483 	nvcfg1 = tr32(NVRAM_CFG1);
14484 	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14485 		tg3_flag_set(tp, FLASH);
14486 	} else {
14487 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14488 		tw32(NVRAM_CFG1, nvcfg1);
14489 	}
14490 
14491 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14492 	    tg3_flag(tp, 5780_CLASS)) {
14493 		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14494 		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14495 			tp->nvram_jedecnum = JEDEC_ATMEL;
14496 			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14497 			tg3_flag_set(tp, NVRAM_BUFFERED);
14498 			break;
14499 		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14500 			tp->nvram_jedecnum = JEDEC_ATMEL;
14501 			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14502 			break;
14503 		case FLASH_VENDOR_ATMEL_EEPROM:
14504 			tp->nvram_jedecnum = JEDEC_ATMEL;
14505 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14506 			tg3_flag_set(tp, NVRAM_BUFFERED);
14507 			break;
14508 		case FLASH_VENDOR_ST:
14509 			tp->nvram_jedecnum = JEDEC_ST;
14510 			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14511 			tg3_flag_set(tp, NVRAM_BUFFERED);
14512 			break;
14513 		case FLASH_VENDOR_SAIFUN:
14514 			tp->nvram_jedecnum = JEDEC_SAIFUN;
14515 			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14516 			break;
14517 		case FLASH_VENDOR_SST_SMALL:
14518 		case FLASH_VENDOR_SST_LARGE:
14519 			tp->nvram_jedecnum = JEDEC_SST;
14520 			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14521 			break;
14522 		}
14523 	} else {
14524 		tp->nvram_jedecnum = JEDEC_ATMEL;
14525 		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14526 		tg3_flag_set(tp, NVRAM_BUFFERED);
14527 	}
14528 }
14529 
14530 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14531 {
14532 	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14533 	case FLASH_5752PAGE_SIZE_256:
14534 		tp->nvram_pagesize = 256;
14535 		break;
14536 	case FLASH_5752PAGE_SIZE_512:
14537 		tp->nvram_pagesize = 512;
14538 		break;
14539 	case FLASH_5752PAGE_SIZE_1K:
14540 		tp->nvram_pagesize = 1024;
14541 		break;
14542 	case FLASH_5752PAGE_SIZE_2K:
14543 		tp->nvram_pagesize = 2048;
14544 		break;
14545 	case FLASH_5752PAGE_SIZE_4K:
14546 		tp->nvram_pagesize = 4096;
14547 		break;
14548 	case FLASH_5752PAGE_SIZE_264:
14549 		tp->nvram_pagesize = 264;
14550 		break;
14551 	case FLASH_5752PAGE_SIZE_528:
14552 		tp->nvram_pagesize = 528;
14553 		break;
14554 	}
14555 }
14556 
14557 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14558 {
14559 	u32 nvcfg1;
14560 
14561 	nvcfg1 = tr32(NVRAM_CFG1);
14562 
14563 	/* NVRAM protection for TPM */
14564 	if (nvcfg1 & (1 << 27))
14565 		tg3_flag_set(tp, PROTECTED_NVRAM);
14566 
14567 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14568 	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14569 	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14570 		tp->nvram_jedecnum = JEDEC_ATMEL;
14571 		tg3_flag_set(tp, NVRAM_BUFFERED);
14572 		break;
14573 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14574 		tp->nvram_jedecnum = JEDEC_ATMEL;
14575 		tg3_flag_set(tp, NVRAM_BUFFERED);
14576 		tg3_flag_set(tp, FLASH);
14577 		break;
14578 	case FLASH_5752VENDOR_ST_M45PE10:
14579 	case FLASH_5752VENDOR_ST_M45PE20:
14580 	case FLASH_5752VENDOR_ST_M45PE40:
14581 		tp->nvram_jedecnum = JEDEC_ST;
14582 		tg3_flag_set(tp, NVRAM_BUFFERED);
14583 		tg3_flag_set(tp, FLASH);
14584 		break;
14585 	}
14586 
14587 	if (tg3_flag(tp, FLASH)) {
14588 		tg3_nvram_get_pagesize(tp, nvcfg1);
14589 	} else {
14590 		/* For eeprom, set pagesize to maximum eeprom size */
14591 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14592 
14593 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14594 		tw32(NVRAM_CFG1, nvcfg1);
14595 	}
14596 }
14597 
14598 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14599 {
14600 	u32 nvcfg1, protect = 0;
14601 
14602 	nvcfg1 = tr32(NVRAM_CFG1);
14603 
14604 	/* NVRAM protection for TPM */
14605 	if (nvcfg1 & (1 << 27)) {
14606 		tg3_flag_set(tp, PROTECTED_NVRAM);
14607 		protect = 1;
14608 	}
14609 
14610 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14611 	switch (nvcfg1) {
14612 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14613 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14614 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14615 	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14616 		tp->nvram_jedecnum = JEDEC_ATMEL;
14617 		tg3_flag_set(tp, NVRAM_BUFFERED);
14618 		tg3_flag_set(tp, FLASH);
14619 		tp->nvram_pagesize = 264;
14620 		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14621 		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14622 			tp->nvram_size = (protect ? 0x3e200 :
14623 					  TG3_NVRAM_SIZE_512KB);
14624 		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14625 			tp->nvram_size = (protect ? 0x1f200 :
14626 					  TG3_NVRAM_SIZE_256KB);
14627 		else
14628 			tp->nvram_size = (protect ? 0x1f200 :
14629 					  TG3_NVRAM_SIZE_128KB);
14630 		break;
14631 	case FLASH_5752VENDOR_ST_M45PE10:
14632 	case FLASH_5752VENDOR_ST_M45PE20:
14633 	case FLASH_5752VENDOR_ST_M45PE40:
14634 		tp->nvram_jedecnum = JEDEC_ST;
14635 		tg3_flag_set(tp, NVRAM_BUFFERED);
14636 		tg3_flag_set(tp, FLASH);
14637 		tp->nvram_pagesize = 256;
14638 		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14639 			tp->nvram_size = (protect ?
14640 					  TG3_NVRAM_SIZE_64KB :
14641 					  TG3_NVRAM_SIZE_128KB);
14642 		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14643 			tp->nvram_size = (protect ?
14644 					  TG3_NVRAM_SIZE_64KB :
14645 					  TG3_NVRAM_SIZE_256KB);
14646 		else
14647 			tp->nvram_size = (protect ?
14648 					  TG3_NVRAM_SIZE_128KB :
14649 					  TG3_NVRAM_SIZE_512KB);
14650 		break;
14651 	}
14652 }
14653 
14654 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14655 {
14656 	u32 nvcfg1;
14657 
14658 	nvcfg1 = tr32(NVRAM_CFG1);
14659 
14660 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14661 	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14662 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14663 	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14664 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14665 		tp->nvram_jedecnum = JEDEC_ATMEL;
14666 		tg3_flag_set(tp, NVRAM_BUFFERED);
14667 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14668 
14669 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14670 		tw32(NVRAM_CFG1, nvcfg1);
14671 		break;
14672 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14673 	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14674 	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14675 	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14676 		tp->nvram_jedecnum = JEDEC_ATMEL;
14677 		tg3_flag_set(tp, NVRAM_BUFFERED);
14678 		tg3_flag_set(tp, FLASH);
14679 		tp->nvram_pagesize = 264;
14680 		break;
14681 	case FLASH_5752VENDOR_ST_M45PE10:
14682 	case FLASH_5752VENDOR_ST_M45PE20:
14683 	case FLASH_5752VENDOR_ST_M45PE40:
14684 		tp->nvram_jedecnum = JEDEC_ST;
14685 		tg3_flag_set(tp, NVRAM_BUFFERED);
14686 		tg3_flag_set(tp, FLASH);
14687 		tp->nvram_pagesize = 256;
14688 		break;
14689 	}
14690 }
14691 
14692 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14693 {
14694 	u32 nvcfg1, protect = 0;
14695 
14696 	nvcfg1 = tr32(NVRAM_CFG1);
14697 
14698 	/* NVRAM protection for TPM */
14699 	if (nvcfg1 & (1 << 27)) {
14700 		tg3_flag_set(tp, PROTECTED_NVRAM);
14701 		protect = 1;
14702 	}
14703 
14704 	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14705 	switch (nvcfg1) {
14706 	case FLASH_5761VENDOR_ATMEL_ADB021D:
14707 	case FLASH_5761VENDOR_ATMEL_ADB041D:
14708 	case FLASH_5761VENDOR_ATMEL_ADB081D:
14709 	case FLASH_5761VENDOR_ATMEL_ADB161D:
14710 	case FLASH_5761VENDOR_ATMEL_MDB021D:
14711 	case FLASH_5761VENDOR_ATMEL_MDB041D:
14712 	case FLASH_5761VENDOR_ATMEL_MDB081D:
14713 	case FLASH_5761VENDOR_ATMEL_MDB161D:
14714 		tp->nvram_jedecnum = JEDEC_ATMEL;
14715 		tg3_flag_set(tp, NVRAM_BUFFERED);
14716 		tg3_flag_set(tp, FLASH);
14717 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14718 		tp->nvram_pagesize = 256;
14719 		break;
14720 	case FLASH_5761VENDOR_ST_A_M45PE20:
14721 	case FLASH_5761VENDOR_ST_A_M45PE40:
14722 	case FLASH_5761VENDOR_ST_A_M45PE80:
14723 	case FLASH_5761VENDOR_ST_A_M45PE16:
14724 	case FLASH_5761VENDOR_ST_M_M45PE20:
14725 	case FLASH_5761VENDOR_ST_M_M45PE40:
14726 	case FLASH_5761VENDOR_ST_M_M45PE80:
14727 	case FLASH_5761VENDOR_ST_M_M45PE16:
14728 		tp->nvram_jedecnum = JEDEC_ST;
14729 		tg3_flag_set(tp, NVRAM_BUFFERED);
14730 		tg3_flag_set(tp, FLASH);
14731 		tp->nvram_pagesize = 256;
14732 		break;
14733 	}
14734 
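	/* With TPM protection active the accessible range appears to be
	 * bounded by the address-lockout register rather than the flash
	 * part's full capacity, so the size is read back from hardware.
	 */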
14735 	if (protect) {
14736 		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14737 	} else {
14738 		switch (nvcfg1) {
14739 		case FLASH_5761VENDOR_ATMEL_ADB161D:
14740 		case FLASH_5761VENDOR_ATMEL_MDB161D:
14741 		case FLASH_5761VENDOR_ST_A_M45PE16:
14742 		case FLASH_5761VENDOR_ST_M_M45PE16:
14743 			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14744 			break;
14745 		case FLASH_5761VENDOR_ATMEL_ADB081D:
14746 		case FLASH_5761VENDOR_ATMEL_MDB081D:
14747 		case FLASH_5761VENDOR_ST_A_M45PE80:
14748 		case FLASH_5761VENDOR_ST_M_M45PE80:
14749 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14750 			break;
14751 		case FLASH_5761VENDOR_ATMEL_ADB041D:
14752 		case FLASH_5761VENDOR_ATMEL_MDB041D:
14753 		case FLASH_5761VENDOR_ST_A_M45PE40:
14754 		case FLASH_5761VENDOR_ST_M_M45PE40:
14755 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14756 			break;
14757 		case FLASH_5761VENDOR_ATMEL_ADB021D:
14758 		case FLASH_5761VENDOR_ATMEL_MDB021D:
14759 		case FLASH_5761VENDOR_ST_A_M45PE20:
14760 		case FLASH_5761VENDOR_ST_M_M45PE20:
14761 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14762 			break;
14763 		}
14764 	}
14765 }
14766 
14767 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14768 {
14769 	tp->nvram_jedecnum = JEDEC_ATMEL;
14770 	tg3_flag_set(tp, NVRAM_BUFFERED);
14771 	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14772 }
14773 
14774 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14775 {
14776 	u32 nvcfg1;
14777 
14778 	nvcfg1 = tr32(NVRAM_CFG1);
14779 
14780 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14781 	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14782 	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14783 		tp->nvram_jedecnum = JEDEC_ATMEL;
14784 		tg3_flag_set(tp, NVRAM_BUFFERED);
14785 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14786 
14787 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14788 		tw32(NVRAM_CFG1, nvcfg1);
14789 		return;
14790 	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14791 	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14792 	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14793 	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14794 	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14795 	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14796 	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14797 		tp->nvram_jedecnum = JEDEC_ATMEL;
14798 		tg3_flag_set(tp, NVRAM_BUFFERED);
14799 		tg3_flag_set(tp, FLASH);
14800 
14801 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14802 		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14803 		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14804 		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14805 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14806 			break;
14807 		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14808 		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14809 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14810 			break;
14811 		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14812 		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14813 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14814 			break;
14815 		}
14816 		break;
14817 	case FLASH_5752VENDOR_ST_M45PE10:
14818 	case FLASH_5752VENDOR_ST_M45PE20:
14819 	case FLASH_5752VENDOR_ST_M45PE40:
14820 		tp->nvram_jedecnum = JEDEC_ST;
14821 		tg3_flag_set(tp, NVRAM_BUFFERED);
14822 		tg3_flag_set(tp, FLASH);
14823 
14824 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14825 		case FLASH_5752VENDOR_ST_M45PE10:
14826 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14827 			break;
14828 		case FLASH_5752VENDOR_ST_M45PE20:
14829 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14830 			break;
14831 		case FLASH_5752VENDOR_ST_M45PE40:
14832 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14833 			break;
14834 		}
14835 		break;
14836 	default:
14837 		tg3_flag_set(tp, NO_NVRAM);
14838 		return;
14839 	}
14840 
14841 	tg3_nvram_get_pagesize(tp, nvcfg1);
14842 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14843 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14844 }
14845 
14846 
14847 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14848 {
14849 	u32 nvcfg1;
14850 
14851 	nvcfg1 = tr32(NVRAM_CFG1);
14852 
14853 	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14854 	case FLASH_5717VENDOR_ATMEL_EEPROM:
14855 	case FLASH_5717VENDOR_MICRO_EEPROM:
14856 		tp->nvram_jedecnum = JEDEC_ATMEL;
14857 		tg3_flag_set(tp, NVRAM_BUFFERED);
14858 		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14859 
14860 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14861 		tw32(NVRAM_CFG1, nvcfg1);
14862 		return;
14863 	case FLASH_5717VENDOR_ATMEL_MDB011D:
14864 	case FLASH_5717VENDOR_ATMEL_ADB011B:
14865 	case FLASH_5717VENDOR_ATMEL_ADB011D:
14866 	case FLASH_5717VENDOR_ATMEL_MDB021D:
14867 	case FLASH_5717VENDOR_ATMEL_ADB021B:
14868 	case FLASH_5717VENDOR_ATMEL_ADB021D:
14869 	case FLASH_5717VENDOR_ATMEL_45USPT:
14870 		tp->nvram_jedecnum = JEDEC_ATMEL;
14871 		tg3_flag_set(tp, NVRAM_BUFFERED);
14872 		tg3_flag_set(tp, FLASH);
14873 
14874 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14875 		case FLASH_5717VENDOR_ATMEL_MDB021D:
14876 			/* Detect size with tg3_get_nvram_size() */
14877 			break;
14878 		case FLASH_5717VENDOR_ATMEL_ADB021B:
14879 		case FLASH_5717VENDOR_ATMEL_ADB021D:
14880 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14881 			break;
14882 		default:
14883 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14884 			break;
14885 		}
14886 		break;
14887 	case FLASH_5717VENDOR_ST_M_M25PE10:
14888 	case FLASH_5717VENDOR_ST_A_M25PE10:
14889 	case FLASH_5717VENDOR_ST_M_M45PE10:
14890 	case FLASH_5717VENDOR_ST_A_M45PE10:
14891 	case FLASH_5717VENDOR_ST_M_M25PE20:
14892 	case FLASH_5717VENDOR_ST_A_M25PE20:
14893 	case FLASH_5717VENDOR_ST_M_M45PE20:
14894 	case FLASH_5717VENDOR_ST_A_M45PE20:
14895 	case FLASH_5717VENDOR_ST_25USPT:
14896 	case FLASH_5717VENDOR_ST_45USPT:
14897 		tp->nvram_jedecnum = JEDEC_ST;
14898 		tg3_flag_set(tp, NVRAM_BUFFERED);
14899 		tg3_flag_set(tp, FLASH);
14900 
14901 		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14902 		case FLASH_5717VENDOR_ST_M_M25PE20:
14903 		case FLASH_5717VENDOR_ST_M_M45PE20:
14904 			/* Detect size with tg3_nvram_get_size() */
14905 			/* Detect size with tg3_get_nvram_size() */
14906 		case FLASH_5717VENDOR_ST_A_M25PE20:
14907 		case FLASH_5717VENDOR_ST_A_M45PE20:
14908 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14909 			break;
14910 		default:
14911 			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14912 			break;
14913 		}
14914 		break;
14915 	default:
14916 		tg3_flag_set(tp, NO_NVRAM);
14917 		return;
14918 	}
14919 
14920 	tg3_nvram_get_pagesize(tp, nvcfg1);
14921 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14922 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14923 }
14924 
14925 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14926 {
14927 	u32 nvcfg1, nvmpinstrp, nv_status;
14928 
14929 	nvcfg1 = tr32(NVRAM_CFG1);
14930 	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14931 
14932 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14933 		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14934 			tg3_flag_set(tp, NO_NVRAM);
14935 			return;
14936 		}
14937 
14938 		switch (nvmpinstrp) {
14939 		case FLASH_5762_MX25L_100:
14940 		case FLASH_5762_MX25L_200:
14941 		case FLASH_5762_MX25L_400:
14942 		case FLASH_5762_MX25L_800:
14943 		case FLASH_5762_MX25L_160_320:
14944 			tp->nvram_pagesize = 4096;
14945 			tp->nvram_jedecnum = JEDEC_MACRONIX;
14946 			tg3_flag_set(tp, NVRAM_BUFFERED);
14947 			tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14948 			tg3_flag_set(tp, FLASH);
14949 			nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
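			/* Size autosensing: the device-ID field selects a
			 * power of two, which the AUTOSENSE_SIZE_IN_MB
			 * shift then scales from megabytes to bytes
			 * (assuming that macro is the MB-to-bytes shift,
			 * e.g. a field value of 4 gives (1 << 4) << 20,
			 * i.e. 16 MB).
			 */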
14950 			tp->nvram_size =
14951 				(1 << (nv_status >> AUTOSENSE_DEVID &
14952 						AUTOSENSE_DEVID_MASK)
14953 					<< AUTOSENSE_SIZE_IN_MB);
14954 			return;
14955 
14956 		case FLASH_5762_EEPROM_HD:
14957 			nvmpinstrp = FLASH_5720_EEPROM_HD;
14958 			break;
14959 		case FLASH_5762_EEPROM_LD:
14960 			nvmpinstrp = FLASH_5720_EEPROM_LD;
14961 			break;
14962 		case FLASH_5720VENDOR_M_ST_M45PE20:
14963 			/* This pinstrap supports multiple sizes, so force it
14964 			 * to read the actual size from location 0xf0.
14965 			 */
14966 			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14967 			break;
14968 		}
14969 	}
14970 
14971 	switch (nvmpinstrp) {
14972 	case FLASH_5720_EEPROM_HD:
14973 	case FLASH_5720_EEPROM_LD:
14974 		tp->nvram_jedecnum = JEDEC_ATMEL;
14975 		tg3_flag_set(tp, NVRAM_BUFFERED);
14976 
14977 		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14978 		tw32(NVRAM_CFG1, nvcfg1);
14979 		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14980 			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14981 		else
14982 			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14983 		return;
14984 	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14985 	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14986 	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14987 	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14988 	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14989 	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14990 	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14991 	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14992 	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14993 	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14994 	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14995 	case FLASH_5720VENDOR_ATMEL_45USPT:
14996 		tp->nvram_jedecnum = JEDEC_ATMEL;
14997 		tg3_flag_set(tp, NVRAM_BUFFERED);
14998 		tg3_flag_set(tp, FLASH);
14999 
15000 		switch (nvmpinstrp) {
15001 		case FLASH_5720VENDOR_M_ATMEL_DB021D:
15002 		case FLASH_5720VENDOR_A_ATMEL_DB021B:
15003 		case FLASH_5720VENDOR_A_ATMEL_DB021D:
15004 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15005 			break;
15006 		case FLASH_5720VENDOR_M_ATMEL_DB041D:
15007 		case FLASH_5720VENDOR_A_ATMEL_DB041B:
15008 		case FLASH_5720VENDOR_A_ATMEL_DB041D:
15009 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15010 			break;
15011 		case FLASH_5720VENDOR_M_ATMEL_DB081D:
15012 		case FLASH_5720VENDOR_A_ATMEL_DB081D:
15013 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15014 			break;
15015 		default:
15016 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15017 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15018 			break;
15019 		}
15020 		break;
15021 	case FLASH_5720VENDOR_M_ST_M25PE10:
15022 	case FLASH_5720VENDOR_M_ST_M45PE10:
15023 	case FLASH_5720VENDOR_A_ST_M25PE10:
15024 	case FLASH_5720VENDOR_A_ST_M45PE10:
15025 	case FLASH_5720VENDOR_M_ST_M25PE20:
15026 	case FLASH_5720VENDOR_M_ST_M45PE20:
15027 	case FLASH_5720VENDOR_A_ST_M25PE20:
15028 	case FLASH_5720VENDOR_A_ST_M45PE20:
15029 	case FLASH_5720VENDOR_M_ST_M25PE40:
15030 	case FLASH_5720VENDOR_M_ST_M45PE40:
15031 	case FLASH_5720VENDOR_A_ST_M25PE40:
15032 	case FLASH_5720VENDOR_A_ST_M45PE40:
15033 	case FLASH_5720VENDOR_M_ST_M25PE80:
15034 	case FLASH_5720VENDOR_M_ST_M45PE80:
15035 	case FLASH_5720VENDOR_A_ST_M25PE80:
15036 	case FLASH_5720VENDOR_A_ST_M45PE80:
15037 	case FLASH_5720VENDOR_ST_25USPT:
15038 	case FLASH_5720VENDOR_ST_45USPT:
15039 		tp->nvram_jedecnum = JEDEC_ST;
15040 		tg3_flag_set(tp, NVRAM_BUFFERED);
15041 		tg3_flag_set(tp, FLASH);
15042 
15043 		switch (nvmpinstrp) {
15044 		case FLASH_5720VENDOR_M_ST_M25PE20:
15045 		case FLASH_5720VENDOR_M_ST_M45PE20:
15046 		case FLASH_5720VENDOR_A_ST_M25PE20:
15047 		case FLASH_5720VENDOR_A_ST_M45PE20:
15048 			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15049 			break;
15050 		case FLASH_5720VENDOR_M_ST_M25PE40:
15051 		case FLASH_5720VENDOR_M_ST_M45PE40:
15052 		case FLASH_5720VENDOR_A_ST_M25PE40:
15053 		case FLASH_5720VENDOR_A_ST_M45PE40:
15054 			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15055 			break;
15056 		case FLASH_5720VENDOR_M_ST_M25PE80:
15057 		case FLASH_5720VENDOR_M_ST_M45PE80:
15058 		case FLASH_5720VENDOR_A_ST_M25PE80:
15059 		case FLASH_5720VENDOR_A_ST_M45PE80:
15060 			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15061 			break;
15062 		default:
15063 			if (tg3_asic_rev(tp) != ASIC_REV_5762)
15064 				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15065 			break;
15066 		}
15067 		break;
15068 	default:
15069 		tg3_flag_set(tp, NO_NVRAM);
15070 		return;
15071 	}
15072 
15073 	tg3_nvram_get_pagesize(tp, nvcfg1);
15074 	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15075 		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15076 
15077 	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15078 		u32 val;
15079 
15080 		if (tg3_nvram_read(tp, 0, &val))
15081 			return;
15082 
15083 		if (val != TG3_EEPROM_MAGIC &&
15084 		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15085 			tg3_flag_set(tp, NO_NVRAM);
15086 	}
15087 }
15088 
15089 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15090 static void tg3_nvram_init(struct tg3 *tp)
15091 {
15092 	if (tg3_flag(tp, IS_SSB_CORE)) {
15093 		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15094 		tg3_flag_clear(tp, NVRAM);
15095 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15096 		tg3_flag_set(tp, NO_NVRAM);
15097 		return;
15098 	}
15099 
15100 	tw32_f(GRC_EEPROM_ADDR,
15101 	     (EEPROM_ADDR_FSM_RESET |
15102 	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
15103 	       EEPROM_ADDR_CLKPERD_SHIFT)));
15104 
15105 	msleep(1);
15106 
15107 	/* Enable seeprom accesses. */
15108 	tw32_f(GRC_LOCAL_CTRL,
15109 	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15110 	udelay(100);
15111 
15112 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15113 	    tg3_asic_rev(tp) != ASIC_REV_5701) {
15114 		tg3_flag_set(tp, NVRAM);
15115 
15116 		if (tg3_nvram_lock(tp)) {
15117 			netdev_warn(tp->dev,
15118 				    "Cannot get nvram lock, %s failed\n",
15119 				    __func__);
15120 			return;
15121 		}
15122 		tg3_enable_nvram_access(tp);
15123 
15124 		tp->nvram_size = 0;
15125 
15126 		if (tg3_asic_rev(tp) == ASIC_REV_5752)
15127 			tg3_get_5752_nvram_info(tp);
15128 		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15129 			tg3_get_5755_nvram_info(tp);
15130 		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15131 			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15132 			 tg3_asic_rev(tp) == ASIC_REV_5785)
15133 			tg3_get_5787_nvram_info(tp);
15134 		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15135 			tg3_get_5761_nvram_info(tp);
15136 		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15137 			tg3_get_5906_nvram_info(tp);
15138 		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15139 			 tg3_flag(tp, 57765_CLASS))
15140 			tg3_get_57780_nvram_info(tp);
15141 		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15142 			 tg3_asic_rev(tp) == ASIC_REV_5719)
15143 			tg3_get_5717_nvram_info(tp);
15144 		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15145 			 tg3_asic_rev(tp) == ASIC_REV_5762)
15146 			tg3_get_5720_nvram_info(tp);
15147 		else
15148 			tg3_get_nvram_info(tp);
15149 
15150 		if (tp->nvram_size == 0)
15151 			tg3_get_nvram_size(tp);
15152 
15153 		tg3_disable_nvram_access(tp);
15154 		tg3_nvram_unlock(tp);
15155 
15156 	} else {
15157 		tg3_flag_clear(tp, NVRAM);
15158 		tg3_flag_clear(tp, NVRAM_BUFFERED);
15159 
15160 		tg3_get_eeprom_size(tp);
15161 	}
15162 }
15163 
15164 struct subsys_tbl_ent {
15165 	u16 subsys_vendor, subsys_devid;
15166 	u32 phy_id;
15167 };
15168 
15169 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15170 	/* Broadcom boards. */
15171 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15172 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15173 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15174 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15175 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15176 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15177 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15178 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15179 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15180 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15181 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15182 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15183 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15184 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15185 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15186 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15187 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15188 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15189 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15190 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15191 	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15192 	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15193 
15194 	/* 3com boards. */
15195 	{ TG3PCI_SUBVENDOR_ID_3COM,
15196 	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15197 	{ TG3PCI_SUBVENDOR_ID_3COM,
15198 	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15199 	{ TG3PCI_SUBVENDOR_ID_3COM,
15200 	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15201 	{ TG3PCI_SUBVENDOR_ID_3COM,
15202 	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15203 	{ TG3PCI_SUBVENDOR_ID_3COM,
15204 	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15205 
15206 	/* DELL boards. */
15207 	{ TG3PCI_SUBVENDOR_ID_DELL,
15208 	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15209 	{ TG3PCI_SUBVENDOR_ID_DELL,
15210 	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15211 	{ TG3PCI_SUBVENDOR_ID_DELL,
15212 	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15213 	{ TG3PCI_SUBVENDOR_ID_DELL,
15214 	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15215 
15216 	/* Compaq boards. */
15217 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15218 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15219 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15220 	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15221 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15222 	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15223 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15224 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15225 	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15226 	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15227 
15228 	/* IBM boards. */
15229 	{ TG3PCI_SUBVENDOR_ID_IBM,
15230 	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15231 };
15232 
15233 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15234 {
15235 	int i;
15236 
15237 	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15238 		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15239 		     tp->pdev->subsystem_vendor) &&
15240 		    (subsys_id_to_phy_id[i].subsys_devid ==
15241 		     tp->pdev->subsystem_device))
15242 			return &subsys_id_to_phy_id[i];
15243 	}
15244 	return NULL;
15245 }
15246 
15247 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15248 {
15249 	u32 val;
15250 
15251 	tp->phy_id = TG3_PHY_ID_INVALID;
15252 	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15253 
15254 	/* Assume an onboard device and WOL capable by default.  */
15255 	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15256 	tg3_flag_set(tp, WOL_CAP);
15257 
15258 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15259 		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15260 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15261 			tg3_flag_set(tp, IS_NIC);
15262 		}
15263 		val = tr32(VCPU_CFGSHDW);
15264 		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15265 			tg3_flag_set(tp, ASPM_WORKAROUND);
15266 		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15267 		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15268 			tg3_flag_set(tp, WOL_ENABLE);
15269 			device_set_wakeup_enable(&tp->pdev->dev, true);
15270 		}
15271 		goto done;
15272 	}
15273 
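	/* The bootcode leaves board configuration in shared NIC SRAM; a
	 * valid signature word indicates the block is present and the
	 * settings below can be trusted.
	 */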
15274 	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15275 	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15276 		u32 nic_cfg, led_cfg;
15277 		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15278 		u32 nic_phy_id, ver, eeprom_phy_id;
15279 		int eeprom_phy_serdes = 0;
15280 
15281 		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15282 		tp->nic_sram_data_cfg = nic_cfg;
15283 
15284 		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15285 		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15286 		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15287 		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15288 		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15289 		    (ver > 0) && (ver < 0x100))
15290 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15291 
15292 		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15293 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15294 
15295 		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15296 		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15297 		    tg3_asic_rev(tp) == ASIC_REV_5720)
15298 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15299 
15300 		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15301 		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15302 			eeprom_phy_serdes = 1;
15303 
15304 		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15305 		if (nic_phy_id != 0) {
15306 			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15307 			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15308 
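			/* Fold the two SRAM ID words into the driver's
			 * internal PHY ID layout; tg3_phy_probe() builds
			 * the same encoding from MII_PHYSID1/2.
			 */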
15309 			eeprom_phy_id  = (id1 >> 16) << 10;
15310 			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15311 			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15312 		} else
15313 			eeprom_phy_id = 0;
15314 
15315 		tp->phy_id = eeprom_phy_id;
15316 		if (eeprom_phy_serdes) {
15317 			if (!tg3_flag(tp, 5705_PLUS))
15318 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15319 			else
15320 				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15321 		}
15322 
15323 		if (tg3_flag(tp, 5750_PLUS))
15324 			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15325 				    SHASTA_EXT_LED_MODE_MASK);
15326 		else
15327 			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15328 
15329 		switch (led_cfg) {
15330 		default:
15331 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15332 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15333 			break;
15334 
15335 		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15336 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15337 			break;
15338 
15339 		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15340 			tp->led_ctrl = LED_CTRL_MODE_MAC;
15341 
15342 			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15343 			 * read on some older 5700/5701 bootcode.
15344 			 */
15345 			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15346 			    tg3_asic_rev(tp) == ASIC_REV_5701)
15347 				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15348 
15349 			break;
15350 
15351 		case SHASTA_EXT_LED_SHARED:
15352 			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15353 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15354 			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15355 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15356 						 LED_CTRL_MODE_PHY_2);
15357 
15358 			if (tg3_flag(tp, 5717_PLUS) ||
15359 			    tg3_asic_rev(tp) == ASIC_REV_5762)
15360 				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15361 						LED_CTRL_BLINK_RATE_MASK;
15362 
15363 			break;
15364 
15365 		case SHASTA_EXT_LED_MAC:
15366 			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15367 			break;
15368 
15369 		case SHASTA_EXT_LED_COMBO:
15370 			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15371 			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15372 				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15373 						 LED_CTRL_MODE_PHY_2);
15374 			break;
15375 
15376 		}
15377 
15378 		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15379 		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15380 		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15381 			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15382 
15383 		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15384 			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15385 
15386 		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15387 			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15388 			if ((tp->pdev->subsystem_vendor ==
15389 			     PCI_VENDOR_ID_ARIMA) &&
15390 			    (tp->pdev->subsystem_device == 0x205a ||
15391 			     tp->pdev->subsystem_device == 0x2063))
15392 				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15393 		} else {
15394 			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15395 			tg3_flag_set(tp, IS_NIC);
15396 		}
15397 
15398 		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15399 			tg3_flag_set(tp, ENABLE_ASF);
15400 			if (tg3_flag(tp, 5750_PLUS))
15401 				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15402 		}
15403 
15404 		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15405 		    tg3_flag(tp, 5750_PLUS))
15406 			tg3_flag_set(tp, ENABLE_APE);
15407 
15408 		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15409 		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15410 			tg3_flag_clear(tp, WOL_CAP);
15411 
15412 		if (tg3_flag(tp, WOL_CAP) &&
15413 		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15414 			tg3_flag_set(tp, WOL_ENABLE);
15415 			device_set_wakeup_enable(&tp->pdev->dev, true);
15416 		}
15417 
15418 		if (cfg2 & (1 << 17))
15419 			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15420 
15421 		/* serdes signal pre-emphasis in register 0x590 is set by
15422 		 * the bootcode if bit 18 is set. */
15423 		if (cfg2 & (1 << 18))
15424 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15425 
15426 		if ((tg3_flag(tp, 57765_PLUS) ||
15427 		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15428 		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15429 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15430 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15431 
15432 		if (tg3_flag(tp, PCI_EXPRESS)) {
15433 			u32 cfg3;
15434 
15435 			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15436 			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15437 			    !tg3_flag(tp, 57765_PLUS) &&
15438 			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15439 				tg3_flag_set(tp, ASPM_WORKAROUND);
15440 			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15441 				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15442 			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15443 				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15444 		}
15445 
15446 		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15447 			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15448 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15449 			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15450 		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15451 			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15452 
15453 		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15454 			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15455 	}
15456 done:
15457 	if (tg3_flag(tp, WOL_CAP))
15458 		device_set_wakeup_enable(&tp->pdev->dev,
15459 					 tg3_flag(tp, WOL_ENABLE));
15460 	else
15461 		device_set_wakeup_capable(&tp->pdev->dev, false);
15462 }
15463 
15464 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15465 {
15466 	int i, err;
15467 	u32 val2, off = offset * 8;
15468 
15469 	err = tg3_nvram_lock(tp);
15470 	if (err)
15471 		return err;
15472 
15473 	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15474 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15475 			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15476 	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15477 	udelay(10);
15478 
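	/* Poll for up to ~1 ms (100 iterations x 10 us) for the OTP
	 * read to complete.
	 */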
15479 	for (i = 0; i < 100; i++) {
15480 		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15481 		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15482 			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15483 			break;
15484 		}
15485 		udelay(10);
15486 	}
15487 
15488 	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15489 
15490 	tg3_nvram_unlock(tp);
15491 	if (val2 & APE_OTP_STATUS_CMD_DONE)
15492 		return 0;
15493 
15494 	return -EBUSY;
15495 }
15496 
15497 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15498 {
15499 	int i;
15500 	u32 val;
15501 
15502 	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15503 	tw32(OTP_CTRL, cmd);
15504 
15505 	/* Wait for up to 1 ms for command to execute. */
15506 	for (i = 0; i < 100; i++) {
15507 		val = tr32(OTP_STATUS);
15508 		if (val & OTP_STATUS_CMD_DONE)
15509 			break;
15510 		udelay(10);
15511 	}
15512 
15513 	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15514 }
15515 
15516 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15517  * configuration is a 32-bit value that straddles the alignment boundary.
15518  * We do two 32-bit reads and then shift and merge the results.
15519  */
15520 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15521 {
15522 	u32 bhalf_otp, thalf_otp;
15523 
15524 	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15525 
15526 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15527 		return 0;
15528 
15529 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15530 
15531 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15532 		return 0;
15533 
15534 	thalf_otp = tr32(OTP_READ_DATA);
15535 
15536 	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15537 
15538 	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15539 		return 0;
15540 
15541 	bhalf_otp = tr32(OTP_READ_DATA);
15542 
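	/* Merge the two halves: the low 16 bits of the first word form
	 * the top of the result, the high 16 bits of the second word
	 * form the bottom.
	 */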
15543 	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15544 }
15545 
15546 static void tg3_phy_init_link_config(struct tg3 *tp)
15547 {
15548 	u32 adv = ADVERTISED_Autoneg;
15549 
15550 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15551 		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15552 			adv |= ADVERTISED_1000baseT_Half;
15553 		adv |= ADVERTISED_1000baseT_Full;
15554 	}
15555 
15556 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15557 		adv |= ADVERTISED_100baseT_Half |
15558 		       ADVERTISED_100baseT_Full |
15559 		       ADVERTISED_10baseT_Half |
15560 		       ADVERTISED_10baseT_Full |
15561 		       ADVERTISED_TP;
15562 	else
15563 		adv |= ADVERTISED_FIBRE;
15564 
15565 	tp->link_config.advertising = adv;
15566 	tp->link_config.speed = SPEED_UNKNOWN;
15567 	tp->link_config.duplex = DUPLEX_UNKNOWN;
15568 	tp->link_config.autoneg = AUTONEG_ENABLE;
15569 	tp->link_config.active_speed = SPEED_UNKNOWN;
15570 	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15571 
15572 	tp->old_link = -1;
15573 }
15574 
15575 static int tg3_phy_probe(struct tg3 *tp)
15576 {
15577 	u32 hw_phy_id_1, hw_phy_id_2;
15578 	u32 hw_phy_id, hw_phy_id_masked;
15579 	int err;
15580 
15581 	/* flow control autonegotiation is default behavior */
15582 	tg3_flag_set(tp, PAUSE_AUTONEG);
15583 	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15584 
15585 	if (tg3_flag(tp, ENABLE_APE)) {
15586 		switch (tp->pci_fn) {
15587 		case 0:
15588 			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15589 			break;
15590 		case 1:
15591 			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15592 			break;
15593 		case 2:
15594 			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15595 			break;
15596 		case 3:
15597 			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15598 			break;
15599 		}
15600 	}
15601 
15602 	if (!tg3_flag(tp, ENABLE_ASF) &&
15603 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15604 	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15605 		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15606 				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15607 
15608 	if (tg3_flag(tp, USE_PHYLIB))
15609 		return tg3_phy_init(tp);
15610 
15611 	/* Reading the PHY ID register can conflict with ASF
15612 	 * firmware access to the PHY hardware.
15613 	 */
15614 	err = 0;
15615 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15616 		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15617 	} else {
15618 		/* Now read the physical PHY_ID from the chip and verify
15619 		 * that it is sane.  If it doesn't look good, we fall back
15620 		 * to the PHY_ID found in the eeprom area and, failing
15621 		 * that, to the hard-coded subsystem-ID table.
15622 		 */
15623 		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15624 		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15625 
15626 		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
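		/* Pack PHYSID1/PHYSID2 into the driver's internal PHY ID
		 * encoding, matching the layout derived from NIC SRAM in
		 * tg3_get_eeprom_hw_cfg().
		 */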
15627 		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15628 		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15629 
15630 		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15631 	}
15632 
15633 	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15634 		tp->phy_id = hw_phy_id;
15635 		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15636 			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15637 		else
15638 			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15639 	} else {
15640 		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15641 			/* Do nothing, phy ID already set up in
15642 			 * tg3_get_eeprom_hw_cfg().
15643 			 */
15644 		} else {
15645 			struct subsys_tbl_ent *p;
15646 
15647 			/* No eeprom signature?  Try the hardcoded
15648 			 * subsys device table.
15649 			 */
15650 			p = tg3_lookup_by_subsys(tp);
15651 			if (p) {
15652 				tp->phy_id = p->phy_id;
15653 			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15654 				/* So far we have seen the IDs 0xbc050cd0,
15655 				 * 0xbc050f80 and 0xbc050c30 on devices
15656 				 * connected to a BCM4785, and there are
15657 				 * probably more.  For now, just assume that
15658 				 * the PHY is supported when it is connected
15659 				 * to an SSB core.
15660 				 */
15661 				return -ENODEV;
15662 			}
15663 
15664 			if (!tp->phy_id ||
15665 			    tp->phy_id == TG3_PHY_ID_BCM8002)
15666 				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15667 		}
15668 	}
15669 
15670 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15671 	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15672 	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15673 	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15674 	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15675 	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15676 	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15677 	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15678 	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15679 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15680 
15681 		linkmode_zero(tp->eee.supported);
15682 		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15683 				 tp->eee.supported);
15684 		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15685 				 tp->eee.supported);
15686 		linkmode_copy(tp->eee.advertised, tp->eee.supported);
15687 
15688 		tp->eee.eee_enabled = 1;
15689 		tp->eee.tx_lpi_enabled = 1;
15690 		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15691 	}
15692 
15693 	tg3_phy_init_link_config(tp);
15694 
15695 	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15696 	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15697 	    !tg3_flag(tp, ENABLE_APE) &&
15698 	    !tg3_flag(tp, ENABLE_ASF)) {
15699 		u32 bmsr, dummy;
15700 
15701 		tg3_readphy(tp, MII_BMSR, &bmsr);
15702 		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15703 		    (bmsr & BMSR_LSTATUS))
15704 			goto skip_phy_reset;
15705 
15706 		err = tg3_phy_reset(tp);
15707 		if (err)
15708 			return err;
15709 
15710 		tg3_phy_set_wirespeed(tp);
15711 
15712 		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15713 			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15714 					    tp->link_config.flowctrl);
15715 
15716 			tg3_writephy(tp, MII_BMCR,
15717 				     BMCR_ANENABLE | BMCR_ANRESTART);
15718 		}
15719 	}
15720 
15721 skip_phy_reset:
15722 	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15723 		err = tg3_init_5401phy_dsp(tp);
15724 		if (err)
15725 			return err;
15726 
15727 		err = tg3_init_5401phy_dsp(tp);
15728 	}
15729 
15730 	return err;
15731 }
15732 
15733 static void tg3_read_vpd(struct tg3 *tp)
15734 {
15735 	u8 *vpd_data;
15736 	unsigned int len, vpdlen;
15737 	int i;
15738 
15739 	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15740 	if (!vpd_data)
15741 		goto out_no_vpd;
15742 
15743 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15744 					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15745 	if (i < 0)
15746 		goto partno;
15747 
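	/* "1028" is Dell's PCI vendor ID; only Dell boards appear to
	 * carry the bootcode version in the VENDOR0 keyword, so skip
	 * the lookup otherwise.
	 */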
15748 	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15749 		goto partno;
15750 
15751 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15752 					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15753 	if (i < 0)
15754 		goto partno;
15755 
15756 	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15757 	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15758 
15759 partno:
15760 	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15761 					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15762 	if (i < 0)
15763 		goto out_not_found;
15764 
15765 	if (len > TG3_BPN_SIZE)
15766 		goto out_not_found;
15767 
15768 	memcpy(tp->board_part_number, &vpd_data[i], len);
15769 
15770 out_not_found:
15771 	kfree(vpd_data);
15772 	if (tp->board_part_number[0])
15773 		return;
15774 
15775 out_no_vpd:
15776 	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15777 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15778 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15779 			strcpy(tp->board_part_number, "BCM5717");
15780 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15781 			strcpy(tp->board_part_number, "BCM5718");
15782 		else
15783 			goto nomatch;
15784 	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15785 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15786 			strcpy(tp->board_part_number, "BCM57780");
15787 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15788 			strcpy(tp->board_part_number, "BCM57760");
15789 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15790 			strcpy(tp->board_part_number, "BCM57790");
15791 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15792 			strcpy(tp->board_part_number, "BCM57788");
15793 		else
15794 			goto nomatch;
15795 	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15796 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15797 			strcpy(tp->board_part_number, "BCM57761");
15798 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15799 			strcpy(tp->board_part_number, "BCM57765");
15800 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15801 			strcpy(tp->board_part_number, "BCM57781");
15802 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15803 			strcpy(tp->board_part_number, "BCM57785");
15804 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15805 			strcpy(tp->board_part_number, "BCM57791");
15806 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15807 			strcpy(tp->board_part_number, "BCM57795");
15808 		else
15809 			goto nomatch;
15810 	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15811 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15812 			strcpy(tp->board_part_number, "BCM57762");
15813 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15814 			strcpy(tp->board_part_number, "BCM57766");
15815 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15816 			strcpy(tp->board_part_number, "BCM57782");
15817 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15818 			strcpy(tp->board_part_number, "BCM57786");
15819 		else
15820 			goto nomatch;
15821 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15822 		strcpy(tp->board_part_number, "BCM95906");
15823 	} else {
15824 nomatch:
15825 		strcpy(tp->board_part_number, "none");
15826 	}
15827 }
15828 
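/* Sanity-check a firmware image header in NVRAM: the first word must
 * carry 0x0c000000 in its top opcode bits (a MIPS "jal", i.e. a jump
 * to the image entry point) and the second word must be zero.
 */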
15829 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15830 {
15831 	u32 val;
15832 
15833 	if (tg3_nvram_read(tp, offset, &val) ||
15834 	    (val & 0xfc000000) != 0x0c000000 ||
15835 	    tg3_nvram_read(tp, offset + 4, &val) ||
15836 	    val != 0)
15837 		return 0;
15838 
15839 	return 1;
15840 }
15841 
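/* Extract the bootcode version into tp->fw_ver.  Newer images carry a
 * 16-byte version string at an offset stored in the image header;
 * older images only encode major/minor numbers in the NVRAM pointer
 * table entry.
 */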
15842 static void tg3_read_bc_ver(struct tg3 *tp)
15843 {
15844 	u32 val, offset, start, ver_offset;
15845 	int i, dst_off;
15846 	bool newver = false;
15847 
15848 	if (tg3_nvram_read(tp, 0xc, &offset) ||
15849 	    tg3_nvram_read(tp, 0x4, &start))
15850 		return;
15851 
15852 	offset = tg3_nvram_logical_addr(tp, offset);
15853 
15854 	if (tg3_nvram_read(tp, offset, &val))
15855 		return;
15856 
15857 	if ((val & 0xfc000000) == 0x0c000000) {
15858 		if (tg3_nvram_read(tp, offset + 4, &val))
15859 			return;
15860 
15861 		if (val == 0)
15862 			newver = true;
15863 	}
15864 
15865 	dst_off = strlen(tp->fw_ver);
15866 
15867 	if (newver) {
15868 		if (TG3_VER_SIZE - dst_off < 16 ||
15869 		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15870 			return;
15871 
15872 		offset = offset + ver_offset - start;
15873 		for (i = 0; i < 16; i += 4) {
15874 			__be32 v;
15875 			if (tg3_nvram_read_be32(tp, offset + i, &v))
15876 				return;
15877 
15878 			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15879 		}
15880 	} else {
15881 		u32 major, minor;
15882 
15883 		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15884 			return;
15885 
15886 		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15887 			TG3_NVM_BCVER_MAJSFT;
15888 		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15889 		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15890 			 "v%d.%02d", major, minor);
15891 	}
15892 }
15893 
15894 static void tg3_read_hwsb_ver(struct tg3 *tp)
15895 {
15896 	u32 val, major, minor;
15897 
15898 	/* Use native endian representation */
15899 	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15900 		return;
15901 
15902 	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15903 		TG3_NVM_HWSB_CFG1_MAJSFT;
15904 	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15905 		TG3_NVM_HWSB_CFG1_MINSFT;
15906 
15907 	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15908 }
15909 
15910 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15911 {
15912 	u32 offset, major, minor, build;
15913 
15914 	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15915 
15916 	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15917 		return;
15918 
15919 	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15920 	case TG3_EEPROM_SB_REVISION_0:
15921 		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15922 		break;
15923 	case TG3_EEPROM_SB_REVISION_2:
15924 		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15925 		break;
15926 	case TG3_EEPROM_SB_REVISION_3:
15927 		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15928 		break;
15929 	case TG3_EEPROM_SB_REVISION_4:
15930 		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15931 		break;
15932 	case TG3_EEPROM_SB_REVISION_5:
15933 		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15934 		break;
15935 	case TG3_EEPROM_SB_REVISION_6:
15936 		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15937 		break;
15938 	default:
15939 		return;
15940 	}
15941 
15942 	if (tg3_nvram_read(tp, offset, &val))
15943 		return;
15944 
15945 	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15946 		TG3_EEPROM_SB_EDH_BLD_SHFT;
15947 	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15948 		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15949 	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15950 
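	/* The build number is rendered as a single letter ('a' + build - 1)
	 * and the minor with two digits, so versions with minor > 99 or
	 * build > 26 are unrepresentable and skipped.
	 */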
15951 	if (minor > 99 || build > 26)
15952 		return;
15953 
15954 	offset = strlen(tp->fw_ver);
15955 	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15956 		 " v%d.%02d", major, minor);
15957 
15958 	if (build > 0) {
15959 		offset = strlen(tp->fw_ver);
15960 		if (offset < TG3_VER_SIZE - 1)
15961 			tp->fw_ver[offset] = 'a' + build - 1;
15962 	}
15963 }
15964 
15965 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15966 {
15967 	u32 val, offset, start;
15968 	int i, vlen;
15969 
15970 	for (offset = TG3_NVM_DIR_START;
15971 	     offset < TG3_NVM_DIR_END;
15972 	     offset += TG3_NVM_DIRENT_SIZE) {
15973 		if (tg3_nvram_read(tp, offset, &val))
15974 			return;
15975 
15976 		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15977 			break;
15978 	}
15979 
15980 	if (offset == TG3_NVM_DIR_END)
15981 		return;
15982 
15983 	if (!tg3_flag(tp, 5705_PLUS))
15984 		start = 0x08000000;
15985 	else if (tg3_nvram_read(tp, offset - 4, &start))
15986 		return;
15987 
15988 	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15989 	    !tg3_fw_img_is_valid(tp, offset) ||
15990 	    tg3_nvram_read(tp, offset + 8, &val))
15991 		return;
15992 
15993 	offset += val - start;
15994 
15995 	vlen = strlen(tp->fw_ver);
15996 
15997 	tp->fw_ver[vlen++] = ',';
15998 	tp->fw_ver[vlen++] = ' ';
15999 
16000 	for (i = 0; i < 4; i++) {
16001 		__be32 v;
16002 		if (tg3_nvram_read_be32(tp, offset, &v))
16003 			return;
16004 
16005 		offset += sizeof(v);
16006 
16007 		if (vlen > TG3_VER_SIZE - sizeof(v)) {
16008 			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
16009 			break;
16010 		}
16011 
16012 		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
16013 		vlen += sizeof(v);
16014 	}
16015 }
16016 
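/* Detect NCSI-capable APE firmware: require the shared memory
 * signature, a ready firmware status, and the NCSI feature bit.
 */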
16017 static void tg3_probe_ncsi(struct tg3 *tp)
16018 {
16019 	u32 apedata;
16020 
16021 	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
16022 	if (apedata != APE_SEG_SIG_MAGIC)
16023 		return;
16024 
16025 	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
16026 	if (!(apedata & APE_FW_STATUS_READY))
16027 		return;
16028 
16029 	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16030 		tg3_flag_set(tp, APE_HAS_NCSI);
16031 }
16032 
16033 static void tg3_read_dash_ver(struct tg3 *tp)
16034 {
16035 	int vlen;
16036 	u32 apedata;
16037 	char *fwtype;
16038 
16039 	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16040 
16041 	if (tg3_flag(tp, APE_HAS_NCSI))
16042 		fwtype = "NCSI";
16043 	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16044 		fwtype = "SMASH";
16045 	else
16046 		fwtype = "DASH";
16047 
16048 	vlen = strlen(tp->fw_ver);
16049 
16050 	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16051 		 fwtype,
16052 		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16053 		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16054 		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16055 		 (apedata & APE_FW_VERSION_BLDMSK));
16056 }
16057 
16058 static void tg3_read_otp_ver(struct tg3 *tp)
16059 {
16060 	u32 val, val2;
16061 
16062 	if (tg3_asic_rev(tp) != ASIC_REV_5762)
16063 		return;
16064 
16065 	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16066 	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16067 	    TG3_OTP_MAGIC0_VALID(val)) {
16068 		u64 val64 = (u64) val << 32 | val2;
16069 		u32 ver = 0;
16070 		int i, vlen;
16071 
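		/* The OTP version is packed one byte at a time starting
		 * at the low byte; scan up to seven bytes and keep the
		 * last non-zero byte seen as the version number.
		 */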
16072 		for (i = 0; i < 7; i++) {
16073 			if ((val64 & 0xff) == 0)
16074 				break;
16075 			ver = val64 & 0xff;
16076 			val64 >>= 8;
16077 		}
16078 		vlen = strlen(tp->fw_ver);
16079 		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16080 	}
16081 }
16082 
16083 static void tg3_read_fw_ver(struct tg3 *tp)
16084 {
16085 	u32 val;
16086 	bool vpd_vers = false;
16087 
16088 	if (tp->fw_ver[0] != 0)
16089 		vpd_vers = true;
16090 
16091 	if (tg3_flag(tp, NO_NVRAM)) {
16092 		strcat(tp->fw_ver, "sb");
16093 		tg3_read_otp_ver(tp);
16094 		return;
16095 	}
16096 
16097 	if (tg3_nvram_read(tp, 0, &val))
16098 		return;
16099 
16100 	if (val == TG3_EEPROM_MAGIC)
16101 		tg3_read_bc_ver(tp);
16102 	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16103 		tg3_read_sb_ver(tp, val);
16104 	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16105 		tg3_read_hwsb_ver(tp);
16106 
16107 	if (tg3_flag(tp, ENABLE_ASF)) {
16108 		if (tg3_flag(tp, ENABLE_APE)) {
16109 			tg3_probe_ncsi(tp);
16110 			if (!vpd_vers)
16111 				tg3_read_dash_ver(tp);
16112 		} else if (!vpd_vers) {
16113 			tg3_read_mgmtfw_ver(tp);
16114 		}
16115 	}
16116 
16117 	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16118 }
16119 
16120 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16121 {
16122 	if (tg3_flag(tp, LRG_PROD_RING_CAP))
16123 		return TG3_RX_RET_MAX_SIZE_5717;
16124 	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16125 		return TG3_RX_RET_MAX_SIZE_5700;
16126 	else
16127 		return TG3_RX_RET_MAX_SIZE_5705;
16128 }
16129 
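/* Host bridges known to reorder posted writes to the mailbox
 * registers; when one is present on a non-PCIe board, the
 * MBOX_WRITE_REORDER flag forces a read back after each mailbox write.
 */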
16130 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16131 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16132 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16133 	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16134 	{ },
16135 };
16136 
16137 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16138 {
16139 	struct pci_dev *peer;
16140 	unsigned int func, devnr = tp->pdev->devfn & ~7;
16141 
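	/* Dual-port devices occupy two functions of the same slot, so
	 * mask the function bits off devfn and scan all eight functions
	 * for the sibling device.
	 */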
16142 	for (func = 0; func < 8; func++) {
16143 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16144 		if (peer && peer != tp->pdev)
16145 			break;
16146 		pci_dev_put(peer);
16147 	}
16148 	/* 5704 can be configured in single-port mode; set peer to
16149 	 * tp->pdev in that case.
16150 	 */
16151 	if (!peer) {
16152 		peer = tp->pdev;
16153 		return peer;
16154 	}
16155 
16156 	/*
16157 	 * We don't need to keep the refcount elevated; there's no way
16158 	 * to remove one half of this device without removing the other
16159 	 */
16160 	pci_dev_put(peer);
16161 
16162 	return peer;
16163 }
16164 
16165 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16166 {
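	/* The chip revision normally lives in the upper bits of
	 * MISC_HOST_CTRL; devices that report ASIC_REV_USE_PROD_ID_REG
	 * there expose the real revision in a product ID config register.
	 */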
16167 	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16168 	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16169 		u32 reg;
16170 
16171 		/* All devices that use the alternate
16172 		 * ASIC REV location have a CPMU.
16173 		 */
16174 		tg3_flag_set(tp, CPMU_PRESENT);
16175 
16176 		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16177 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16178 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16179 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16180 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16181 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16182 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16183 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16184 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16185 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16186 		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16187 			reg = TG3PCI_GEN2_PRODID_ASICREV;
16188 		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16189 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16190 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16191 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16192 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16193 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16194 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16195 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16196 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16197 			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16198 			reg = TG3PCI_GEN15_PRODID_ASICREV;
16199 		else
16200 			reg = TG3PCI_PRODID_ASICREV;
16201 
16202 		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16203 	}
16204 
16205 	/* Wrong chip ID in 5752 A0. This code can be removed later
16206 	 * as A0 is not in production.
16207 	 */
16208 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16209 		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16210 
16211 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16212 		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16213 
16214 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16215 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16216 	    tg3_asic_rev(tp) == ASIC_REV_5720)
16217 		tg3_flag_set(tp, 5717_PLUS);
16218 
16219 	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16220 	    tg3_asic_rev(tp) == ASIC_REV_57766)
16221 		tg3_flag_set(tp, 57765_CLASS);
16222 
16223 	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16224 	     tg3_asic_rev(tp) == ASIC_REV_5762)
16225 		tg3_flag_set(tp, 57765_PLUS);
16226 
16227 	/* Intentionally exclude ASIC_REV_5906 */
16228 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16229 	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16230 	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16231 	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16232 	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16233 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16234 	    tg3_flag(tp, 57765_PLUS))
16235 		tg3_flag_set(tp, 5755_PLUS);
16236 
16237 	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16238 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16239 		tg3_flag_set(tp, 5780_CLASS);
16240 
16241 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16242 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16243 	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16244 	    tg3_flag(tp, 5755_PLUS) ||
16245 	    tg3_flag(tp, 5780_CLASS))
16246 		tg3_flag_set(tp, 5750_PLUS);
16247 
16248 	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16249 	    tg3_flag(tp, 5750_PLUS))
16250 		tg3_flag_set(tp, 5705_PLUS);
16251 }
16252 
16253 static bool tg3_10_100_only_device(struct tg3 *tp,
16254 				   const struct pci_device_id *ent)
16255 {
16256 	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16257 
16258 	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16259 	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16260 	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16261 		return true;
16262 
16263 	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16264 		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16265 			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16266 				return true;
16267 		} else {
16268 			return true;
16269 		}
16270 	}
16271 
16272 	return false;
16273 }
16274 
16275 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16276 {
16277 	u32 misc_ctrl_reg;
16278 	u32 pci_state_reg, grc_misc_cfg;
16279 	u32 val;
16280 	u16 pci_cmd;
16281 	int err;
16282 
16283 	/* Force memory write invalidate off.  If we leave it on,
16284 	 * then on 5700_BX chips we have to enable a workaround.
16285 	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16286 	 * to match the cacheline size.  The Broadcom driver have this
16287 	 * to match the cacheline size.  The Broadcom driver has this
16288 	 * workaround but turns MWI off all the time, so it never uses
16289 	 */
16290 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16291 	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16292 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16293 
16294 	/* Important! -- Make sure register accesses are byteswapped
16295 	 * correctly.  Also, for those chips that require it, make
16296 	 * sure that indirect register accesses are enabled before
16297 	 * the first operation.
16298 	 */
16299 	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16300 			      &misc_ctrl_reg);
16301 	tp->misc_host_ctrl |= (misc_ctrl_reg &
16302 			       MISC_HOST_CTRL_CHIPREV);
16303 	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16304 			       tp->misc_host_ctrl);
16305 
16306 	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16307 
16308 	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16309 	 * we need to disable memory and use config. cycles
16310 	 * only to access all registers. The 5702/03 chips
16311 	 * can mistakenly decode the special cycles from the
16312 	 * ICH chipsets as memory write cycles, causing corruption
16313 	 * of register and memory space. Only certain ICH bridges
16314 	 * will drive special cycles with non-zero data during the
16315 	 * address phase which can fall within the 5703's address
16316 	 * range. This is not an ICH bug as the PCI spec allows
16317 	 * non-zero address during special cycles. However, only
16318 	 * these ICH bridges are known to drive non-zero addresses
16319 	 * during special cycles.
16320 	 *
16321 	 * Since special cycles do not cross PCI bridges, we only
16322 	 * enable this workaround if the 5703 is on the secondary
16323 	 * bus of these ICH bridges.
16324 	 */
16325 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16326 	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16327 		static struct tg3_dev_id {
16328 			u32	vendor;
16329 			u32	device;
16330 			u32	rev;
16331 		} ich_chipsets[] = {
16332 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16333 			  PCI_ANY_ID },
16334 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16335 			  PCI_ANY_ID },
16336 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16337 			  0xa },
16338 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16339 			  PCI_ANY_ID },
16340 			{ },
16341 		};
16342 		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16343 		struct pci_dev *bridge = NULL;
16344 
16345 		while (pci_id->vendor != 0) {
16346 			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16347 						bridge);
16348 			if (!bridge) {
16349 				pci_id++;
16350 				continue;
16351 			}
16352 			if (pci_id->rev != PCI_ANY_ID) {
16353 				if (bridge->revision > pci_id->rev)
16354 					continue;
16355 			}
16356 			if (bridge->subordinate &&
16357 			    (bridge->subordinate->number ==
16358 			     tp->pdev->bus->number)) {
16359 				tg3_flag_set(tp, ICH_WORKAROUND);
16360 				pci_dev_put(bridge);
16361 				break;
16362 			}
16363 		}
16364 	}
16365 
16366 	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16367 		static struct tg3_dev_id {
16368 			u32	vendor;
16369 			u32	device;
16370 		} bridge_chipsets[] = {
16371 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16372 			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16373 			{ },
16374 		};
16375 		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16376 		struct pci_dev *bridge = NULL;
16377 
16378 		while (pci_id->vendor != 0) {
16379 			bridge = pci_get_device(pci_id->vendor,
16380 						pci_id->device,
16381 						bridge);
16382 			if (!bridge) {
16383 				pci_id++;
16384 				continue;
16385 			}
16386 			if (bridge->subordinate &&
16387 			    (bridge->subordinate->number <=
16388 			     tp->pdev->bus->number) &&
16389 			    (bridge->subordinate->busn_res.end >=
16390 			     tp->pdev->bus->number)) {
16391 				tg3_flag_set(tp, 5701_DMA_BUG);
16392 				pci_dev_put(bridge);
16393 				break;
16394 			}
16395 		}
16396 	}
16397 
16398 	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16399 	 * DMA addresses > 40-bit. This bridge may have other additional
16400 	 * 57xx devices behind it in some 4-port NIC designs for example.
16401 	 * Any tg3 device found behind the bridge will also need the 40-bit
16402 	 * DMA workaround.
16403 	 */
16404 	if (tg3_flag(tp, 5780_CLASS)) {
16405 		tg3_flag_set(tp, 40BIT_DMA_BUG);
16406 		tp->msi_cap = tp->pdev->msi_cap;
16407 	} else {
16408 		struct pci_dev *bridge = NULL;
16409 
16410 		do {
16411 			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16412 						PCI_DEVICE_ID_SERVERWORKS_EPB,
16413 						bridge);
16414 			if (bridge && bridge->subordinate &&
16415 			    (bridge->subordinate->number <=
16416 			     tp->pdev->bus->number) &&
16417 			    (bridge->subordinate->busn_res.end >=
16418 			     tp->pdev->bus->number)) {
16419 				tg3_flag_set(tp, 40BIT_DMA_BUG);
16420 				pci_dev_put(bridge);
16421 				break;
16422 			}
16423 		} while (bridge);
16424 	}
16425 
16426 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16427 	    tg3_asic_rev(tp) == ASIC_REV_5714)
16428 		tp->pdev_peer = tg3_find_peer(tp);
16429 
16430 	/* Determine TSO capabilities */
16431 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16432 		; /* Do nothing. HW bug. */
16433 	else if (tg3_flag(tp, 57765_PLUS))
16434 		tg3_flag_set(tp, HW_TSO_3);
16435 	else if (tg3_flag(tp, 5755_PLUS) ||
16436 		 tg3_asic_rev(tp) == ASIC_REV_5906)
16437 		tg3_flag_set(tp, HW_TSO_2);
16438 	else if (tg3_flag(tp, 5750_PLUS)) {
16439 		tg3_flag_set(tp, HW_TSO_1);
16440 		tg3_flag_set(tp, TSO_BUG);
16441 		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16442 		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16443 			tg3_flag_clear(tp, TSO_BUG);
16444 	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16445 		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16446 		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16447 		tg3_flag_set(tp, FW_TSO);
16448 		tg3_flag_set(tp, TSO_BUG);
16449 		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16450 			tp->fw_needed = FIRMWARE_TG3TSO5;
16451 		else
16452 			tp->fw_needed = FIRMWARE_TG3TSO;
16453 	}
16454 
16455 	/* Selectively allow TSO based on operating conditions */
16456 	if (tg3_flag(tp, HW_TSO_1) ||
16457 	    tg3_flag(tp, HW_TSO_2) ||
16458 	    tg3_flag(tp, HW_TSO_3) ||
16459 	    tg3_flag(tp, FW_TSO)) {
16460 		/* For firmware TSO, assume ASF is disabled.
16461 		 * We'll disable TSO later if we discover ASF
16462 		 * is enabled in tg3_get_eeprom_hw_cfg().
16463 		 */
16464 		tg3_flag_set(tp, TSO_CAPABLE);
16465 	} else {
16466 		tg3_flag_clear(tp, TSO_CAPABLE);
16467 		tg3_flag_clear(tp, TSO_BUG);
16468 		tp->fw_needed = NULL;
16469 	}
16470 
16471 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16472 		tp->fw_needed = FIRMWARE_TG3;
16473 
16474 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16475 		tp->fw_needed = FIRMWARE_TG357766;
16476 
16477 	tp->irq_max = 1;
16478 
16479 	if (tg3_flag(tp, 5750_PLUS)) {
16480 		tg3_flag_set(tp, SUPPORT_MSI);
16481 		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16482 		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16483 		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16484 		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16485 		     tp->pdev_peer == tp->pdev))
16486 			tg3_flag_clear(tp, SUPPORT_MSI);
16487 
16488 		if (tg3_flag(tp, 5755_PLUS) ||
16489 		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16490 			tg3_flag_set(tp, 1SHOT_MSI);
16491 		}
16492 
16493 		if (tg3_flag(tp, 57765_PLUS)) {
16494 			tg3_flag_set(tp, SUPPORT_MSIX);
16495 			tp->irq_max = TG3_IRQ_MAX_VECS;
16496 		}
16497 	}
16498 
16499 	tp->txq_max = 1;
16500 	tp->rxq_max = 1;
16501 	if (tp->irq_max > 1) {
16502 		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16503 		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16504 
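		/* Only the 5719 and 5720 support multiple TX queues. */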
16505 		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16506 		    tg3_asic_rev(tp) == ASIC_REV_5720)
16507 			tp->txq_max = tp->irq_max - 1;
16508 	}
16509 
16510 	if (tg3_flag(tp, 5755_PLUS) ||
16511 	    tg3_asic_rev(tp) == ASIC_REV_5906)
16512 		tg3_flag_set(tp, SHORT_DMA_BUG);
16513 
16514 	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16515 		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16516 
16517 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16518 	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16519 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16520 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16521 		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16522 
16523 	if (tg3_flag(tp, 57765_PLUS) &&
16524 	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16525 		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16526 
16527 	if (!tg3_flag(tp, 5705_PLUS) ||
16528 	    tg3_flag(tp, 5780_CLASS) ||
16529 	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16530 		tg3_flag_set(tp, JUMBO_CAPABLE);
16531 
16532 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16533 			      &pci_state_reg);
16534 
16535 	if (pci_is_pcie(tp->pdev)) {
16536 		u16 lnkctl;
16537 
16538 		tg3_flag_set(tp, PCI_EXPRESS);
16539 
16540 		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16541 		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16542 			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16543 				tg3_flag_clear(tp, HW_TSO_2);
16544 				tg3_flag_clear(tp, TSO_CAPABLE);
16545 			}
16546 			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16547 			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16548 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16549 			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16550 				tg3_flag_set(tp, CLKREQ_BUG);
16551 		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16552 			tg3_flag_set(tp, L1PLLPD_EN);
16553 		}
16554 	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16555 		/* BCM5785 devices are effectively PCIe devices, and should
16556 		 * follow PCIe codepaths, but do not have a PCIe capabilities
16557 		 * section.
16558 		 */
16559 		tg3_flag_set(tp, PCI_EXPRESS);
16560 	} else if (!tg3_flag(tp, 5705_PLUS) ||
16561 		   tg3_flag(tp, 5780_CLASS)) {
16562 		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16563 		if (!tp->pcix_cap) {
16564 			dev_err(&tp->pdev->dev,
16565 				"Cannot find PCI-X capability, aborting\n");
16566 			return -EIO;
16567 		}
16568 
16569 		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16570 			tg3_flag_set(tp, PCIX_MODE);
16571 	}
16572 
16573 	/* If we have an AMD 762 or VIA K8T800 chipset, write
16574 	 * reordering to the mailbox registers done by the host
16575 	 * controller can cause major troubles.  We read back from
16576 	 * every mailbox register write to force the writes to be
16577 	 * posted to the chip in order.
16578 	 */
16579 	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16580 	    !tg3_flag(tp, PCI_EXPRESS))
16581 		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16582 
16583 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16584 			     &tp->pci_cacheline_sz);
16585 	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16586 			     &tp->pci_lat_timer);
16587 	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16588 	    tp->pci_lat_timer < 64) {
16589 		tp->pci_lat_timer = 64;
16590 		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16591 				      tp->pci_lat_timer);
16592 	}
16593 
16594 	/* Important! -- It is critical that the PCI-X hw workaround
16595 	 * situation is decided before the first MMIO register access.
16596 	 */
16597 	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16598 		/* 5700 BX chips need to have their TX producer index
16599 		 * mailboxes written twice to workaround a bug.
16600 		 * mailboxes written twice to work around a bug.
16601 		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16602 
16603 		/* If we are in PCI-X mode, enable register write workaround.
16604 		 *
16605 		 * The workaround is to use indirect register accesses
16606 		 * for all chip writes not to mailbox registers.
16607 		 */
16608 		if (tg3_flag(tp, PCIX_MODE)) {
16609 			u32 pm_reg;
16610 
16611 			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16612 
16613 			/* The chip can have its power management PCI config
16614 			 * space registers clobbered due to this bug.
16615 			 * So explicitly force the chip into D0 here.
16616 			 */
16617 			pci_read_config_dword(tp->pdev,
16618 					      tp->pdev->pm_cap + PCI_PM_CTRL,
16619 					      &pm_reg);
16620 			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16621 			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16622 			pci_write_config_dword(tp->pdev,
16623 					       tp->pdev->pm_cap + PCI_PM_CTRL,
16624 					       pm_reg);
16625 
16626 			/* Also, force SERR#/PERR# in PCI command. */
16627 			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16628 			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16629 			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16630 		}
16631 	}
16632 
16633 	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16634 		tg3_flag_set(tp, PCI_HIGH_SPEED);
16635 	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16636 		tg3_flag_set(tp, PCI_32BIT);
16637 
16638 	/* Chip-specific fixup from Broadcom driver */
16639 	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16640 	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16641 		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16642 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16643 	}
16644 
16645 	/* Default fast path register access methods */
16646 	tp->read32 = tg3_read32;
16647 	tp->write32 = tg3_write32;
16648 	tp->read32_mbox = tg3_read32;
16649 	tp->write32_mbox = tg3_write32;
16650 	tp->write32_tx_mbox = tg3_write32;
16651 	tp->write32_rx_mbox = tg3_write32;
16652 
16653 	/* Various workaround register access methods */
16654 	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16655 		tp->write32 = tg3_write_indirect_reg32;
16656 	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16657 		 (tg3_flag(tp, PCI_EXPRESS) &&
16658 		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16659 		/*
16660 		 * Back-to-back register writes can cause problems on these
16661 		 * chips; the workaround is to read back all reg writes
16662 		 * except those to mailbox regs.
16663 		 *
16664 		 * See tg3_write_indirect_reg32().
16665 		 */
16666 		tp->write32 = tg3_write_flush_reg32;
16667 	}
16668 
16669 	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16670 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16671 		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16672 			tp->write32_rx_mbox = tg3_write_flush_reg32;
16673 	}
16674 
16675 	if (tg3_flag(tp, ICH_WORKAROUND)) {
16676 		tp->read32 = tg3_read_indirect_reg32;
16677 		tp->write32 = tg3_write_indirect_reg32;
16678 		tp->read32_mbox = tg3_read_indirect_mbox;
16679 		tp->write32_mbox = tg3_write_indirect_mbox;
16680 		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16681 		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16682 
16683 		iounmap(tp->regs);
16684 		tp->regs = NULL;
16685 
16686 		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16687 		pci_cmd &= ~PCI_COMMAND_MEMORY;
16688 		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16689 	}
16690 	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16691 		tp->read32_mbox = tg3_read32_mbox_5906;
16692 		tp->write32_mbox = tg3_write32_mbox_5906;
16693 		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16694 		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16695 	}
16696 
16697 	if (tp->write32 == tg3_write_indirect_reg32 ||
16698 	    (tg3_flag(tp, PCIX_MODE) &&
16699 	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16700 	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16701 		tg3_flag_set(tp, SRAM_USE_CONFIG);
16702 
16703 	/* The memory arbiter has to be enabled in order for SRAM accesses
16704 	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16705 	 * sure it is enabled, but other entities such as system netboot
16706 	 * code might disable it.
16707 	 */
16708 	val = tr32(MEMARB_MODE);
16709 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16710 
16711 	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16712 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16713 	    tg3_flag(tp, 5780_CLASS)) {
16714 		if (tg3_flag(tp, PCIX_MODE)) {
16715 			pci_read_config_dword(tp->pdev,
16716 					      tp->pcix_cap + PCI_X_STATUS,
16717 					      &val);
16718 			tp->pci_fn = val & 0x7;
16719 		}
16720 	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16721 		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16722 		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16723 		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16724 		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16725 			val = tr32(TG3_CPMU_STATUS);
16726 
16727 		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16728 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16729 		else
16730 			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16731 				     TG3_CPMU_STATUS_FSHFT_5719;
16732 	}
16733 
16734 	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16735 		tp->write32_tx_mbox = tg3_write_flush_reg32;
16736 		tp->write32_rx_mbox = tg3_write_flush_reg32;
16737 	}
16738 
16739 	/* Get eeprom hw config before calling tg3_set_power_state().
16740 	 * In particular, the TG3_FLAG_IS_NIC flag must be
16741 	 * determined before calling tg3_set_power_state() so that
16742 	 * we know whether or not to switch out of Vaux power.
16743 	 * When the flag is set, it means that GPIO1 is used for eeprom
16744 	 * write protect and also implies that it is a LOM where GPIOs
16745 	 * are not used to switch power.
16746 	 */
16747 	tg3_get_eeprom_hw_cfg(tp);
16748 
16749 	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16750 		tg3_flag_clear(tp, TSO_CAPABLE);
16751 		tg3_flag_clear(tp, TSO_BUG);
16752 		tp->fw_needed = NULL;
16753 	}
16754 
16755 	if (tg3_flag(tp, ENABLE_APE)) {
16756 		/* Allow reads and writes to the
16757 		 * APE register and memory space.
16758 		 */
16759 		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16760 				 PCISTATE_ALLOW_APE_SHMEM_WR |
16761 				 PCISTATE_ALLOW_APE_PSPACE_WR;
16762 		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16763 				       pci_state_reg);
16764 
16765 		tg3_ape_lock_init(tp);
16766 		tp->ape_hb_interval =
16767 			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16768 	}
16769 
16770 	/* Set up tp->grc_local_ctrl before calling
16771 	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16772 	 * will bring 5700's external PHY out of reset.
16773 	 * It is also used as eeprom write protect on LOMs.
16774 	 */
16775 	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16776 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16777 	    tg3_flag(tp, EEPROM_WRITE_PROT))
16778 		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16779 				       GRC_LCLCTRL_GPIO_OUTPUT1);
16780 	/* Unused GPIO3 must be driven as output on 5752 because there
16781 	 * are no pull-up resistors on unused GPIO pins.
16782 	 */
16783 	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16784 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16785 
16786 	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16787 	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16788 	    tg3_flag(tp, 57765_CLASS))
16789 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16790 
16791 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16792 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16793 		/* Turn off the debug UART. */
16794 		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16795 		if (tg3_flag(tp, IS_NIC))
16796 			/* Keep VMain power. */
16797 			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16798 					      GRC_LCLCTRL_GPIO_OUTPUT0;
16799 	}
16800 
16801 	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16802 		tp->grc_local_ctrl |=
16803 			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16804 
16805 	/* Switch out of Vaux if it is a NIC */
16806 	tg3_pwrsrc_switch_to_vmain(tp);
16807 
16808 	/* Derive initial jumbo mode from MTU assigned in
16809 	 * ether_setup() via the alloc_etherdev() call
16810 	 */
16811 	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16812 		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16813 
16814 	/* Determine WakeOnLan speed to use. */
16815 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16816 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16817 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16818 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16819 		tg3_flag_clear(tp, WOL_SPEED_100MB);
16820 	} else {
16821 		tg3_flag_set(tp, WOL_SPEED_100MB);
16822 	}
16823 
16824 	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16825 		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16826 
16827 	/* A few boards don't want the Ethernet@WireSpeed phy feature */
16828 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16829 	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16830 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16831 	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16832 	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16833 	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16834 		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16835 
16836 	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16837 	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16838 		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16839 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16840 		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16841 
16842 	if (tg3_flag(tp, 5705_PLUS) &&
16843 	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16844 	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16845 	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16846 	    !tg3_flag(tp, 57765_PLUS)) {
16847 		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16848 		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16849 		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16850 		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16851 			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16852 			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16853 				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16854 			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16855 				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16856 		} else
16857 			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16858 	}
16859 
16860 	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16861 	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16862 		tp->phy_otp = tg3_read_otp_phycfg(tp);
16863 		if (tp->phy_otp == 0)
16864 			tp->phy_otp = TG3_OTP_DEFAULT;
16865 	}
16866 
16867 	if (tg3_flag(tp, CPMU_PRESENT))
16868 		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16869 	else
16870 		tp->mi_mode = MAC_MI_MODE_BASE;
16871 
16872 	tp->coalesce_mode = 0;
16873 	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16874 	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16875 		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16876 
16877 	/* Set these bits to enable statistics workaround. */
16878 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16879 	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16880 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16881 	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16882 		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16883 		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16884 	}
16885 
16886 	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16887 	    tg3_asic_rev(tp) == ASIC_REV_57780)
16888 		tg3_flag_set(tp, USE_PHYLIB);
16889 
16890 	err = tg3_mdio_init(tp);
16891 	if (err)
16892 		return err;
16893 
16894 	/* Initialize data/descriptor byte/word swapping. */
16895 	val = tr32(GRC_MODE);
16896 	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16897 	    tg3_asic_rev(tp) == ASIC_REV_5762)
16898 		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16899 			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16900 			GRC_MODE_B2HRX_ENABLE |
16901 			GRC_MODE_HTX2B_ENABLE |
16902 			GRC_MODE_HOST_STACKUP);
16903 	else
16904 		val &= GRC_MODE_HOST_STACKUP;
16905 
16906 	tw32(GRC_MODE, val | tp->grc_mode);
16907 
16908 	tg3_switch_clocks(tp);
16909 
16910 	/* Clear this out for sanity. */
16911 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16912 
16913 	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16914 	tw32(TG3PCI_REG_BASE_ADDR, 0);
16915 
16916 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16917 			      &pci_state_reg);
16918 	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16919 	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16920 		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16921 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16922 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16923 		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16924 			void __iomem *sram_base;
16925 
16926 			/* Write some dummy words into the SRAM status block
16927 			 * area and see if it reads back correctly.  If the return
16928 			 * value is bad, force enable the PCIX workaround.
16929 			 */
16930 			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16931 
16932 			writel(0x00000000, sram_base);
16933 			writel(0x00000000, sram_base + 4);
16934 			writel(0xffffffff, sram_base + 4);
16935 			if (readl(sram_base) != 0x00000000)
16936 				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16937 		}
16938 	}
16939 
16940 	udelay(50);
16941 	tg3_nvram_init(tp);
16942 
16943 	/* If the device has an NVRAM, no need to load patch firmware */
16944 	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16945 	    !tg3_flag(tp, NO_NVRAM))
16946 		tp->fw_needed = NULL;
16947 
16948 	grc_misc_cfg = tr32(GRC_MISC_CFG);
16949 	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16950 
16951 	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16952 	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16953 	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16954 		tg3_flag_set(tp, IS_5788);
16955 
16956 	if (!tg3_flag(tp, IS_5788) &&
16957 	    tg3_asic_rev(tp) != ASIC_REV_5700)
16958 		tg3_flag_set(tp, TAGGED_STATUS);
16959 	if (tg3_flag(tp, TAGGED_STATUS)) {
16960 		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16961 				      HOSTCC_MODE_CLRTICK_TXBD);
16962 
16963 		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16964 		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16965 				       tp->misc_host_ctrl);
16966 	}
16967 
16968 	/* Preserve the APE MAC_MODE bits */
16969 	if (tg3_flag(tp, ENABLE_APE))
16970 		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16971 	else
16972 		tp->mac_mode = 0;
16973 
16974 	if (tg3_10_100_only_device(tp, ent))
16975 		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16976 
16977 	err = tg3_phy_probe(tp);
16978 	if (err) {
16979 		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16980 		/* ... but do not return immediately ... */
16981 		tg3_mdio_fini(tp);
16982 	}
16983 
16984 	tg3_read_vpd(tp);
16985 	tg3_read_fw_ver(tp);
16986 
16987 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16988 		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16989 	} else {
16990 		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16991 			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16992 		else
16993 			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16994 	}
16995 
16996 	/* 5700 {AX,BX} chips have a broken status block link
16997 	 * change bit implementation, so we must use the
16998 	 * status register in those cases.
16999 	 */
17000 	if (tg3_asic_rev(tp) == ASIC_REV_5700)
17001 		tg3_flag_set(tp, USE_LINKCHG_REG);
17002 	else
17003 		tg3_flag_clear(tp, USE_LINKCHG_REG);
17004 
17005 	/* The led_ctrl is set during tg3_phy_probe; here we might
17006 	 * have to force the link status polling mechanism based
17007 	 * upon subsystem IDs.
17008 	 */
17009 	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
17010 	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
17011 	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
17012 		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17013 		tg3_flag_set(tp, USE_LINKCHG_REG);
17014 	}
17015 
17016 	/* For all SERDES we poll the MAC status register. */
17017 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
17018 		tg3_flag_set(tp, POLL_SERDES);
17019 	else
17020 		tg3_flag_clear(tp, POLL_SERDES);
17021 
17022 	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17023 		tg3_flag_set(tp, POLL_CPMU_LINK);
17024 
17025 	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17026 	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17027 	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17028 	    tg3_flag(tp, PCIX_MODE)) {
17029 		tp->rx_offset = NET_SKB_PAD;
17030 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17031 		tp->rx_copy_thresh = ~(u16)0;
17032 #endif
17033 	}
17034 
17035 	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17036 	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17037 	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17038 
17039 	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17040 
17041 	/* Increment the rx prod index on the rx std ring by at most
17042 	 * 8 for these chips to work around hw errata.
17043 	 */
17044 	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17045 	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
17046 	    tg3_asic_rev(tp) == ASIC_REV_5755)
17047 		tp->rx_std_max_post = 8;
17048 
17049 	if (tg3_flag(tp, ASPM_WORKAROUND))
17050 		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17051 				     PCIE_PWR_MGMT_L1_THRESH_MSK;
17052 
17053 	return err;
17054 }
17055 
17056 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17057 {
17058 	u32 hi, lo, mac_offset;
17059 	int addr_ok = 0;
17060 	int err;
17061 
17062 	if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17063 		return 0;
17064 
17065 	if (tg3_flag(tp, IS_SSB_CORE)) {
17066 		err = ssb_gige_get_macaddr(tp->pdev, addr);
17067 		if (!err && is_valid_ether_addr(addr))
17068 			return 0;
17069 	}
17070 
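	/* NVRAM MAC address offset: 0x7c by default; 0xcc for the second
	 * MAC of a 5704/5780-class device or for odd functions on 5717+
	 * (with a further 0x18c for functions 2 and 3); 0x10 on the 5906.
	 */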
17071 	mac_offset = 0x7c;
17072 	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17073 	    tg3_flag(tp, 5780_CLASS)) {
17074 		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17075 			mac_offset = 0xcc;
17076 		if (tg3_nvram_lock(tp))
17077 			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17078 		else
17079 			tg3_nvram_unlock(tp);
17080 	} else if (tg3_flag(tp, 5717_PLUS)) {
17081 		if (tp->pci_fn & 1)
17082 			mac_offset = 0xcc;
17083 		if (tp->pci_fn > 1)
17084 			mac_offset += 0x18c;
17085 	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17086 		mac_offset = 0x10;
17087 
17088 	/* First try to get it from MAC address mailbox. */
17089 	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
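	/* A high mailbox word of 0x484b ("HK" in ASCII) marks a MAC
	 * address that the bootcode has stored in SRAM.
	 */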
17090 	if ((hi >> 16) == 0x484b) {
17091 		addr[0] = (hi >>  8) & 0xff;
17092 		addr[1] = (hi >>  0) & 0xff;
17093 
17094 		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17095 		addr[2] = (lo >> 24) & 0xff;
17096 		addr[3] = (lo >> 16) & 0xff;
17097 		addr[4] = (lo >>  8) & 0xff;
17098 		addr[5] = (lo >>  0) & 0xff;
17099 
17100 		/* Some old bootcode may report a 0 MAC address in SRAM */
17101 		addr_ok = is_valid_ether_addr(addr);
17102 	}
17103 	if (!addr_ok) {
17104 		__be32 be_hi, be_lo;
17105 
17106 		/* Next, try NVRAM. */
17107 		if (!tg3_flag(tp, NO_NVRAM) &&
17108 		    !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17109 		    !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17110 			memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17111 			memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17112 		}
17113 		/* Finally just fetch it out of the MAC control regs. */
17114 		else {
17115 			hi = tr32(MAC_ADDR_0_HIGH);
17116 			lo = tr32(MAC_ADDR_0_LOW);
17117 
17118 			addr[5] = lo & 0xff;
17119 			addr[4] = (lo >> 8) & 0xff;
17120 			addr[3] = (lo >> 16) & 0xff;
17121 			addr[2] = (lo >> 24) & 0xff;
17122 			addr[1] = hi & 0xff;
17123 			addr[0] = (hi >> 8) & 0xff;
17124 		}
17125 	}
17126 
17127 	if (!is_valid_ether_addr(addr))
17128 		return -EINVAL;
17129 	return 0;
17130 }
17131 
17132 #define BOUNDARY_SINGLE_CACHELINE	1
17133 #define BOUNDARY_MULTI_CACHELINE	2
17134 
17135 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17136 {
17137 	int cacheline_size;
17138 	u8 byte;
17139 	int goal;
17140 
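	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, e.g. a raw
	 * value of 0x10 means 64 bytes; zero (unset) is treated as a
	 * 1024-byte cacheline.
	 */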
17141 	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17142 	if (byte == 0)
17143 		cacheline_size = 1024;
17144 	else
17145 		cacheline_size = (int) byte * 4;
17146 
17147 	/* On 5703 and later chips, the boundary bits have no
17148 	 * effect.
17149 	 */
17150 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17151 	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17152 	    !tg3_flag(tp, PCI_EXPRESS))
17153 		goto out;
17154 
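/* Pick the burst-boundary goal by architecture: multiple cachelines
 * on PPC64/PARISC, a single cacheline on SPARC64/Alpha, and no
 * alignment constraint elsewhere.
 */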
17155 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17156 	goal = BOUNDARY_MULTI_CACHELINE;
17157 #else
17158 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17159 	goal = BOUNDARY_SINGLE_CACHELINE;
17160 #else
17161 	goal = 0;
17162 #endif
17163 #endif
17164 
17165 	if (tg3_flag(tp, 57765_PLUS)) {
17166 		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17167 		goto out;
17168 	}
17169 
17170 	if (!goal)
17171 		goto out;
17172 
17173 	/* PCI controllers on most RISC systems tend to disconnect
17174 	 * when a device tries to burst across a cache-line boundary.
17175 	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17176 	 *
17177 	 * Unfortunately, for PCI-E there are only limited
17178 	 * write-side controls for this, and thus for reads
17179 	 * we will still get the disconnects.  We'll also waste
17180 	 * these PCI cycles for both read and write for chips
17181 	 * other than 5700 and 5701 which do not implement the
17182 	 * boundary bits.
17183 	 */
17184 	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17185 		switch (cacheline_size) {
17186 		case 16:
17187 		case 32:
17188 		case 64:
17189 		case 128:
17190 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17191 				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17192 					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17193 			} else {
17194 				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17195 					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17196 			}
17197 			break;
17198 
17199 		case 256:
17200 			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17201 				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17202 			break;
17203 
17204 		default:
17205 			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17206 				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17207 			break;
17208 		}
17209 	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17210 		switch (cacheline_size) {
17211 		case 16:
17212 		case 32:
17213 		case 64:
17214 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17215 				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17216 				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17217 				break;
17218 			}
17219 			fallthrough;
17220 		case 128:
17221 		default:
17222 			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17223 			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17224 			break;
17225 		}
17226 	} else {
17227 		switch (cacheline_size) {
17228 		case 16:
17229 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17230 				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17231 					DMA_RWCTRL_WRITE_BNDRY_16);
17232 				break;
17233 			}
17234 			fallthrough;
17235 		case 32:
17236 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17237 				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17238 					DMA_RWCTRL_WRITE_BNDRY_32);
17239 				break;
17240 			}
17241 			fallthrough;
17242 		case 64:
17243 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17244 				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17245 					DMA_RWCTRL_WRITE_BNDRY_64);
17246 				break;
17247 			}
17248 			fallthrough;
17249 		case 128:
17250 			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17251 				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17252 					DMA_RWCTRL_WRITE_BNDRY_128);
17253 				break;
17254 			}
17255 			fallthrough;
17256 		case 256:
17257 			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17258 				DMA_RWCTRL_WRITE_BNDRY_256);
17259 			break;
17260 		case 512:
17261 			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17262 				DMA_RWCTRL_WRITE_BNDRY_512);
17263 			break;
17264 		case 1024:
17265 		default:
17266 			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17267 				DMA_RWCTRL_WRITE_BNDRY_1024);
17268 			break;
17269 		}
17270 	}
17271 
17272 out:
17273 	return val;
17274 }
17275 
17276 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17277 			   int size, bool to_device)
17278 {
17279 	struct tg3_internal_buffer_desc test_desc;
17280 	u32 sram_dma_descs;
17281 	int i, ret;
17282 
17283 	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17284 
17285 	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17286 	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17287 	tw32(RDMAC_STATUS, 0);
17288 	tw32(WDMAC_STATUS, 0);
17289 
17290 	tw32(BUFMGR_MODE, 0);
17291 	tw32(FTQ_RESET, 0);
17292 
17293 	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17294 	test_desc.addr_lo = buf_dma & 0xffffffff;
17295 	test_desc.nic_mbuf = 0x00002100;
17296 	test_desc.len = size;
17297 
17298 	/*
17299 	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17300 	 * the *second* time the tg3 driver was getting loaded after an
17301 	 * initial scan.
17302 	 *
17303 	 * Broadcom tells me:
17304 	 *   ...the DMA engine is connected to the GRC block and a DMA
17305 	 *   reset may affect the GRC block in some unpredictable way...
17306 	 *   The behavior of resets to individual blocks has not been tested.
17307 	 *
17308 	 * Broadcom noted the GRC reset will also reset all sub-components.
17309 	 */
17310 	if (to_device) {
17311 		test_desc.cqid_sqid = (13 << 8) | 2;
17312 
17313 		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17314 		udelay(40);
17315 	} else {
17316 		test_desc.cqid_sqid = (16 << 8) | 7;
17317 
17318 		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17319 		udelay(40);
17320 	}
17321 	test_desc.flags = 0x00000005;
17322 
17323 	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17324 		u32 val;
17325 
17326 		val = *(((u32 *)&test_desc) + i);
17327 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17328 				       sram_dma_descs + (i * sizeof(u32)));
17329 		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17330 	}
17331 	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17332 
17333 	if (to_device)
17334 		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17335 	else
17336 		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17337 
17338 	ret = -ENODEV;
17339 	for (i = 0; i < 40; i++) {
17340 		u32 val;
17341 
17342 		if (to_device)
17343 			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17344 		else
17345 			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17346 		if ((val & 0xffff) == sram_dma_descs) {
17347 			ret = 0;
17348 			break;
17349 		}
17350 
17351 		udelay(100);
17352 	}
17353 
17354 	return ret;
17355 }
17356 
17357 #define TEST_BUFFER_SIZE	0x2000
17358 
17359 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17360 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17361 	{ },
17362 };
17363 
17364 static int tg3_test_dma(struct tg3 *tp)
17365 {
17366 	dma_addr_t buf_dma;
17367 	u32 *buf, saved_dma_rwctrl;
17368 	int ret = 0;
17369 
17370 	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17371 				 &buf_dma, GFP_KERNEL);
17372 	if (!buf) {
17373 		ret = -ENOMEM;
17374 		goto out_nofree;
17375 	}
17376 
17377 	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17378 			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17379 
17380 	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17381 
17382 	if (tg3_flag(tp, 57765_PLUS))
17383 		goto out;
17384 
17385 	if (tg3_flag(tp, PCI_EXPRESS)) {
17386 		/* DMA read watermark not used on PCIE */
17387 		tp->dma_rwctrl |= 0x00180000;
17388 	} else if (!tg3_flag(tp, PCIX_MODE)) {
17389 		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17390 		    tg3_asic_rev(tp) == ASIC_REV_5750)
17391 			tp->dma_rwctrl |= 0x003f0000;
17392 		else
17393 			tp->dma_rwctrl |= 0x003f000f;
17394 	} else {
17395 		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17396 		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17397 			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17398 			u32 read_water = 0x7;
17399 
17400 			/* If the 5704 is behind the EPB bridge, we can
17401 			 * do the less restrictive ONE_DMA workaround for
17402 			 * better performance.
17403 			 */
17404 			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17405 			    tg3_asic_rev(tp) == ASIC_REV_5704)
17406 				tp->dma_rwctrl |= 0x8000;
17407 			else if (ccval == 0x6 || ccval == 0x7)
17408 				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17409 
17410 			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17411 				read_water = 4;
17412 			/* Set bit 23 to enable PCIX hw bug fix */
17413 			tp->dma_rwctrl |=
17414 				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17415 				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17416 				(1 << 23);
17417 		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17418 			/* 5780 always in PCIX mode */
17419 			tp->dma_rwctrl |= 0x00144000;
17420 		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17421 			/* 5714 always in PCIX mode */
17422 			tp->dma_rwctrl |= 0x00148000;
17423 		} else {
17424 			tp->dma_rwctrl |= 0x001b000f;
17425 		}
17426 	}
17427 	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17428 		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17429 
17430 	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17431 	    tg3_asic_rev(tp) == ASIC_REV_5704)
17432 		tp->dma_rwctrl &= 0xfffffff0;
17433 
17434 	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17435 	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17436 		/* Remove this if it causes problems for some boards. */
17437 		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17438 
17439 		/* On 5700/5701 chips, we need to set this bit.
17440 		 * Otherwise the chip will issue cacheline transactions
17441 		 * to streamable DMA memory with not all the byte
17442 		 * enables turned on.  This is an error on several
17443 		 * RISC PCI controllers, in particular sparc64.
17444 		 *
17445 		 * On 5703/5704 chips, this bit has been reassigned
17446 		 * a different meaning.  In particular, it is used
17447 		 * on those chips to enable a PCI-X workaround.
17448 		 */
17449 		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17450 	}
17451 
17452 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17453 
17454 
17455 	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17456 	    tg3_asic_rev(tp) != ASIC_REV_5701)
17457 		goto out;
17458 
17459 	/* It is best to perform the DMA test with the maximum write burst
17460 	 * size to expose the 5700/5701 write DMA bug.
17461 	 */
17462 	saved_dma_rwctrl = tp->dma_rwctrl;
17463 	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17464 	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17465 
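	/* Fill the buffer with an incrementing pattern, DMA it to the chip
	 * and back, then verify every word.  On a mismatch, retry once with
	 * the write boundary clamped to 16 bytes; a second mismatch is a
	 * hard failure.
	 */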
17466 	while (1) {
17467 		u32 *p = buf, i;
17468 
17469 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17470 			p[i] = i;
17471 
17472 		/* Send the buffer to the chip. */
17473 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17474 		if (ret) {
17475 			dev_err(&tp->pdev->dev,
17476 				"%s: Buffer write failed. err = %d\n",
17477 				__func__, ret);
17478 			break;
17479 		}
17480 
17481 		/* Now read it back. */
17482 		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17483 		if (ret) {
17484 			dev_err(&tp->pdev->dev,
17485 				"%s: Buffer read failed. err = %d\n", __func__, ret);
17486 			break;
17487 		}
17488 
17489 		/* Verify it. */
17490 		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17491 			if (p[i] == i)
17492 				continue;
17493 
17494 			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17495 			    DMA_RWCTRL_WRITE_BNDRY_16) {
17496 				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17497 				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17498 				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17499 				break;
17500 			} else {
17501 				dev_err(&tp->pdev->dev,
17502 					"%s: Buffer corrupted on read back! "
17503 					"(%d != %d)\n", __func__, p[i], i);
17504 				ret = -ENODEV;
17505 				goto out;
17506 			}
17507 		}
17508 
17509 		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17510 			/* Success. */
17511 			ret = 0;
17512 			break;
17513 		}
17514 	}
17515 	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17516 	    DMA_RWCTRL_WRITE_BNDRY_16) {
17517 		/* DMA test passed without adjusting DMA boundary,
17518 		 * now look for chipsets that are known to expose the
17519 		 * DMA bug without failing the test.
17520 		 */
17521 		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17522 			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17523 			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17524 		} else {
17525 			/* Safe to use the calculated DMA boundary. */
17526 			tp->dma_rwctrl = saved_dma_rwctrl;
17527 		}
17528 
17529 		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17530 	}
17531 
17532 out:
17533 	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17534 out_nofree:
17535 	return ret;
17536 }
17537 
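/* Choose MBUF/DMA watermarks by chip family: 57765+ and 5705+ parts get
 * their family-specific thresholds (with a 5906 override), everything
 * else the original defaults; the _jumbo values apply to jumbo-frame
 * operation.
 */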
17538 static void tg3_init_bufmgr_config(struct tg3 *tp)
17539 {
17540 	if (tg3_flag(tp, 57765_PLUS)) {
17541 		tp->bufmgr_config.mbuf_read_dma_low_water =
17542 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17543 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17544 			DEFAULT_MB_MACRX_LOW_WATER_57765;
17545 		tp->bufmgr_config.mbuf_high_water =
17546 			DEFAULT_MB_HIGH_WATER_57765;
17547 
17548 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17549 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17550 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17551 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17552 		tp->bufmgr_config.mbuf_high_water_jumbo =
17553 			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17554 	} else if (tg3_flag(tp, 5705_PLUS)) {
17555 		tp->bufmgr_config.mbuf_read_dma_low_water =
17556 			DEFAULT_MB_RDMA_LOW_WATER_5705;
17557 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17558 			DEFAULT_MB_MACRX_LOW_WATER_5705;
17559 		tp->bufmgr_config.mbuf_high_water =
17560 			DEFAULT_MB_HIGH_WATER_5705;
17561 		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17562 			tp->bufmgr_config.mbuf_mac_rx_low_water =
17563 				DEFAULT_MB_MACRX_LOW_WATER_5906;
17564 			tp->bufmgr_config.mbuf_high_water =
17565 				DEFAULT_MB_HIGH_WATER_5906;
17566 		}
17567 
17568 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17569 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17570 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17571 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17572 		tp->bufmgr_config.mbuf_high_water_jumbo =
17573 			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17574 	} else {
17575 		tp->bufmgr_config.mbuf_read_dma_low_water =
17576 			DEFAULT_MB_RDMA_LOW_WATER;
17577 		tp->bufmgr_config.mbuf_mac_rx_low_water =
17578 			DEFAULT_MB_MACRX_LOW_WATER;
17579 		tp->bufmgr_config.mbuf_high_water =
17580 			DEFAULT_MB_HIGH_WATER;
17581 
17582 		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17583 			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17584 		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17585 			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17586 		tp->bufmgr_config.mbuf_high_water_jumbo =
17587 			DEFAULT_MB_HIGH_WATER_JUMBO;
17588 	}
17589 
17590 	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17591 	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17592 }
17593 
17594 static char *tg3_phy_string(struct tg3 *tp)
17595 {
17596 	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17597 	case TG3_PHY_ID_BCM5400:	return "5400";
17598 	case TG3_PHY_ID_BCM5401:	return "5401";
17599 	case TG3_PHY_ID_BCM5411:	return "5411";
17600 	case TG3_PHY_ID_BCM5701:	return "5701";
17601 	case TG3_PHY_ID_BCM5703:	return "5703";
17602 	case TG3_PHY_ID_BCM5704:	return "5704";
17603 	case TG3_PHY_ID_BCM5705:	return "5705";
17604 	case TG3_PHY_ID_BCM5750:	return "5750";
17605 	case TG3_PHY_ID_BCM5752:	return "5752";
17606 	case TG3_PHY_ID_BCM5714:	return "5714";
17607 	case TG3_PHY_ID_BCM5780:	return "5780";
17608 	case TG3_PHY_ID_BCM5755:	return "5755";
17609 	case TG3_PHY_ID_BCM5787:	return "5787";
17610 	case TG3_PHY_ID_BCM5784:	return "5784";
17611 	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17612 	case TG3_PHY_ID_BCM5906:	return "5906";
17613 	case TG3_PHY_ID_BCM5761:	return "5761";
17614 	case TG3_PHY_ID_BCM5718C:	return "5718C";
17615 	case TG3_PHY_ID_BCM5718S:	return "5718S";
17616 	case TG3_PHY_ID_BCM57765:	return "57765";
17617 	case TG3_PHY_ID_BCM5719C:	return "5719C";
17618 	case TG3_PHY_ID_BCM5720C:	return "5720C";
17619 	case TG3_PHY_ID_BCM5762:	return "5762C";
17620 	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17621 	case 0:			return "serdes";
17622 	default:		return "unknown";
17623 	}
17624 }
17625 
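/* Format the bus type, clock and width into @str.  The caller owns the
 * buffer; tg3_init_one() passes a 40-byte stack array, ample for the
 * longest string built here (e.g. "PCIX:133MHz:64-bit").
 */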
17626 static char *tg3_bus_string(struct tg3 *tp, char *str)
17627 {
17628 	if (tg3_flag(tp, PCI_EXPRESS)) {
17629 		strcpy(str, "PCI Express");
17630 		return str;
17631 	} else if (tg3_flag(tp, PCIX_MODE)) {
17632 		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17633 
17634 		strcpy(str, "PCIX:");
17635 
17636 		if ((clock_ctrl == 7) ||
17637 		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17638 		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17639 			strcat(str, "133MHz");
17640 		else if (clock_ctrl == 0)
17641 			strcat(str, "33MHz");
17642 		else if (clock_ctrl == 2)
17643 			strcat(str, "50MHz");
17644 		else if (clock_ctrl == 4)
17645 			strcat(str, "66MHz");
17646 		else if (clock_ctrl == 6)
17647 			strcat(str, "100MHz");
17648 	} else {
17649 		strcpy(str, "PCI:");
17650 		if (tg3_flag(tp, PCI_HIGH_SPEED))
17651 			strcat(str, "66MHz");
17652 		else
17653 			strcat(str, "33MHz");
17654 	}
17655 	if (tg3_flag(tp, PCI_32BIT))
17656 		strcat(str, ":32-bit");
17657 	else
17658 		strcat(str, ":64-bit");
17659 	return str;
17660 }
17661 
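/* Seed the ethtool coalescing defaults.  Parts using the CLRTICK modes
 * get the clear-ticks tick values, and 5705+ parts zero the per-IRQ and
 * stats fields their host coalescing engine does not implement.
 */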
17662 static void tg3_init_coal(struct tg3 *tp)
17663 {
17664 	struct ethtool_coalesce *ec = &tp->coal;
17665 
17666 	memset(ec, 0, sizeof(*ec));
17667 	ec->cmd = ETHTOOL_GCOALESCE;
17668 	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17669 	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17670 	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17671 	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17672 	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17673 	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17674 	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17675 	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17676 	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17677 
17678 	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17679 				 HOSTCC_MODE_CLRTICK_TXBD)) {
17680 		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17681 		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17682 		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17683 		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17684 	}
17685 
17686 	if (tg3_flag(tp, 5705_PLUS)) {
17687 		ec->rx_coalesce_usecs_irq = 0;
17688 		ec->tx_coalesce_usecs_irq = 0;
17689 		ec->stats_block_coalesce_usecs = 0;
17690 	}
17691 }
17692 
17693 static int tg3_init_one(struct pci_dev *pdev,
17694 				  const struct pci_device_id *ent)
17695 {
17696 	struct net_device *dev;
17697 	struct tg3 *tp;
17698 	int i, err;
17699 	u32 sndmbx, rcvmbx, intmbx;
17700 	char str[40];
17701 	u64 dma_mask, persist_dma_mask;
17702 	netdev_features_t features = 0;
17703 	u8 addr[ETH_ALEN] __aligned(2);
17704 
17705 	err = pci_enable_device(pdev);
17706 	if (err) {
17707 		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17708 		return err;
17709 	}
17710 
17711 	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17712 	if (err) {
17713 		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17714 		goto err_out_disable_pdev;
17715 	}
17716 
17717 	pci_set_master(pdev);
17718 
17719 	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17720 	if (!dev) {
17721 		err = -ENOMEM;
17722 		goto err_out_free_res;
17723 	}
17724 
17725 	SET_NETDEV_DEV(dev, &pdev->dev);
17726 
17727 	tp = netdev_priv(dev);
17728 	tp->pdev = pdev;
17729 	tp->dev = dev;
17730 	tp->rx_mode = TG3_DEF_RX_MODE;
17731 	tp->tx_mode = TG3_DEF_TX_MODE;
17732 	tp->irq_sync = 1;
17733 	tp->pcierr_recovery = false;
17734 
17735 	if (tg3_debug > 0)
17736 		tp->msg_enable = tg3_debug;
17737 	else
17738 		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17739 
17740 	if (pdev_is_ssb_gige_core(pdev)) {
17741 		tg3_flag_set(tp, IS_SSB_CORE);
17742 		if (ssb_gige_must_flush_posted_writes(pdev))
17743 			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17744 		if (ssb_gige_one_dma_at_once(pdev))
17745 			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17746 		if (ssb_gige_have_roboswitch(pdev)) {
17747 			tg3_flag_set(tp, USE_PHYLIB);
17748 			tg3_flag_set(tp, ROBOSWITCH);
17749 		}
17750 		if (ssb_gige_is_rgmii(pdev))
17751 			tg3_flag_set(tp, RGMII_MODE);
17752 	}
17753 
17754 	/* The word/byte swap controls here control register access byte
17755 	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17756 	 * setting below.
17757 	 */
17758 	tp->misc_host_ctrl =
17759 		MISC_HOST_CTRL_MASK_PCI_INT |
17760 		MISC_HOST_CTRL_WORD_SWAP |
17761 		MISC_HOST_CTRL_INDIR_ACCESS |
17762 		MISC_HOST_CTRL_PCISTATE_RW;
17763 
17764 	/* The NONFRM (non-frame) byte/word swap controls take effect
17765 	 * on descriptor entries, anything which isn't packet data.
17766 	 *
17767 	 * The StrongARM chips on the board (one for tx, one for rx)
17768 	 * are running in big-endian mode.
17769 	 */
17770 	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17771 			GRC_MODE_WSWAP_NONFRM_DATA);
17772 #ifdef __BIG_ENDIAN
17773 	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17774 #endif
17775 	spin_lock_init(&tp->lock);
17776 	spin_lock_init(&tp->indirect_lock);
17777 	INIT_WORK(&tp->reset_task, tg3_reset_task);
17778 
17779 	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17780 	if (!tp->regs) {
17781 		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17782 		err = -ENOMEM;
17783 		goto err_out_free_dev;
17784 	}
17785 
17786 	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17787 	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17788 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17789 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17790 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17791 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17792 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17793 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17794 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17795 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17796 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17797 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17798 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17799 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17800 	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17801 		tg3_flag_set(tp, ENABLE_APE);
17802 		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17803 		if (!tp->aperegs) {
17804 			dev_err(&pdev->dev,
17805 				"Cannot map APE registers, aborting\n");
17806 			err = -ENOMEM;
17807 			goto err_out_iounmap;
17808 		}
17809 	}
17810 
17811 	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17812 	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17813 
17814 	dev->ethtool_ops = &tg3_ethtool_ops;
17815 	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17816 	dev->netdev_ops = &tg3_netdev_ops;
17817 	dev->irq = pdev->irq;
17818 
17819 	err = tg3_get_invariants(tp, ent);
17820 	if (err) {
17821 		dev_err(&pdev->dev,
17822 			"Problem fetching invariants of chip, aborting\n");
17823 		goto err_out_apeunmap;
17824 	}
17825 
17826 	/* The EPB bridge inside 5714, 5715, and 5780 and any
17827 	 * device behind the EPB cannot support DMA addresses > 40-bit.
17828 	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17829 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17830 	 * do DMA address check in __tg3_start_xmit().
17831 	 */
17832 	if (tg3_flag(tp, IS_5788))
17833 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17834 	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17835 		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17836 #ifdef CONFIG_HIGHMEM
17837 		dma_mask = DMA_BIT_MASK(64);
17838 #endif
17839 	} else
17840 		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17841 
17842 	if (tg3_asic_rev(tp) == ASIC_REV_57766)
17843 		persist_dma_mask = DMA_BIT_MASK(31);
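	/* Note: persist_dma_mask only governs dma_set_coherent_mask()
	 * below; the 57766 additionally restricts coherent allocations to
	 * the low 2 GB (31 bits).
	 */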
17844 
17845 	/* Configure DMA attributes. */
17846 	if (dma_mask > DMA_BIT_MASK(32)) {
17847 		err = dma_set_mask(&pdev->dev, dma_mask);
17848 		if (!err) {
17849 			features |= NETIF_F_HIGHDMA;
17850 			err = dma_set_coherent_mask(&pdev->dev,
17851 						    persist_dma_mask);
17852 			if (err < 0) {
17853 				dev_err(&pdev->dev,
17854 					"Unable to obtain 64-bit DMA for consistent allocations\n");
17855 				goto err_out_apeunmap;
17856 			}
17857 		}
17858 	}
17859 	if (err || dma_mask == DMA_BIT_MASK(32)) {
17860 		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17861 		if (err) {
17862 			dev_err(&pdev->dev,
17863 				"No usable DMA configuration, aborting\n");
17864 			goto err_out_apeunmap;
17865 		}
17866 	}
17867 
17868 	tg3_init_bufmgr_config(tp);
17869 
17870 	/* 5700 B0 chips do not support checksumming correctly due
17871 	 * to hardware bugs.
17872 	 */
17873 	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17874 		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17875 
17876 		if (tg3_flag(tp, 5755_PLUS))
17877 			features |= NETIF_F_IPV6_CSUM;
17878 	}
17879 
17880 	/* TSO is on by default on chips that support hardware TSO.
17881 	 * Firmware TSO on older chips gives lower performance, so it
17882 	 * is off by default, but can be enabled using ethtool.
17883 	 */
17884 	if ((tg3_flag(tp, HW_TSO_1) ||
17885 	     tg3_flag(tp, HW_TSO_2) ||
17886 	     tg3_flag(tp, HW_TSO_3)) &&
17887 	    (features & NETIF_F_IP_CSUM))
17888 		features |= NETIF_F_TSO;
17889 	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17890 		if (features & NETIF_F_IPV6_CSUM)
17891 			features |= NETIF_F_TSO6;
17892 		if (tg3_flag(tp, HW_TSO_3) ||
17893 		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17894 		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17895 		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17896 		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17897 		    tg3_asic_rev(tp) == ASIC_REV_57780)
17898 			features |= NETIF_F_TSO_ECN;
17899 	}
17900 
17901 	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17902 			 NETIF_F_HW_VLAN_CTAG_RX;
17903 	dev->vlan_features |= features;
17904 
17905 	/*
17906 	 * Add loopback capability only for a subset of devices that support
17907 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17908 	 * loopback for the remaining devices.
17909 	 */
17910 	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17911 	    !tg3_flag(tp, CPMU_PRESENT))
17912 		/* Add the loopback capability */
17913 		features |= NETIF_F_LOOPBACK;
17914 
17915 	dev->hw_features |= features;
17916 	dev->priv_flags |= IFF_UNICAST_FLT;
17917 
17918 	/* MTU range: 60 - 9000 or 1500, depending on hardware */
17919 	dev->min_mtu = TG3_MIN_MTU;
17920 	dev->max_mtu = TG3_MAX_MTU(tp);
17921 
17922 	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17923 	    !tg3_flag(tp, TSO_CAPABLE) &&
17924 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17925 		tg3_flag_set(tp, MAX_RXPEND_64);
17926 		tp->rx_pending = 63;
17927 	}
17928 
17929 	err = tg3_get_device_address(tp, addr);
17930 	if (err) {
17931 		dev_err(&pdev->dev,
17932 			"Could not obtain valid ethernet address, aborting\n");
17933 		goto err_out_apeunmap;
17934 	}
17935 	eth_hw_addr_set(dev, addr);
17936 
17937 	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17938 	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17939 	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17940 	for (i = 0; i < tp->irq_max; i++) {
17941 		struct tg3_napi *tnapi = &tp->napi[i];
17942 
17943 		tnapi->tp = tp;
17944 		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17945 
17946 		tnapi->int_mbox = intmbx;
17947 		intmbx += 0x8;
17948 
17949 		tnapi->consmbox = rcvmbx;
17950 		tnapi->prodmbox = sndmbx;
17951 
17952 		if (i)
17953 			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17954 		else
17955 			tnapi->coal_now = HOSTCC_MODE_NOW;
17956 
17957 		if (!tg3_flag(tp, SUPPORT_MSIX))
17958 			break;
17959 
17960 		/*
17961 		 * If we support MSIX, we'll be using RSS.  If we're using
17962 		 * RSS, the first vector only handles link interrupts and the
17963 		 * remaining vectors handle rx and tx interrupts.  Reuse the
17964 		 * mailbox values for the next iteration.  The values we set up
17965 		 * above are still useful for the single vectored mode.
17966 		 */
17967 		if (!i)
17968 			continue;
17969 
17970 		rcvmbx += 0x8;
17971 
17972 		if (sndmbx & 0x4)
17973 			sndmbx -= 0x4;
17974 		else
17975 			sndmbx += 0xc;
17976 	}
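	/* Illustrative mailbox walk (offsets relative to the vector-0
	 * mailboxes; the actual bases come from the MAILBOX_* constants):
	 * the interrupt and receive-return mailboxes advance by 0x8 per
	 * vector, while the send mailbox's -0x4/+0xc adjustment alternates
	 * between the two 32-bit halves of consecutive 64-bit registers,
	 * also netting +0x8 per pair of vectors.
	 */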
17977 
17978 	/*
17979 	 * Reset the chip in case a UNDI or EFI driver did not shut it
17980 	 * down cleanly.  Otherwise the DMA self test will enable WDMAC and
17981 	 * we would see (spurious) pending DMA on the PCI bus at that point.
17982 	 */
17983 	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17984 	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17985 		tg3_full_lock(tp, 0);
17986 		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17987 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17988 		tg3_full_unlock(tp);
17989 	}
17990 
17991 	err = tg3_test_dma(tp);
17992 	if (err) {
17993 		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17994 		goto err_out_apeunmap;
17995 	}
17996 
17997 	tg3_init_coal(tp);
17998 
17999 	pci_set_drvdata(pdev, dev);
18000 
18001 	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
18002 	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
18003 	    tg3_asic_rev(tp) == ASIC_REV_5762)
18004 		tg3_flag_set(tp, PTP_CAPABLE);
18005 
18006 	tg3_timer_init(tp);
18007 
18008 	tg3_carrier_off(tp);
18009 
18010 	err = register_netdev(dev);
18011 	if (err) {
18012 		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
18013 		goto err_out_apeunmap;
18014 	}
18015 
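	/* ptp_clock_register() returns an ERR_PTR() on failure (or NULL
	 * when PTP support is compiled out); normalizing failures to NULL
	 * lets the rest of the driver use a plain pointer test.
	 */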
18016 	if (tg3_flag(tp, PTP_CAPABLE)) {
18017 		tg3_ptp_init(tp);
18018 		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
18019 						   &tp->pdev->dev);
18020 		if (IS_ERR(tp->ptp_clock))
18021 			tp->ptp_clock = NULL;
18022 	}
18023 
18024 	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18025 		    tp->board_part_number,
18026 		    tg3_chip_rev_id(tp),
18027 		    tg3_bus_string(tp, str),
18028 		    dev->dev_addr);
18029 
18030 	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18031 		char *ethtype;
18032 
18033 		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18034 			ethtype = "10/100Base-TX";
18035 		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18036 			ethtype = "1000Base-SX";
18037 		else
18038 			ethtype = "10/100/1000Base-T";
18039 
18040 		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18041 			    "(WireSpeed[%d], EEE[%d])\n",
18042 			    tg3_phy_string(tp), ethtype,
18043 			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18044 			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18045 	}
18046 
18047 	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18048 		    (dev->features & NETIF_F_RXCSUM) != 0,
18049 		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
18050 		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18051 		    tg3_flag(tp, ENABLE_ASF) != 0,
18052 		    tg3_flag(tp, TSO_CAPABLE) != 0);
18053 	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18054 		    tp->dma_rwctrl,
18055 		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18056 		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18057 
18058 	pci_save_state(pdev);
18059 
18060 	return 0;
18061 
18062 err_out_apeunmap:
18063 	if (tp->aperegs) {
18064 		iounmap(tp->aperegs);
18065 		tp->aperegs = NULL;
18066 	}
18067 
18068 err_out_iounmap:
18069 	if (tp->regs) {
18070 		iounmap(tp->regs);
18071 		tp->regs = NULL;
18072 	}
18073 
18074 err_out_free_dev:
18075 	free_netdev(dev);
18076 
18077 err_out_free_res:
18078 	pci_release_regions(pdev);
18079 
18080 err_out_disable_pdev:
18081 	if (pci_is_enabled(pdev))
18082 		pci_disable_device(pdev);
18083 	return err;
18084 }
18085 
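/* Teardown mirrors tg3_init_one() in reverse: PTP clock, firmware,
 * reset task, phylib/MDIO (if used), netdev unregistration, then the
 * BAR mappings and finally the PCI resources themselves.
 */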
18086 static void tg3_remove_one(struct pci_dev *pdev)
18087 {
18088 	struct net_device *dev = pci_get_drvdata(pdev);
18089 
18090 	if (dev) {
18091 		struct tg3 *tp = netdev_priv(dev);
18092 
18093 		tg3_ptp_fini(tp);
18094 
18095 		release_firmware(tp->fw);
18096 
18097 		tg3_reset_task_cancel(tp);
18098 
18099 		if (tg3_flag(tp, USE_PHYLIB)) {
18100 			tg3_phy_fini(tp);
18101 			tg3_mdio_fini(tp);
18102 		}
18103 
18104 		unregister_netdev(dev);
18105 		if (tp->aperegs) {
18106 			iounmap(tp->aperegs);
18107 			tp->aperegs = NULL;
18108 		}
18109 		if (tp->regs) {
18110 			iounmap(tp->regs);
18111 			tp->regs = NULL;
18112 		}
18113 		free_netdev(dev);
18114 		pci_release_regions(pdev);
18115 		pci_disable_device(pdev);
18116 	}
18117 }
18118 
18119 #ifdef CONFIG_PM_SLEEP
18120 static int tg3_suspend(struct device *device)
18121 {
18122 	struct net_device *dev = dev_get_drvdata(device);
18123 	struct tg3 *tp = netdev_priv(dev);
18124 
18125 	rtnl_lock();
18126 
18127 	if (!netif_running(dev))
18128 		goto unlock;
18129 
18130 	tg3_reset_task_cancel(tp);
18131 	tg3_phy_stop(tp);
18132 	tg3_netif_stop(tp);
18133 
18134 	tg3_timer_stop(tp);
18135 
18136 	tg3_full_lock(tp, 1);
18137 	tg3_disable_ints(tp);
18138 	tg3_full_unlock(tp);
18139 
18140 	netif_device_detach(dev);
18141 
18142 	tg3_full_lock(tp, 0);
18143 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18144 	tg3_flag_clear(tp, INIT_COMPLETE);
18145 	tg3_full_unlock(tp);
18146 
18147 	tg3_power_down_prepare(tp);
18148 
18149 unlock:
18150 	rtnl_unlock();
18151 	return 0;
18152 }
18153 
18154 static int tg3_resume(struct device *device)
18155 {
18156 	struct net_device *dev = dev_get_drvdata(device);
18157 	struct tg3 *tp = netdev_priv(dev);
18158 	int err = 0;
18159 
18160 	rtnl_lock();
18161 
18162 	if (!netif_running(dev))
18163 		goto unlock;
18164 
18165 	netif_device_attach(dev);
18166 
18167 	netdev_lock(dev);
18168 	tg3_full_lock(tp, 0);
18169 
18170 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18171 
18172 	tg3_flag_set(tp, INIT_COMPLETE);
18173 	err = tg3_restart_hw(tp,
18174 			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18175 	if (err)
18176 		goto out;
18177 
18178 	tg3_timer_start(tp);
18179 
18180 	tg3_netif_start(tp);
18181 
18182 out:
18183 	tg3_full_unlock(tp);
18184 	netdev_unlock(dev);
18185 
18186 	if (!err)
18187 		tg3_phy_start(tp);
18188 
18189 unlock:
18190 	rtnl_unlock();
18191 	return err;
18192 }
18193 #endif /* CONFIG_PM_SLEEP */
18194 
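/* SIMPLE_DEV_PM_OPS() wires the suspend/resume hooks above into the
 * system-sleep callbacks; with CONFIG_PM_SLEEP disabled they compile
 * away and the ops structure is effectively empty.
 */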
18195 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18196 
18197 /* Systems on which an ACPI _PTS (Prepare To Sleep) call for S5 results
18198  * in a fatal PCIe AER event on the tg3 device if the tg3 device is not,
18199  * or cannot be, powered down.
18200  */
18201 static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
18202 	{
18203 		.matches = {
18204 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18205 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
18206 		},
18207 	},
18208 	{
18209 		.matches = {
18210 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18211 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
18212 		},
18213 	},
18214 	{
18215 		.matches = {
18216 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18217 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
18218 		},
18219 	},
18220 	{
18221 		.matches = {
18222 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18223 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
18224 		},
18225 	},
18226 	{
18227 		.matches = {
18228 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18229 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
18230 		},
18231 	},
18232 	{
18233 		.matches = {
18234 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18235 			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
18236 		},
18237 	},
18238 	{}
18239 };
18240 
18241 static void tg3_shutdown(struct pci_dev *pdev)
18242 {
18243 	struct net_device *dev = pci_get_drvdata(pdev);
18244 	struct tg3 *tp = netdev_priv(dev);
18245 
18246 	tg3_reset_task_cancel(tp);
18247 
18248 	rtnl_lock();
18249 
18250 	netif_device_detach(dev);
18251 
18252 	if (netif_running(dev))
18253 		dev_close(dev);
18254 
18255 	if (system_state == SYSTEM_POWER_OFF)
18256 		tg3_power_down(tp);
18257 	else if (system_state == SYSTEM_RESTART &&
18258 		 dmi_first_match(tg3_restart_aer_quirk_table) &&
18259 		 pdev->current_state != PCI_D3cold &&
18260 		 pdev->current_state != PCI_UNKNOWN) {
18261 		/* Disable PCIe AER on the tg3 to avoid a fatal
18262 		 * error during this system restart.
18263 		 */
18264 		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
18265 					   PCI_EXP_DEVCTL_CERE |
18266 					   PCI_EXP_DEVCTL_NFERE |
18267 					   PCI_EXP_DEVCTL_FERE |
18268 					   PCI_EXP_DEVCTL_URRE);
18269 	}
18270 
18271 	rtnl_unlock();
18272 
18273 	pci_disable_device(pdev);
18274 }
18275 
18276 /**
18277  * tg3_io_error_detected - called when PCI error is detected
18278  * @pdev: Pointer to PCI device
18279  * @state: The current pci connection state
18280  *
18281  * This function is called after a PCI bus error affecting
18282  * this device has been detected.
18283  */
18284 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18285 					      pci_channel_state_t state)
18286 {
18287 	struct net_device *netdev = pci_get_drvdata(pdev);
18288 	struct tg3 *tp = netdev_priv(netdev);
18289 	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18290 
18291 	netdev_info(netdev, "PCI I/O error detected\n");
18292 
18293 	/* Want to make sure that the reset task doesn't run */
18294 	tg3_reset_task_cancel(tp);
18295 
18296 	rtnl_lock();
18297 
18298 	/* Could be second call or maybe we don't have netdev yet */
18299 	if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18300 		goto done;
18301 
18302 	/* We needn't attempt recovery from a permanent error */
18303 	if (state == pci_channel_io_frozen)
18304 		tp->pcierr_recovery = true;
18305 
18306 	tg3_phy_stop(tp);
18307 
18308 	tg3_netif_stop(tp);
18309 
18310 	tg3_timer_stop(tp);
18311 
18312 	netif_device_detach(netdev);
18313 
18314 	/* Clean up software state, even if MMIO is blocked */
18315 	tg3_full_lock(tp, 0);
18316 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18317 	tg3_full_unlock(tp);
18318 
18319 done:
18320 	if (state == pci_channel_io_perm_failure) {
18321 		if (netdev) {
18322 			netdev_lock(netdev);
18323 			tg3_napi_enable(tp);
18324 			netdev_unlock(netdev);
18325 			dev_close(netdev);
18326 		}
18327 		err = PCI_ERS_RESULT_DISCONNECT;
18328 	} else {
18329 		pci_disable_device(pdev);
18330 	}
18331 
18332 	rtnl_unlock();
18333 
18334 	return err;
18335 }
18336 
18337 /**
18338  * tg3_io_slot_reset - called after the pci bus has been reset.
18339  * @pdev: Pointer to PCI device
18340  *
18341  * Restart the card from scratch, as if from a cold-boot.
18342  * At this point, the card has experienced a hard reset,
18343  * followed by fixups by BIOS, and has its config space
18344  * set up identically to what it was at cold boot.
18345  */
18346 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18347 {
18348 	struct net_device *netdev = pci_get_drvdata(pdev);
18349 	struct tg3 *tp = netdev_priv(netdev);
18350 	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18351 	int err;
18352 
18353 	rtnl_lock();
18354 
18355 	if (pci_enable_device(pdev)) {
18356 		dev_err(&pdev->dev,
18357 			"Cannot re-enable PCI device after reset.\n");
18358 		goto done;
18359 	}
18360 
18361 	pci_set_master(pdev);
18362 	pci_restore_state(pdev);
18363 	pci_save_state(pdev);
18364 
18365 	if (!netdev || !netif_running(netdev)) {
18366 		rc = PCI_ERS_RESULT_RECOVERED;
18367 		goto done;
18368 	}
18369 
18370 	err = tg3_power_up(tp);
18371 	if (err)
18372 		goto done;
18373 
18374 	rc = PCI_ERS_RESULT_RECOVERED;
18375 
18376 done:
18377 	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18378 		netdev_lock(netdev);
18379 		tg3_napi_enable(tp);
18380 		netdev_unlock(netdev);
18381 		dev_close(netdev);
18382 	}
18383 	rtnl_unlock();
18384 
18385 	return rc;
18386 }
18387 
18388 /**
18389  * tg3_io_resume - called when traffic can start flowing again.
18390  * @pdev: Pointer to PCI device
18391  *
18392  * This callback is called when the error recovery driver tells
18393  * us that it is OK to resume normal operation.
18394  */
18395 static void tg3_io_resume(struct pci_dev *pdev)
18396 {
18397 	struct net_device *netdev = pci_get_drvdata(pdev);
18398 	struct tg3 *tp = netdev_priv(netdev);
18399 	int err;
18400 
18401 	rtnl_lock();
18402 
18403 	if (!netdev || !netif_running(netdev))
18404 		goto done;
18405 
18406 	netdev_lock(netdev);
18407 	tg3_full_lock(tp, 0);
18408 	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18409 	tg3_flag_set(tp, INIT_COMPLETE);
18410 	err = tg3_restart_hw(tp, true);
18411 	if (err) {
18412 		tg3_full_unlock(tp);
18413 		netdev_unlock(netdev);
18414 		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18415 		goto done;
18416 	}
18417 
18418 	netif_device_attach(netdev);
18419 
18420 	tg3_timer_start(tp);
18421 
18422 	tg3_netif_start(tp);
18423 
18424 	tg3_full_unlock(tp);
18425 	netdev_unlock(netdev);
18426 
18427 	tg3_phy_start(tp);
18428 
18429 done:
18430 	tp->pcierr_recovery = false;
18431 	rtnl_unlock();
18432 }
18433 
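/* AER recovery invokes these in order: ->error_detected() quiesces the
 * device, ->slot_reset() re-enables it after the bus/slot reset, and
 * ->resume() restarts traffic once recovery succeeds.
 */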
18434 static const struct pci_error_handlers tg3_err_handler = {
18435 	.error_detected	= tg3_io_error_detected,
18436 	.slot_reset	= tg3_io_slot_reset,
18437 	.resume		= tg3_io_resume
18438 };
18439 
18440 static struct pci_driver tg3_driver = {
18441 	.name		= DRV_MODULE_NAME,
18442 	.id_table	= tg3_pci_tbl,
18443 	.probe		= tg3_init_one,
18444 	.remove		= tg3_remove_one,
18445 	.err_handler	= &tg3_err_handler,
18446 	.driver.pm	= &tg3_pm_ops,
18447 	.shutdown	= tg3_shutdown,
18448 };
18449 
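/* module_pci_driver() expands to the module_init()/module_exit()
 * boilerplate that registers and unregisters tg3_driver with the PCI
 * core.
 */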
18450 module_pci_driver(tg3_driver);
18451