1 /*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
11 *
12 * Firmware is:
13 * Derived from proprietary unpublished source code,
14 * Copyright (C) 2000-2016 Broadcom Corporation.
15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
18 *
19 * Permission is hereby granted for the distribution of this firmware
20 * data in hexadecimal or equivalent format, provided this copyright
21 * notice is accompanying it.
22 */
23
24
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/stringify.h>
28 #include <linux/kernel.h>
29 #include <linux/sched/signal.h>
30 #include <linux/types.h>
31 #include <linux/compiler.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34 #include <linux/in.h>
35 #include <linux/interrupt.h>
36 #include <linux/ioport.h>
37 #include <linux/pci.h>
38 #include <linux/netdevice.h>
39 #include <linux/etherdevice.h>
40 #include <linux/skbuff.h>
41 #include <linux/ethtool.h>
42 #include <linux/mdio.h>
43 #include <linux/mii.h>
44 #include <linux/phy.h>
45 #include <linux/brcmphy.h>
46 #include <linux/if.h>
47 #include <linux/if_vlan.h>
48 #include <linux/ip.h>
49 #include <linux/tcp.h>
50 #include <linux/workqueue.h>
51 #include <linux/prefetch.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/firmware.h>
54 #include <linux/ssb/ssb_driver_gige.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <linux/crc32poly.h>
58
59 #include <net/checksum.h>
60 #include <net/gso.h>
61 #include <net/ip.h>
62
63 #include <linux/io.h>
64 #include <asm/byteorder.h>
65 #include <linux/uaccess.h>
66
67 #include <uapi/linux/net_tstamp.h>
68 #include <linux/ptp_clock_kernel.h>
69
70 #define BAR_0 0
71 #define BAR_2 2
72
73 #include "tg3.h"
74
75 /* Functions & macros to verify TG3_FLAGS types */
76
77 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
78 {
79 return test_bit(flag, bits);
80 }
81
82 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 {
84 set_bit(flag, bits);
85 }
86
87 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
88 {
89 clear_bit(flag, bits);
90 }
91
92 #define tg3_flag(tp, flag) \
93 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
94 #define tg3_flag_set(tp, flag) \
95 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
96 #define tg3_flag_clear(tp, flag) \
97 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
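/* Usage sketch (illustrative, not from the original source): the flag
 * name is token-pasted onto the TG3_FLAG_ prefix, so the wrappers above
 * all operate atomically on the same tp->tg3_flags bitmap, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *
 * expands to test_bit(TG3_FLAG_ENABLE_APE, ...) and
 * set_bit(TG3_FLAG_MDIOBUS_INITED, ...).
 */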
98
99 #define DRV_MODULE_NAME "tg3"
100 /* DO NOT UPDATE TG3_*_NUM defines */
101 #define TG3_MAJ_NUM 3
102 #define TG3_MIN_NUM 137
103
104 #define RESET_KIND_SHUTDOWN 0
105 #define RESET_KIND_INIT 1
106 #define RESET_KIND_SUSPEND 2
107
108 #define TG3_DEF_RX_MODE 0
109 #define TG3_DEF_TX_MODE 0
110 #define TG3_DEF_MSG_ENABLE \
111 (NETIF_MSG_DRV | \
112 NETIF_MSG_PROBE | \
113 NETIF_MSG_LINK | \
114 NETIF_MSG_TIMER | \
115 NETIF_MSG_IFDOWN | \
116 NETIF_MSG_IFUP | \
117 NETIF_MSG_RX_ERR | \
118 NETIF_MSG_TX_ERR)
119
120 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
121
122 /* length of time before we decide the hardware is borked,
123 * and dev->tx_timeout() should be called to fix the problem
124 */
125
126 #define TG3_TX_TIMEOUT (5 * HZ)
127
128 /* hardware minimum and maximum for a single frame's data payload */
129 #define TG3_MIN_MTU ETH_ZLEN
130 #define TG3_MAX_MTU(tp) \
131 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
132
133 /* These numbers seem to be hard coded in the NIC firmware somehow.
134 * You can't change the ring sizes, but you can change where you place
135 * them in the NIC onboard memory.
136 */
137 #define TG3_RX_STD_RING_SIZE(tp) \
138 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
140 #define TG3_DEF_RX_RING_PENDING 200
141 #define TG3_RX_JMB_RING_SIZE(tp) \
142 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
143 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
144 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
145
146 /* Do not place this n-ring entries value into the tp struct itself;
147 * we really want to expose these constants to GCC so that modulo et
148 * al. operations are done with shifts and masks instead of with
149 * hw multiply/modulo instructions. Another solution would be to
150 * replace things like '% foo' with '& (foo - 1)'.
151 */
152
153 #define TG3_TX_RING_SIZE 512
154 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
155
156 #define TG3_RX_STD_RING_BYTES(tp) \
157 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
158 #define TG3_RX_JMB_RING_BYTES(tp) \
159 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
160 #define TG3_RX_RCB_RING_BYTES(tp) \
161 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
162 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
163 TG3_TX_RING_SIZE)
164 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
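/* Because TG3_TX_RING_SIZE is a power of two, the wrap in NEXT_TX() is
 * a pure mask: NEXT_TX(510) == 511 and NEXT_TX(511) == 0, with no
 * divide or modulo instruction emitted.
 */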
165
166 #define TG3_DMA_BYTE_ENAB 64
167
168 #define TG3_RX_STD_DMA_SZ 1536
169 #define TG3_RX_JMB_DMA_SZ 9046
170
171 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
172
173 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
174 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
175
176 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
177 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
178
179 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
180 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
181
182 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
183 * that are at least dword aligned when used in PCIX mode. The driver
184 * works around this bug by double copying the packet. This workaround
185 * is built into the normal double copy length check for efficiency.
186 *
187 * However, the double copy is only necessary on those architectures
188 * where unaligned memory accesses are inefficient. For those architectures
189 * where unaligned memory accesses incur little penalty, we can reintegrate
190 * the 5701 in the normal rx path. Doing so saves a device structure
191 * dereference by hardcoding the double copy threshold in place.
192 */
193 #define TG3_RX_COPY_THRESHOLD 256
194 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
195 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
196 #else
197 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #endif
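/* On configurations taking the first branch above, the threshold folds
 * to the constant 256 and the copy decision compiles down to a compare
 * against an immediate; otherwise it is fetched from tp->rx_copy_thresh
 * at runtime, as described in the comment block above.
 */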
199
200 #if (NET_IP_ALIGN != 0)
201 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
202 #else
203 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 #endif
205
206 /* minimum number of free TX descriptors required to wake up TX process */
207 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
208 #define TG3_TX_BD_DMA_MAX_2K 2048
209 #define TG3_TX_BD_DMA_MAX_4K 4096
210
211 #define TG3_RAW_IP_ALIGN 2
212
213 #define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
214 #define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
215
216 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
217 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
218
219 #define FIRMWARE_TG3 "tigon/tg3.bin"
220 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
221 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
222 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
223
224 MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
225 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
226 MODULE_LICENSE("GPL");
227 MODULE_FIRMWARE(FIRMWARE_TG3);
228 MODULE_FIRMWARE(FIRMWARE_TG357766);
229 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
230 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
231
232 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
233 module_param(tg3_debug, int, 0);
234 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
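/* Example invocation (hypothetical values): "modprobe tg3 tg3_debug=0x3"
 * would enable only NETIF_MSG_DRV (0x1) and NETIF_MSG_PROBE (0x2), while
 * the default of -1 selects TG3_DEF_MSG_ENABLE above.
 */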
235
236 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
237 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
238
239 static const struct pci_device_id tg3_pci_tbl[] = {
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
259 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
260 TG3_DRV_DATA_FLAG_5705_10_100},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
262 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
263 TG3_DRV_DATA_FLAG_5705_10_100},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
266 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
267 TG3_DRV_DATA_FLAG_5705_10_100},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
274 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
280 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
288 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
289 PCI_VENDOR_ID_LENOVO,
290 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
291 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
294 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
311 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
312 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
313 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
314 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
315 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
316 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
317 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
318 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
320 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
322 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
330 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
334 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
340 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
341 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
342 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
343 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
344 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
345 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
346 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
347 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
348 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
349 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
350 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
351 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
352 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
353 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
354 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
355 {}
356 };
357
358 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
359
360 static const struct {
361 const char string[ETH_GSTRING_LEN];
362 } ethtool_stats_keys[] = {
363 { "rx_octets" },
364 { "rx_fragments" },
365 { "rx_ucast_packets" },
366 { "rx_mcast_packets" },
367 { "rx_bcast_packets" },
368 { "rx_fcs_errors" },
369 { "rx_align_errors" },
370 { "rx_xon_pause_rcvd" },
371 { "rx_xoff_pause_rcvd" },
372 { "rx_mac_ctrl_rcvd" },
373 { "rx_xoff_entered" },
374 { "rx_frame_too_long_errors" },
375 { "rx_jabbers" },
376 { "rx_undersize_packets" },
377 { "rx_in_length_errors" },
378 { "rx_out_length_errors" },
379 { "rx_64_or_less_octet_packets" },
380 { "rx_65_to_127_octet_packets" },
381 { "rx_128_to_255_octet_packets" },
382 { "rx_256_to_511_octet_packets" },
383 { "rx_512_to_1023_octet_packets" },
384 { "rx_1024_to_1522_octet_packets" },
385 { "rx_1523_to_2047_octet_packets" },
386 { "rx_2048_to_4095_octet_packets" },
387 { "rx_4096_to_8191_octet_packets" },
388 { "rx_8192_to_9022_octet_packets" },
389
390 { "tx_octets" },
391 { "tx_collisions" },
392
393 { "tx_xon_sent" },
394 { "tx_xoff_sent" },
395 { "tx_flow_control" },
396 { "tx_mac_errors" },
397 { "tx_single_collisions" },
398 { "tx_mult_collisions" },
399 { "tx_deferred" },
400 { "tx_excessive_collisions" },
401 { "tx_late_collisions" },
402 { "tx_collide_2times" },
403 { "tx_collide_3times" },
404 { "tx_collide_4times" },
405 { "tx_collide_5times" },
406 { "tx_collide_6times" },
407 { "tx_collide_7times" },
408 { "tx_collide_8times" },
409 { "tx_collide_9times" },
410 { "tx_collide_10times" },
411 { "tx_collide_11times" },
412 { "tx_collide_12times" },
413 { "tx_collide_13times" },
414 { "tx_collide_14times" },
415 { "tx_collide_15times" },
416 { "tx_ucast_packets" },
417 { "tx_mcast_packets" },
418 { "tx_bcast_packets" },
419 { "tx_carrier_sense_errors" },
420 { "tx_discards" },
421 { "tx_errors" },
422
423 { "dma_writeq_full" },
424 { "dma_write_prioq_full" },
425 { "rxbds_empty" },
426 { "rx_discards" },
427 { "rx_errors" },
428 { "rx_threshold_hit" },
429
430 { "dma_readq_full" },
431 { "dma_read_prioq_full" },
432 { "tx_comp_queue_full" },
433
434 { "ring_set_send_prod_index" },
435 { "ring_status_update" },
436 { "nic_irqs" },
437 { "nic_avoided_irqs" },
438 { "nic_tx_threshold_hit" },
439
440 { "mbuf_lwm_thresh_hit" },
441 };
442
443 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
444 #define TG3_NVRAM_TEST 0
445 #define TG3_LINK_TEST 1
446 #define TG3_REGISTER_TEST 2
447 #define TG3_MEMORY_TEST 3
448 #define TG3_MAC_LOOPB_TEST 4
449 #define TG3_PHY_LOOPB_TEST 5
450 #define TG3_EXT_LOOPB_TEST 6
451 #define TG3_INTERRUPT_TEST 7
452
453
454 static const struct {
455 const char string[ETH_GSTRING_LEN];
456 } ethtool_test_keys[] = {
457 [TG3_NVRAM_TEST] = { "nvram test (online) " },
458 [TG3_LINK_TEST] = { "link test (online) " },
459 [TG3_REGISTER_TEST] = { "register test (offline)" },
460 [TG3_MEMORY_TEST] = { "memory test (offline)" },
461 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
462 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
463 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
464 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
465 };
466
467 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
468
469
470 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
471 {
472 writel(val, tp->regs + off);
473 }
474
475 static u32 tg3_read32(struct tg3 *tp, u32 off)
476 {
477 return readl(tp->regs + off);
478 }
479
480 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
481 {
482 writel(val, tp->aperegs + off);
483 }
484
485 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
486 {
487 return readl(tp->aperegs + off);
488 }
489
490 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
491 {
492 unsigned long flags;
493
494 spin_lock_irqsave(&tp->indirect_lock, flags);
495 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
496 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
497 spin_unlock_irqrestore(&tp->indirect_lock, flags);
498 }
499
500 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
501 {
502 writel(val, tp->regs + off);
503 readl(tp->regs + off);
504 }
505
506 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
507 {
508 unsigned long flags;
509 u32 val;
510
511 spin_lock_irqsave(&tp->indirect_lock, flags);
512 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
513 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
514 spin_unlock_irqrestore(&tp->indirect_lock, flags);
515 return val;
516 }
517
518 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
519 {
520 unsigned long flags;
521
522 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
523 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
524 TG3_64BIT_REG_LOW, val);
525 return;
526 }
527 if (off == TG3_RX_STD_PROD_IDX_REG) {
528 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
529 TG3_64BIT_REG_LOW, val);
530 return;
531 }
532
533 spin_lock_irqsave(&tp->indirect_lock, flags);
534 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
535 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
536 spin_unlock_irqrestore(&tp->indirect_lock, flags);
537
538 /* In indirect mode when disabling interrupts, we also need
539 * to clear the interrupt bit in the GRC local ctrl register.
540 */
541 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
542 (val == 0x1)) {
543 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
544 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
545 }
546 }
547
548 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
549 {
550 unsigned long flags;
551 u32 val;
552
553 spin_lock_irqsave(&tp->indirect_lock, flags);
554 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
555 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
556 spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 return val;
558 }
559
560 /* usec_wait specifies the wait time in usec when writing to certain registers
561 * where it is unsafe to read back the register without some delay.
562 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
563 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
564 */
565 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
566 {
567 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
568 /* Non-posted methods */
569 tp->write32(tp, off, val);
570 else {
571 /* Posted method */
572 tg3_write32(tp, off, val);
573 if (usec_wait)
574 udelay(usec_wait);
575 tp->read32(tp, off);
576 }
577 /* Wait again after the read for the posted method to guarantee that
578 * the wait time is met.
579 */
580 if (usec_wait)
581 udelay(usec_wait);
582 }
583
584 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
585 {
586 tp->write32_mbox(tp, off, val);
587 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
588 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
589 !tg3_flag(tp, ICH_WORKAROUND)))
590 tp->read32_mbox(tp, off);
591 }
592
593 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
594 {
595 void __iomem *mbox = tp->regs + off;
596 writel(val, mbox);
597 if (tg3_flag(tp, TXD_MBOX_HWBUG))
598 writel(val, mbox);
599 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
600 tg3_flag(tp, FLUSH_POSTED_WRITES))
601 readl(mbox);
602 }
603
604 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
605 {
606 return readl(tp->regs + off + GRCMBOX_BASE);
607 }
608
609 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
610 {
611 writel(val, tp->regs + off + GRCMBOX_BASE);
612 }
613
614 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
615 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
616 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
617 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
618 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
619
620 #define tw32(reg, val) tp->write32(tp, reg, val)
621 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
622 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
623 #define tr32(reg) tp->read32(tp, reg)
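/* Rough guide to the accessors above, as used throughout this file:
 * tw32()/tr32() are plain register accesses, tw32_f() reads the register
 * back to flush a posted write, and tw32_wait_f() additionally enforces
 * a settle delay, e.g. tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40) for the
 * 40 usec wait required when reprogramming clocks.
 */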
624
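/* Indirect SRAM access sketch: TG3PCI_MEM_WIN_BASE_ADDR selects which
 * NIC-internal address the TG3PCI_MEM_WIN_DATA window aliases, either
 * through PCI config space or MMIO depending on SRAM_USE_CONFIG. Both
 * helpers below restore the window base to zero when done, per the
 * "Always leave this as zero" notes inline.
 */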
625 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
626 {
627 unsigned long flags;
628
629 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
630 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
631 return;
632
633 spin_lock_irqsave(&tp->indirect_lock, flags);
634 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
635 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
636 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
637
638 /* Always leave this as zero. */
639 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 } else {
641 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
642 tw32_f(TG3PCI_MEM_WIN_DATA, val);
643
644 /* Always leave this as zero. */
645 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
646 }
647 spin_unlock_irqrestore(&tp->indirect_lock, flags);
648 }
649
650 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
651 {
652 unsigned long flags;
653
654 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
655 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
656 *val = 0;
657 return;
658 }
659
660 spin_lock_irqsave(&tp->indirect_lock, flags);
661 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
662 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
663 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
664
665 /* Always leave this as zero. */
666 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 } else {
668 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
669 *val = tr32(TG3PCI_MEM_WIN_DATA);
670
671 /* Always leave this as zero. */
672 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
673 }
674 spin_unlock_irqrestore(&tp->indirect_lock, flags);
675 }
676
677 static void tg3_ape_lock_init(struct tg3 *tp)
678 {
679 int i;
680 u32 regbase, bit;
681
682 if (tg3_asic_rev(tp) == ASIC_REV_5761)
683 regbase = TG3_APE_LOCK_GRANT;
684 else
685 regbase = TG3_APE_PER_LOCK_GRANT;
686
687 /* Make sure the driver doesn't hold any stale locks. */
688 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
689 switch (i) {
690 case TG3_APE_LOCK_PHY0:
691 case TG3_APE_LOCK_PHY1:
692 case TG3_APE_LOCK_PHY2:
693 case TG3_APE_LOCK_PHY3:
694 bit = APE_LOCK_GRANT_DRIVER;
695 break;
696 default:
697 if (!tp->pci_fn)
698 bit = APE_LOCK_GRANT_DRIVER;
699 else
700 bit = 1 << tp->pci_fn;
701 }
702 tg3_ape_write32(tp, regbase + 4 * i, bit);
703 }
704
705 }
706
707 static int tg3_ape_lock(struct tg3 *tp, int locknum)
708 {
709 int i, off;
710 int ret = 0;
711 u32 status, req, gnt, bit;
712
713 if (!tg3_flag(tp, ENABLE_APE))
714 return 0;
715
716 switch (locknum) {
717 case TG3_APE_LOCK_GPIO:
718 if (tg3_asic_rev(tp) == ASIC_REV_5761)
719 return 0;
720 fallthrough;
721 case TG3_APE_LOCK_GRC:
722 case TG3_APE_LOCK_MEM:
723 if (!tp->pci_fn)
724 bit = APE_LOCK_REQ_DRIVER;
725 else
726 bit = 1 << tp->pci_fn;
727 break;
728 case TG3_APE_LOCK_PHY0:
729 case TG3_APE_LOCK_PHY1:
730 case TG3_APE_LOCK_PHY2:
731 case TG3_APE_LOCK_PHY3:
732 bit = APE_LOCK_REQ_DRIVER;
733 break;
734 default:
735 return -EINVAL;
736 }
737
738 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739 req = TG3_APE_LOCK_REQ;
740 gnt = TG3_APE_LOCK_GRANT;
741 } else {
742 req = TG3_APE_PER_LOCK_REQ;
743 gnt = TG3_APE_PER_LOCK_GRANT;
744 }
745
746 off = 4 * locknum;
747
748 tg3_ape_write32(tp, req + off, bit);
749
750 /* Wait for up to 1 millisecond to acquire lock. */
751 for (i = 0; i < 100; i++) {
752 status = tg3_ape_read32(tp, gnt + off);
753 if (status == bit)
754 break;
755 if (pci_channel_offline(tp->pdev))
756 break;
757
758 udelay(10);
759 }
760
761 if (status != bit) {
762 /* Revoke the lock request. */
763 tg3_ape_write32(tp, gnt + off, bit);
764 ret = -EBUSY;
765 }
766
767 return ret;
768 }
769
770 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771 {
772 u32 gnt, bit;
773
774 if (!tg3_flag(tp, ENABLE_APE))
775 return;
776
777 switch (locknum) {
778 case TG3_APE_LOCK_GPIO:
779 if (tg3_asic_rev(tp) == ASIC_REV_5761)
780 return;
781 fallthrough;
782 case TG3_APE_LOCK_GRC:
783 case TG3_APE_LOCK_MEM:
784 if (!tp->pci_fn)
785 bit = APE_LOCK_GRANT_DRIVER;
786 else
787 bit = 1 << tp->pci_fn;
788 break;
789 case TG3_APE_LOCK_PHY0:
790 case TG3_APE_LOCK_PHY1:
791 case TG3_APE_LOCK_PHY2:
792 case TG3_APE_LOCK_PHY3:
793 bit = APE_LOCK_GRANT_DRIVER;
794 break;
795 default:
796 return;
797 }
798
799 if (tg3_asic_rev(tp) == ASIC_REV_5761)
800 gnt = TG3_APE_LOCK_GRANT;
801 else
802 gnt = TG3_APE_PER_LOCK_GRANT;
803
804 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
805 }
806
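/* Typical lock usage sketch (hypothetical caller), mirroring
 * tg3_ape_event_lock() below: bracket any access to state shared with
 * the APE firmware and honor the -EBUSY timeout:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...access APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */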
807 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
808 {
809 u32 apedata;
810
811 while (timeout_us) {
812 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
813 return -EBUSY;
814
815 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
816 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
817 break;
818
819 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
820
821 udelay(10);
822 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
823 }
824
825 return timeout_us ? 0 : -EBUSY;
826 }
827
828 #ifdef CONFIG_TIGON3_HWMON
829 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
830 {
831 u32 i, apedata;
832
833 for (i = 0; i < timeout_us / 10; i++) {
834 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
835
836 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
837 break;
838
839 udelay(10);
840 }
841
842 return i == timeout_us / 10;
843 }
844
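/* Scratchpad reads below are a request/response exchange with the APE
 * firmware: post a SCRTCHPD_READ event describing (offset, length) in
 * the shared message buffer, wait for the firmware to service it, then
 * drain the payload one word at a time, capped at the advertised buffer
 * length per round trip.
 */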
845 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
846 u32 len)
847 {
848 int err;
849 u32 i, bufoff, msgoff, maxlen, apedata;
850
851 if (!tg3_flag(tp, APE_HAS_NCSI))
852 return 0;
853
854 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
855 if (apedata != APE_SEG_SIG_MAGIC)
856 return -ENODEV;
857
858 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
859 if (!(apedata & APE_FW_STATUS_READY))
860 return -EAGAIN;
861
862 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
863 TG3_APE_SHMEM_BASE;
864 msgoff = bufoff + 2 * sizeof(u32);
865 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
866
867 while (len) {
868 u32 length;
869
870 /* Cap xfer sizes to scratchpad limits. */
871 length = (len > maxlen) ? maxlen : len;
872 len -= length;
873
874 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
875 if (!(apedata & APE_FW_STATUS_READY))
876 return -EAGAIN;
877
878 /* Wait for up to 1 msec for APE to service previous event. */
879 err = tg3_ape_event_lock(tp, 1000);
880 if (err)
881 return err;
882
883 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
884 APE_EVENT_STATUS_SCRTCHPD_READ |
885 APE_EVENT_STATUS_EVENT_PENDING;
886 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
887
888 tg3_ape_write32(tp, bufoff, base_off);
889 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
890
891 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
892 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
893
894 base_off += length;
895
896 if (tg3_ape_wait_for_event(tp, 30000))
897 return -EAGAIN;
898
899 for (i = 0; length; i += 4, length -= 4) {
900 u32 val = tg3_ape_read32(tp, msgoff + i);
901 memcpy(data, &val, sizeof(u32));
902 data++;
903 }
904 }
905
906 return 0;
907 }
908 #endif
909
910 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
911 {
912 int err;
913 u32 apedata;
914
915 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
916 if (apedata != APE_SEG_SIG_MAGIC)
917 return -EAGAIN;
918
919 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
920 if (!(apedata & APE_FW_STATUS_READY))
921 return -EAGAIN;
922
923 /* Wait for up to 20 milliseconds for APE to service previous event. */
924 err = tg3_ape_event_lock(tp, 20000);
925 if (err)
926 return err;
927
928 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
929 event | APE_EVENT_STATUS_EVENT_PENDING);
930
931 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
932 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
933
934 return 0;
935 }
936
937 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
938 {
939 u32 event;
940 u32 apedata;
941
942 if (!tg3_flag(tp, ENABLE_APE))
943 return;
944
945 switch (kind) {
946 case RESET_KIND_INIT:
947 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
948 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
949 APE_HOST_SEG_SIG_MAGIC);
950 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
951 APE_HOST_SEG_LEN_MAGIC);
952 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
953 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
954 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
955 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
956 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
957 APE_HOST_BEHAV_NO_PHYLOCK);
958 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
959 TG3_APE_HOST_DRVR_STATE_START);
960
961 event = APE_EVENT_STATUS_STATE_START;
962 break;
963 case RESET_KIND_SHUTDOWN:
964 if (device_may_wakeup(&tp->pdev->dev) &&
965 tg3_flag(tp, WOL_ENABLE)) {
966 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
967 TG3_APE_HOST_WOL_SPEED_AUTO);
968 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
969 } else
970 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
971
972 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
973
974 event = APE_EVENT_STATUS_STATE_UNLOAD;
975 break;
976 default:
977 return;
978 }
979
980 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
981
982 tg3_ape_send_event(tp, event);
983 }
984
985 static void tg3_send_ape_heartbeat(struct tg3 *tp,
986 unsigned long interval)
987 {
988 /* Send a heartbeat only if the interval has elapsed */
989 if (!tg3_flag(tp, ENABLE_APE) ||
990 time_before(jiffies, tp->ape_hb_jiffies + interval))
991 return;
992
993 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
994 tp->ape_hb_jiffies = jiffies;
995 }
996
997 static void tg3_disable_ints(struct tg3 *tp)
998 {
999 int i;
1000
1001 tw32(TG3PCI_MISC_HOST_CTRL,
1002 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
1003 for (i = 0; i < tp->irq_max; i++)
1004 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
1005 }
1006
1007 static void tg3_enable_ints(struct tg3 *tp)
1008 {
1009 int i;
1010
1011 tp->irq_sync = 0;
1012 wmb();
1013
1014 tw32(TG3PCI_MISC_HOST_CTRL,
1015 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1016
1017 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1018 for (i = 0; i < tp->irq_cnt; i++) {
1019 struct tg3_napi *tnapi = &tp->napi[i];
1020
1021 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1022 if (tg3_flag(tp, 1SHOT_MSI))
1023 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1024
1025 tp->coal_now |= tnapi->coal_now;
1026 }
1027
1028 /* Force an initial interrupt */
1029 if (!tg3_flag(tp, TAGGED_STATUS) &&
1030 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1031 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1032 else
1033 tw32(HOSTCC_MODE, tp->coal_now);
1034
1035 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1036 }
1037
1038 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1039 {
1040 struct tg3 *tp = tnapi->tp;
1041 struct tg3_hw_status *sblk = tnapi->hw_status;
1042 unsigned int work_exists = 0;
1043
1044 /* check for phy events */
1045 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1046 if (sblk->status & SD_STATUS_LINK_CHG)
1047 work_exists = 1;
1048 }
1049
1050 /* check for TX work to do */
1051 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1052 work_exists = 1;
1053
1054 /* check for RX work to do */
1055 if (tnapi->rx_rcb_prod_idx &&
1056 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1057 work_exists = 1;
1058
1059 return work_exists;
1060 }
1061
1062 /* tg3_int_reenable
1063 * similar to tg3_enable_ints, but it accurately determines whether there
1064 * is new work pending and can return without flushing the PIO write
1065 * which reenables interrupts
1066 */
1067 static void tg3_int_reenable(struct tg3_napi *tnapi)
1068 {
1069 struct tg3 *tp = tnapi->tp;
1070
1071 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1072
1073 /* When doing tagged status, this work check is unnecessary.
1074 * The last_tag we write above tells the chip which piece of
1075 * work we've completed.
1076 */
1077 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1078 tw32(HOSTCC_MODE, tp->coalesce_mode |
1079 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1080 }
1081
1082 static void tg3_switch_clocks(struct tg3 *tp)
1083 {
1084 u32 clock_ctrl;
1085 u32 orig_clock_ctrl;
1086
1087 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1088 return;
1089
1090 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1091
1092 orig_clock_ctrl = clock_ctrl;
1093 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1094 CLOCK_CTRL_CLKRUN_OENABLE |
1095 0x1f);
1096 tp->pci_clock_ctrl = clock_ctrl;
1097
1098 if (tg3_flag(tp, 5705_PLUS)) {
1099 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1100 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1102 }
1103 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1104 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1105 clock_ctrl |
1106 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1107 40);
1108 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1109 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1110 40);
1111 }
1112 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1113 }
1114
1115 #define PHY_BUSY_LOOPS 5000
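/* 5000 polls at 10 usec apiece bound an MDIO transaction to roughly
 * 50 msec before __tg3_readphy()/__tg3_writephy() give up with -EBUSY.
 */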
1116
1117 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1118 u32 *val)
1119 {
1120 u32 frame_val;
1121 unsigned int loops;
1122 int ret;
1123
1124 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1125 tw32_f(MAC_MI_MODE,
1126 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1127 udelay(80);
1128 }
1129
1130 tg3_ape_lock(tp, tp->phy_ape_lock);
1131
1132 *val = 0x0;
1133
1134 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1135 MI_COM_PHY_ADDR_MASK);
1136 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1137 MI_COM_REG_ADDR_MASK);
1138 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1139
1140 tw32_f(MAC_MI_COM, frame_val);
1141
1142 loops = PHY_BUSY_LOOPS;
1143 while (loops != 0) {
1144 udelay(10);
1145 frame_val = tr32(MAC_MI_COM);
1146
1147 if ((frame_val & MI_COM_BUSY) == 0) {
1148 udelay(5);
1149 frame_val = tr32(MAC_MI_COM);
1150 break;
1151 }
1152 loops -= 1;
1153 }
1154
1155 ret = -EBUSY;
1156 if (loops != 0) {
1157 *val = frame_val & MI_COM_DATA_MASK;
1158 ret = 0;
1159 }
1160
1161 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1162 tw32_f(MAC_MI_MODE, tp->mi_mode);
1163 udelay(80);
1164 }
1165
1166 tg3_ape_unlock(tp, tp->phy_ape_lock);
1167
1168 return ret;
1169 }
1170
1171 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1172 {
1173 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1174 }
1175
1176 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1177 u32 val)
1178 {
1179 u32 frame_val;
1180 unsigned int loops;
1181 int ret;
1182
1183 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1184 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1185 return 0;
1186
1187 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1188 tw32_f(MAC_MI_MODE,
1189 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1190 udelay(80);
1191 }
1192
1193 tg3_ape_lock(tp, tp->phy_ape_lock);
1194
1195 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1196 MI_COM_PHY_ADDR_MASK);
1197 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1198 MI_COM_REG_ADDR_MASK);
1199 frame_val |= (val & MI_COM_DATA_MASK);
1200 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1201
1202 tw32_f(MAC_MI_COM, frame_val);
1203
1204 loops = PHY_BUSY_LOOPS;
1205 while (loops != 0) {
1206 udelay(10);
1207 frame_val = tr32(MAC_MI_COM);
1208 if ((frame_val & MI_COM_BUSY) == 0) {
1209 udelay(5);
1210 frame_val = tr32(MAC_MI_COM);
1211 break;
1212 }
1213 loops -= 1;
1214 }
1215
1216 ret = -EBUSY;
1217 if (loops != 0)
1218 ret = 0;
1219
1220 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1221 tw32_f(MAC_MI_MODE, tp->mi_mode);
1222 udelay(80);
1223 }
1224
1225 tg3_ape_unlock(tp, tp->phy_ape_lock);
1226
1227 return ret;
1228 }
1229
1230 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1231 {
1232 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1233 }
1234
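/* Clause 45 access via Clause 22 registers: the two helpers below use
 * the standard MMD indirection. Select the device address through
 * MII_TG3_MMD_CTRL, latch the register address through
 * MII_TG3_MMD_ADDRESS, switch the control register to no-increment data
 * mode, then move the data word through MII_TG3_MMD_ADDRESS.
 */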
1235 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1236 {
1237 int err;
1238
1239 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1240 if (err)
1241 goto done;
1242
1243 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1244 if (err)
1245 goto done;
1246
1247 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1248 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1249 if (err)
1250 goto done;
1251
1252 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1253
1254 done:
1255 return err;
1256 }
1257
1258 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1259 {
1260 int err;
1261
1262 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1263 if (err)
1264 goto done;
1265
1266 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1267 if (err)
1268 goto done;
1269
1270 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1271 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1272 if (err)
1273 goto done;
1274
1275 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1276
1277 done:
1278 return err;
1279 }
1280
1281 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1282 {
1283 int err;
1284
1285 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1286 if (!err)
1287 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1288
1289 return err;
1290 }
1291
1292 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1293 {
1294 int err;
1295
1296 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1297 if (!err)
1298 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1299
1300 return err;
1301 }
1302
1303 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1304 {
1305 int err;
1306
1307 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1308 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1309 MII_TG3_AUXCTL_SHDWSEL_MISC);
1310 if (!err)
1311 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1312
1313 return err;
1314 }
1315
1316 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1317 {
1318 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1319 set |= MII_TG3_AUXCTL_MISC_WREN;
1320
1321 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1322 }
1323
1324 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1325 {
1326 u32 val;
1327 int err;
1328
1329 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1330
1331 if (err)
1332 return err;
1333
1334 if (enable)
1335 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336 else
1337 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1338
1339 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1340 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1341
1342 return err;
1343 }
1344
1345 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1346 {
1347 return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1348 reg | val | MII_TG3_MISC_SHDW_WREN);
1349 }
1350
1351 static int tg3_bmcr_reset(struct tg3 *tp)
1352 {
1353 u32 phy_control;
1354 int limit, err;
1355
1356 /* OK, reset it, and poll the BMCR_RESET bit until it
1357 * clears or we time out.
1358 */
1359 phy_control = BMCR_RESET;
1360 err = tg3_writephy(tp, MII_BMCR, phy_control);
1361 if (err != 0)
1362 return -EBUSY;
1363
1364 limit = 5000;
1365 while (limit--) {
1366 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1367 if (err != 0)
1368 return -EBUSY;
1369
1370 if ((phy_control & BMCR_RESET) == 0) {
1371 udelay(40);
1372 break;
1373 }
1374 udelay(10);
1375 }
1376 if (limit < 0)
1377 return -EBUSY;
1378
1379 return 0;
1380 }
1381
1382 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1383 {
1384 struct tg3 *tp = bp->priv;
1385 u32 val;
1386
1387 spin_lock_bh(&tp->lock);
1388
1389 if (__tg3_readphy(tp, mii_id, reg, &val))
1390 val = -EIO;
1391
1392 spin_unlock_bh(&tp->lock);
1393
1394 return val;
1395 }
1396
1397 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1398 {
1399 struct tg3 *tp = bp->priv;
1400 u32 ret = 0;
1401
1402 spin_lock_bh(&tp->lock);
1403
1404 if (__tg3_writephy(tp, mii_id, reg, val))
1405 ret = -EIO;
1406
1407 spin_unlock_bh(&tp->lock);
1408
1409 return ret;
1410 }
1411
1412 static void tg3_mdio_config_5785(struct tg3 *tp)
1413 {
1414 u32 val;
1415 struct phy_device *phydev;
1416
1417 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1418 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1419 case PHY_ID_BCM50610:
1420 case PHY_ID_BCM50610M:
1421 val = MAC_PHYCFG2_50610_LED_MODES;
1422 break;
1423 case PHY_ID_BCMAC131:
1424 val = MAC_PHYCFG2_AC131_LED_MODES;
1425 break;
1426 case PHY_ID_RTL8211C:
1427 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1428 break;
1429 case PHY_ID_RTL8201E:
1430 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1431 break;
1432 default:
1433 return;
1434 }
1435
1436 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1437 tw32(MAC_PHYCFG2, val);
1438
1439 val = tr32(MAC_PHYCFG1);
1440 val &= ~(MAC_PHYCFG1_RGMII_INT |
1441 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1442 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1443 tw32(MAC_PHYCFG1, val);
1444
1445 return;
1446 }
1447
1448 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1449 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1450 MAC_PHYCFG2_FMODE_MASK_MASK |
1451 MAC_PHYCFG2_GMODE_MASK_MASK |
1452 MAC_PHYCFG2_ACT_MASK_MASK |
1453 MAC_PHYCFG2_QUAL_MASK_MASK |
1454 MAC_PHYCFG2_INBAND_ENABLE;
1455
1456 tw32(MAC_PHYCFG2, val);
1457
1458 val = tr32(MAC_PHYCFG1);
1459 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1460 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1461 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1462 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1463 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1464 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1465 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1466 }
1467 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1468 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1469 tw32(MAC_PHYCFG1, val);
1470
1471 val = tr32(MAC_EXT_RGMII_MODE);
1472 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1473 MAC_RGMII_MODE_RX_QUALITY |
1474 MAC_RGMII_MODE_RX_ACTIVITY |
1475 MAC_RGMII_MODE_RX_ENG_DET |
1476 MAC_RGMII_MODE_TX_ENABLE |
1477 MAC_RGMII_MODE_TX_LOWPWR |
1478 MAC_RGMII_MODE_TX_RESET);
1479 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1480 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1481 val |= MAC_RGMII_MODE_RX_INT_B |
1482 MAC_RGMII_MODE_RX_QUALITY |
1483 MAC_RGMII_MODE_RX_ACTIVITY |
1484 MAC_RGMII_MODE_RX_ENG_DET;
1485 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1486 val |= MAC_RGMII_MODE_TX_ENABLE |
1487 MAC_RGMII_MODE_TX_LOWPWR |
1488 MAC_RGMII_MODE_TX_RESET;
1489 }
1490 tw32(MAC_EXT_RGMII_MODE, val);
1491 }
1492
1493 static void tg3_mdio_start(struct tg3 *tp)
1494 {
1495 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1496 tw32_f(MAC_MI_MODE, tp->mi_mode);
1497 udelay(80);
1498
1499 if (tg3_flag(tp, MDIOBUS_INITED) &&
1500 tg3_asic_rev(tp) == ASIC_REV_5785)
1501 tg3_mdio_config_5785(tp);
1502 }
1503
1504 static int tg3_mdio_init(struct tg3 *tp)
1505 {
1506 int i;
1507 u32 reg;
1508 struct phy_device *phydev;
1509
1510 if (tg3_flag(tp, 5717_PLUS)) {
1511 u32 is_serdes;
1512
1513 tp->phy_addr = tp->pci_fn + 1;
1514
1515 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1516 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1517 else
1518 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1519 TG3_CPMU_PHY_STRAP_IS_SERDES;
1520 if (is_serdes)
1521 tp->phy_addr += 7;
1522 } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1523 int addr;
1524
1525 addr = ssb_gige_get_phyaddr(tp->pdev);
1526 if (addr < 0)
1527 return addr;
1528 tp->phy_addr = addr;
1529 } else
1530 tp->phy_addr = TG3_PHY_MII_ADDR;
1531
1532 tg3_mdio_start(tp);
1533
1534 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1535 return 0;
1536
1537 tp->mdio_bus = mdiobus_alloc();
1538 if (tp->mdio_bus == NULL)
1539 return -ENOMEM;
1540
1541 tp->mdio_bus->name = "tg3 mdio bus";
1542 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
1543 tp->mdio_bus->priv = tp;
1544 tp->mdio_bus->parent = &tp->pdev->dev;
1545 tp->mdio_bus->read = &tg3_mdio_read;
1546 tp->mdio_bus->write = &tg3_mdio_write;
1547 tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1548
1549 /* The bus registration will look for all the PHYs on the mdio bus.
1550 * Unfortunately, it does not ensure the PHY is powered up before
1551 * accessing the PHY ID registers. A chip reset is the
1552 * quickest way to bring the device back to an operational state.
1553 */
1554 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1555 tg3_bmcr_reset(tp);
1556
1557 i = mdiobus_register(tp->mdio_bus);
1558 if (i) {
1559 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1560 mdiobus_free(tp->mdio_bus);
1561 return i;
1562 }
1563
1564 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1565
1566 if (!phydev || !phydev->drv) {
1567 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1568 mdiobus_unregister(tp->mdio_bus);
1569 mdiobus_free(tp->mdio_bus);
1570 return -ENODEV;
1571 }
1572
1573 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1574 case PHY_ID_BCM57780:
1575 phydev->interface = PHY_INTERFACE_MODE_GMII;
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 break;
1578 case PHY_ID_BCM50610:
1579 case PHY_ID_BCM50610M:
1580 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1581 PHY_BRCM_RX_REFCLK_UNUSED |
1582 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1583 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1584 fallthrough;
1585 case PHY_ID_RTL8211C:
1586 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1587 break;
1588 case PHY_ID_RTL8201E:
1589 case PHY_ID_BCMAC131:
1590 phydev->interface = PHY_INTERFACE_MODE_MII;
1591 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1592 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1593 break;
1594 }
1595
1596 tg3_flag_set(tp, MDIOBUS_INITED);
1597
1598 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1599 tg3_mdio_config_5785(tp);
1600
1601 return 0;
1602 }
1603
1604 static void tg3_mdio_fini(struct tg3 *tp)
1605 {
1606 if (tg3_flag(tp, MDIOBUS_INITED)) {
1607 tg3_flag_clear(tp, MDIOBUS_INITED);
1608 mdiobus_unregister(tp->mdio_bus);
1609 mdiobus_free(tp->mdio_bus);
1610 }
1611 }
1612
1613 /* tp->lock is held. */
1614 static inline void tg3_generate_fw_event(struct tg3 *tp)
1615 {
1616 u32 val;
1617
1618 val = tr32(GRC_RX_CPU_EVENT);
1619 val |= GRC_RX_CPU_DRIVER_EVENT;
1620 tw32_f(GRC_RX_CPU_EVENT, val);
1621
1622 tp->last_event_jiffies = jiffies;
1623 }
1624
1625 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1626
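/* Worst case, tg3_wait_for_event_ack() below polls (2500 >> 3) + 1 =
 * 313 times at 8 usec apiece, i.e. roughly the full 2.5 msec firmware
 * event timeout.
 */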
1627 /* tp->lock is held. */
1628 static void tg3_wait_for_event_ack(struct tg3 *tp)
1629 {
1630 int i;
1631 unsigned int delay_cnt;
1632 long time_remain;
1633
1634 /* If enough time has passed, no wait is necessary. */
1635 time_remain = (long)(tp->last_event_jiffies + 1 +
1636 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1637 (long)jiffies;
1638 if (time_remain < 0)
1639 return;
1640
1641 /* Check if we can shorten the wait time. */
1642 delay_cnt = jiffies_to_usecs(time_remain);
1643 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1644 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1645 delay_cnt = (delay_cnt >> 3) + 1;
1646
1647 for (i = 0; i < delay_cnt; i++) {
1648 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1649 break;
1650 if (pci_channel_offline(tp->pdev))
1651 break;
1652
1653 udelay(8);
1654 }
1655 }
1656
1657 /* tp->lock is held. */
1658 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1659 {
1660 u32 reg, val;
1661
1662 val = 0;
1663 if (!tg3_readphy(tp, MII_BMCR, &reg))
1664 val = reg << 16;
1665 if (!tg3_readphy(tp, MII_BMSR, &reg))
1666 val |= (reg & 0xffff);
1667 *data++ = val;
1668
1669 val = 0;
1670 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1671 val = reg << 16;
1672 if (!tg3_readphy(tp, MII_LPA, &reg))
1673 val |= (reg & 0xffff);
1674 *data++ = val;
1675
1676 val = 0;
1677 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1678 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1679 val = reg << 16;
1680 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1681 val |= (reg & 0xffff);
1682 }
1683 *data++ = val;
1684
1685 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1686 val = reg << 16;
1687 else
1688 val = 0;
1689 *data++ = val;
1690 }
1691
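/* Driver-to-firmware mailbox handshake, sketched from the code below:
 * wait for the previous GRC_RX_CPU_DRIVER_EVENT to be acked, deposit
 * the command, length and payload in NIC SRAM, then raise the event
 * again. The length of 14 presumably counts the three full data words
 * plus the 16-bit PHY address in data[3].
 */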
1692 /* tp->lock is held. */
1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695 u32 data[4];
1696
1697 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698 return;
1699
1700 tg3_phy_gather_ump_data(tp, data);
1701
1702 tg3_wait_for_event_ack(tp);
1703
1704 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711 tg3_generate_fw_event(tp);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718 /* Wait for RX cpu to ACK the previous event. */
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723 tg3_generate_fw_event(tp);
1724
1725 /* Wait for RX cpu to ACK this event. */
1726 tg3_wait_for_event_ack(tp);
1727 }
1728 }
1729
1730 /* tp->lock is held. */
1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 switch (kind) {
1738 case RESET_KIND_INIT:
1739 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740 DRV_STATE_START);
1741 break;
1742
1743 case RESET_KIND_SHUTDOWN:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 DRV_STATE_UNLOAD);
1746 break;
1747
1748 case RESET_KIND_SUSPEND:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 DRV_STATE_SUSPEND);
1751 break;
1752
1753 default:
1754 break;
1755 }
1756 }
1757 }
1758
1759 /* tp->lock is held. */
1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 switch (kind) {
1764 case RESET_KIND_INIT:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_START_DONE);
1767 break;
1768
1769 case RESET_KIND_SHUTDOWN:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 DRV_STATE_UNLOAD_DONE);
1772 break;
1773
1774 default:
1775 break;
1776 }
1777 }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783 if (tg3_flag(tp, ENABLE_ASF)) {
1784 switch (kind) {
1785 case RESET_KIND_INIT:
1786 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 DRV_STATE_START);
1788 break;
1789
1790 case RESET_KIND_SHUTDOWN:
1791 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 DRV_STATE_UNLOAD);
1793 break;
1794
1795 case RESET_KIND_SUSPEND:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 DRV_STATE_SUSPEND);
1798 break;
1799
1800 default:
1801 break;
1802 }
1803 }
1804 }
1805
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808 int i;
1809 u32 val;
1810
1811 if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 return 0;
1813
1814 if (tg3_flag(tp, IS_SSB_CORE)) {
1815 /* We don't use firmware. */
1816 return 0;
1817 }
1818
1819 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820 /* Wait up to 20ms for init done. */
1821 for (i = 0; i < 200; i++) {
1822 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 return 0;
1824 if (pci_channel_offline(tp->pdev))
1825 return -ENODEV;
1826
1827 udelay(100);
1828 }
1829 return -ENODEV;
1830 }
1831
1832 /* Wait for firmware initialization to complete. */
1833 for (i = 0; i < 100000; i++) {
1834 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 break;
1837 if (pci_channel_offline(tp->pdev)) {
1838 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840 netdev_info(tp->dev, "No firmware running\n");
1841 }
1842
1843 break;
1844 }
1845
1846 udelay(10);
1847 }
1848
1849 /* Chip might not be fitted with firmware. Some Sun onboard
1850 * parts are configured like that. So don't signal the timeout
1851 * of the above loop as an error, but do report the lack of
1852 * running firmware once.
1853 */
1854 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861 /* The 57765 A0 needs a little more
1862 * time to do some important work.
1863 */
1864 mdelay(10);
1865 }
1866
1867 return 0;
1868 }
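/* The mailbox poll above completes a handshake started before the reset:
 * tg3_write_sig_pre_reset() stores NIC_SRAM_FIRMWARE_MBOX_MAGIC1, and the
 * bootcode signals init-done by writing back the one's complement of that
 * magic. The loop therefore waits for ~MAGIC1, giving up after about one
 * second (100000 polls of 10 us) without treating the timeout as fatal.
 */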
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872 if (!netif_carrier_ok(tp->dev)) {
1873 netif_info(tp, link, tp->dev, "Link is down\n");
1874 tg3_ump_link_report(tp);
1875 } else if (netif_msg_link(tp)) {
1876 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877 (tp->link_config.active_speed == SPEED_1000 ?
1878 1000 :
1879 (tp->link_config.active_speed == SPEED_100 ?
1880 100 : 10)),
1881 (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 "full" : "half"));
1883
1884 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 "on" : "off",
1887 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 "on" : "off");
1889
1890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891 netdev_info(tp->dev, "EEE is %s\n",
1892 tp->setlpicnt ? "enabled" : "disabled");
1893
1894 tg3_ump_link_report(tp);
1895 }
1896
1897 tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902 u32 flowctrl = 0;
1903
1904 if (adv & ADVERTISE_PAUSE_CAP) {
1905 flowctrl |= FLOW_CTRL_RX;
1906 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907 flowctrl |= FLOW_CTRL_TX;
1908 } else if (adv & ADVERTISE_PAUSE_ASYM)
1909 flowctrl |= FLOW_CTRL_TX;
1910
1911 return flowctrl;
1912 }
1913
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916 u16 miireg;
1917
1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919 miireg = ADVERTISE_1000XPAUSE;
1920 else if (flow_ctrl & FLOW_CTRL_TX)
1921 miireg = ADVERTISE_1000XPSE_ASYM;
1922 else if (flow_ctrl & FLOW_CTRL_RX)
1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924 else
1925 miireg = 0;
1926
1927 return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932 u32 flowctrl = 0;
1933
1934 if (adv & ADVERTISE_1000XPAUSE) {
1935 flowctrl |= FLOW_CTRL_RX;
1936 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937 flowctrl |= FLOW_CTRL_TX;
1938 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939 flowctrl |= FLOW_CTRL_TX;
1940
1941 return flowctrl;
1942 }
1943
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946 u8 cap = 0;
1947
1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951 if (lcladv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_RX;
1953 if (rmtadv & ADVERTISE_1000XPAUSE)
1954 cap = FLOW_CTRL_TX;
1955 }
1956
1957 return cap;
1958 }
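/* tg3_resolve_flowctrl_1000X() is the standard 802.3x pause resolution
 * table (compare mii_resolve_flowctrl_fdx()) restated for the 1000BASE-X
 * advertisement bits: symmetric pause on both ends enables flow control
 * in both directions, while the asymmetric-pause combinations enable
 * exactly one direction, depending on which side also advertised
 * symmetric pause.
 */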
1959
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962 u8 autoneg;
1963 u8 flowctrl = 0;
1964 u32 old_rx_mode = tp->rx_mode;
1965 u32 old_tx_mode = tp->tx_mode;
1966
1967 if (tg3_flag(tp, USE_PHYLIB))
1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969 else
1970 autoneg = tp->link_config.autoneg;
1971
1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975 else
1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977 } else
1978 flowctrl = tp->link_config.flowctrl;
1979
1980 tp->link_config.active_flowctrl = flowctrl;
1981
1982 if (flowctrl & FLOW_CTRL_RX)
1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984 else
1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987 if (old_rx_mode != tp->rx_mode)
1988 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990 if (flowctrl & FLOW_CTRL_TX)
1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992 else
1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995 if (old_tx_mode != tp->tx_mode)
1996 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001 u8 oldflowctrl, linkmesg = 0;
2002 u32 mac_mode, lcl_adv, rmt_adv;
2003 struct tg3 *tp = netdev_priv(dev);
2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006 spin_lock_bh(&tp->lock);
2007
2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009 MAC_MODE_HALF_DUPLEX);
2010
2011 oldflowctrl = tp->link_config.active_flowctrl;
2012
2013 if (phydev->link) {
2014 lcl_adv = 0;
2015 rmt_adv = 0;
2016
2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018 mac_mode |= MAC_MODE_PORT_MODE_MII;
2019 else if (phydev->speed == SPEED_1000 ||
2020 tg3_asic_rev(tp) != ASIC_REV_5785)
2021 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022 else
2023 mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025 if (phydev->duplex == DUPLEX_HALF)
2026 mac_mode |= MAC_MODE_HALF_DUPLEX;
2027 else {
2028 lcl_adv = mii_advertise_flowctrl(
2029 tp->link_config.flowctrl);
2030
2031 if (phydev->pause)
2032 rmt_adv = LPA_PAUSE_CAP;
2033 if (phydev->asym_pause)
2034 rmt_adv |= LPA_PAUSE_ASYM;
2035 }
2036
2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038 } else
2039 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041 if (mac_mode != tp->mac_mode) {
2042 tp->mac_mode = mac_mode;
2043 tw32_f(MAC_MODE, tp->mac_mode);
2044 udelay(40);
2045 }
2046
2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048 if (phydev->speed == SPEED_10)
2049 tw32(MAC_MI_STAT,
2050 MAC_MI_STAT_10MBPS_MODE |
2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 else
2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 }
2055
2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057 tw32(MAC_TX_LENGTHS,
2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059 (6 << TX_LENGTHS_IPG_SHIFT) |
2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061 else
2062 tw32(MAC_TX_LENGTHS,
2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064 (6 << TX_LENGTHS_IPG_SHIFT) |
2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067 if (phydev->link != tp->old_link ||
2068 phydev->speed != tp->link_config.active_speed ||
2069 phydev->duplex != tp->link_config.active_duplex ||
2070 oldflowctrl != tp->link_config.active_flowctrl)
2071 linkmesg = 1;
2072
2073 tp->old_link = phydev->link;
2074 tp->link_config.active_speed = phydev->speed;
2075 tp->link_config.active_duplex = phydev->duplex;
2076
2077 spin_unlock_bh(&tp->lock);
2078
2079 if (linkmesg)
2080 tg3_link_report(tp);
2081 }
2082
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085 struct phy_device *phydev;
2086
2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088 return 0;
2089
2090 /* Bring the PHY back to a known state. */
2091 tg3_bmcr_reset(tp);
2092
2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095 /* Attach the MAC to the PHY. */
2096 phydev = phy_connect(tp->dev, phydev_name(phydev),
2097 tg3_adjust_link, phydev->interface);
2098 if (IS_ERR(phydev)) {
2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100 return PTR_ERR(phydev);
2101 }
2102
2103 /* Mask with MAC supported features. */
2104 switch (phydev->interface) {
2105 case PHY_INTERFACE_MODE_GMII:
2106 case PHY_INTERFACE_MODE_RGMII:
2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108 phy_set_max_speed(phydev, SPEED_1000);
2109 phy_support_asym_pause(phydev);
2110 break;
2111 }
2112 fallthrough;
2113 case PHY_INTERFACE_MODE_MII:
2114 phy_set_max_speed(phydev, SPEED_100);
2115 phy_support_asym_pause(phydev);
2116 break;
2117 default:
2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phy_attached_info(phydev);
2125
2126 return 0;
2127 }
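/* A sketch of the phylib handoff above: tg3_bmcr_reset() first puts the
 * PHY in a known state, phy_connect() registers tg3_adjust_link() as the
 * link-change callback, and the interface-mode switch then trims the
 * advertised features to what the MAC supports (100 Mbps max for
 * 10/100-only devices, gigabit otherwise, plus asymmetric pause) before
 * TG3_PHYFLG_IS_CONNECTED is set.
 */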
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 ethtool_convert_legacy_u32_to_link_mode(
2144 phydev->advertising, tp->link_config.advertising);
2145 }
2146
2147 phy_start(phydev);
2148
2149 phy_start_aneg(phydev);
2150 }
2151
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 return;
2156
2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165 }
2166 }
2167
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170 int err;
2171 u32 val;
2172
2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174 return 0;
2175
2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177 /* Cannot do read-modify-write on 5401 */
2178 err = tg3_phy_auxctl_write(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181 0x4c20);
2182 goto done;
2183 }
2184
2185 err = tg3_phy_auxctl_read(tp,
2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187 if (err)
2188 return err;
2189
2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191 err = tg3_phy_auxctl_write(tp,
2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193
2194 done:
2195 return err;
2196 }
2197
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200 u32 phytest;
2201
2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203 u32 phy;
2204
2205 tg3_writephy(tp, MII_TG3_FET_TEST,
2206 phytest | MII_TG3_FET_SHADOW_EN);
2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 if (enable)
2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 else
2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 }
2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215 }
2216 }
2217
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220 u32 reg;
2221
2222 if (!tg3_flag(tp, 5705_PLUS) ||
2223 (tg3_flag(tp, 5717_PLUS) &&
2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225 return;
2226
2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228 tg3_phy_fet_toggle_apd(tp, enable);
2229 return;
2230 }
2231
2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234 MII_TG3_MISC_SHDW_SCR5_SDTL |
2235 MII_TG3_MISC_SHDW_SCR5_C125OE;
2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238
2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240
2241
2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243 if (enable)
2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245
2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
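/* APD is the PHY's auto power-down mode. Both writes above go through the
 * MISC shadow register window: the first programs the SCR5 power-saving
 * policy bits, the second arms the APD enable together with an 84 ms wake
 * timer. FET-style PHYs use a different shadow scheme and are handled in
 * tg3_phy_fet_toggle_apd() instead.
 */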
2248
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251 u32 phy;
2252
2253 if (!tg3_flag(tp, 5705_PLUS) ||
2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255 return;
2256
2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258 u32 ephy;
2259
2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262
2263 tg3_writephy(tp, MII_TG3_FET_TEST,
2264 ephy | MII_TG3_FET_SHADOW_EN);
2265 if (!tg3_readphy(tp, reg, &phy)) {
2266 if (enable)
2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268 else
2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 tg3_writephy(tp, reg, phy);
2271 }
2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273 }
2274 } else {
2275 int ret;
2276
2277 ret = tg3_phy_auxctl_read(tp,
2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279 if (!ret) {
2280 if (enable)
2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282 else
2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 tg3_phy_auxctl_write(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286 }
2287 }
2288 }
2289
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292 int ret;
2293 u32 val;
2294
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296 return;
2297
2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299 if (!ret)
2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306 u32 otp, phy;
2307
2308 if (!tp->phy_otp)
2309 return;
2310
2311 otp = tp->phy_otp;
2312
2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314 return;
2315
2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319
2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323
2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327
2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330
2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333
2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337
2338 tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2342 {
2343 u32 val;
2344 struct ethtool_keee *dest = &tp->eee;
2345
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347 return;
2348
2349 if (eee)
2350 dest = eee;
2351
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353 return;
2354
2355 /* Pull eee_active */
2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 dest->eee_active = 1;
2359 } else
2360 dest->eee_active = 0;
2361
2362 /* Pull lp advertised settings */
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 return;
2365 mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2366
2367 /* Pull advertised and eee_enabled settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 return;
2370 dest->eee_enabled = !!val;
2371 mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2372
2373 /* Pull tx_lpi_enabled */
2374 val = tr32(TG3_CPMU_EEE_MODE);
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376
2377 /* Pull lpi timer value */
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
2380
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383 u32 val;
2384
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386 return;
2387
2388 tp->setlpicnt = 0;
2389
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 current_link_up &&
2392 tp->link_config.active_duplex == DUPLEX_FULL &&
2393 (tp->link_config.active_speed == SPEED_100 ||
2394 tp->link_config.active_speed == SPEED_1000)) {
2395 u32 eeectl;
2396
2397 if (tp->link_config.active_speed == SPEED_1000)
2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 else
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401
2402 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403
2404 tg3_eee_pull_config(tp, NULL);
2405 if (tp->eee.eee_active)
2406 tp->setlpicnt = 2;
2407 }
2408
2409 if (!tp->setlpicnt) {
2410 if (current_link_up &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 tg3_phy_toggle_auxctl_smdsp(tp, false);
2414 }
2415
2416 val = tr32(TG3_CPMU_EEE_MODE);
2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418 }
2419 }
2420
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423 u32 val;
2424
2425 if (tp->link_config.active_speed == SPEED_1000 &&
2426 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 tg3_flag(tp, 57765_CLASS)) &&
2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 val = MII_TG3_DSP_TAP26_ALNOKO |
2431 MII_TG3_DSP_TAP26_RMRXSTO;
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 }
2435
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2441 {
2442 int limit = 100;
2443
2444 while (limit--) {
2445 u32 tmp32;
2446
2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 if ((tmp32 & 0x1000) == 0)
2449 break;
2450 }
2451 }
2452 if (limit < 0)
2453 return -EBUSY;
2454
2455 return 0;
2456 }
2457
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460 static const u32 test_pat[4][6] = {
2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465 };
2466 int chan;
2467
2468 for (chan = 0; chan < 4; chan++) {
2469 int i;
2470
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 (chan * 0x2000) | 0x0200);
2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474
2475 for (i = 0; i < 6; i++)
2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477 test_pat[chan][i]);
2478
2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 if (tg3_wait_macro_done(tp)) {
2481 *resetp = 1;
2482 return -EBUSY;
2483 }
2484
2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 (chan * 0x2000) | 0x0200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 if (tg3_wait_macro_done(tp)) {
2489 *resetp = 1;
2490 return -EBUSY;
2491 }
2492
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 if (tg3_wait_macro_done(tp)) {
2495 *resetp = 1;
2496 return -EBUSY;
2497 }
2498
2499 for (i = 0; i < 6; i += 2) {
2500 u32 low, high;
2501
2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 tg3_wait_macro_done(tp)) {
2505 *resetp = 1;
2506 return -EBUSY;
2507 }
2508 low &= 0x7fff;
2509 high &= 0x000f;
2510 if (low != test_pat[chan][i] ||
2511 high != test_pat[chan][i+1]) {
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515
2516 return -EBUSY;
2517 }
2518 }
2519 }
2520
2521 return 0;
2522 }
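/* The test above exercises all four twisted-pair channels: each pass loads
 * a six-word pattern into the DSP at (chan * 0x2000) | 0x0200, sequences
 * the macro through the 0x0202, 0x0082 and 0x0802 control writes, then
 * reads the pattern back and compares the low 15 bits and high 4 bits of
 * every word pair. A macro timeout requests another PHY reset via
 * *resetp; a data mismatch just fails with -EBUSY.
 */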
2523
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526 int chan;
2527
2528 for (chan = 0; chan < 4; chan++) {
2529 int i;
2530
2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 (chan * 0x2000) | 0x0200);
2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 for (i = 0; i < 6; i++)
2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 if (tg3_wait_macro_done(tp))
2538 return -EBUSY;
2539 }
2540
2541 return 0;
2542 }
2543
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546 u32 reg32, phy9_orig;
2547 int retries, do_phy_reset, err;
2548
2549 retries = 10;
2550 do_phy_reset = 1;
2551 do {
2552 if (do_phy_reset) {
2553 err = tg3_bmcr_reset(tp);
2554 if (err)
2555 return err;
2556 do_phy_reset = 0;
2557 }
2558
2559 /* Disable transmitter and interrupt. */
2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561 continue;
2562
2563 reg32 |= 0x3000;
2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565
2566 /* Set full-duplex, 1000 Mbps. */
2567 tg3_writephy(tp, MII_BMCR,
2568 BMCR_FULLDPLX | BMCR_SPEED1000);
2569
2570 /* Set to master mode. */
2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572 continue;
2573
2574 tg3_writephy(tp, MII_CTRL1000,
2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576
2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578 if (err)
2579 return err;
2580
2581 /* Block the PHY control access. */
2582 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583
2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585 if (!err)
2586 break;
2587 } while (--retries);
2588
2589 err = tg3_phy_reset_chanpat(tp);
2590 if (err)
2591 return err;
2592
2593 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597
2598 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599
2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601
2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603 if (err)
2604 return err;
2605
2606 reg32 &= ~0x3000;
2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608
2609 return 0;
2610 }
2611
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614 netif_carrier_off(tp->dev);
2615 tp->link_up = false;
2616 }
2617
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620 if (tg3_flag(tp, ENABLE_ASF))
2621 netdev_warn(tp->dev,
2622 "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624
2625 /* This will reset the tigon3 PHY unconditionally and reapply the
2626 * chip- and PHY-specific workarounds that a reset wipes out.
2627 */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630 u32 val, cpmuctrl;
2631 int err;
2632
2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 val = tr32(GRC_MISC_CFG);
2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636 udelay(40);
2637 }
2638 err = tg3_readphy(tp, MII_BMSR, &val);
2639 err |= tg3_readphy(tp, MII_BMSR, &val);
2640 if (err != 0)
2641 return -EBUSY;
2642
2643 if (netif_running(tp->dev) && tp->link_up) {
2644 netif_carrier_off(tp->dev);
2645 tg3_link_report(tp);
2646 }
2647
2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 err = tg3_phy_reset_5703_4_5(tp);
2652 if (err)
2653 return err;
2654 goto out;
2655 }
2656
2657 cpmuctrl = 0;
2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662 tw32(TG3_CPMU_CTRL,
2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664 }
2665
2666 err = tg3_bmcr_reset(tp);
2667 if (err)
2668 return err;
2669
2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673
2674 tw32(TG3_CPMU_CTRL, cpmuctrl);
2675 }
2676
2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 udelay(40);
2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685 }
2686 }
2687
2688 if (tg3_flag(tp, 5717_PLUS) &&
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690 return 0;
2691
2692 tg3_phy_apply_otp(tp);
2693
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 tg3_phy_toggle_apd(tp, true);
2696 else
2697 tg3_phy_toggle_apd(tp, false);
2698
2699 out:
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2705 }
2706
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 }
2711
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 }
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 tg3_writephy(tp, MII_TG3_TEST1,
2725 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 } else
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728
2729 tg3_phy_toggle_auxctl_smdsp(tp, false);
2730 }
2731 }
2732
2733 /* Set the extended packet length bit (bit 14) on all chips
2734 * that support jumbo frames. */
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 /* Cannot do read-modify-write on 5401 */
2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 /* Set bit 14 with read-modify-write to preserve other bits */
2740 err = tg3_phy_auxctl_read(tp,
2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 if (!err)
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745 }
2746
2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 * jumbo frames transmission.
2749 */
2750 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754 }
2755
2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 /* adjust output voltage */
2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759 }
2760
2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763
2764 tg3_phy_toggle_automdix(tp, true);
2765 tg3_phy_set_wirespeed(tp);
2766 return 0;
2767 }
2768
2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2772 TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 12))
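/* Each PCI function owns a 4-bit slice of this shared status word, so for
 * example function 2's driver-present flag is TG3_GPIO_MSG_DRVR_PRES << 8.
 * tg3_set_function_status() below replaces only the caller's slice, and
 * the *_ALL_*_MASK values let any function test every peer at once.
 */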
2784
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787 u32 status, shift;
2788
2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 tg3_asic_rev(tp) == ASIC_REV_5719)
2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 else
2793 status = tr32(TG3_CPMU_DRV_STATUS);
2794
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 status |= (newstat << shift);
2798
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 else
2803 tw32(TG3_CPMU_DRV_STATUS, status);
2804
2805 return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
2807
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810 if (!tg3_flag(tp, IS_NIC))
2811 return 0;
2812
2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817 return -EIO;
2818
2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823
2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 } else {
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 }
2829
2830 return 0;
2831 }
2832
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835 u32 grc_local_ctrl;
2836
2837 if (!tg3_flag(tp, IS_NIC) ||
2838 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 tg3_asic_rev(tp) == ASIC_REV_5701)
2840 return;
2841
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859 if (!tg3_flag(tp, IS_NIC))
2860 return;
2861
2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 (GRC_LCLCTRL_GPIO_OE0 |
2866 GRC_LCLCTRL_GPIO_OE1 |
2867 GRC_LCLCTRL_GPIO_OE2 |
2868 GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 GRC_LCLCTRL_GPIO_OUTPUT1),
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 GRC_LCLCTRL_GPIO_OE1 |
2876 GRC_LCLCTRL_GPIO_OE2 |
2877 GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 tp->grc_local_ctrl;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 } else {
2891 u32 no_gpio2;
2892 u32 grc_local_ctrl = 0;
2893
2894 /* Workaround to prevent overdrawing Amps. */
2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 grc_local_ctrl,
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 }
2901
2902 /* On 5753 and variants, GPIO2 cannot be used. */
2903 no_gpio2 = tp->nic_sram_data_cfg &
2904 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 GRC_LCLCTRL_GPIO_OE1 |
2908 GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 if (no_gpio2) {
2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 }
2915 tw32_wait_f(GRC_LOCAL_CTRL,
2916 tp->grc_local_ctrl | grc_local_ctrl,
2917 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925 if (!no_gpio2) {
2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 }
2931 }
2932 }
2933
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936 u32 msg = 0;
2937
2938 /* Serialize power state transitions */
2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940 return;
2941
2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 msg = TG3_GPIO_MSG_NEED_VAUX;
2944
2945 msg = tg3_set_function_status(tp, msg);
2946
2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948 goto done;
2949
2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 tg3_pwrsrc_switch_to_vaux(tp);
2952 else
2953 tg3_pwrsrc_die_with_vmain(tp);
2954
2955 done:
2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961 bool need_vaux = false;
2962
2963 /* The GPIOs do something completely different on 57765. */
2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965 return;
2966
2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 tg3_frob_aux_power_5717(tp, include_wol ?
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2972 return;
2973 }
2974
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 struct net_device *dev_peer;
2977
2978 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979
2980 /* remove_one() may have been run on the peer. */
2981 if (dev_peer) {
2982 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983
2984 if (tg3_flag(tp_peer, INIT_COMPLETE))
2985 return;
2986
2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 tg3_flag(tp_peer, ENABLE_ASF))
2989 need_vaux = true;
2990 }
2991 }
2992
2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 tg3_flag(tp, ENABLE_ASF))
2995 need_vaux = true;
2996
2997 if (need_vaux)
2998 tg3_pwrsrc_switch_to_vaux(tp);
2999 else
3000 tg3_pwrsrc_die_with_vmain(tp);
3001 }
3002
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 return 1;
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 if (speed != SPEED_10)
3009 return 1;
3010 } else if (speed == SPEED_10)
3011 return 1;
3012
3013 return 0;
3014 }
3015
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018 switch (tg3_asic_rev(tp)) {
3019 case ASIC_REV_5700:
3020 case ASIC_REV_5704:
3021 return true;
3022 case ASIC_REV_5780:
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 return true;
3025 return false;
3026 case ASIC_REV_5717:
3027 if (!tp->pci_fn)
3028 return true;
3029 return false;
3030 case ASIC_REV_5719:
3031 case ASIC_REV_5720:
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 !tp->pci_fn)
3034 return true;
3035 return false;
3036 }
3037
3038 return false;
3039 }
3040
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043 switch (tg3_asic_rev(tp)) {
3044 case ASIC_REV_5719:
3045 case ASIC_REV_5720:
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047 !tp->pci_fn)
3048 return true;
3049 return false;
3050 }
3051
3052 return false;
3053 }
3054
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057 u32 val;
3058
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060 return;
3061
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066
3067 sg_dig_ctrl |=
3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071 }
3072 return;
3073 }
3074
3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 tg3_bmcr_reset(tp);
3077 val = tr32(GRC_MISC_CFG);
3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079 udelay(40);
3080 return;
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 u32 phytest;
3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084 u32 phy;
3085
3086 tg3_writephy(tp, MII_ADVERTISE, 0);
3087 tg3_writephy(tp, MII_BMCR,
3088 BMCR_ANENABLE | BMCR_ANRESTART);
3089
3090 tg3_writephy(tp, MII_TG3_FET_TEST,
3091 phytest | MII_TG3_FET_SHADOW_EN);
3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094 tg3_writephy(tp,
3095 MII_TG3_FET_SHDW_AUXMODE4,
3096 phy);
3097 }
3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099 }
3100 return;
3101 } else if (do_low_power) {
3102 if (!tg3_phy_led_bug(tp))
3103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105
3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110 }
3111
3112 /* The PHY should not be powered down on some chips because
3113 * of bugs.
3114 */
3115 if (tg3_phy_power_bug(tp))
3116 return;
3117
3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124 }
3125
3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132 if (tg3_flag(tp, NVRAM)) {
3133 int i;
3134
3135 if (tp->nvram_lock_cnt == 0) {
3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 for (i = 0; i < 8000; i++) {
3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139 break;
3140 udelay(20);
3141 }
3142 if (i == 8000) {
3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144 return -ENODEV;
3145 }
3146 }
3147 tp->nvram_lock_cnt++;
3148 }
3149 return 0;
3150 }
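/* The "lock" above is the NVRAM software arbitration register: the driver
 * raises its request bit (SWARB_REQ_SET1) and spins up to 8000 * 20 us =
 * 160 ms for the matching grant (SWARB_GNT1), withdrawing the request on
 * timeout. nvram_lock_cnt makes the lock recursive for nested callers
 * already running under tp->lock.
 */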
3151
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155 if (tg3_flag(tp, NVRAM)) {
3156 if (tp->nvram_lock_cnt > 0)
3157 tp->nvram_lock_cnt--;
3158 if (tp->nvram_lock_cnt == 0)
3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 }
3161 }
3162
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 u32 nvaccess = tr32(NVRAM_ACCESS);
3168
3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170 }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180 }
3181 }
3182
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 u32 offset, u32 *val)
3185 {
3186 u32 tmp;
3187 int i;
3188
3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190 return -EINVAL;
3191
3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 EEPROM_ADDR_DEVID_MASK |
3194 EEPROM_ADDR_READ);
3195 tw32(GRC_EEPROM_ADDR,
3196 tmp |
3197 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 EEPROM_ADDR_ADDR_MASK) |
3200 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201
3202 for (i = 0; i < 1000; i++) {
3203 tmp = tr32(GRC_EEPROM_ADDR);
3204
3205 if (tmp & EEPROM_ADDR_COMPLETE)
3206 break;
3207 msleep(1);
3208 }
3209 if (!(tmp & EEPROM_ADDR_COMPLETE))
3210 return -EBUSY;
3211
3212 tmp = tr32(GRC_EEPROM_DATA);
3213
3214 /*
3215 * The data will always be opposite the native endian
3216 * format. Perform a blind byteswap to compensate.
3217 */
3218 *val = swab32(tmp);
3219
3220 return 0;
3221 }
3222
3223 #define NVRAM_CMD_TIMEOUT 10000
3224
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227 int i;
3228
3229 tw32(NVRAM_CMD, nvram_cmd);
3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 usleep_range(10, 40);
3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233 udelay(10);
3234 break;
3235 }
3236 }
3237
3238 if (i == NVRAM_CMD_TIMEOUT)
3239 return -EBUSY;
3240
3241 return 0;
3242 }
3243
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246 if (tg3_flag(tp, NVRAM) &&
3247 tg3_flag(tp, NVRAM_BUFFERED) &&
3248 tg3_flag(tp, FLASH) &&
3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251
3252 addr = ((addr / tp->nvram_pagesize) <<
3253 ATMEL_AT45DB0X1B_PAGE_POS) +
3254 (addr % tp->nvram_pagesize);
3255
3256 return addr;
3257 }
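/* The Atmel AT45DB0x1B parts handled above have 264-byte pages that sit
 * on power-of-two address boundaries (1 << ATMEL_AT45DB0X1B_PAGE_POS), so
 * a linear NVRAM offset must be split into a page index and an in-page
 * offset. As a worked example, assuming a page position shift of 9 and
 * nvram_pagesize == 264: addr 1000 is page 3, offset 208, which maps to
 * (3 << 9) + 208 = 0x6d0.
 */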
3258
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261 if (tg3_flag(tp, NVRAM) &&
3262 tg3_flag(tp, NVRAM_BUFFERED) &&
3263 tg3_flag(tp, FLASH) &&
3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266
3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 tp->nvram_pagesize) +
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270
3271 return addr;
3272 }
3273
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275 * the byteswapping settings for all other register accesses.
3276 * tg3 devices are BE devices, so on a BE machine, the data
3277 * returned will be exactly as it is seen in NVRAM. On a LE
3278 * machine, the 32-bit value will be byteswapped.
3279 */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282 int ret;
3283
3284 if (!tg3_flag(tp, NVRAM))
3285 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286
3287 offset = tg3_nvram_phys_addr(tp, offset);
3288
3289 if (offset > NVRAM_ADDR_MSK)
3290 return -EINVAL;
3291
3292 ret = tg3_nvram_lock(tp);
3293 if (ret)
3294 return ret;
3295
3296 tg3_enable_nvram_access(tp);
3297
3298 tw32(NVRAM_ADDR, offset);
3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301
3302 if (ret == 0)
3303 *val = tr32(NVRAM_RDDATA);
3304
3305 tg3_disable_nvram_access(tp);
3306
3307 tg3_nvram_unlock(tp);
3308
3309 return ret;
3310 }
3311
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315 u32 v;
3316 int res = tg3_nvram_read(tp, offset, &v);
3317 if (!res)
3318 *val = cpu_to_be32(v);
3319 return res;
3320 }
3321
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 u32 offset, u32 len, u8 *buf)
3324 {
3325 int i, j, rc = 0;
3326 u32 val;
3327
3328 for (i = 0; i < len; i += 4) {
3329 u32 addr;
3330 __be32 data;
3331
3332 addr = offset + i;
3333
3334 memcpy(&data, buf + i, 4);
3335
3336 /*
3337 * The SEEPROM interface expects the data to always be opposite
3338 * the native endian format. We accomplish this by reversing
3339 * all the operations that would have been performed on the
3340 * data from a call to tg3_nvram_read_be32().
3341 */
3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343
3344 val = tr32(GRC_EEPROM_ADDR);
3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346
3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 EEPROM_ADDR_READ);
3349 tw32(GRC_EEPROM_ADDR, val |
3350 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 (addr & EEPROM_ADDR_ADDR_MASK) |
3352 EEPROM_ADDR_START |
3353 EEPROM_ADDR_WRITE);
3354
3355 for (j = 0; j < 1000; j++) {
3356 val = tr32(GRC_EEPROM_ADDR);
3357
3358 if (val & EEPROM_ADDR_COMPLETE)
3359 break;
3360 msleep(1);
3361 }
3362 if (!(val & EEPROM_ADDR_COMPLETE)) {
3363 rc = -EBUSY;
3364 break;
3365 }
3366 }
3367
3368 return rc;
3369 }
3370
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373 u8 *buf)
3374 {
3375 int ret = 0;
3376 u32 pagesize = tp->nvram_pagesize;
3377 u32 pagemask = pagesize - 1;
3378 u32 nvram_cmd;
3379 u8 *tmp;
3380
3381 tmp = kmalloc(pagesize, GFP_KERNEL);
3382 if (tmp == NULL)
3383 return -ENOMEM;
3384
3385 while (len) {
3386 int j;
3387 u32 phy_addr, page_off, size;
3388
3389 phy_addr = offset & ~pagemask;
3390
3391 for (j = 0; j < pagesize; j += 4) {
3392 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 (__be32 *) (tmp + j));
3394 if (ret)
3395 break;
3396 }
3397 if (ret)
3398 break;
3399
3400 page_off = offset & pagemask;
3401 size = pagesize;
3402 if (len < size)
3403 size = len;
3404
3405 len -= size;
3406
3407 memcpy(tmp + page_off, buf, size);
3408
3409 offset = offset + (pagesize - page_off);
3410
3411 tg3_enable_nvram_access(tp);
3412
3413 /*
3414 * Before we can erase the flash page, we need
3415 * to issue a special "write enable" command.
3416 */
3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 break;
3421
3422 /* Erase the target page */
3423 tw32(NVRAM_ADDR, phy_addr);
3424
3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427
3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429 break;
3430
3431 /* Issue another write enable to start the write. */
3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 break;
3436
3437 for (j = 0; j < pagesize; j += 4) {
3438 __be32 data;
3439
3440 data = *((__be32 *) (tmp + j));
3441
3442 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443
3444 tw32(NVRAM_ADDR, phy_addr + j);
3445
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447 NVRAM_CMD_WR;
3448
3449 if (j == 0)
3450 nvram_cmd |= NVRAM_CMD_FIRST;
3451 else if (j == (pagesize - 4))
3452 nvram_cmd |= NVRAM_CMD_LAST;
3453
3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455 if (ret)
3456 break;
3457 }
3458 if (ret)
3459 break;
3460 }
3461
3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 tg3_nvram_exec_cmd(tp, nvram_cmd);
3464
3465 kfree(tmp);
3466
3467 return ret;
3468 }
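/* Unbuffered flash writes above follow the classic page read-modify-write
 * recipe: read the whole target page into a scratch buffer, overlay the
 * caller's data, issue a write-enable plus page erase, then re-enable
 * writes and stream the page back out a word at a time, framing the burst
 * with NVRAM_CMD_FIRST on the first word and NVRAM_CMD_LAST on the last.
 */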
3469
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472 u8 *buf)
3473 {
3474 int i, ret = 0;
3475
3476 for (i = 0; i < len; i += 4, offset += 4) {
3477 u32 page_off, phy_addr, nvram_cmd;
3478 __be32 data;
3479
3480 memcpy(&data, buf + i, 4);
3481 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482
3483 page_off = offset % tp->nvram_pagesize;
3484
3485 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486
3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488
3489 if (page_off == 0 || i == 0)
3490 nvram_cmd |= NVRAM_CMD_FIRST;
3491 if (page_off == (tp->nvram_pagesize - 4))
3492 nvram_cmd |= NVRAM_CMD_LAST;
3493
3494 if (i == (len - 4))
3495 nvram_cmd |= NVRAM_CMD_LAST;
3496
3497 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 !tg3_flag(tp, FLASH) ||
3499 !tg3_flag(tp, 57765_PLUS))
3500 tw32(NVRAM_ADDR, phy_addr);
3501
3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 !tg3_flag(tp, 5755_PLUS) &&
3504 (tp->nvram_jedecnum == JEDEC_ST) &&
3505 (nvram_cmd & NVRAM_CMD_FIRST)) {
3506 u32 cmd;
3507
3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 ret = tg3_nvram_exec_cmd(tp, cmd);
3510 if (ret)
3511 break;
3512 }
3513 if (!tg3_flag(tp, FLASH)) {
3514 /* We always do complete word writes to eeprom. */
3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516 }
3517
3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519 if (ret)
3520 break;
3521 }
3522 return ret;
3523 }
3524
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528 int ret;
3529
3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533 udelay(40);
3534 }
3535
3536 if (!tg3_flag(tp, NVRAM)) {
3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538 } else {
3539 u32 grc_mode;
3540
3541 ret = tg3_nvram_lock(tp);
3542 if (ret)
3543 return ret;
3544
3545 tg3_enable_nvram_access(tp);
3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 tw32(NVRAM_WRITE1, 0x406);
3548
3549 grc_mode = tr32(GRC_MODE);
3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551
3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554 buf);
3555 } else {
3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557 buf);
3558 }
3559
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562
3563 tg3_disable_nvram_access(tp);
3564 tg3_nvram_unlock(tp);
3565 }
3566
3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569 udelay(40);
3570 }
3571
3572 return ret;
3573 }
3574
3575 #define RX_CPU_SCRATCH_BASE 0x30000
3576 #define RX_CPU_SCRATCH_SIZE 0x04000
3577 #define TX_CPU_SCRATCH_BASE 0x34000
3578 #define TX_CPU_SCRATCH_SIZE 0x04000
3579
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 int i;
3584 const int iters = 10000;
3585
3586 for (i = 0; i < iters; i++) {
3587 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 break;
3591 if (pci_channel_offline(tp->pdev))
3592 return -EBUSY;
3593 }
3594
3595 return (i == iters) ? -EBUSY : 0;
3596 }
3597
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3605 udelay(10);
3606
3607 return rc;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613 return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626 tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632 int rc;
3633
3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635
3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638
3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640 return 0;
3641 }
3642 if (cpu_base == RX_CPU_BASE) {
3643 rc = tg3_rxcpu_pause(tp);
3644 } else {
3645 /*
3646 * There is only an Rx CPU for the 5750 derivative in the
3647 * BCM4785.
3648 */
3649 if (tg3_flag(tp, IS_SSB_CORE))
3650 return 0;
3651
3652 rc = tg3_txcpu_pause(tp);
3653 }
3654
3655 if (rc) {
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658 return -ENODEV;
3659 }
3660
3661 /* Clear firmware's nvram arbitration. */
3662 if (tg3_flag(tp, NVRAM))
3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664 return 0;
3665 }
3666
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670 int fw_len;
3671
3672 /* Non fragmented firmware have one firmware header followed by a
3673 * contiguous chunk of data to be written. The length field in that
3674 * header is not the length of data to be written but the complete
3675 * length of the bss. The data length is determined based on
3676 * tp->fw->size minus headers.
3677 *
3678 * Fragmented firmware have a main header followed by multiple
3679 * fragments. Each fragment is identical to non fragmented firmware
3680 * with a firmware header followed by a contiguous chunk of data. In
3681 * the main header, the length field is unused and set to 0xffffffff.
3682 * In each fragment header the length is the entire size of that
3683 * fragment i.e. fragment data + header length. Data length is
3684 * therefore length field in the header minus TG3_FW_HDR_LEN.
3685 */
3686 if (tp->fw_len == 0xffffffff)
3687 fw_len = be32_to_cpu(fw_hdr->len);
3688 else
3689 fw_len = tp->fw->size;
3690
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
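/* A worked example of the two layouts, assuming TG3_FW_HDR_LEN == 12
 * (three be32 fields: version, base_addr, len): a non-fragmented blob of
 * tp->fw->size == 2060 bytes carries (2060 - 12) / 4 = 512 data words no
 * matter what bss length its header advertises, while a fragment whose
 * header length reads 524 carries (524 - 12) / 4 = 128 words.
 */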
3693
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699 int err, i;
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3702
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 netdev_err(tp->dev,
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3706 __func__);
3707 return -EINVAL;
3708 }
3709
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3712 else
3713 write_op = tg3_write_indirect_reg32;
3714
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3718 */
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3721 if (!lock_err)
3722 tg3_nvram_unlock(tp);
3723 if (err)
3724 goto out;
3725
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 } else {
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment
3734 */
3735 total_len -= TG3_FW_HDR_LEN;
3736 fw_hdr++;
3737 }
3738
3739 do {
3740 __be32 *fw_data = (__be32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 (i * sizeof(u32)),
3745 be32_to_cpu(fw_data[i]));
3746
3747 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3753
3754 err = 0;
3755
3756 out:
3757 return err;
3758 }
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763 int i;
3764 const int iters = 5;
3765
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3768
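	/* Confirm the PC write took effect; if not, re-halt the CPU and
	 * retry the write a few more times.
	 */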
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3771 break;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3775 udelay(1000);
3776 }
3777
3778 return (i == iters) ? -EBUSY : 0;
3779 }
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784 const struct tg3_firmware_hdr *fw_hdr;
3785 int err;
3786
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789 	/* Firmware blob starts with version numbers, followed by
3790 	 * start address and length. The length field holds the complete
3791 	 * length: length = end_address_of_bss - start_address_of_text.
3792 	 * The remainder is the blob to be loaded contiguously
3793 	 * from the start address. */
3794
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 fw_hdr);
3798 if (err)
3799 return err;
3800
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 fw_hdr);
3804 if (err)
3805 return err;
3806
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3810 if (err) {
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3815 return -ENODEV;
3816 }
3817
3818 tg3_rxcpu_resume(tp);
3819
3820 return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825 const int iters = 1000;
3826 int i;
3827 u32 val;
3828
3829 	/* Wait for the boot code to complete initialization and enter its
3830 	 * service loop. It is then safe to download service patches.
3831 	 */
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834 break;
3835
3836 udelay(10);
3837 }
3838
3839 if (i == iters) {
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841 return -EBUSY;
3842 }
3843
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 if (val & 0xff) {
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3848 return -EEXIST;
3849 }
3850
3851 return 0;
3852 }
3853
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857 struct tg3_firmware_hdr *fw_hdr;
3858
3859 if (!tg3_flag(tp, NO_NVRAM))
3860 return;
3861
3862 if (tg3_validate_rxcpu_state(tp))
3863 return;
3864
3865 if (!tp->fw)
3866 return;
3867
3868 	/* This firmware blob has a different format from older firmware
3869 	 * releases, as described below. The main difference is that we have
3870 	 * fragmented data to be written to non-contiguous locations.
3871 	 *
3872 	 * In the beginning we have a firmware header identical to other
3873 	 * firmware, consisting of version, base addr and length. The length
3874 	 * here is unused and set to 0xffffffff.
3875 	 *
3876 	 * This is followed by a series of firmware fragments which are
3877 	 * individually identical to previous firmware, i.e. each has a
3878 	 * firmware header followed by the data for that fragment. The
3879 	 * version field of the individual fragment header is unused.
3880 	 */
3881
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884 return;
3885
3886 if (tg3_rxcpu_pause(tp))
3887 return;
3888
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892 tg3_rxcpu_resume(tp);
3893 }
3894
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900 int err;
3901
3902 if (!tg3_flag(tp, FW_TSO))
3903 return 0;
3904
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907 	/* Firmware blob starts with version numbers, followed by
3908 	 * start address and length. The length field holds the complete
3909 	 * length: length = end_address_of_bss - start_address_of_text.
3910 	 * The remainder is the blob to be loaded contiguously
3911 	 * from the start address. */
3912
3913 cpu_scratch_size = tp->fw_len;
3914
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 } else {
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922 }
3923
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3926 fw_hdr);
3927 if (err)
3928 return err;
3929
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3933 if (err) {
3934 netdev_err(tp->dev,
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3938 return -ENODEV;
3939 }
3940
3941 tg3_resume_cpu(tp, cpu_base);
3942 return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947 int index)
3948 {
3949 u32 addr_high, addr_low;
3950
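	/* The station address is split across a register pair: the top
	 * two bytes go in the HIGH register, the low four bytes in LOW.
	 */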
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
3954
3955 if (index < 4) {
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 } else {
3959 index -= 4;
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962 }
3963 }
3964
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968 u32 addr_high;
3969 int i;
3970
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3973 continue;
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 }
3976
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 }
3982
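	/* Seed the transmit backoff generator from the byte sum of the
	 * MAC address.
	 */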
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
3992
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995 /*
3996 * Make sure register accesses (indirect or otherwise) will function
3997 * correctly.
3998 */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005 int err;
4006
4007 tg3_enable_register_access(tp);
4008
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 if (!err) {
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4013 } else {
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4015 }
4016
4017 return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static void tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024 u32 misc_host_ctrl;
4025 bool device_should_wake, do_low_power;
4026
4027 tg3_enable_register_access(tp);
4028
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4040
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4047 u32 phyid;
4048
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4059
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 advertising);
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 advertising);
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 advertising);
4067
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 advertising);
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 advertising);
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 advertising);
4076 } else {
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 advertising);
4079 }
4080 }
4081
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4084
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4092 }
4093 }
4094 } else {
4095 do_low_power = true;
4096
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4102 }
4103
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 u32 val;
4106
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 int i;
4111 u32 val;
4112
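		/* Poll for up to 200 ms for the firmware to update its
		 * status mailbox before continuing with the power-down.
		 */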
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 break;
4117 msleep(1);
4118 }
4119 }
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4123 WOL_DRV_WOL |
4124 WOL_SET_MAGIC_PKT);
4125
4126 if (device_should_wake) {
4127 u32 mac_mode;
4128
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 if (do_low_power &&
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 udelay(40);
4138 }
4139
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 } else
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 else
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 }
4160 } else {
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 }
4163
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4176
4177 tw32_f(MAC_MODE, mac_mode);
4178 udelay(100);
4179
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 udelay(10);
4182 }
4183
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 u32 base_val;
4188
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4192
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 /* do nothing */
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4201
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4206 CLOCK_CTRL_ALTCLK);
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 } else {
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 }
4215
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 40);
4218
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 40);
4221
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4223 u32 newbits3;
4224
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4230 } else {
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 }
4233
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4236 }
4237 }
4238
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4241
4242 tg3_frob_aux_power(tp, true);
4243
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4249
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 tw32(0x7d00, val);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 int err;
4254
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 if (!err)
4258 tg3_nvram_unlock(tp);
4259 }
4260 }
4261
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266 return;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4279 *speed = SPEED_10;
4280 *duplex = DUPLEX_HALF;
4281 break;
4282
4283 case MII_TG3_AUX_STAT_10FULL:
4284 *speed = SPEED_10;
4285 *duplex = DUPLEX_FULL;
4286 break;
4287
4288 case MII_TG3_AUX_STAT_100HALF:
4289 *speed = SPEED_100;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_100FULL:
4294 *speed = SPEED_100;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 default:
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 SPEED_10;
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 DUPLEX_HALF;
4314 break;
4315 }
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4318 break;
4319 }
4320 }
4321
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 int err = 0;
4325 u32 val, new_adv;
4326
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 if (err)
4333 goto done;
4334
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (err)
4344 goto done;
4345 }
4346
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 goto done;
4349
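	/* Clear the LPI-enable bit while the EEE advertisement is
	 * being updated.
	 */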
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 if (!err) {
4355 u32 err2;
4356
4357 if (!tp->eee.eee_enabled)
4358 val = 0;
4359 else
4360 val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4361
4362 mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4363 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4364 if (err)
4365 val = 0;
4366
4367 switch (tg3_asic_rev(tp)) {
4368 case ASIC_REV_5717:
4369 case ASIC_REV_57765:
4370 case ASIC_REV_57766:
4371 case ASIC_REV_5719:
4372 /* If we advertised any eee advertisements above... */
4373 if (val)
4374 val = MII_TG3_DSP_TAP26_ALNOKO |
4375 MII_TG3_DSP_TAP26_RMRXSTO |
4376 MII_TG3_DSP_TAP26_OPCSINPT;
4377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4378 fallthrough;
4379 case ASIC_REV_5720:
4380 case ASIC_REV_5762:
4381 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4382 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4383 MII_TG3_DSP_CH34TP2_HIBW01);
4384 }
4385
4386 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4387 if (!err)
4388 err = err2;
4389 }
4390
4391 done:
4392 return err;
4393 }
4394
4395 static void tg3_phy_copper_begin(struct tg3 *tp)
4396 {
4397 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4398 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4399 u32 adv, fc;
4400
4401 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4402 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4403 adv = ADVERTISED_10baseT_Half |
4404 ADVERTISED_10baseT_Full;
4405 if (tg3_flag(tp, WOL_SPEED_100MB))
4406 adv |= ADVERTISED_100baseT_Half |
4407 ADVERTISED_100baseT_Full;
4408 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4409 if (!(tp->phy_flags &
4410 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4411 adv |= ADVERTISED_1000baseT_Half;
4412 adv |= ADVERTISED_1000baseT_Full;
4413 }
4414
4415 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4416 } else {
4417 adv = tp->link_config.advertising;
4418 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4419 adv &= ~(ADVERTISED_1000baseT_Half |
4420 ADVERTISED_1000baseT_Full);
4421
4422 fc = tp->link_config.flowctrl;
4423 }
4424
4425 tg3_phy_autoneg_cfg(tp, adv, fc);
4426
4427 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4428 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4429 /* Normally during power down we want to autonegotiate
4430 * the lowest possible speed for WOL. However, to avoid
4431 * link flap, we leave it untouched.
4432 */
4433 return;
4434 }
4435
4436 tg3_writephy(tp, MII_BMCR,
4437 BMCR_ANENABLE | BMCR_ANRESTART);
4438 } else {
4439 int i;
4440 u32 bmcr, orig_bmcr;
4441
4442 tp->link_config.active_speed = tp->link_config.speed;
4443 tp->link_config.active_duplex = tp->link_config.duplex;
4444
4445 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4446 /* With autoneg disabled, 5715 only links up when the
4447 * advertisement register has the configured speed
4448 * enabled.
4449 */
4450 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4451 }
4452
4453 bmcr = 0;
4454 switch (tp->link_config.speed) {
4455 default:
4456 case SPEED_10:
4457 break;
4458
4459 case SPEED_100:
4460 bmcr |= BMCR_SPEED100;
4461 break;
4462
4463 case SPEED_1000:
4464 bmcr |= BMCR_SPEED1000;
4465 break;
4466 }
4467
4468 if (tp->link_config.duplex == DUPLEX_FULL)
4469 bmcr |= BMCR_FULLDPLX;
4470
4471 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4472 (bmcr != orig_bmcr)) {
4473 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
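			/* Force the link down via loopback, then poll BMSR
			 * for up to ~15 ms until link-down is reflected.
			 */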
4474 for (i = 0; i < 1500; i++) {
4475 u32 tmp;
4476
4477 udelay(10);
4478 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4479 tg3_readphy(tp, MII_BMSR, &tmp))
4480 continue;
4481 if (!(tmp & BMSR_LSTATUS)) {
4482 udelay(40);
4483 break;
4484 }
4485 }
4486 tg3_writephy(tp, MII_BMCR, bmcr);
4487 udelay(40);
4488 }
4489 }
4490 }
4491
4492 static int tg3_phy_pull_config(struct tg3 *tp)
4493 {
4494 int err;
4495 u32 val;
4496
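	/* Populate link_config from the current PHY register contents. */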
4497 err = tg3_readphy(tp, MII_BMCR, &val);
4498 if (err)
4499 goto done;
4500
4501 if (!(val & BMCR_ANENABLE)) {
4502 tp->link_config.autoneg = AUTONEG_DISABLE;
4503 tp->link_config.advertising = 0;
4504 tg3_flag_clear(tp, PAUSE_AUTONEG);
4505
4506 err = -EIO;
4507
4508 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4509 case 0:
4510 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511 goto done;
4512
4513 tp->link_config.speed = SPEED_10;
4514 break;
4515 case BMCR_SPEED100:
4516 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517 goto done;
4518
4519 tp->link_config.speed = SPEED_100;
4520 break;
4521 case BMCR_SPEED1000:
4522 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4523 tp->link_config.speed = SPEED_1000;
4524 break;
4525 }
4526 fallthrough;
4527 default:
4528 goto done;
4529 }
4530
4531 if (val & BMCR_FULLDPLX)
4532 tp->link_config.duplex = DUPLEX_FULL;
4533 else
4534 tp->link_config.duplex = DUPLEX_HALF;
4535
4536 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4537
4538 err = 0;
4539 goto done;
4540 }
4541
4542 tp->link_config.autoneg = AUTONEG_ENABLE;
4543 tp->link_config.advertising = ADVERTISED_Autoneg;
4544 tg3_flag_set(tp, PAUSE_AUTONEG);
4545
4546 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4547 u32 adv;
4548
4549 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4550 if (err)
4551 goto done;
4552
4553 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4554 tp->link_config.advertising |= adv | ADVERTISED_TP;
4555
4556 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4557 } else {
4558 tp->link_config.advertising |= ADVERTISED_FIBRE;
4559 }
4560
4561 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4562 u32 adv;
4563
4564 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4565 err = tg3_readphy(tp, MII_CTRL1000, &val);
4566 if (err)
4567 goto done;
4568
4569 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4570 } else {
4571 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572 if (err)
4573 goto done;
4574
4575 adv = tg3_decode_flowctrl_1000X(val);
4576 tp->link_config.flowctrl = adv;
4577
4578 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4579 adv = mii_adv_to_ethtool_adv_x(val);
4580 }
4581
4582 tp->link_config.advertising |= adv;
4583 }
4584
4585 done:
4586 return err;
4587 }
4588
4589 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4590 {
4591 int err;
4592
4593 /* Turn off tap power management. */
4594 /* Set Extended packet length bit */
4595 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4596
4597 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4598 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4599 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4600 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4601 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4602
4603 udelay(40);
4604
4605 return err;
4606 }
4607
4608 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4609 {
4610 struct ethtool_keee eee = {};
4611
4612 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4613 return true;
4614
4615 tg3_eee_pull_config(tp, &eee);
4616
4617 if (tp->eee.eee_enabled) {
4618 if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4619 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4620 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4621 return false;
4622 } else {
4623 /* EEE is disabled but we're advertising */
4624 if (!linkmode_empty(eee.advertised))
4625 return false;
4626 }
4627
4628 return true;
4629 }
4630
4631 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4632 {
4633 u32 advmsk, tgtadv, advertising;
4634
4635 advertising = tp->link_config.advertising;
4636 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4637
4638 advmsk = ADVERTISE_ALL;
4639 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4640 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4641 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4642 }
4643
4644 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4645 return false;
4646
4647 if ((*lcladv & advmsk) != tgtadv)
4648 return false;
4649
4650 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4651 u32 tg3_ctrl;
4652
4653 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4654
4655 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4656 return false;
4657
4658 if (tgtadv &&
4659 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4660 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4661 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4662 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4663 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4664 } else {
4665 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4666 }
4667
4668 if (tg3_ctrl != tgtadv)
4669 return false;
4670 }
4671
4672 return true;
4673 }
4674
4675 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4676 {
4677 u32 lpeth = 0;
4678
4679 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4680 u32 val;
4681
4682 if (tg3_readphy(tp, MII_STAT1000, &val))
4683 return false;
4684
4685 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4686 }
4687
4688 if (tg3_readphy(tp, MII_LPA, rmtadv))
4689 return false;
4690
4691 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4692 tp->link_config.rmt_adv = lpeth;
4693
4694 return true;
4695 }
4696
4697 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4698 {
4699 if (curr_link_up != tp->link_up) {
4700 if (curr_link_up) {
4701 netif_carrier_on(tp->dev);
4702 } else {
4703 netif_carrier_off(tp->dev);
4704 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4705 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706 }
4707
4708 tg3_link_report(tp);
4709 return true;
4710 }
4711
4712 return false;
4713 }
4714
4715 static void tg3_clear_mac_status(struct tg3 *tp)
4716 {
4717 tw32(MAC_EVENT, 0);
4718
4719 tw32_f(MAC_STATUS,
4720 MAC_STATUS_SYNC_CHANGED |
4721 MAC_STATUS_CFG_CHANGED |
4722 MAC_STATUS_MI_COMPLETION |
4723 MAC_STATUS_LNKSTATE_CHANGED);
4724 udelay(40);
4725 }
4726
4727 static void tg3_setup_eee(struct tg3 *tp)
4728 {
4729 u32 val;
4730
4731 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4732 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4733 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4734 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4735
4736 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4737
4738 tw32_f(TG3_CPMU_EEE_CTRL,
4739 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4740
4741 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4742 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4743 TG3_CPMU_EEEMD_LPI_IN_RX |
4744 TG3_CPMU_EEEMD_EEE_ENABLE;
4745
4746 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4747 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4748
4749 if (tg3_flag(tp, ENABLE_APE))
4750 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4751
4752 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4753
4754 tw32_f(TG3_CPMU_EEE_DBTMR1,
4755 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4756 (tp->eee.tx_lpi_timer & 0xffff));
4757
4758 tw32_f(TG3_CPMU_EEE_DBTMR2,
4759 TG3_CPMU_DBTMR2_APE_TX_2047US |
4760 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4761 }
4762
4763 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4764 {
4765 bool current_link_up;
4766 u32 bmsr, val;
4767 u32 lcl_adv, rmt_adv;
4768 u32 current_speed;
4769 u8 current_duplex;
4770 int i, err;
4771
4772 tg3_clear_mac_status(tp);
4773
4774 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4775 tw32_f(MAC_MI_MODE,
4776 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4777 udelay(80);
4778 }
4779
4780 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4781
4782 /* Some third-party PHYs need to be reset on link going
4783 * down.
4784 */
4785 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4786 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4787 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4788 tp->link_up) {
4789 tg3_readphy(tp, MII_BMSR, &bmsr);
4790 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4791 !(bmsr & BMSR_LSTATUS))
4792 force_reset = true;
4793 }
4794 if (force_reset)
4795 tg3_phy_reset(tp);
4796
4797 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4798 tg3_readphy(tp, MII_BMSR, &bmsr);
4799 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4800 !tg3_flag(tp, INIT_COMPLETE))
4801 bmsr = 0;
4802
4803 if (!(bmsr & BMSR_LSTATUS)) {
4804 err = tg3_init_5401phy_dsp(tp);
4805 if (err)
4806 return err;
4807
4808 tg3_readphy(tp, MII_BMSR, &bmsr);
4809 for (i = 0; i < 1000; i++) {
4810 udelay(10);
4811 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 (bmsr & BMSR_LSTATUS)) {
4813 udelay(40);
4814 break;
4815 }
4816 }
4817
4818 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4819 TG3_PHY_REV_BCM5401_B0 &&
4820 !(bmsr & BMSR_LSTATUS) &&
4821 tp->link_config.active_speed == SPEED_1000) {
4822 err = tg3_phy_reset(tp);
4823 if (!err)
4824 err = tg3_init_5401phy_dsp(tp);
4825 if (err)
4826 return err;
4827 }
4828 }
4829 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4830 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4831 /* 5701 {A0,B0} CRC bug workaround */
4832 tg3_writephy(tp, 0x15, 0x0a75);
4833 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4835 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4836 }
4837
4838 /* Clear pending interrupts... */
4839 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4840 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841
4842 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4843 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4844 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4845 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4846
4847 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4848 tg3_asic_rev(tp) == ASIC_REV_5701) {
4849 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4850 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4851 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4852 else
4853 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4854 }
4855
4856 current_link_up = false;
4857 current_speed = SPEED_UNKNOWN;
4858 current_duplex = DUPLEX_UNKNOWN;
4859 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4860 tp->link_config.rmt_adv = 0;
4861
4862 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4863 err = tg3_phy_auxctl_read(tp,
4864 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4865 &val);
4866 if (!err && !(val & (1 << 10))) {
4867 tg3_phy_auxctl_write(tp,
4868 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4869 val | (1 << 10));
4870 goto relink;
4871 }
4872 }
4873
4874 bmsr = 0;
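	/* BMSR latches link failures; read it twice so the second read
	 * returns the current link state.
	 */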
4875 for (i = 0; i < 100; i++) {
4876 tg3_readphy(tp, MII_BMSR, &bmsr);
4877 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4878 (bmsr & BMSR_LSTATUS))
4879 break;
4880 udelay(40);
4881 }
4882
4883 if (bmsr & BMSR_LSTATUS) {
4884 u32 aux_stat, bmcr;
4885
4886 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4887 for (i = 0; i < 2000; i++) {
4888 udelay(10);
4889 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4890 aux_stat)
4891 break;
4892 }
4893
4894 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4895 				     &current_speed,
4896 				     &current_duplex);
4897
4898 bmcr = 0;
4899 for (i = 0; i < 200; i++) {
4900 tg3_readphy(tp, MII_BMCR, &bmcr);
4901 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4902 continue;
4903 if (bmcr && bmcr != 0x7fff)
4904 break;
4905 udelay(10);
4906 }
4907
4908 lcl_adv = 0;
4909 rmt_adv = 0;
4910
4911 tp->link_config.active_speed = current_speed;
4912 tp->link_config.active_duplex = current_duplex;
4913
4914 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4915 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4916
4917 if ((bmcr & BMCR_ANENABLE) &&
4918 eee_config_ok &&
4919 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4920 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4921 current_link_up = true;
4922
4923 /* EEE settings changes take effect only after a phy
4924 * reset. If we have skipped a reset due to Link Flap
4925 * Avoidance being enabled, do it now.
4926 */
4927 if (!eee_config_ok &&
4928 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4929 !force_reset) {
4930 tg3_setup_eee(tp);
4931 tg3_phy_reset(tp);
4932 }
4933 } else {
4934 if (!(bmcr & BMCR_ANENABLE) &&
4935 tp->link_config.speed == current_speed &&
4936 tp->link_config.duplex == current_duplex) {
4937 current_link_up = true;
4938 }
4939 }
4940
4941 if (current_link_up &&
4942 tp->link_config.active_duplex == DUPLEX_FULL) {
4943 u32 reg, bit;
4944
4945 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4946 reg = MII_TG3_FET_GEN_STAT;
4947 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4948 } else {
4949 reg = MII_TG3_EXT_STAT;
4950 bit = MII_TG3_EXT_STAT_MDIX;
4951 }
4952
4953 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4954 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4955
4956 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4957 }
4958 }
4959
4960 relink:
4961 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4962 tg3_phy_copper_begin(tp);
4963
4964 if (tg3_flag(tp, ROBOSWITCH)) {
4965 current_link_up = true;
4966 			/* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4967 current_speed = SPEED_1000;
4968 current_duplex = DUPLEX_FULL;
4969 tp->link_config.active_speed = current_speed;
4970 tp->link_config.active_duplex = current_duplex;
4971 }
4972
4973 tg3_readphy(tp, MII_BMSR, &bmsr);
4974 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4975 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4976 current_link_up = true;
4977 }
4978
4979 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4980 if (current_link_up) {
4981 if (tp->link_config.active_speed == SPEED_100 ||
4982 tp->link_config.active_speed == SPEED_10)
4983 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4984 else
4985 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4987 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4988 else
4989 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4990
4991 /* In order for the 5750 core in BCM4785 chip to work properly
4992 * in RGMII mode, the Led Control Register must be set up.
4993 */
4994 if (tg3_flag(tp, RGMII_MODE)) {
4995 u32 led_ctrl = tr32(MAC_LED_CTRL);
4996 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4997
4998 if (tp->link_config.active_speed == SPEED_10)
4999 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5000 else if (tp->link_config.active_speed == SPEED_100)
5001 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5002 LED_CTRL_100MBPS_ON);
5003 else if (tp->link_config.active_speed == SPEED_1000)
5004 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005 LED_CTRL_1000MBPS_ON);
5006
5007 tw32(MAC_LED_CTRL, led_ctrl);
5008 udelay(40);
5009 }
5010
5011 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012 if (tp->link_config.active_duplex == DUPLEX_HALF)
5013 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014
5015 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5016 if (current_link_up &&
5017 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5018 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5019 else
5020 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5021 }
5022
5023 /* ??? Without this setting Netgear GA302T PHY does not
5024 * ??? send/receive packets...
5025 */
5026 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5027 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5028 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5029 tw32_f(MAC_MI_MODE, tp->mi_mode);
5030 udelay(80);
5031 }
5032
5033 tw32_f(MAC_MODE, tp->mac_mode);
5034 udelay(40);
5035
5036 tg3_phy_eee_adjust(tp, current_link_up);
5037
5038 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5039 /* Polled via timer. */
5040 tw32_f(MAC_EVENT, 0);
5041 } else {
5042 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5043 }
5044 udelay(40);
5045
5046 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5047 current_link_up &&
5048 tp->link_config.active_speed == SPEED_1000 &&
5049 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5050 udelay(120);
5051 tw32_f(MAC_STATUS,
5052 (MAC_STATUS_SYNC_CHANGED |
5053 MAC_STATUS_CFG_CHANGED));
5054 udelay(40);
5055 tg3_write_mem(tp,
5056 NIC_SRAM_FIRMWARE_MBOX,
5057 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5058 }
5059
5060 /* Prevent send BD corruption. */
5061 if (tg3_flag(tp, CLKREQ_BUG)) {
5062 if (tp->link_config.active_speed == SPEED_100 ||
5063 tp->link_config.active_speed == SPEED_10)
5064 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5065 PCI_EXP_LNKCTL_CLKREQ_EN);
5066 else
5067 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5068 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 }
5070
5071 tg3_test_and_report_link_chg(tp, current_link_up);
5072
5073 return 0;
5074 }
5075
5076 struct tg3_fiber_aneginfo {
5077 int state;
5078 #define ANEG_STATE_UNKNOWN 0
5079 #define ANEG_STATE_AN_ENABLE 1
5080 #define ANEG_STATE_RESTART_INIT 2
5081 #define ANEG_STATE_RESTART 3
5082 #define ANEG_STATE_DISABLE_LINK_OK 4
5083 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5084 #define ANEG_STATE_ABILITY_DETECT 6
5085 #define ANEG_STATE_ACK_DETECT_INIT 7
5086 #define ANEG_STATE_ACK_DETECT 8
5087 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5088 #define ANEG_STATE_COMPLETE_ACK 10
5089 #define ANEG_STATE_IDLE_DETECT_INIT 11
5090 #define ANEG_STATE_IDLE_DETECT 12
5091 #define ANEG_STATE_LINK_OK 13
5092 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5093 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5094
5095 u32 flags;
5096 #define MR_AN_ENABLE 0x00000001
5097 #define MR_RESTART_AN 0x00000002
5098 #define MR_AN_COMPLETE 0x00000004
5099 #define MR_PAGE_RX 0x00000008
5100 #define MR_NP_LOADED 0x00000010
5101 #define MR_TOGGLE_TX 0x00000020
5102 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5103 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5104 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5105 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5106 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5107 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5108 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5109 #define MR_TOGGLE_RX 0x00002000
5110 #define MR_NP_RX 0x00004000
5111
5112 #define MR_LINK_OK 0x80000000
5113
5114 unsigned long link_time, cur_time;
5115
5116 u32 ability_match_cfg;
5117 int ability_match_count;
5118
5119 char ability_match, idle_match, ack_match;
5120
5121 u32 txconfig, rxconfig;
5122 #define ANEG_CFG_NP 0x00000080
5123 #define ANEG_CFG_ACK 0x00000040
5124 #define ANEG_CFG_RF2 0x00000020
5125 #define ANEG_CFG_RF1 0x00000010
5126 #define ANEG_CFG_PS2 0x00000001
5127 #define ANEG_CFG_PS1 0x00008000
5128 #define ANEG_CFG_HD 0x00004000
5129 #define ANEG_CFG_FD 0x00002000
5130 #define ANEG_CFG_INVAL 0x00001f06
5131
5132 };
5133 #define ANEG_OK 0
5134 #define ANEG_DONE 1
5135 #define ANEG_TIMER_ENAB 2
5136 #define ANEG_FAILED -1
5137
5138 #define ANEG_STATE_SETTLE_TIME 10000
5139
5140 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5141 struct tg3_fiber_aneginfo *ap)
5142 {
5143 u16 flowctrl;
5144 unsigned long delta;
5145 u32 rx_cfg_reg;
5146 int ret;
5147
5148 if (ap->state == ANEG_STATE_UNKNOWN) {
5149 ap->rxconfig = 0;
5150 ap->link_time = 0;
5151 ap->cur_time = 0;
5152 ap->ability_match_cfg = 0;
5153 ap->ability_match_count = 0;
5154 ap->ability_match = 0;
5155 ap->idle_match = 0;
5156 ap->ack_match = 0;
5157 }
5158 ap->cur_time++;
5159
5160 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5161 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5162
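		/* An ability match is declared only after the same config
		 * word has been received on two consecutive polls.
		 */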
5163 if (rx_cfg_reg != ap->ability_match_cfg) {
5164 ap->ability_match_cfg = rx_cfg_reg;
5165 ap->ability_match = 0;
5166 ap->ability_match_count = 0;
5167 } else {
5168 if (++ap->ability_match_count > 1) {
5169 ap->ability_match = 1;
5170 ap->ability_match_cfg = rx_cfg_reg;
5171 }
5172 }
5173 if (rx_cfg_reg & ANEG_CFG_ACK)
5174 ap->ack_match = 1;
5175 else
5176 ap->ack_match = 0;
5177
5178 ap->idle_match = 0;
5179 } else {
5180 ap->idle_match = 1;
5181 ap->ability_match_cfg = 0;
5182 ap->ability_match_count = 0;
5183 ap->ability_match = 0;
5184 ap->ack_match = 0;
5185
5186 rx_cfg_reg = 0;
5187 }
5188
5189 ap->rxconfig = rx_cfg_reg;
5190 ret = ANEG_OK;
5191
5192 switch (ap->state) {
5193 case ANEG_STATE_UNKNOWN:
5194 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5195 ap->state = ANEG_STATE_AN_ENABLE;
5196
5197 fallthrough;
5198 case ANEG_STATE_AN_ENABLE:
5199 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5200 if (ap->flags & MR_AN_ENABLE) {
5201 ap->link_time = 0;
5202 ap->cur_time = 0;
5203 ap->ability_match_cfg = 0;
5204 ap->ability_match_count = 0;
5205 ap->ability_match = 0;
5206 ap->idle_match = 0;
5207 ap->ack_match = 0;
5208
5209 ap->state = ANEG_STATE_RESTART_INIT;
5210 } else {
5211 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5212 }
5213 break;
5214
5215 case ANEG_STATE_RESTART_INIT:
5216 ap->link_time = ap->cur_time;
5217 ap->flags &= ~(MR_NP_LOADED);
5218 ap->txconfig = 0;
5219 tw32(MAC_TX_AUTO_NEG, 0);
5220 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5221 tw32_f(MAC_MODE, tp->mac_mode);
5222 udelay(40);
5223
5224 ret = ANEG_TIMER_ENAB;
5225 ap->state = ANEG_STATE_RESTART;
5226
5227 fallthrough;
5228 case ANEG_STATE_RESTART:
5229 delta = ap->cur_time - ap->link_time;
5230 if (delta > ANEG_STATE_SETTLE_TIME)
5231 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5232 else
5233 ret = ANEG_TIMER_ENAB;
5234 break;
5235
5236 case ANEG_STATE_DISABLE_LINK_OK:
5237 ret = ANEG_DONE;
5238 break;
5239
5240 case ANEG_STATE_ABILITY_DETECT_INIT:
5241 ap->flags &= ~(MR_TOGGLE_TX);
5242 ap->txconfig = ANEG_CFG_FD;
5243 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5244 if (flowctrl & ADVERTISE_1000XPAUSE)
5245 ap->txconfig |= ANEG_CFG_PS1;
5246 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5247 ap->txconfig |= ANEG_CFG_PS2;
5248 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5249 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5250 tw32_f(MAC_MODE, tp->mac_mode);
5251 udelay(40);
5252
5253 ap->state = ANEG_STATE_ABILITY_DETECT;
5254 break;
5255
5256 case ANEG_STATE_ABILITY_DETECT:
5257 if (ap->ability_match != 0 && ap->rxconfig != 0)
5258 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5259 break;
5260
5261 case ANEG_STATE_ACK_DETECT_INIT:
5262 ap->txconfig |= ANEG_CFG_ACK;
5263 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265 tw32_f(MAC_MODE, tp->mac_mode);
5266 udelay(40);
5267
5268 ap->state = ANEG_STATE_ACK_DETECT;
5269
5270 fallthrough;
5271 case ANEG_STATE_ACK_DETECT:
5272 if (ap->ack_match != 0) {
5273 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5274 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5275 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5276 } else {
5277 ap->state = ANEG_STATE_AN_ENABLE;
5278 }
5279 } else if (ap->ability_match != 0 &&
5280 ap->rxconfig == 0) {
5281 ap->state = ANEG_STATE_AN_ENABLE;
5282 }
5283 break;
5284
5285 case ANEG_STATE_COMPLETE_ACK_INIT:
5286 if (ap->rxconfig & ANEG_CFG_INVAL) {
5287 ret = ANEG_FAILED;
5288 break;
5289 }
5290 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5291 MR_LP_ADV_HALF_DUPLEX |
5292 MR_LP_ADV_SYM_PAUSE |
5293 MR_LP_ADV_ASYM_PAUSE |
5294 MR_LP_ADV_REMOTE_FAULT1 |
5295 MR_LP_ADV_REMOTE_FAULT2 |
5296 MR_LP_ADV_NEXT_PAGE |
5297 MR_TOGGLE_RX |
5298 MR_NP_RX);
5299 if (ap->rxconfig & ANEG_CFG_FD)
5300 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5301 if (ap->rxconfig & ANEG_CFG_HD)
5302 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5303 if (ap->rxconfig & ANEG_CFG_PS1)
5304 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5305 if (ap->rxconfig & ANEG_CFG_PS2)
5306 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5307 if (ap->rxconfig & ANEG_CFG_RF1)
5308 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5309 if (ap->rxconfig & ANEG_CFG_RF2)
5310 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5311 if (ap->rxconfig & ANEG_CFG_NP)
5312 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5313
5314 ap->link_time = ap->cur_time;
5315
5316 ap->flags ^= (MR_TOGGLE_TX);
5317 if (ap->rxconfig & 0x0008)
5318 ap->flags |= MR_TOGGLE_RX;
5319 if (ap->rxconfig & ANEG_CFG_NP)
5320 ap->flags |= MR_NP_RX;
5321 ap->flags |= MR_PAGE_RX;
5322
5323 ap->state = ANEG_STATE_COMPLETE_ACK;
5324 ret = ANEG_TIMER_ENAB;
5325 break;
5326
5327 case ANEG_STATE_COMPLETE_ACK:
5328 if (ap->ability_match != 0 &&
5329 ap->rxconfig == 0) {
5330 ap->state = ANEG_STATE_AN_ENABLE;
5331 break;
5332 }
5333 delta = ap->cur_time - ap->link_time;
5334 if (delta > ANEG_STATE_SETTLE_TIME) {
5335 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5336 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5337 } else {
5338 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5339 !(ap->flags & MR_NP_RX)) {
5340 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5341 } else {
5342 ret = ANEG_FAILED;
5343 }
5344 }
5345 }
5346 break;
5347
5348 case ANEG_STATE_IDLE_DETECT_INIT:
5349 ap->link_time = ap->cur_time;
5350 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5351 tw32_f(MAC_MODE, tp->mac_mode);
5352 udelay(40);
5353
5354 ap->state = ANEG_STATE_IDLE_DETECT;
5355 ret = ANEG_TIMER_ENAB;
5356 break;
5357
5358 case ANEG_STATE_IDLE_DETECT:
5359 if (ap->ability_match != 0 &&
5360 ap->rxconfig == 0) {
5361 ap->state = ANEG_STATE_AN_ENABLE;
5362 break;
5363 }
5364 delta = ap->cur_time - ap->link_time;
5365 if (delta > ANEG_STATE_SETTLE_TIME) {
5366 /* XXX another gem from the Broadcom driver :( */
5367 ap->state = ANEG_STATE_LINK_OK;
5368 }
5369 break;
5370
5371 case ANEG_STATE_LINK_OK:
5372 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5373 ret = ANEG_DONE;
5374 break;
5375
5376 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5377 /* ??? unimplemented */
5378 break;
5379
5380 case ANEG_STATE_NEXT_PAGE_WAIT:
5381 /* ??? unimplemented */
5382 break;
5383
5384 default:
5385 ret = ANEG_FAILED;
5386 break;
5387 }
5388
5389 return ret;
5390 }
5391
5392 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5393 {
5394 int res = 0;
5395 struct tg3_fiber_aneginfo aninfo;
5396 int status = ANEG_FAILED;
5397 unsigned int tick;
5398 u32 tmp;
5399
5400 tw32_f(MAC_TX_AUTO_NEG, 0);
5401
5402 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5403 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5404 udelay(40);
5405
5406 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5407 udelay(40);
5408
5409 memset(&aninfo, 0, sizeof(aninfo));
5410 aninfo.flags |= MR_AN_ENABLE;
5411 aninfo.state = ANEG_STATE_UNKNOWN;
5412 aninfo.cur_time = 0;
5413 tick = 0;
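	/* Step the software autoneg state machine once per microsecond,
	 * for at most ~195 ms.
	 */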
5414 while (++tick < 195000) {
5415 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5416 if (status == ANEG_DONE || status == ANEG_FAILED)
5417 break;
5418
5419 udelay(1);
5420 }
5421
5422 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5423 tw32_f(MAC_MODE, tp->mac_mode);
5424 udelay(40);
5425
5426 *txflags = aninfo.txconfig;
5427 *rxflags = aninfo.flags;
5428
5429 if (status == ANEG_DONE &&
5430 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5431 MR_LP_ADV_FULL_DUPLEX)))
5432 res = 1;
5433
5434 return res;
5435 }
5436
5437 static void tg3_init_bcm8002(struct tg3 *tp)
5438 {
5439 u32 mac_status = tr32(MAC_STATUS);
5440 int i;
5441
5442 	/* Reset when initializing for the first time, or when we have a link. */
5443 if (tg3_flag(tp, INIT_COMPLETE) &&
5444 !(mac_status & MAC_STATUS_PCS_SYNCED))
5445 return;
5446
5447 /* Set PLL lock range. */
5448 tg3_writephy(tp, 0x16, 0x8007);
5449
5450 /* SW reset */
5451 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5452
5453 /* Wait for reset to complete. */
5454 /* XXX schedule_timeout() ... */
5455 for (i = 0; i < 500; i++)
5456 udelay(10);
5457
5458 /* Config mode; select PMA/Ch 1 regs. */
5459 tg3_writephy(tp, 0x10, 0x8411);
5460
5461 /* Enable auto-lock and comdet, select txclk for tx. */
5462 tg3_writephy(tp, 0x11, 0x0a10);
5463
5464 tg3_writephy(tp, 0x18, 0x00a0);
5465 tg3_writephy(tp, 0x16, 0x41ff);
5466
5467 /* Assert and deassert POR. */
5468 tg3_writephy(tp, 0x13, 0x0400);
5469 udelay(40);
5470 tg3_writephy(tp, 0x13, 0x0000);
5471
5472 tg3_writephy(tp, 0x11, 0x0a50);
5473 udelay(40);
5474 tg3_writephy(tp, 0x11, 0x0a10);
5475
5476 /* Wait for signal to stabilize */
5477 /* XXX schedule_timeout() ... */
5478 for (i = 0; i < 15000; i++)
5479 udelay(10);
5480
5481 /* Deselect the channel register so we can read the PHYID
5482 * later.
5483 */
5484 tg3_writephy(tp, 0x10, 0x8011);
5485 }
5486
5487 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5488 {
5489 u16 flowctrl;
5490 bool current_link_up;
5491 u32 sg_dig_ctrl, sg_dig_status;
5492 u32 serdes_cfg, expected_sg_dig_ctrl;
5493 int workaround, port_a;
5494
5495 serdes_cfg = 0;
5496 workaround = 0;
5497 port_a = 1;
5498 current_link_up = false;
5499
5500 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5501 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5502 workaround = 1;
5503 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504 port_a = 0;
5505
5506 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5507 /* preserve bits 20-23 for voltage regulator */
5508 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509 }
5510
5511 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5512
5513 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5514 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5515 if (workaround) {
5516 u32 val = serdes_cfg;
5517
5518 if (port_a)
5519 val |= 0xc010000;
5520 else
5521 val |= 0x4010000;
5522 tw32_f(MAC_SERDES_CFG, val);
5523 }
5524
5525 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5526 }
5527 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5528 tg3_setup_flow_control(tp, 0, 0);
5529 current_link_up = true;
5530 }
5531 goto out;
5532 }
5533
5534 /* Want auto-negotiation. */
5535 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5536
5537 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5538 if (flowctrl & ADVERTISE_1000XPAUSE)
5539 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5540 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5541 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5542
5543 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
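		/* If the link came up via parallel detection, give it a
		 * grace period before forcing an autoneg restart.
		 */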
5544 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5545 tp->serdes_counter &&
5546 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5547 MAC_STATUS_RCVD_CFG)) ==
5548 MAC_STATUS_PCS_SYNCED)) {
5549 tp->serdes_counter--;
5550 current_link_up = true;
5551 goto out;
5552 }
5553 restart_autoneg:
5554 if (workaround)
5555 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5556 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5557 udelay(5);
5558 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5559
5560 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5561 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5562 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5563 MAC_STATUS_SIGNAL_DET)) {
5564 sg_dig_status = tr32(SG_DIG_STATUS);
5565 mac_status = tr32(MAC_STATUS);
5566
5567 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5568 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5569 u32 local_adv = 0, remote_adv = 0;
5570
5571 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5572 local_adv |= ADVERTISE_1000XPAUSE;
5573 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5574 local_adv |= ADVERTISE_1000XPSE_ASYM;
5575
5576 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5577 remote_adv |= LPA_1000XPAUSE;
5578 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5579 remote_adv |= LPA_1000XPAUSE_ASYM;
5580
5581 tp->link_config.rmt_adv =
5582 mii_adv_to_ethtool_adv_x(remote_adv);
5583
5584 tg3_setup_flow_control(tp, local_adv, remote_adv);
5585 current_link_up = true;
5586 tp->serdes_counter = 0;
5587 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5588 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5589 if (tp->serdes_counter)
5590 tp->serdes_counter--;
5591 else {
5592 if (workaround) {
5593 u32 val = serdes_cfg;
5594
5595 if (port_a)
5596 val |= 0xc010000;
5597 else
5598 val |= 0x4010000;
5599
5600 tw32_f(MAC_SERDES_CFG, val);
5601 }
5602
5603 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604 udelay(40);
5605
5606 				/* Link parallel detection: link is up only
5607 				 * if we have PCS_SYNC and are not receiving
5608 				 * config code words. */
5609 mac_status = tr32(MAC_STATUS);
5610 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5611 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5612 tg3_setup_flow_control(tp, 0, 0);
5613 current_link_up = true;
5614 tp->phy_flags |=
5615 TG3_PHYFLG_PARALLEL_DETECT;
5616 tp->serdes_counter =
5617 SERDES_PARALLEL_DET_TIMEOUT;
5618 } else
5619 goto restart_autoneg;
5620 }
5621 }
5622 } else {
5623 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5624 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5625 }
5626
5627 out:
5628 return current_link_up;
5629 }
5630
5631 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5632 {
5633 bool current_link_up = false;
5634
5635 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636 goto out;
5637
5638 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5639 u32 txflags, rxflags;
5640 int i;
5641
5642 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5643 u32 local_adv = 0, remote_adv = 0;
5644
5645 if (txflags & ANEG_CFG_PS1)
5646 local_adv |= ADVERTISE_1000XPAUSE;
5647 if (txflags & ANEG_CFG_PS2)
5648 local_adv |= ADVERTISE_1000XPSE_ASYM;
5649
5650 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5651 remote_adv |= LPA_1000XPAUSE;
5652 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5653 remote_adv |= LPA_1000XPAUSE_ASYM;
5654
5655 tp->link_config.rmt_adv =
5656 mii_adv_to_ethtool_adv_x(remote_adv);
5657
5658 tg3_setup_flow_control(tp, local_adv, remote_adv);
5659
5660 current_link_up = true;
5661 }
5662 for (i = 0; i < 30; i++) {
5663 udelay(20);
5664 tw32_f(MAC_STATUS,
5665 (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED));
5667 udelay(40);
5668 if ((tr32(MAC_STATUS) &
5669 (MAC_STATUS_SYNC_CHANGED |
5670 MAC_STATUS_CFG_CHANGED)) == 0)
5671 break;
5672 }
5673
5674 mac_status = tr32(MAC_STATUS);
5675 if (!current_link_up &&
5676 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5677 !(mac_status & MAC_STATUS_RCVD_CFG))
5678 current_link_up = true;
5679 } else {
5680 tg3_setup_flow_control(tp, 0, 0);
5681
5682 /* Forcing 1000FD link up. */
5683 current_link_up = true;
5684
5685 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686 udelay(40);
5687
5688 tw32_f(MAC_MODE, tp->mac_mode);
5689 udelay(40);
5690 }
5691
5692 out:
5693 return current_link_up;
5694 }
5695
5696 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 {
5698 u32 orig_pause_cfg;
5699 u32 orig_active_speed;
5700 u8 orig_active_duplex;
5701 u32 mac_status;
5702 bool current_link_up;
5703 int i;
5704
5705 orig_pause_cfg = tp->link_config.active_flowctrl;
5706 orig_active_speed = tp->link_config.active_speed;
5707 orig_active_duplex = tp->link_config.active_duplex;
5708
5709 if (!tg3_flag(tp, HW_AUTONEG) &&
5710 tp->link_up &&
5711 tg3_flag(tp, INIT_COMPLETE)) {
5712 mac_status = tr32(MAC_STATUS);
5713 mac_status &= (MAC_STATUS_PCS_SYNCED |
5714 MAC_STATUS_SIGNAL_DET |
5715 MAC_STATUS_CFG_CHANGED |
5716 MAC_STATUS_RCVD_CFG);
5717 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5718 MAC_STATUS_SIGNAL_DET)) {
5719 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5720 MAC_STATUS_CFG_CHANGED));
5721 return 0;
5722 }
5723 }
5724
5725 tw32_f(MAC_TX_AUTO_NEG, 0);
5726
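	/* Fiber links run the MAC in TBI (ten-bit interface) mode. */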
5727 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5729 tw32_f(MAC_MODE, tp->mac_mode);
5730 udelay(40);
5731
5732 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5733 tg3_init_bcm8002(tp);
5734
5735 /* Enable link change event even when serdes polling. */
5736 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737 udelay(40);
5738
5739 tp->link_config.rmt_adv = 0;
5740 mac_status = tr32(MAC_STATUS);
5741
5742 if (tg3_flag(tp, HW_AUTONEG))
5743 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5744 else
5745 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5746
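	/* Mark the status block updated and clear any stale link-change
	 * indication.
	 */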
5747 tp->napi[0].hw_status->status =
5748 (SD_STATUS_UPDATED |
5749 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5750
5751 for (i = 0; i < 100; i++) {
5752 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5753 MAC_STATUS_CFG_CHANGED));
5754 udelay(5);
5755 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5756 MAC_STATUS_CFG_CHANGED |
5757 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5758 break;
5759 }
5760
5761 mac_status = tr32(MAC_STATUS);
5762 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5763 current_link_up = false;
5764 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5765 tp->serdes_counter == 0) {
5766 tw32_f(MAC_MODE, (tp->mac_mode |
5767 MAC_MODE_SEND_CONFIGS));
5768 udelay(1);
5769 tw32_f(MAC_MODE, tp->mac_mode);
5770 }
5771 }
5772
5773 if (current_link_up) {
5774 tp->link_config.active_speed = SPEED_1000;
5775 tp->link_config.active_duplex = DUPLEX_FULL;
5776 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5777 LED_CTRL_LNKLED_OVERRIDE |
5778 LED_CTRL_1000MBPS_ON));
5779 } else {
5780 tp->link_config.active_speed = SPEED_UNKNOWN;
5781 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5782 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5783 LED_CTRL_LNKLED_OVERRIDE |
5784 LED_CTRL_TRAFFIC_OVERRIDE));
5785 }
5786
5787 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5788 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5789 if (orig_pause_cfg != now_pause_cfg ||
5790 orig_active_speed != tp->link_config.active_speed ||
5791 orig_active_duplex != tp->link_config.active_duplex)
5792 tg3_link_report(tp);
5793 }
5794
5795 return 0;
5796 }
5797
5798 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5799 {
5800 int err = 0;
5801 u32 bmsr, bmcr;
5802 u32 current_speed = SPEED_UNKNOWN;
5803 u8 current_duplex = DUPLEX_UNKNOWN;
5804 bool current_link_up = false;
5805 u32 local_adv, remote_adv, sgsr;
5806
5807 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5808 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5809 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5810 (sgsr & SERDES_TG3_SGMII_MODE)) {
5811
5812 if (force_reset)
5813 tg3_phy_reset(tp);
5814
5815 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5816
5817 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5818 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5819 } else {
5820 current_link_up = true;
5821 if (sgsr & SERDES_TG3_SPEED_1000) {
5822 current_speed = SPEED_1000;
5823 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824 } else if (sgsr & SERDES_TG3_SPEED_100) {
5825 current_speed = SPEED_100;
5826 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 } else {
5828 current_speed = SPEED_10;
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830 }
5831
5832 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5833 current_duplex = DUPLEX_FULL;
5834 else
5835 current_duplex = DUPLEX_HALF;
5836 }
5837
5838 tw32_f(MAC_MODE, tp->mac_mode);
5839 udelay(40);
5840
5841 tg3_clear_mac_status(tp);
5842
5843 goto fiber_setup_done;
5844 }
5845
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 tw32_f(MAC_MODE, tp->mac_mode);
5848 udelay(40);
5849
5850 tg3_clear_mac_status(tp);
5851
5852 if (force_reset)
5853 tg3_phy_reset(tp);
5854
5855 tp->link_config.rmt_adv = 0;
5856
5857 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5860 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5861 bmsr |= BMSR_LSTATUS;
5862 else
5863 bmsr &= ~BMSR_LSTATUS;
5864 }
5865
5866 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5867
5868 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5869 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5870 /* do nothing, just check for link up at the end */
5871 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5872 u32 adv, newadv;
5873
5874 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5875 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5876 ADVERTISE_1000XPAUSE |
5877 ADVERTISE_1000XPSE_ASYM |
5878 ADVERTISE_SLCT);
5879
5880 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5881 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5882
5883 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5884 tg3_writephy(tp, MII_ADVERTISE, newadv);
5885 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5886 tg3_writephy(tp, MII_BMCR, bmcr);
5887
5888 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5889 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5890 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5891
5892 return err;
5893 }
5894 } else {
5895 u32 new_bmcr;
5896
5897 bmcr &= ~BMCR_SPEED1000;
5898 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5899
5900 if (tp->link_config.duplex == DUPLEX_FULL)
5901 new_bmcr |= BMCR_FULLDPLX;
5902
5903 if (new_bmcr != bmcr) {
5904 /* BMCR_SPEED1000 is a reserved bit that needs
5905 * to be set on write.
5906 */
5907 new_bmcr |= BMCR_SPEED1000;
5908
5909 /* Force a linkdown */
5910 if (tp->link_up) {
5911 u32 adv;
5912
5913 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5914 adv &= ~(ADVERTISE_1000XFULL |
5915 ADVERTISE_1000XHALF |
5916 ADVERTISE_SLCT);
5917 tg3_writephy(tp, MII_ADVERTISE, adv);
5918 tg3_writephy(tp, MII_BMCR, bmcr |
5919 BMCR_ANRESTART |
5920 BMCR_ANENABLE);
5921 udelay(10);
5922 tg3_carrier_off(tp);
5923 }
5924 tg3_writephy(tp, MII_BMCR, new_bmcr);
5925 bmcr = new_bmcr;
5926 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5929 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5930 bmsr |= BMSR_LSTATUS;
5931 else
5932 bmsr &= ~BMSR_LSTATUS;
5933 }
5934 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5935 }
5936 }
5937
5938 if (bmsr & BMSR_LSTATUS) {
5939 current_speed = SPEED_1000;
5940 current_link_up = true;
5941 if (bmcr & BMCR_FULLDPLX)
5942 current_duplex = DUPLEX_FULL;
5943 else
5944 current_duplex = DUPLEX_HALF;
5945
5946 local_adv = 0;
5947 remote_adv = 0;
5948
5949 if (bmcr & BMCR_ANENABLE) {
5950 u32 common;
5951
5952 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5953 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5954 common = local_adv & remote_adv;
5955 if (common & (ADVERTISE_1000XHALF |
5956 ADVERTISE_1000XFULL)) {
5957 if (common & ADVERTISE_1000XFULL)
5958 current_duplex = DUPLEX_FULL;
5959 else
5960 current_duplex = DUPLEX_HALF;
5961
5962 tp->link_config.rmt_adv =
5963 mii_adv_to_ethtool_adv_x(remote_adv);
5964 } else if (!tg3_flag(tp, 5780_CLASS)) {
5965 /* Link is up via parallel detect */
5966 } else {
5967 current_link_up = false;
5968 }
5969 }
5970 }
5971
5972 fiber_setup_done:
5973 if (current_link_up && current_duplex == DUPLEX_FULL)
5974 tg3_setup_flow_control(tp, local_adv, remote_adv);
5975
5976 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5977 if (tp->link_config.active_duplex == DUPLEX_HALF)
5978 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5979
5980 tw32_f(MAC_MODE, tp->mac_mode);
5981 udelay(40);
5982
5983 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5984
5985 tp->link_config.active_speed = current_speed;
5986 tp->link_config.active_duplex = current_duplex;
5987
5988 tg3_test_and_report_link_chg(tp, current_link_up);
5989 return err;
5990 }
5991
5992 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5993 {
5994 if (tp->serdes_counter) {
5995 /* Give autoneg time to complete. */
5996 tp->serdes_counter--;
5997 return;
5998 }
5999
6000 if (!tp->link_up &&
6001 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002 u32 bmcr;
6003
6004 tg3_readphy(tp, MII_BMCR, &bmcr);
6005 if (bmcr & BMCR_ANENABLE) {
6006 u32 phy1, phy2;
6007
6008 /* Select shadow register 0x1f */
6009 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6010 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6011
6012 /* Select expansion interrupt status register */
6013 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6014 MII_TG3_DSP_EXP1_INT_STAT);
6015 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017
6018 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6019 /* We have signal detect and are not receiving
6020 * config code words; the link is up via parallel
6021 * detection.
6022 */
6023
6024 bmcr &= ~BMCR_ANENABLE;
6025 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6026 tg3_writephy(tp, MII_BMCR, bmcr);
6027 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6028 }
6029 }
6030 } else if (tp->link_up &&
6031 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6032 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033 u32 phy2;
6034
6035 /* Select expansion interrupt status register */
6036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 MII_TG3_DSP_EXP1_INT_STAT);
6038 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 if (phy2 & 0x20) {
6040 u32 bmcr;
6041
6042 /* Config code words received, turn on autoneg. */
6043 tg3_readphy(tp, MII_BMCR, &bmcr);
6044 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6045
6046 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6047
6048 }
6049 }
6050 }
6051
6052 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6053 {
6054 u32 val;
6055 int err;
6056
6057 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6058 err = tg3_setup_fiber_phy(tp, force_reset);
6059 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6060 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6061 else
6062 err = tg3_setup_copper_phy(tp, force_reset);
6063
6064 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6065 u32 scale;
6066
6067 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6068 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6069 scale = 65;
6070 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6071 scale = 6;
6072 else
6073 scale = 12;
6074
6075 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6076 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6077 tw32(GRC_MISC_CFG, val);
6078 }
6079
6080 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6081 (6 << TX_LENGTHS_IPG_SHIFT);
6082 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6083 tg3_asic_rev(tp) == ASIC_REV_5762)
6084 val |= tr32(MAC_TX_LENGTHS) &
6085 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6086 TX_LENGTHS_CNT_DWN_VAL_MSK);
6087
6088 if (tp->link_config.active_speed == SPEED_1000 &&
6089 tp->link_config.active_duplex == DUPLEX_HALF)
6090 tw32(MAC_TX_LENGTHS, val |
6091 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 else
6093 tw32(MAC_TX_LENGTHS, val |
6094 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6095
6096 if (!tg3_flag(tp, 5705_PLUS)) {
6097 if (tp->link_up) {
6098 tw32(HOSTCC_STAT_COAL_TICKS,
6099 tp->coal.stats_block_coalesce_usecs);
6100 } else {
6101 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6102 }
6103 }
6104
6105 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6106 val = tr32(PCIE_PWR_MGMT_THRESH);
6107 if (!tp->link_up)
6108 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109 tp->pwrmgmt_thresh;
6110 else
6111 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6112 tw32(PCIE_PWR_MGMT_THRESH, val);
6113 }
6114
6115 return err;
6116 }
6117
6118 /* tp->lock must be held */
6119 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6120 {
6121 u64 stamp;
6122
6123 ptp_read_system_prets(sts);
6124 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6125 ptp_read_system_postts(sts);
6126 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6127
6128 return stamp;
6129 }
6130
6131 /* tp->lock must be held */
6132 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6133 {
6134 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6135
6136 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6137 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6138 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6139 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6140 }
6141
6142 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6143 static inline void tg3_full_unlock(struct tg3 *tp);
6144 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6145 {
6146 struct tg3 *tp = netdev_priv(dev);
6147
6148 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6149
6150 if (tg3_flag(tp, PTP_CAPABLE)) {
6151 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152 SOF_TIMESTAMPING_RX_HARDWARE |
6153 SOF_TIMESTAMPING_RAW_HARDWARE;
6154 }
6155
6156 if (tp->ptp_clock)
6157 info->phc_index = ptp_clock_index(tp->ptp_clock);
6158
6159 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6160
6161 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6162 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6163 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6164 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6165 return 0;
6166 }
6167
6168 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6169 {
6170 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6171 u64 correction;
6172 bool neg_adj;
6173
6174 /* Frequency adjustment is performed using hardware with a 24 bit
6175 * accumulator and a programmable correction value. On each clk, the
6176 * correction value gets added to the accumulator and when it
6177 * overflows, the time counter is incremented/decremented.
6178 */
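/* For example (illustrative numbers): scaled_ppm carries a 16-bit
 * fractional part, so a request for +100 ppm arrives as 100 * 65536.
 * diff_by_scaled_ppm(1 << 24, ...) then yields a correction of about
 * 2^24 * 100 / 10^6 ~= 1677, and adding that to the 24-bit accumulator
 * on every clock overflows it roughly once every 10000 clocks, i.e.
 * the counter is nudged at the requested 100 ppm rate.
 */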
6179 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6180
6181 tg3_full_lock(tp, 0);
6182
6183 if (correction)
6184 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6185 TG3_EAV_REF_CLK_CORRECT_EN |
6186 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6187 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6188 else
6189 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6190
6191 tg3_full_unlock(tp);
6192
6193 return 0;
6194 }
6195
6196 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6197 {
6198 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6199
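/* Phase adjustments are accumulated in software (tp->ptp_adjust) and
 * folded in when the clock is read, rather than rewriting the
 * hardware counter on every adjustment.
 */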
6200 tg3_full_lock(tp, 0);
6201 tp->ptp_adjust += delta;
6202 tg3_full_unlock(tp);
6203
6204 return 0;
6205 }
6206
6207 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6208 struct ptp_system_timestamp *sts)
6209 {
6210 u64 ns;
6211 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212
6213 tg3_full_lock(tp, 0);
6214 ns = tg3_refclk_read(tp, sts);
6215 ns += tp->ptp_adjust;
6216 tg3_full_unlock(tp);
6217
6218 *ts = ns_to_timespec64(ns);
6219
6220 return 0;
6221 }
6222
6223 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6224 const struct timespec64 *ts)
6225 {
6226 u64 ns;
6227 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229 ns = timespec64_to_ns(ts);
6230
6231 tg3_full_lock(tp, 0);
6232 tg3_refclk_write(tp, ns);
6233 tp->ptp_adjust = 0;
6234 tg3_full_unlock(tp);
6235
6236 return 0;
6237 }
6238
6239 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6240 struct ptp_clock_request *rq, int on)
6241 {
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243 u32 clock_ctl;
6244 int rval = 0;
6245
6246 switch (rq->type) {
6247 case PTP_CLK_REQ_PEROUT:
6248 /* Reject requests with unsupported flags */
6249 if (rq->perout.flags)
6250 return -EOPNOTSUPP;
6251
6252 if (rq->perout.index != 0)
6253 return -EINVAL;
6254
6255 tg3_full_lock(tp, 0);
6256 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6258
6259 if (on) {
6260 u64 nsec;
6261
6262 nsec = rq->perout.start.sec * 1000000000ULL +
6263 rq->perout.start.nsec;
6264
6265 if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 netdev_warn(tp->dev,
6267 "Device supports only a one-shot timesync output, period must be 0\n");
6268 rval = -EINVAL;
6269 goto err_out;
6270 }
6271
6272 if (nsec & (1ULL << 63)) {
6273 netdev_warn(tp->dev,
6274 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6275 rval = -EINVAL;
6276 goto err_out;
6277 }
6278
6279 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 tw32(TG3_EAV_WATCHDOG0_MSB,
6281 TG3_EAV_WATCHDOG0_EN |
6282 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6283
6284 tw32(TG3_EAV_REF_CLCK_CTL,
6285 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6286 } else {
6287 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6289 }
6290
6291 err_out:
6292 tg3_full_unlock(tp);
6293 return rval;
6294
6295 default:
6296 break;
6297 }
6298
6299 return -EOPNOTSUPP;
6300 }
6301
6302 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6303 struct skb_shared_hwtstamps *timestamp)
6304 {
6305 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6306 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6307 tp->ptp_adjust);
6308 }
6309
6310 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6311 {
6312 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6313 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6314 }
6315
6316 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6317 {
6318 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6319 struct skb_shared_hwtstamps timestamp;
6320 u64 hwclock;
6321
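/* The TX timestamp register still holding the previous value
 * (tp->pre_tx_ts) means the hardware has not captured a fresh
 * timestamp yet; re-poll from the PTP worker up to three times,
 * HZ / 10 (about 100 ms) apart, before giving up and freeing the
 * deferred skb.
 */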
6322 if (tp->ptp_txts_retrycnt > 2)
6323 goto done;
6324
6325 tg3_read_tx_tstamp(tp, &hwclock);
6326
6327 if (hwclock != tp->pre_tx_ts) {
6328 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6329 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6330 goto done;
6331 }
6332 tp->ptp_txts_retrycnt++;
6333 return HZ / 10;
6334 done:
6335 dev_consume_skb_any(tp->tx_tstamp_skb);
6336 tp->tx_tstamp_skb = NULL;
6337 tp->ptp_txts_retrycnt = 0;
6338 tp->pre_tx_ts = 0;
6339 return -1;
6340 }
6341
6342 static const struct ptp_clock_info tg3_ptp_caps = {
6343 .owner = THIS_MODULE,
6344 .name = "tg3 clock",
6345 .max_adj = 250000000,
6346 .n_alarm = 0,
6347 .n_ext_ts = 0,
6348 .n_per_out = 1,
6349 .n_pins = 0,
6350 .pps = 0,
6351 .adjfine = tg3_ptp_adjfine,
6352 .adjtime = tg3_ptp_adjtime,
6353 .do_aux_work = tg3_ptp_ts_aux_work,
6354 .gettimex64 = tg3_ptp_gettimex,
6355 .settime64 = tg3_ptp_settime,
6356 .enable = tg3_ptp_enable,
6357 };
6358
6359 /* tp->lock must be held */
6360 static void tg3_ptp_init(struct tg3 *tp)
6361 {
6362 if (!tg3_flag(tp, PTP_CAPABLE))
6363 return;
6364
6365 /* Initialize the hardware clock to the system time. */
6366 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6367 tp->ptp_adjust = 0;
6368 tp->ptp_info = tg3_ptp_caps;
6369 }
6370
6371 /* tp->lock must be held */
6372 static void tg3_ptp_resume(struct tg3 *tp)
6373 {
6374 if (!tg3_flag(tp, PTP_CAPABLE))
6375 return;
6376
6377 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6378 tp->ptp_adjust = 0;
6379 }
6380
6381 static void tg3_ptp_fini(struct tg3 *tp)
6382 {
6383 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6384 return;
6385
6386 ptp_clock_unregister(tp->ptp_clock);
6387 tp->ptp_clock = NULL;
6388 tp->ptp_adjust = 0;
6389 dev_consume_skb_any(tp->tx_tstamp_skb);
6390 tp->tx_tstamp_skb = NULL;
6391 }
6392
6393 static inline int tg3_irq_sync(struct tg3 *tp)
6394 {
6395 return tp->irq_sync;
6396 }
6397
6398 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6399 {
6400 int i;
6401
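/* Advance the destination pointer by the register offset as well,
 * so every register lands at its own offset within the dump buffer
 * and the dump reads as a sparse mirror of the register space.
 */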
6402 dst = (u32 *)((u8 *)dst + off);
6403 for (i = 0; i < len; i += sizeof(u32))
6404 *dst++ = tr32(off + i);
6405 }
6406
6407 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6408 {
6409 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6410 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6411 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6412 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6413 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6414 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6415 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6416 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6418 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6419 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6420 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6421 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6422 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6424 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6425 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6426 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6427 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6428
6429 if (tg3_flag(tp, SUPPORT_MSIX))
6430 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6431
6432 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6433 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6434 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6435 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6436 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6437 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6438 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6439 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6440
6441 if (!tg3_flag(tp, 5705_PLUS)) {
6442 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6443 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6444 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6445 }
6446
6447 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6448 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6449 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6450 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6451 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6452
6453 if (tg3_flag(tp, NVRAM))
6454 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6455 }
6456
6457 static void tg3_dump_state(struct tg3 *tp)
6458 {
6459 int i;
6460 u32 *regs;
6461
6462 /* If it is a PCI error, all registers will read 0xffffffff;
6463 * don't dump them out, just report the error and return.
6464 */
6465 if (tp->pdev->error_state != pci_channel_io_normal) {
6466 netdev_err(tp->dev, "PCI channel ERROR!\n");
6467 return;
6468 }
6469
6470 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6471 if (!regs)
6472 return;
6473
6474 if (tg3_flag(tp, PCI_EXPRESS)) {
6475 /* Read up to but not including private PCI registers */
6476 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6477 regs[i / sizeof(u32)] = tr32(i);
6478 } else
6479 tg3_dump_legacy_regs(tp, regs);
6480
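/* Print four registers per line, skipping all-zero groups to keep
 * the log compact.
 */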
6481 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6482 if (!regs[i + 0] && !regs[i + 1] &&
6483 !regs[i + 2] && !regs[i + 3])
6484 continue;
6485
6486 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6487 i * 4,
6488 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6489 }
6490
6491 kfree(regs);
6492
6493 for (i = 0; i < tp->irq_cnt; i++) {
6494 struct tg3_napi *tnapi = &tp->napi[i];
6495
6496 /* SW status block */
6497 netdev_err(tp->dev,
6498 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6499 i,
6500 tnapi->hw_status->status,
6501 tnapi->hw_status->status_tag,
6502 tnapi->hw_status->rx_jumbo_consumer,
6503 tnapi->hw_status->rx_consumer,
6504 tnapi->hw_status->rx_mini_consumer,
6505 tnapi->hw_status->idx[0].rx_producer,
6506 tnapi->hw_status->idx[0].tx_consumer);
6507
6508 netdev_err(tp->dev,
6509 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6510 i,
6511 tnapi->last_tag, tnapi->last_irq_tag,
6512 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6513 tnapi->rx_rcb_ptr,
6514 tnapi->prodring.rx_std_prod_idx,
6515 tnapi->prodring.rx_std_cons_idx,
6516 tnapi->prodring.rx_jmb_prod_idx,
6517 tnapi->prodring.rx_jmb_cons_idx);
6518 }
6519 }
6520
6521 /* This is called whenever we suspect that the system chipset is re-
6522 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6523 * is bogus tx completions. We try to recover by setting the
6524 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6525 * in the workqueue.
6526 */
6527 static void tg3_tx_recover(struct tg3 *tp)
6528 {
6529 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6530 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6531
6532 netdev_warn(tp->dev,
6533 "The system may be re-ordering memory-mapped I/O "
6534 "cycles to the network device, attempting to recover. "
6535 "Please report the problem to the driver maintainer "
6536 "and include system chipset information.\n");
6537
6538 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6539 }
6540
6541 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6542 {
6543 /* Tell compiler to fetch tx indices from memory. */
6544 barrier();
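/* Worked example (hypothetical indices): with the 512-entry TX ring,
 * tx_prod = 5 and tx_cons = 510 give (5 - 510) & 511 = 7 descriptors
 * in flight, so tx_pending - 7 slots remain available.
 */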
6545 return tnapi->tx_pending -
6546 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6547 }
6548
6549 /* Tigon3 never reports partial packet sends. So we do not
6550 * need special logic to handle SKBs that have not had all
6551 * of their frags sent yet, like SunGEM does.
6552 */
6553 static void tg3_tx(struct tg3_napi *tnapi)
6554 {
6555 struct tg3 *tp = tnapi->tp;
6556 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6557 u32 sw_idx = tnapi->tx_cons;
6558 struct netdev_queue *txq;
6559 int index = tnapi - tp->napi;
6560 unsigned int pkts_compl = 0, bytes_compl = 0;
6561
6562 if (tg3_flag(tp, ENABLE_TSS))
6563 index--;
6564
6565 txq = netdev_get_tx_queue(tp->dev, index);
6566
6567 while (sw_idx != hw_idx) {
6568 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6569 bool complete_skb_later = false;
6570 struct sk_buff *skb = ri->skb;
6571 int i, tx_bug = 0;
6572
6573 if (unlikely(skb == NULL)) {
6574 tg3_tx_recover(tp);
6575 return;
6576 }
6577
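/* If this descriptor requested a hardware TX timestamp, report it
 * now when one is ready; otherwise hold on to the skb and let the
 * PTP aux worker (tg3_ptp_ts_aux_work) complete it later.
 */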
6578 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6579 struct skb_shared_hwtstamps timestamp;
6580 u64 hwclock;
6581
6582 tg3_read_tx_tstamp(tp, &hwclock);
6583 if (hwclock != tp->pre_tx_ts) {
6584 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6585 skb_tstamp_tx(skb, &timestamp);
6586 tp->pre_tx_ts = 0;
6587 } else {
6588 tp->tx_tstamp_skb = skb;
6589 complete_skb_later = true;
6590 }
6591 }
6592
6593 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6594 skb_headlen(skb), DMA_TO_DEVICE);
6595
6596 ri->skb = NULL;
6597
6598 while (ri->fragmented) {
6599 ri->fragmented = false;
6600 sw_idx = NEXT_TX(sw_idx);
6601 ri = &tnapi->tx_buffers[sw_idx];
6602 }
6603
6604 sw_idx = NEXT_TX(sw_idx);
6605
6606 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6607 ri = &tnapi->tx_buffers[sw_idx];
6608 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6609 tx_bug = 1;
6610
6611 dma_unmap_page(&tp->pdev->dev,
6612 dma_unmap_addr(ri, mapping),
6613 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6614 DMA_TO_DEVICE);
6615
6616 while (ri->fragmented) {
6617 ri->fragmented = false;
6618 sw_idx = NEXT_TX(sw_idx);
6619 ri = &tnapi->tx_buffers[sw_idx];
6620 }
6621
6622 sw_idx = NEXT_TX(sw_idx);
6623 }
6624
6625 pkts_compl++;
6626 bytes_compl += skb->len;
6627
6628 if (!complete_skb_later)
6629 dev_consume_skb_any(skb);
6630 else
6631 ptp_schedule_worker(tp->ptp_clock, 0);
6632
6633 if (unlikely(tx_bug)) {
6634 tg3_tx_recover(tp);
6635 return;
6636 }
6637 }
6638
6639 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6640
6641 tnapi->tx_cons = sw_idx;
6642
6643 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6644 * before checking for netif_queue_stopped(). Without the
6645 * memory barrier, there is a small possibility that __tg3_start_xmit()
6646 * will miss it and cause the queue to be stopped forever.
6647 */
6648 smp_mb();
6649
6650 if (unlikely(netif_tx_queue_stopped(txq) &&
6651 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6652 __netif_tx_lock(txq, smp_processor_id());
6653 if (netif_tx_queue_stopped(txq) &&
6654 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6655 netif_tx_wake_queue(txq);
6656 __netif_tx_unlock(txq);
6657 }
6658 }
6659
6660 static void tg3_frag_free(bool is_frag, void *data)
6661 {
6662 if (is_frag)
6663 skb_free_frag(data);
6664 else
6665 kfree(data);
6666 }
6667
6668 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6669 {
6670 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6671 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6672
6673 if (!ri->data)
6674 return;
6675
6676 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6677 DMA_FROM_DEVICE);
6678 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6679 ri->data = NULL;
6680 }
6681
6682
6683 /* Returns size of skb allocated or < 0 on error.
6684 *
6685 * We only need to fill in the address because the other members
6686 * of the RX descriptor are invariant, see tg3_init_rings.
6687 *
6688 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6689 * posting buffers we only dirty the first cache line of the RX
6690 * descriptor (containing the address). Whereas for the RX status
6691 * buffers the cpu only reads the last cacheline of the RX descriptor
6692 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6693 */
6694 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6695 u32 opaque_key, u32 dest_idx_unmasked,
6696 unsigned int *frag_size)
6697 {
6698 struct tg3_rx_buffer_desc *desc;
6699 struct ring_info *map;
6700 u8 *data;
6701 dma_addr_t mapping;
6702 int skb_size, data_size, dest_idx;
6703
6704 switch (opaque_key) {
6705 case RXD_OPAQUE_RING_STD:
6706 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6707 desc = &tpr->rx_std[dest_idx];
6708 map = &tpr->rx_std_buffers[dest_idx];
6709 data_size = tp->rx_pkt_map_sz;
6710 break;
6711
6712 case RXD_OPAQUE_RING_JUMBO:
6713 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6714 desc = &tpr->rx_jmb[dest_idx].std;
6715 map = &tpr->rx_jmb_buffers[dest_idx];
6716 data_size = TG3_RX_JMB_MAP_SZ;
6717 break;
6718
6719 default:
6720 return -EINVAL;
6721 }
6722
6723 /* Do not overwrite any of the map or rp information
6724 * until we are sure we can commit to a new buffer.
6725 *
6726 * Callers depend upon this behavior and assume that
6727 * we leave everything unchanged if we fail.
6728 */
6729 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6730 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
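/* Buffers that fit in a page come from the NAPI page-frag cache and
 * are later attached to an skb with build_skb(); larger (jumbo)
 * buffers fall back to kmalloc() and slab_build_skb().
 */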
6731 if (skb_size <= PAGE_SIZE) {
6732 data = napi_alloc_frag(skb_size);
6733 *frag_size = skb_size;
6734 } else {
6735 data = kmalloc(skb_size, GFP_ATOMIC);
6736 *frag_size = 0;
6737 }
6738 if (!data)
6739 return -ENOMEM;
6740
6741 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6742 data_size, DMA_FROM_DEVICE);
6743 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6744 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6745 return -EIO;
6746 }
6747
6748 map->data = data;
6749 dma_unmap_addr_set(map, mapping, mapping);
6750
6751 desc->addr_hi = ((u64)mapping >> 32);
6752 desc->addr_lo = ((u64)mapping & 0xffffffff);
6753
6754 return data_size;
6755 }
6756
6757 /* We only need to copy over the address because the other
6758 * members of the RX descriptor are invariant. See notes above
6759 * tg3_alloc_rx_data for full details.
6760 */
6761 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6762 struct tg3_rx_prodring_set *dpr,
6763 u32 opaque_key, int src_idx,
6764 u32 dest_idx_unmasked)
6765 {
6766 struct tg3 *tp = tnapi->tp;
6767 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6768 struct ring_info *src_map, *dest_map;
6769 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6770 int dest_idx;
6771
6772 switch (opaque_key) {
6773 case RXD_OPAQUE_RING_STD:
6774 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6775 dest_desc = &dpr->rx_std[dest_idx];
6776 dest_map = &dpr->rx_std_buffers[dest_idx];
6777 src_desc = &spr->rx_std[src_idx];
6778 src_map = &spr->rx_std_buffers[src_idx];
6779 break;
6780
6781 case RXD_OPAQUE_RING_JUMBO:
6782 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6783 dest_desc = &dpr->rx_jmb[dest_idx].std;
6784 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6785 src_desc = &spr->rx_jmb[src_idx].std;
6786 src_map = &spr->rx_jmb_buffers[src_idx];
6787 break;
6788
6789 default:
6790 return;
6791 }
6792
6793 dest_map->data = src_map->data;
6794 dma_unmap_addr_set(dest_map, mapping,
6795 dma_unmap_addr(src_map, mapping));
6796 dest_desc->addr_hi = src_desc->addr_hi;
6797 dest_desc->addr_lo = src_desc->addr_lo;
6798
6799 /* Ensure that the update to the skb happens after the physical
6800 * addresses have been transferred to the new BD location.
6801 */
6802 smp_wmb();
6803
6804 src_map->data = NULL;
6805 }
6806
6807 /* The RX ring scheme is composed of multiple rings which post fresh
6808 * buffers to the chip, and one special ring the chip uses to report
6809 * status back to the host.
6810 *
6811 * The special ring reports the status of received packets to the
6812 * host. The chip does not write into the original descriptor the
6813 * RX buffer was obtained from. The chip simply takes the original
6814 * descriptor as provided by the host, updates the status and length
6815 * field, then writes this into the next status ring entry.
6816 *
6817 * Each ring the host uses to post buffers to the chip is described
6818 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6819 * it is first placed into the on-chip RAM. When the packet's length
6820 * is known, it walks down the TG3_BDINFO entries to select the ring.
6821 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6822 * which is within the range of the new packet's length is chosen.
6823 *
6824 * The "separate ring for rx status" scheme may sound queer, but it makes
6825 * sense from a cache coherency perspective. If only the host writes
6826 * to the buffer post rings, and only the chip writes to the rx status
6827 * rings, then cache lines never move beyond shared-modified state.
6828 * If both the host and chip were to write into the same ring, cache line
6829 * eviction could occur since both entities want it in an exclusive state.
6830 */
6831 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6832 {
6833 struct tg3 *tp = tnapi->tp;
6834 u32 work_mask, rx_std_posted = 0;
6835 u32 std_prod_idx, jmb_prod_idx;
6836 u32 sw_idx = tnapi->rx_rcb_ptr;
6837 u16 hw_idx;
6838 int received;
6839 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6840
6841 hw_idx = *(tnapi->rx_rcb_prod_idx);
6842 /*
6843 * We need to order the read of hw_idx and the read of
6844 * the opaque cookie.
6845 */
6846 rmb();
6847 work_mask = 0;
6848 received = 0;
6849 std_prod_idx = tpr->rx_std_prod_idx;
6850 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6851 while (sw_idx != hw_idx && budget > 0) {
6852 struct ring_info *ri;
6853 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6854 unsigned int len;
6855 struct sk_buff *skb;
6856 dma_addr_t dma_addr;
6857 u32 opaque_key, desc_idx, *post_ptr;
6858 u8 *data;
6859 u64 tstamp = 0;
6860
6861 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6862 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6863 if (opaque_key == RXD_OPAQUE_RING_STD) {
6864 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6865 dma_addr = dma_unmap_addr(ri, mapping);
6866 data = ri->data;
6867 post_ptr = &std_prod_idx;
6868 rx_std_posted++;
6869 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6870 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6871 dma_addr = dma_unmap_addr(ri, mapping);
6872 data = ri->data;
6873 post_ptr = &jmb_prod_idx;
6874 } else
6875 goto next_pkt_nopost;
6876
6877 work_mask |= opaque_key;
6878
6879 if (desc->err_vlan & RXD_ERR_MASK) {
6880 drop_it:
6881 tg3_recycle_rx(tnapi, tpr, opaque_key,
6882 desc_idx, *post_ptr);
6883 drop_it_no_recycle:
6884 /* Other statistics are kept track of by the card. */
6885 tnapi->rx_dropped++;
6886 goto next_pkt;
6887 }
6888
6889 prefetch(data + TG3_RX_OFFSET(tp));
6890 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6891 ETH_FCS_LEN;
6892
6893 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6894 RXD_FLAG_PTPSTAT_PTPV1 ||
6895 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6896 RXD_FLAG_PTPSTAT_PTPV2) {
6897 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6898 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6899 }
6900
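/* Large packets keep their DMA buffer and get a replacement posted
 * to the ring; small packets are copied into a fresh skb so the
 * original buffer can be recycled in place.
 */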
6901 if (len > TG3_RX_COPY_THRESH(tp)) {
6902 int skb_size;
6903 unsigned int frag_size;
6904
6905 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6906 *post_ptr, &frag_size);
6907 if (skb_size < 0)
6908 goto drop_it;
6909
6910 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6911 DMA_FROM_DEVICE);
6912
6913 /* Ensure that the update to the data happens
6914 * after the usage of the old DMA mapping.
6915 */
6916 smp_wmb();
6917
6918 ri->data = NULL;
6919
6920 if (frag_size)
6921 skb = build_skb(data, frag_size);
6922 else
6923 skb = slab_build_skb(data);
6924 if (!skb) {
6925 tg3_frag_free(frag_size != 0, data);
6926 goto drop_it_no_recycle;
6927 }
6928 skb_reserve(skb, TG3_RX_OFFSET(tp));
6929 } else {
6930 tg3_recycle_rx(tnapi, tpr, opaque_key,
6931 desc_idx, *post_ptr);
6932
6933 skb = netdev_alloc_skb(tp->dev,
6934 len + TG3_RAW_IP_ALIGN);
6935 if (skb == NULL)
6936 goto drop_it_no_recycle;
6937
6938 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6939 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6940 DMA_FROM_DEVICE);
6941 memcpy(skb->data,
6942 data + TG3_RX_OFFSET(tp),
6943 len);
6944 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6945 len, DMA_FROM_DEVICE);
6946 }
6947
6948 skb_put(skb, len);
6949 if (tstamp)
6950 tg3_hwclock_to_timestamp(tp, tstamp,
6951 skb_hwtstamps(skb));
6952
6953 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6954 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6955 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6956 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6957 skb->ip_summed = CHECKSUM_UNNECESSARY;
6958 else
6959 skb_checksum_none_assert(skb);
6960
6961 skb->protocol = eth_type_trans(skb, tp->dev);
6962
6963 if (len > (tp->dev->mtu + ETH_HLEN) &&
6964 skb->protocol != htons(ETH_P_8021Q) &&
6965 skb->protocol != htons(ETH_P_8021AD)) {
6966 dev_kfree_skb_any(skb);
6967 goto drop_it_no_recycle;
6968 }
6969
6970 if (desc->type_flags & RXD_FLAG_VLAN &&
6971 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6972 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6973 desc->err_vlan & RXD_VLAN_MASK);
6974
6975 napi_gro_receive(&tnapi->napi, skb);
6976
6977 received++;
6978 budget--;
6979
6980 next_pkt:
6981 (*post_ptr)++;
6982
6983 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6984 tpr->rx_std_prod_idx = std_prod_idx &
6985 tp->rx_std_ring_mask;
6986 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6987 tpr->rx_std_prod_idx);
6988 work_mask &= ~RXD_OPAQUE_RING_STD;
6989 rx_std_posted = 0;
6990 }
6991 next_pkt_nopost:
6992 sw_idx++;
6993 sw_idx &= tp->rx_ret_ring_mask;
6994
6995 /* Refresh hw_idx to see if there is new work */
6996 if (sw_idx == hw_idx) {
6997 hw_idx = *(tnapi->rx_rcb_prod_idx);
6998 rmb();
6999 }
7000 }
7001
7002 /* ACK the status ring. */
7003 tnapi->rx_rcb_ptr = sw_idx;
7004 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7005
7006 /* Refill RX ring(s). */
7007 if (!tg3_flag(tp, ENABLE_RSS)) {
7008 /* Sync BD data before updating mailbox */
7009 wmb();
7010
7011 if (work_mask & RXD_OPAQUE_RING_STD) {
7012 tpr->rx_std_prod_idx = std_prod_idx &
7013 tp->rx_std_ring_mask;
7014 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7015 tpr->rx_std_prod_idx);
7016 }
7017 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7018 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7019 tp->rx_jmb_ring_mask;
7020 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7021 tpr->rx_jmb_prod_idx);
7022 }
7023 } else if (work_mask) {
7024 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7025 * updated before the producer indices can be updated.
7026 */
7027 smp_wmb();
7028
7029 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7030 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7031
7032 if (tnapi != &tp->napi[1]) {
7033 tp->rx_refill = true;
7034 napi_schedule(&tp->napi[1].napi);
7035 }
7036 }
7037
7038 return received;
7039 }
7040
7041 static void tg3_poll_link(struct tg3 *tp)
7042 {
7043 /* handle link change and other phy events */
7044 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7045 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7046
7047 if (sblk->status & SD_STATUS_LINK_CHG) {
7048 sblk->status = SD_STATUS_UPDATED |
7049 (sblk->status & ~SD_STATUS_LINK_CHG);
7050 spin_lock(&tp->lock);
7051 if (tg3_flag(tp, USE_PHYLIB)) {
7052 tw32_f(MAC_STATUS,
7053 (MAC_STATUS_SYNC_CHANGED |
7054 MAC_STATUS_CFG_CHANGED |
7055 MAC_STATUS_MI_COMPLETION |
7056 MAC_STATUS_LNKSTATE_CHANGED));
7057 udelay(40);
7058 } else
7059 tg3_setup_phy(tp, false);
7060 spin_unlock(&tp->lock);
7061 }
7062 }
7063 }
7064
7065 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7066 struct tg3_rx_prodring_set *dpr,
7067 struct tg3_rx_prodring_set *spr)
7068 {
7069 u32 si, di, cpycnt, src_prod_idx;
7070 int i, err = 0;
7071
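/* Copy buffers produced on the per-vector source ring (spr) over to
 * the destination ring (dpr), i.e. the napi[0] ring that the hardware
 * is refilled from, stopping early if destination slots are still
 * occupied.
 */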
7072 while (1) {
7073 src_prod_idx = spr->rx_std_prod_idx;
7074
7075 /* Make sure updates to the rx_std_buffers[] entries and the
7076 * standard producer index are seen in the correct order.
7077 */
7078 smp_rmb();
7079
7080 if (spr->rx_std_cons_idx == src_prod_idx)
7081 break;
7082
7083 if (spr->rx_std_cons_idx < src_prod_idx)
7084 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7085 else
7086 cpycnt = tp->rx_std_ring_mask + 1 -
7087 spr->rx_std_cons_idx;
7088
7089 cpycnt = min(cpycnt,
7090 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7091
7092 si = spr->rx_std_cons_idx;
7093 di = dpr->rx_std_prod_idx;
7094
7095 for (i = di; i < di + cpycnt; i++) {
7096 if (dpr->rx_std_buffers[i].data) {
7097 cpycnt = i - di;
7098 err = -ENOSPC;
7099 break;
7100 }
7101 }
7102
7103 if (!cpycnt)
7104 break;
7105
7106 /* Ensure that updates to the rx_std_buffers ring and the
7107 * shadowed hardware producer ring from tg3_recycle_skb() are
7108 * ordered correctly WRT the skb check above.
7109 */
7110 smp_rmb();
7111
7112 memcpy(&dpr->rx_std_buffers[di],
7113 &spr->rx_std_buffers[si],
7114 cpycnt * sizeof(struct ring_info));
7115
7116 for (i = 0; i < cpycnt; i++, di++, si++) {
7117 struct tg3_rx_buffer_desc *sbd, *dbd;
7118 sbd = &spr->rx_std[si];
7119 dbd = &dpr->rx_std[di];
7120 dbd->addr_hi = sbd->addr_hi;
7121 dbd->addr_lo = sbd->addr_lo;
7122 }
7123
7124 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7125 tp->rx_std_ring_mask;
7126 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7127 tp->rx_std_ring_mask;
7128 }
7129
7130 while (1) {
7131 src_prod_idx = spr->rx_jmb_prod_idx;
7132
7133 /* Make sure updates to the rx_jmb_buffers[] entries and
7134 * the jumbo producer index are seen in the correct order.
7135 */
7136 smp_rmb();
7137
7138 if (spr->rx_jmb_cons_idx == src_prod_idx)
7139 break;
7140
7141 if (spr->rx_jmb_cons_idx < src_prod_idx)
7142 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7143 else
7144 cpycnt = tp->rx_jmb_ring_mask + 1 -
7145 spr->rx_jmb_cons_idx;
7146
7147 cpycnt = min(cpycnt,
7148 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7149
7150 si = spr->rx_jmb_cons_idx;
7151 di = dpr->rx_jmb_prod_idx;
7152
7153 for (i = di; i < di + cpycnt; i++) {
7154 if (dpr->rx_jmb_buffers[i].data) {
7155 cpycnt = i - di;
7156 err = -ENOSPC;
7157 break;
7158 }
7159 }
7160
7161 if (!cpycnt)
7162 break;
7163
7164 /* Ensure that updates to the rx_jmb_buffers ring and the
7165 * shadowed hardware producer ring from tg3_recycle_skb() are
7166 * ordered correctly WRT the skb check above.
7167 */
7168 smp_rmb();
7169
7170 memcpy(&dpr->rx_jmb_buffers[di],
7171 &spr->rx_jmb_buffers[si],
7172 cpycnt * sizeof(struct ring_info));
7173
7174 for (i = 0; i < cpycnt; i++, di++, si++) {
7175 struct tg3_rx_buffer_desc *sbd, *dbd;
7176 sbd = &spr->rx_jmb[si].std;
7177 dbd = &dpr->rx_jmb[di].std;
7178 dbd->addr_hi = sbd->addr_hi;
7179 dbd->addr_lo = sbd->addr_lo;
7180 }
7181
7182 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7183 tp->rx_jmb_ring_mask;
7184 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7185 tp->rx_jmb_ring_mask;
7186 }
7187
7188 return err;
7189 }
7190
7191 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7192 {
7193 struct tg3 *tp = tnapi->tp;
7194
7195 /* run TX completion thread */
7196 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7197 tg3_tx(tnapi);
7198 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7199 return work_done;
7200 }
7201
7202 if (!tnapi->rx_rcb_prod_idx)
7203 return work_done;
7204
7205 /* run RX thread, within the bounds set by NAPI.
7206 * All RX "locking" is done by ensuring outside
7207 * code synchronizes with tg3->napi.poll()
7208 */
7209 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7210 work_done += tg3_rx(tnapi, budget - work_done);
7211
7212 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7213 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7214 int i, err = 0;
7215 u32 std_prod_idx = dpr->rx_std_prod_idx;
7216 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7217
7218 tp->rx_refill = false;
7219 for (i = 1; i <= tp->rxq_cnt; i++)
7220 err |= tg3_rx_prodring_xfer(tp, dpr,
7221 &tp->napi[i].prodring);
7222
7223 wmb();
7224
7225 if (std_prod_idx != dpr->rx_std_prod_idx)
7226 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7227 dpr->rx_std_prod_idx);
7228
7229 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7230 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7231 dpr->rx_jmb_prod_idx);
7232
7233 if (err)
7234 tw32_f(HOSTCC_MODE, tp->coal_now);
7235 }
7236
7237 return work_done;
7238 }
7239
7240 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7241 {
7242 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7243 schedule_work(&tp->reset_task);
7244 }
7245
7246 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7247 {
7248 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7249 cancel_work_sync(&tp->reset_task);
7250 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7251 }
7252
7253 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7254 {
7255 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7256 struct tg3 *tp = tnapi->tp;
7257 int work_done = 0;
7258 struct tg3_hw_status *sblk = tnapi->hw_status;
7259
7260 while (1) {
7261 work_done = tg3_poll_work(tnapi, work_done, budget);
7262
7263 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7264 goto tx_recovery;
7265
7266 if (unlikely(work_done >= budget))
7267 break;
7268
7269 /* tp->last_tag is used in tg3_int_reenable() below
7270 * to tell the hw how much work has been processed,
7271 * so we must read it before checking for more work.
7272 */
7273 tnapi->last_tag = sblk->status_tag;
7274 tnapi->last_irq_tag = tnapi->last_tag;
7275 rmb();
7276
7277 /* check for RX/TX work to do */
7278 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7279 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7280
7281 /* This test is not race free, but looping again
7282 * reduces the number of interrupts.
7283 */
7284 if (tnapi == &tp->napi[1] && tp->rx_refill)
7285 continue;
7286
7287 napi_complete_done(napi, work_done);
7288 /* Reenable interrupts. */
7289 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7290
7291 /* This test here is synchronized by napi_schedule()
7292 * and napi_complete() to close the race condition.
7293 */
7294 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7295 tw32(HOSTCC_MODE, tp->coalesce_mode |
7296 HOSTCC_MODE_ENABLE |
7297 tnapi->coal_now);
7298 }
7299 break;
7300 }
7301 }
7302
7303 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7304 return work_done;
7305
7306 tx_recovery:
7307 /* work_done is guaranteed to be less than budget. */
7308 napi_complete(napi);
7309 tg3_reset_task_schedule(tp);
7310 return work_done;
7311 }
7312
7313 static void tg3_process_error(struct tg3 *tp)
7314 {
7315 u32 val;
7316 bool real_error = false;
7317
7318 if (tg3_flag(tp, ERROR_PROCESSED))
7319 return;
7320
7321 /* Check Flow Attention register */
7322 val = tr32(HOSTCC_FLOW_ATTN);
7323 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7324 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7325 real_error = true;
7326 }
7327
7328 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7329 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7330 real_error = true;
7331 }
7332
7333 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7334 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7335 real_error = true;
7336 }
7337
7338 if (!real_error)
7339 return;
7340
7341 tg3_dump_state(tp);
7342
7343 tg3_flag_set(tp, ERROR_PROCESSED);
7344 tg3_reset_task_schedule(tp);
7345 }
7346
7347 static int tg3_poll(struct napi_struct *napi, int budget)
7348 {
7349 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7350 struct tg3 *tp = tnapi->tp;
7351 int work_done = 0;
7352 struct tg3_hw_status *sblk = tnapi->hw_status;
7353
7354 while (1) {
7355 if (sblk->status & SD_STATUS_ERROR)
7356 tg3_process_error(tp);
7357
7358 tg3_poll_link(tp);
7359
7360 work_done = tg3_poll_work(tnapi, work_done, budget);
7361
7362 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7363 goto tx_recovery;
7364
7365 if (unlikely(work_done >= budget))
7366 break;
7367
7368 if (tg3_flag(tp, TAGGED_STATUS)) {
7369 /* tp->last_tag is used in tg3_int_reenable() below
7370 * to tell the hw how much work has been processed,
7371 * so we must read it before checking for more work.
7372 */
7373 tnapi->last_tag = sblk->status_tag;
7374 tnapi->last_irq_tag = tnapi->last_tag;
7375 rmb();
7376 } else
7377 sblk->status &= ~SD_STATUS_UPDATED;
7378
7379 if (likely(!tg3_has_work(tnapi))) {
7380 napi_complete_done(napi, work_done);
7381 tg3_int_reenable(tnapi);
7382 break;
7383 }
7384 }
7385
7386 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7387 return work_done;
7388
7389 tx_recovery:
7390 /* work_done is guaranteed to be less than budget. */
7391 napi_complete(napi);
7392 tg3_reset_task_schedule(tp);
7393 return work_done;
7394 }
7395
7396 static void tg3_napi_disable(struct tg3 *tp)
7397 {
7398 int txq_idx = tp->txq_cnt - 1;
7399 int rxq_idx = tp->rxq_cnt - 1;
7400 struct tg3_napi *tnapi;
7401 int i;
7402
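/* Walk the vectors in reverse of tg3_napi_enable() so the TX/RX
 * queue-to-NAPI associations are cleared before each NAPI instance
 * is disabled.
 */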
7403 for (i = tp->irq_cnt - 1; i >= 0; i--) {
7404 tnapi = &tp->napi[i];
7405 if (tnapi->tx_buffers) {
7406 netif_queue_set_napi(tp->dev, txq_idx,
7407 NETDEV_QUEUE_TYPE_TX, NULL);
7408 txq_idx--;
7409 }
7410 if (tnapi->rx_rcb) {
7411 netif_queue_set_napi(tp->dev, rxq_idx,
7412 NETDEV_QUEUE_TYPE_RX, NULL);
7413 rxq_idx--;
7414 }
7415 napi_disable(&tnapi->napi);
7416 }
7417 }
7418
7419 static void tg3_napi_enable(struct tg3 *tp)
7420 {
7421 int txq_idx = 0, rxq_idx = 0;
7422 struct tg3_napi *tnapi;
7423 int i;
7424
7425 for (i = 0; i < tp->irq_cnt; i++) {
7426 tnapi = &tp->napi[i];
7427 napi_enable(&tnapi->napi);
7428 if (tnapi->tx_buffers) {
7429 netif_queue_set_napi(tp->dev, txq_idx,
7430 NETDEV_QUEUE_TYPE_TX,
7431 &tnapi->napi);
7432 txq_idx++;
7433 }
7434 if (tnapi->rx_rcb) {
7435 netif_queue_set_napi(tp->dev, rxq_idx,
7436 NETDEV_QUEUE_TYPE_RX,
7437 &tnapi->napi);
7438 rxq_idx++;
7439 }
7440 }
7441 }
7442
7443 static void tg3_napi_init(struct tg3 *tp)
7444 {
7445 int i;
7446
7447 for (i = 0; i < tp->irq_cnt; i++) {
7448 netif_napi_add(tp->dev, &tp->napi[i].napi,
7449 i ? tg3_poll_msix : tg3_poll);
7450 netif_napi_set_irq(&tp->napi[i].napi, tp->napi[i].irq_vec);
7451 }
7452 }
7453
7454 static void tg3_napi_fini(struct tg3 *tp)
7455 {
7456 int i;
7457
7458 for (i = 0; i < tp->irq_cnt; i++)
7459 netif_napi_del(&tp->napi[i].napi);
7460 }
7461
7462 static inline void tg3_netif_stop(struct tg3 *tp)
7463 {
7464 netif_trans_update(tp->dev); /* prevent tx timeout */
7465 tg3_napi_disable(tp);
7466 netif_carrier_off(tp->dev);
7467 netif_tx_disable(tp->dev);
7468 }
7469
7470 /* tp->lock must be held */
7471 static inline void tg3_netif_start(struct tg3 *tp)
7472 {
7473 tg3_ptp_resume(tp);
7474
7475 /* NOTE: unconditional netif_tx_wake_all_queues is only
7476 * appropriate so long as all callers are assured to
7477 * have free tx slots (such as after tg3_init_hw)
7478 */
7479 netif_tx_wake_all_queues(tp->dev);
7480
7481 if (tp->link_up)
7482 netif_carrier_on(tp->dev);
7483
7484 tg3_napi_enable(tp);
7485 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7486 tg3_enable_ints(tp);
7487 }
7488
7489 static void tg3_irq_quiesce(struct tg3 *tp)
7490 __releases(tp->lock)
7491 __acquires(tp->lock)
7492 {
7493 int i;
7494
7495 BUG_ON(tp->irq_sync);
7496
7497 tp->irq_sync = 1;
7498 smp_mb();
7499
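/* synchronize_irq() can sleep, so tp->lock must be dropped around
 * the loop; irq_sync is already visible by now, which makes the
 * interrupt handlers bail out via tg3_irq_sync().
 */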
7500 spin_unlock_bh(&tp->lock);
7501
7502 for (i = 0; i < tp->irq_cnt; i++)
7503 synchronize_irq(tp->napi[i].irq_vec);
7504
7505 spin_lock_bh(&tp->lock);
7506 }
7507
7508 /* Fully shut down all tg3 driver activity elsewhere in the system.
7509 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7510 * Most of the time this is not necessary, except when shutting down
7511 * the device.
7512 */
7513 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7514 {
7515 spin_lock_bh(&tp->lock);
7516 if (irq_sync)
7517 tg3_irq_quiesce(tp);
7518 }
7519
7520 static inline void tg3_full_unlock(struct tg3 *tp)
7521 {
7522 spin_unlock_bh(&tp->lock);
7523 }
7524
7525 /* One-shot MSI handler - Chip automatically disables interrupt
7526 * after sending MSI so driver doesn't have to do it.
7527 */
7528 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7529 {
7530 struct tg3_napi *tnapi = dev_id;
7531 struct tg3 *tp = tnapi->tp;
7532
7533 prefetch(tnapi->hw_status);
7534 if (tnapi->rx_rcb)
7535 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7536
7537 if (likely(!tg3_irq_sync(tp)))
7538 napi_schedule(&tnapi->napi);
7539
7540 return IRQ_HANDLED;
7541 }
7542
7543 /* MSI ISR - No need to check for interrupt sharing and no need to
7544 * flush status block and interrupt mailbox. PCI ordering rules
7545 * guarantee that MSI will arrive after the status block.
7546 */
7547 static irqreturn_t tg3_msi(int irq, void *dev_id)
7548 {
7549 struct tg3_napi *tnapi = dev_id;
7550 struct tg3 *tp = tnapi->tp;
7551
7552 prefetch(tnapi->hw_status);
7553 if (tnapi->rx_rcb)
7554 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7555 /*
7556 * Writing any value to intr-mbox-0 clears PCI INTA# and
7557 * chip-internal interrupt pending events.
7558 * Writing non-zero to intr-mbox-0 additionally tells the
7559 * NIC to stop sending us irqs, engaging "in-intr-handler"
7560 * event coalescing.
7561 */
7562 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7563 if (likely(!tg3_irq_sync(tp)))
7564 napi_schedule(&tnapi->napi);
7565
7566 return IRQ_RETVAL(1);
7567 }
7568
7569 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7570 {
7571 struct tg3_napi *tnapi = dev_id;
7572 struct tg3 *tp = tnapi->tp;
7573 struct tg3_hw_status *sblk = tnapi->hw_status;
7574 unsigned int handled = 1;
7575
7576 /* In INTx mode, it is possible for the interrupt to arrive at
7577 * the CPU before the status block posted prior to the interrupt.
7578 * Reading the PCI State register will confirm whether the
7579 * interrupt is ours and will flush the status block.
7580 */
7581 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7582 if (tg3_flag(tp, CHIP_RESETTING) ||
7583 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7584 handled = 0;
7585 goto out;
7586 }
7587 }
7588
7589 /*
7590 * Writing any value to intr-mbox-0 clears PCI INTA# and
7591 * chip-internal interrupt pending events.
7592 * Writing non-zero to intr-mbox-0 additionally tells the
7593 * NIC to stop sending us irqs, engaging "in-intr-handler"
7594 * event coalescing.
7595 *
7596 * Flush the mailbox to de-assert the IRQ immediately to prevent
7597 * spurious interrupts. The flush impacts performance but
7598 * excessive spurious interrupts can be worse in some cases.
7599 */
7600 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7601 if (tg3_irq_sync(tp))
7602 goto out;
7603 sblk->status &= ~SD_STATUS_UPDATED;
7604 if (likely(tg3_has_work(tnapi))) {
7605 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7606 napi_schedule(&tnapi->napi);
7607 } else {
7608 		/* No work, shared interrupt perhaps? Re-enable
7609 * interrupts, and flush that PCI write
7610 */
7611 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7612 0x00000000);
7613 }
7614 out:
7615 return IRQ_RETVAL(handled);
7616 }
7617
7618 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7619 {
7620 struct tg3_napi *tnapi = dev_id;
7621 struct tg3 *tp = tnapi->tp;
7622 struct tg3_hw_status *sblk = tnapi->hw_status;
7623 unsigned int handled = 1;
7624
7625 /* In INTx mode, it is possible for the interrupt to arrive at
7626  * the CPU before the status block that was posted just prior to it.
7627 * Reading the PCI State register will confirm whether the
7628 * interrupt is ours and will flush the status block.
7629 */
7630 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7631 if (tg3_flag(tp, CHIP_RESETTING) ||
7632 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7633 handled = 0;
7634 goto out;
7635 }
7636 }
7637
7638 /*
7639  * Writing any value to intr-mbox-0 clears PCI INTA# and
7640  * chip-internal interrupt pending events.
7641  * Writing non-zero to intr-mbox-0 additionally tells the
7642 * NIC to stop sending us irqs, engaging "in-intr-handler"
7643 * event coalescing.
7644 *
7645 * Flush the mailbox to de-assert the IRQ immediately to prevent
7646 * spurious interrupts. The flush impacts performance but
7647 * excessive spurious interrupts can be worse in some cases.
7648 */
7649 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7650
7651 /*
7652 * In a shared interrupt configuration, sometimes other devices'
7653 * interrupts will scream. We record the current status tag here
7654 * so that the above check can report that the screaming interrupts
7655 * are unhandled. Eventually they will be silenced.
7656 */
7657 tnapi->last_irq_tag = sblk->status_tag;
7658
7659 if (tg3_irq_sync(tp))
7660 goto out;
7661
7662 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7663
7664 napi_schedule(&tnapi->napi);
7665
7666 out:
7667 return IRQ_RETVAL(handled);
7668 }
7669
7670 /* ISR for interrupt test */
7671 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7672 {
7673 struct tg3_napi *tnapi = dev_id;
7674 struct tg3 *tp = tnapi->tp;
7675 struct tg3_hw_status *sblk = tnapi->hw_status;
7676
7677 if ((sblk->status & SD_STATUS_UPDATED) ||
7678 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7679 tg3_disable_ints(tp);
7680 return IRQ_RETVAL(1);
7681 }
7682 return IRQ_RETVAL(0);
7683 }
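/* The test ISR only confirms delivery (status block updated or INTA#
 * still asserted) and masks further interrupts; the interrupt
 * self-test polls for these side effects instead of doing real work
 * here.
 */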
7684
7685 #ifdef CONFIG_NET_POLL_CONTROLLER
7686 static void tg3_poll_controller(struct net_device *dev)
7687 {
7688 int i;
7689 struct tg3 *tp = netdev_priv(dev);
7690
7691 if (tg3_irq_sync(tp))
7692 return;
7693
7694 for (i = 0; i < tp->irq_cnt; i++)
7695 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7696 }
7697 #endif
7698
7699 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7700 {
7701 struct tg3 *tp = netdev_priv(dev);
7702
7703 if (netif_msg_tx_err(tp)) {
7704 netdev_err(dev, "transmit timed out, resetting\n");
7705 tg3_dump_state(tp);
7706 }
7707
7708 tg3_reset_task_schedule(tp);
7709 }
7710
7711 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7712 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7713 {
7714 u32 base = (u32) mapping & 0xffffffff;
7715
7716 return base + len + 8 < base;
7717 }
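/* Worked example (illustrative numbers): mapping = 0xffffff00 and
 * len = 0x200 give base + len + 8 = 0x100000108, which truncates to
 * 0x108 as a u32; 0x108 < 0xffffff00, so the sum wrapped and the
 * buffer crosses a 4GB boundary.  The extra 8 bytes also catch
 * buffers ending within 8 bytes of a boundary, presumably to cover
 * DMA engine lookahead.
 */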
7718
7719 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7720 * of any 4GB boundaries: 4G, 8G, etc
7721 */
7722 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7723 u32 len, u32 mss)
7724 {
7725 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7726 u32 base = (u32) mapping & 0xffffffff;
7727
7728 return ((base + len + (mss & 0x3fff)) < base);
7729 }
7730 return 0;
7731 }
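/* Note (derived from the test above): on the 5762 a TSO mapping is
 * treated as if the DMA engine may read up to one (masked) MSS past
 * the buffer, so the mapping is rejected whenever base + len + mss
 * wraps across a 4GB boundary even if the buffer itself does not.
 */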
7732
7733 /* Test for DMA addresses > 40-bit */
7734 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7735 int len)
7736 {
7737 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7738 if (tg3_flag(tp, 40BIT_DMA_BUG))
7739 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7740 return 0;
7741 #else
7742 return 0;
7743 #endif
7744 }
7745
7746 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7747 dma_addr_t mapping, u32 len, u32 flags,
7748 u32 mss, u32 vlan)
7749 {
7750 txbd->addr_hi = ((u64) mapping >> 32);
7751 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7752 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7753 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7754 }
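/* Illustrative packing (hypothetical values): for a 34-bit DMA
 * address mapping = 0x123456780 with len = 1500, flags =
 * TXD_FLAG_END, mss = 0 and vlan = 0, the writes above produce
 *
 *	txbd->addr_hi   = 0x00000001
 *	txbd->addr_lo   = 0x23456780
 *	txbd->len_flags = (1500 << TXD_LEN_SHIFT) | TXD_FLAG_END
 *	txbd->vlan_tag  = 0
 */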
7755
7756 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7757 dma_addr_t map, u32 len, u32 flags,
7758 u32 mss, u32 vlan)
7759 {
7760 struct tg3 *tp = tnapi->tp;
7761 bool hwbug = false;
7762
7763 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7764 hwbug = true;
7765
7766 if (tg3_4g_overflow_test(map, len))
7767 hwbug = true;
7768
7769 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7770 hwbug = true;
7771
7772 if (tg3_40bit_overflow_test(tp, map, len))
7773 hwbug = true;
7774
7775 if (tp->dma_limit) {
7776 u32 prvidx = *entry;
7777 u32 tmp_flag = flags & ~TXD_FLAG_END;
7778 while (len > tp->dma_limit && *budget) {
7779 u32 frag_len = tp->dma_limit;
7780 len -= tp->dma_limit;
7781
7782 			/* Avoid the 8-byte DMA problem */
7783 if (len <= 8) {
7784 len += tp->dma_limit / 2;
7785 frag_len = tp->dma_limit / 2;
7786 }
7787
7788 tnapi->tx_buffers[*entry].fragmented = true;
7789
7790 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7791 frag_len, tmp_flag, mss, vlan);
7792 *budget -= 1;
7793 prvidx = *entry;
7794 *entry = NEXT_TX(*entry);
7795
7796 map += frag_len;
7797 }
7798
7799 if (len) {
7800 if (*budget) {
7801 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7802 len, flags, mss, vlan);
7803 *budget -= 1;
7804 *entry = NEXT_TX(*entry);
7805 } else {
7806 hwbug = true;
7807 tnapi->tx_buffers[prvidx].fragmented = false;
7808 }
7809 }
7810 } else {
7811 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7812 len, flags, mss, vlan);
7813 *entry = NEXT_TX(*entry);
7814 }
7815
7816 return hwbug;
7817 }
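/* Worked example of the dma_limit split above (illustrative numbers):
 * with dma_limit = 4096 and len = 4100, a naive split would leave a
 * 4-byte tail and trip the 8-byte DMA bug.  Instead the loop emits a
 * half-sized 2048-byte fragment, leaving 2052 bytes for the final
 * descriptor, comfortably above the 8-byte hazard.
 */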
7818
7819 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7820 {
7821 int i;
7822 struct sk_buff *skb;
7823 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7824
7825 skb = txb->skb;
7826 txb->skb = NULL;
7827
7828 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7829 skb_headlen(skb), DMA_TO_DEVICE);
7830
7831 while (txb->fragmented) {
7832 txb->fragmented = false;
7833 entry = NEXT_TX(entry);
7834 txb = &tnapi->tx_buffers[entry];
7835 }
7836
7837 for (i = 0; i <= last; i++) {
7838 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7839
7840 entry = NEXT_TX(entry);
7841 txb = &tnapi->tx_buffers[entry];
7842
7843 dma_unmap_page(&tnapi->tp->pdev->dev,
7844 dma_unmap_addr(txb, mapping),
7845 skb_frag_size(frag), DMA_TO_DEVICE);
7846
7847 while (txb->fragmented) {
7848 txb->fragmented = false;
7849 entry = NEXT_TX(entry);
7850 txb = &tnapi->tx_buffers[entry];
7851 }
7852 }
7853 }
7854
7855 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7856 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7857 struct sk_buff **pskb,
7858 u32 *entry, u32 *budget,
7859 u32 base_flags, u32 mss, u32 vlan)
7860 {
7861 struct tg3 *tp = tnapi->tp;
7862 struct sk_buff *new_skb, *skb = *pskb;
7863 dma_addr_t new_addr = 0;
7864 int ret = 0;
7865
7866 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7867 new_skb = skb_copy(skb, GFP_ATOMIC);
7868 else {
7869 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7870
7871 new_skb = skb_copy_expand(skb,
7872 skb_headroom(skb) + more_headroom,
7873 skb_tailroom(skb), GFP_ATOMIC);
7874 }
7875
7876 if (!new_skb) {
7877 ret = -1;
7878 } else {
7879 /* New SKB is guaranteed to be linear. */
7880 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7881 new_skb->len, DMA_TO_DEVICE);
7882 /* Make sure the mapping succeeded */
7883 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7884 dev_kfree_skb_any(new_skb);
7885 ret = -1;
7886 } else {
7887 u32 save_entry = *entry;
7888
7889 base_flags |= TXD_FLAG_END;
7890
7891 tnapi->tx_buffers[*entry].skb = new_skb;
7892 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7893 mapping, new_addr);
7894
7895 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7896 new_skb->len, base_flags,
7897 mss, vlan)) {
7898 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7899 dev_kfree_skb_any(new_skb);
7900 ret = -1;
7901 }
7902 }
7903 }
7904
7905 dev_consume_skb_any(skb);
7906 *pskb = new_skb;
7907 return ret;
7908 }
7909
7910 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7911 {
7912 	/* Check if we will never have enough descriptors: gso_segs can
7913 	 * exceed the current ring size, and each segment may need up to
7914 	 * three descriptors in the worst case (see tg3_tso_bug()). */
7915 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7916 }
7917
7918 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7919
7920 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7921 * indicated in tg3_tx_frag_set()
7922 */
7923 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7924 struct netdev_queue *txq, struct sk_buff *skb)
7925 {
7926 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7927 struct sk_buff *segs, *seg, *next;
7928
7929 /* Estimate the number of fragments in the worst case */
7930 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7931 netif_tx_stop_queue(txq);
7932
7933 /* netif_tx_stop_queue() must be done before checking
7934 		 * tx index in tg3_tx_avail() below, because in
7935 * tg3_tx(), we update tx index before checking for
7936 * netif_tx_queue_stopped().
7937 */
7938 smp_mb();
7939 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7940 return NETDEV_TX_BUSY;
7941
7942 netif_tx_wake_queue(txq);
7943 }
7944
7945 segs = skb_gso_segment(skb, tp->dev->features &
7946 ~(NETIF_F_TSO | NETIF_F_TSO6));
7947 if (IS_ERR(segs) || !segs) {
7948 tnapi->tx_dropped++;
7949 goto tg3_tso_bug_end;
7950 }
7951
7952 skb_list_walk_safe(segs, seg, next) {
7953 skb_mark_not_on_list(seg);
7954 __tg3_start_xmit(seg, tp->dev);
7955 }
7956
7957 tg3_tso_bug_end:
7958 dev_consume_skb_any(skb);
7959
7960 return NETDEV_TX_OK;
7961 }
7962
7963 /* hard_start_xmit for all devices */
7964 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7965 {
7966 struct tg3 *tp = netdev_priv(dev);
7967 u32 len, entry, base_flags, mss, vlan = 0;
7968 u32 budget;
7969 int i = -1, would_hit_hwbug;
7970 dma_addr_t mapping;
7971 struct tg3_napi *tnapi;
7972 struct netdev_queue *txq;
7973 unsigned int last;
7974 struct iphdr *iph = NULL;
7975 struct tcphdr *tcph = NULL;
7976 __sum16 tcp_csum = 0, ip_csum = 0;
7977 __be16 ip_tot_len = 0;
7978
7979 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7980 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7981 if (tg3_flag(tp, ENABLE_TSS))
7982 tnapi++;
7983
7984 budget = tg3_tx_avail(tnapi);
7985
7986 /* We are running in BH disabled context with netif_tx_lock
7987 * and TX reclaim runs via tp->napi.poll inside of a software
7988 * interrupt. Furthermore, IRQ processing runs lockless so we have
7989 * no IRQ context deadlocks to worry about either. Rejoice!
7990 */
7991 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7992 if (!netif_tx_queue_stopped(txq)) {
7993 netif_tx_stop_queue(txq);
7994
7995 /* This is a hard error, log it. */
7996 netdev_err(dev,
7997 "BUG! Tx Ring full when queue awake!\n");
7998 }
7999 return NETDEV_TX_BUSY;
8000 }
8001
8002 entry = tnapi->tx_prod;
8003 base_flags = 0;
8004
8005 mss = skb_shinfo(skb)->gso_size;
8006 if (mss) {
8007 u32 tcp_opt_len, hdr_len;
8008
8009 if (skb_cow_head(skb, 0))
8010 goto drop;
8011
8012 iph = ip_hdr(skb);
8013 tcp_opt_len = tcp_optlen(skb);
8014
8015 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
8016
8017 		/* HW/FW cannot correctly segment packets that have been
8018 * vlan encapsulated.
8019 */
8020 if (skb->protocol == htons(ETH_P_8021Q) ||
8021 skb->protocol == htons(ETH_P_8021AD)) {
8022 if (tg3_tso_bug_gso_check(tnapi, skb))
8023 return tg3_tso_bug(tp, tnapi, txq, skb);
8024 goto drop;
8025 }
8026
8027 if (!skb_is_gso_v6(skb)) {
8028 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8029 tg3_flag(tp, TSO_BUG)) {
8030 if (tg3_tso_bug_gso_check(tnapi, skb))
8031 return tg3_tso_bug(tp, tnapi, txq, skb);
8032 goto drop;
8033 }
8034 ip_csum = iph->check;
8035 ip_tot_len = iph->tot_len;
8036 iph->check = 0;
8037 iph->tot_len = htons(mss + hdr_len);
8038 }
8039
8040 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8041 TXD_FLAG_CPU_POST_DMA);
8042
8043 tcph = tcp_hdr(skb);
8044 tcp_csum = tcph->check;
8045
8046 if (tg3_flag(tp, HW_TSO_1) ||
8047 tg3_flag(tp, HW_TSO_2) ||
8048 tg3_flag(tp, HW_TSO_3)) {
8049 tcph->check = 0;
8050 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8051 } else {
8052 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8053 0, IPPROTO_TCP, 0);
8054 }
8055
8056 if (tg3_flag(tp, HW_TSO_3)) {
8057 mss |= (hdr_len & 0xc) << 12;
8058 if (hdr_len & 0x10)
8059 base_flags |= 0x00000010;
8060 base_flags |= (hdr_len & 0x3e0) << 5;
8061 } else if (tg3_flag(tp, HW_TSO_2))
8062 mss |= hdr_len << 9;
8063 else if (tg3_flag(tp, HW_TSO_1) ||
8064 tg3_asic_rev(tp) == ASIC_REV_5705) {
8065 if (tcp_opt_len || iph->ihl > 5) {
8066 int tsflags;
8067
8068 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8069 mss |= (tsflags << 11);
8070 }
8071 } else {
8072 if (tcp_opt_len || iph->ihl > 5) {
8073 int tsflags;
8074
8075 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8076 base_flags |= tsflags << 12;
8077 }
8078 }
8079 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8080 		/* HW/FW cannot correctly checksum packets that have been
8081 * vlan encapsulated.
8082 */
8083 if (skb->protocol == htons(ETH_P_8021Q) ||
8084 skb->protocol == htons(ETH_P_8021AD)) {
8085 if (skb_checksum_help(skb))
8086 goto drop;
8087 } else {
8088 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8089 }
8090 }
8091
8092 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8093 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8094 base_flags |= TXD_FLAG_JMB_PKT;
8095
8096 if (skb_vlan_tag_present(skb)) {
8097 base_flags |= TXD_FLAG_VLAN;
8098 vlan = skb_vlan_tag_get(skb);
8099 }
8100
8101 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8102 tg3_flag(tp, TX_TSTAMP_EN)) {
8103 tg3_full_lock(tp, 0);
8104 if (!tp->pre_tx_ts) {
8105 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8106 base_flags |= TXD_FLAG_HWTSTAMP;
8107 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8108 }
8109 tg3_full_unlock(tp);
8110 }
8111
8112 len = skb_headlen(skb);
8113
8114 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8115 DMA_TO_DEVICE);
8116 if (dma_mapping_error(&tp->pdev->dev, mapping))
8117 goto drop;
8118
8119
8120 tnapi->tx_buffers[entry].skb = skb;
8121 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8122
8123 would_hit_hwbug = 0;
8124
8125 if (tg3_flag(tp, 5701_DMA_BUG))
8126 would_hit_hwbug = 1;
8127
8128 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8129 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8130 mss, vlan)) {
8131 would_hit_hwbug = 1;
8132 } else if (skb_shinfo(skb)->nr_frags > 0) {
8133 u32 tmp_mss = mss;
8134
8135 if (!tg3_flag(tp, HW_TSO_1) &&
8136 !tg3_flag(tp, HW_TSO_2) &&
8137 !tg3_flag(tp, HW_TSO_3))
8138 tmp_mss = 0;
8139
8140 /* Now loop through additional data
8141 * fragments, and queue them.
8142 */
8143 last = skb_shinfo(skb)->nr_frags - 1;
8144 for (i = 0; i <= last; i++) {
8145 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8146
8147 len = skb_frag_size(frag);
8148 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8149 len, DMA_TO_DEVICE);
8150
8151 tnapi->tx_buffers[entry].skb = NULL;
8152 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8153 mapping);
8154 if (dma_mapping_error(&tp->pdev->dev, mapping))
8155 goto dma_error;
8156
8157 if (!budget ||
8158 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8159 len, base_flags |
8160 ((i == last) ? TXD_FLAG_END : 0),
8161 tmp_mss, vlan)) {
8162 would_hit_hwbug = 1;
8163 break;
8164 }
8165 }
8166 }
8167
8168 if (would_hit_hwbug) {
8169 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8170
8171 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8172 /* If it's a TSO packet, do GSO instead of
8173 * allocating and copying to a large linear SKB
8174 */
8175 if (ip_tot_len) {
8176 iph->check = ip_csum;
8177 iph->tot_len = ip_tot_len;
8178 }
8179 tcph->check = tcp_csum;
8180 return tg3_tso_bug(tp, tnapi, txq, skb);
8181 }
8182
8183 /* If the workaround fails due to memory/mapping
8184 * failure, silently drop this packet.
8185 */
8186 entry = tnapi->tx_prod;
8187 budget = tg3_tx_avail(tnapi);
8188 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8189 base_flags, mss, vlan))
8190 goto drop_nofree;
8191 }
8192
8193 skb_tx_timestamp(skb);
8194 netdev_tx_sent_queue(txq, skb->len);
8195
8196 /* Sync BD data before updating mailbox */
8197 wmb();
8198
8199 tnapi->tx_prod = entry;
8200 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8201 netif_tx_stop_queue(txq);
8202
8203 /* netif_tx_stop_queue() must be done before checking
8204 		 * tx index in tg3_tx_avail() below, because in
8205 * tg3_tx(), we update tx index before checking for
8206 * netif_tx_queue_stopped().
8207 */
8208 smp_mb();
8209 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8210 netif_tx_wake_queue(txq);
8211 }
8212
8213 return NETDEV_TX_OK;
8214
8215 dma_error:
8216 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8217 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8218 drop:
8219 dev_kfree_skb_any(skb);
8220 drop_nofree:
8221 tnapi->tx_dropped++;
8222 return NETDEV_TX_OK;
8223 }
8224
8225 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8226 {
8227 struct netdev_queue *txq;
8228 u16 skb_queue_mapping;
8229 netdev_tx_t ret;
8230
8231 skb_queue_mapping = skb_get_queue_mapping(skb);
8232 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8233
8234 ret = __tg3_start_xmit(skb, dev);
8235
8236 /* Notify the hardware that packets are ready by updating the TX ring
8237 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8238 * the hardware for every packet. To guarantee forward progress the TX
8239 * ring must be drained when it is full as indicated by
8240 * netif_xmit_stopped(). This needs to happen even when the current
8241 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8242 * queued by previous __tg3_start_xmit() calls might get stuck in
8243 * the queue forever.
8244 */
8245 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8246 struct tg3_napi *tnapi;
8247 struct tg3 *tp;
8248
8249 tp = netdev_priv(dev);
8250 tnapi = &tp->napi[skb_queue_mapping];
8251
8252 if (tg3_flag(tp, ENABLE_TSS))
8253 tnapi++;
8254
8255 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8256 }
8257
8258 return ret;
8259 }
8260
8261 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8262 {
8263 if (enable) {
8264 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8265 MAC_MODE_PORT_MODE_MASK);
8266
8267 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8268
8269 if (!tg3_flag(tp, 5705_PLUS))
8270 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8271
8272 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8273 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8274 else
8275 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8276 } else {
8277 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8278
8279 if (tg3_flag(tp, 5705_PLUS) ||
8280 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8281 tg3_asic_rev(tp) == ASIC_REV_5700)
8282 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8283 }
8284
8285 tw32(MAC_MODE, tp->mac_mode);
8286 udelay(40);
8287 }
8288
8289 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8290 {
8291 u32 val, bmcr, mac_mode, ptest = 0;
8292
8293 tg3_phy_toggle_apd(tp, false);
8294 tg3_phy_toggle_automdix(tp, false);
8295
8296 if (extlpbk && tg3_phy_set_extloopbk(tp))
8297 return -EIO;
8298
8299 bmcr = BMCR_FULLDPLX;
8300 switch (speed) {
8301 case SPEED_10:
8302 break;
8303 case SPEED_100:
8304 bmcr |= BMCR_SPEED100;
8305 break;
8306 case SPEED_1000:
8307 default:
8308 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8309 speed = SPEED_100;
8310 bmcr |= BMCR_SPEED100;
8311 } else {
8312 speed = SPEED_1000;
8313 bmcr |= BMCR_SPEED1000;
8314 }
8315 }
8316
8317 if (extlpbk) {
8318 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8319 tg3_readphy(tp, MII_CTRL1000, &val);
8320 val |= CTL1000_AS_MASTER |
8321 CTL1000_ENABLE_MASTER;
8322 tg3_writephy(tp, MII_CTRL1000, val);
8323 } else {
8324 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8325 MII_TG3_FET_PTEST_TRIM_2;
8326 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8327 }
8328 } else
8329 bmcr |= BMCR_LOOPBACK;
8330
8331 tg3_writephy(tp, MII_BMCR, bmcr);
8332
8333 /* The write needs to be flushed for the FETs */
8334 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8335 tg3_readphy(tp, MII_BMCR, &bmcr);
8336
8337 udelay(40);
8338
8339 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8340 tg3_asic_rev(tp) == ASIC_REV_5785) {
8341 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8342 MII_TG3_FET_PTEST_FRC_TX_LINK |
8343 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8344
8345 /* The write needs to be flushed for the AC131 */
8346 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8347 }
8348
8349 /* Reset to prevent losing 1st rx packet intermittently */
8350 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8351 tg3_flag(tp, 5780_CLASS)) {
8352 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8353 udelay(10);
8354 tw32_f(MAC_RX_MODE, tp->rx_mode);
8355 }
8356
8357 mac_mode = tp->mac_mode &
8358 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8359 if (speed == SPEED_1000)
8360 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8361 else
8362 mac_mode |= MAC_MODE_PORT_MODE_MII;
8363
8364 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8365 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8366
8367 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8368 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8369 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8370 mac_mode |= MAC_MODE_LINK_POLARITY;
8371
8372 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8373 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8374 }
8375
8376 tw32(MAC_MODE, mac_mode);
8377 udelay(40);
8378
8379 return 0;
8380 }
8381
8382 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8383 {
8384 struct tg3 *tp = netdev_priv(dev);
8385
8386 if (features & NETIF_F_LOOPBACK) {
8387 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8388 return;
8389
8390 spin_lock_bh(&tp->lock);
8391 tg3_mac_loopback(tp, true);
8392 netif_carrier_on(tp->dev);
8393 spin_unlock_bh(&tp->lock);
8394 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8395 } else {
8396 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8397 return;
8398
8399 spin_lock_bh(&tp->lock);
8400 tg3_mac_loopback(tp, false);
8401 /* Force link status check */
8402 tg3_setup_phy(tp, true);
8403 spin_unlock_bh(&tp->lock);
8404 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8405 }
8406 }
8407
8408 static netdev_features_t tg3_fix_features(struct net_device *dev,
8409 netdev_features_t features)
8410 {
8411 struct tg3 *tp = netdev_priv(dev);
8412
8413 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8414 features &= ~NETIF_F_ALL_TSO;
8415
8416 return features;
8417 }
8418
8419 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8420 {
8421 netdev_features_t changed = dev->features ^ features;
8422
8423 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8424 tg3_set_loopback(dev, features);
8425
8426 return 0;
8427 }
8428
8429 static void tg3_rx_prodring_free(struct tg3 *tp,
8430 struct tg3_rx_prodring_set *tpr)
8431 {
8432 int i;
8433
8434 if (tpr != &tp->napi[0].prodring) {
8435 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8436 i = (i + 1) & tp->rx_std_ring_mask)
8437 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8438 tp->rx_pkt_map_sz);
8439
8440 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8441 for (i = tpr->rx_jmb_cons_idx;
8442 i != tpr->rx_jmb_prod_idx;
8443 i = (i + 1) & tp->rx_jmb_ring_mask) {
8444 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8445 TG3_RX_JMB_MAP_SZ);
8446 }
8447 }
8448
8449 return;
8450 }
8451
8452 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8453 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8454 tp->rx_pkt_map_sz);
8455
8456 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8457 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8458 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8459 TG3_RX_JMB_MAP_SZ);
8460 }
8461 }
8462
8463 /* Initialize rx rings for packet processing.
8464 *
8465 * The chip has been shut down and the driver detached from
8466  * the networking stack, so no interrupts or new tx packets will
8467 * end up in the driver. tp->{tx,}lock are held and thus
8468 * we may not sleep.
8469 */
8470 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8471 struct tg3_rx_prodring_set *tpr)
8472 {
8473 u32 i, rx_pkt_dma_sz;
8474
8475 tpr->rx_std_cons_idx = 0;
8476 tpr->rx_std_prod_idx = 0;
8477 tpr->rx_jmb_cons_idx = 0;
8478 tpr->rx_jmb_prod_idx = 0;
8479
8480 if (tpr != &tp->napi[0].prodring) {
8481 memset(&tpr->rx_std_buffers[0], 0,
8482 TG3_RX_STD_BUFF_RING_SIZE(tp));
8483 if (tpr->rx_jmb_buffers)
8484 memset(&tpr->rx_jmb_buffers[0], 0,
8485 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8486 goto done;
8487 }
8488
8489 /* Zero out all descriptors. */
8490 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8491
8492 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8493 if (tg3_flag(tp, 5780_CLASS) &&
8494 tp->dev->mtu > ETH_DATA_LEN)
8495 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8496 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8497
8498 	/* Initialize invariants of the rings; we only set this
8499 * stuff once. This works because the card does not
8500 * write into the rx buffer posting rings.
8501 */
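	/* The opaque field encodes both the ring type and the slot
	 * index so the completion path can find the posted buffer
	 * again; e.g. slot 5 of the standard ring is stored as
	 * RXD_OPAQUE_RING_STD | (5 << RXD_OPAQUE_INDEX_SHIFT).
	 */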
8502 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8503 struct tg3_rx_buffer_desc *rxd;
8504
8505 rxd = &tpr->rx_std[i];
8506 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8507 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8508 rxd->opaque = (RXD_OPAQUE_RING_STD |
8509 (i << RXD_OPAQUE_INDEX_SHIFT));
8510 }
8511
8512 /* Now allocate fresh SKBs for each rx ring. */
8513 for (i = 0; i < tp->rx_pending; i++) {
8514 unsigned int frag_size;
8515
8516 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8517 &frag_size) < 0) {
8518 netdev_warn(tp->dev,
8519 "Using a smaller RX standard ring. Only "
8520 "%d out of %d buffers were allocated "
8521 "successfully\n", i, tp->rx_pending);
8522 if (i == 0)
8523 goto initfail;
8524 tp->rx_pending = i;
8525 break;
8526 }
8527 }
8528
8529 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8530 goto done;
8531
8532 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8533
8534 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8535 goto done;
8536
8537 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8538 struct tg3_rx_buffer_desc *rxd;
8539
8540 rxd = &tpr->rx_jmb[i].std;
8541 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8542 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8543 RXD_FLAG_JUMBO;
8544 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8545 (i << RXD_OPAQUE_INDEX_SHIFT));
8546 }
8547
8548 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8549 unsigned int frag_size;
8550
8551 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8552 &frag_size) < 0) {
8553 netdev_warn(tp->dev,
8554 "Using a smaller RX jumbo ring. Only %d "
8555 "out of %d buffers were allocated "
8556 "successfully\n", i, tp->rx_jumbo_pending);
8557 if (i == 0)
8558 goto initfail;
8559 tp->rx_jumbo_pending = i;
8560 break;
8561 }
8562 }
8563
8564 done:
8565 return 0;
8566
8567 initfail:
8568 tg3_rx_prodring_free(tp, tpr);
8569 return -ENOMEM;
8570 }
8571
8572 static void tg3_rx_prodring_fini(struct tg3 *tp,
8573 struct tg3_rx_prodring_set *tpr)
8574 {
8575 kfree(tpr->rx_std_buffers);
8576 tpr->rx_std_buffers = NULL;
8577 kfree(tpr->rx_jmb_buffers);
8578 tpr->rx_jmb_buffers = NULL;
8579 if (tpr->rx_std) {
8580 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8581 tpr->rx_std, tpr->rx_std_mapping);
8582 tpr->rx_std = NULL;
8583 }
8584 if (tpr->rx_jmb) {
8585 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8586 tpr->rx_jmb, tpr->rx_jmb_mapping);
8587 tpr->rx_jmb = NULL;
8588 }
8589 }
8590
8591 static int tg3_rx_prodring_init(struct tg3 *tp,
8592 struct tg3_rx_prodring_set *tpr)
8593 {
8594 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8595 GFP_KERNEL);
8596 if (!tpr->rx_std_buffers)
8597 return -ENOMEM;
8598
8599 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8600 TG3_RX_STD_RING_BYTES(tp),
8601 &tpr->rx_std_mapping,
8602 GFP_KERNEL);
8603 if (!tpr->rx_std)
8604 goto err_out;
8605
8606 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8607 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8608 GFP_KERNEL);
8609 if (!tpr->rx_jmb_buffers)
8610 goto err_out;
8611
8612 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8613 TG3_RX_JMB_RING_BYTES(tp),
8614 &tpr->rx_jmb_mapping,
8615 GFP_KERNEL);
8616 if (!tpr->rx_jmb)
8617 goto err_out;
8618 }
8619
8620 return 0;
8621
8622 err_out:
8623 tg3_rx_prodring_fini(tp, tpr);
8624 return -ENOMEM;
8625 }
8626
8627 /* Free up pending packets in all rx/tx rings.
8628 *
8629 * The chip has been shut down and the driver detached from
8630  * the networking stack, so no interrupts or new tx packets will
8631 * end up in the driver. tp->{tx,}lock is not held and we are not
8632 * in an interrupt context and thus may sleep.
8633 */
8634 static void tg3_free_rings(struct tg3 *tp)
8635 {
8636 int i, j;
8637
8638 for (j = 0; j < tp->irq_cnt; j++) {
8639 struct tg3_napi *tnapi = &tp->napi[j];
8640
8641 tg3_rx_prodring_free(tp, &tnapi->prodring);
8642
8643 if (!tnapi->tx_buffers)
8644 continue;
8645
8646 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8647 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8648
8649 if (!skb)
8650 continue;
8651
8652 tg3_tx_skb_unmap(tnapi, i,
8653 skb_shinfo(skb)->nr_frags - 1);
8654
8655 dev_consume_skb_any(skb);
8656 }
8657 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8658 }
8659 }
8660
8661 /* Initialize tx/rx rings for packet processing.
8662 *
8663 * The chip has been shut down and the driver detached from
8664 * the networking, so no interrupts or new tx packets will
8665 * end up in the driver. tp->{tx,}lock are held and thus
8666 * we may not sleep.
8667 */
8668 static int tg3_init_rings(struct tg3 *tp)
8669 {
8670 int i;
8671
8672 /* Free up all the SKBs. */
8673 tg3_free_rings(tp);
8674
8675 for (i = 0; i < tp->irq_cnt; i++) {
8676 struct tg3_napi *tnapi = &tp->napi[i];
8677
8678 tnapi->last_tag = 0;
8679 tnapi->last_irq_tag = 0;
8680 tnapi->hw_status->status = 0;
8681 tnapi->hw_status->status_tag = 0;
8682 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8683
8684 tnapi->tx_prod = 0;
8685 tnapi->tx_cons = 0;
8686 if (tnapi->tx_ring)
8687 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8688
8689 tnapi->rx_rcb_ptr = 0;
8690 if (tnapi->rx_rcb)
8691 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8692
8693 if (tnapi->prodring.rx_std &&
8694 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8695 tg3_free_rings(tp);
8696 return -ENOMEM;
8697 }
8698 }
8699
8700 return 0;
8701 }
8702
8703 static void tg3_mem_tx_release(struct tg3 *tp)
8704 {
8705 int i;
8706
8707 for (i = 0; i < tp->irq_max; i++) {
8708 struct tg3_napi *tnapi = &tp->napi[i];
8709
8710 if (tnapi->tx_ring) {
8711 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8712 tnapi->tx_ring, tnapi->tx_desc_mapping);
8713 tnapi->tx_ring = NULL;
8714 }
8715
8716 kfree(tnapi->tx_buffers);
8717 tnapi->tx_buffers = NULL;
8718 }
8719 }
8720
8721 static int tg3_mem_tx_acquire(struct tg3 *tp)
8722 {
8723 int i;
8724 struct tg3_napi *tnapi = &tp->napi[0];
8725
8726 /* If multivector TSS is enabled, vector 0 does not handle
8727 * tx interrupts. Don't allocate any resources for it.
8728 */
8729 if (tg3_flag(tp, ENABLE_TSS))
8730 tnapi++;
8731
8732 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8733 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8734 sizeof(struct tg3_tx_ring_info),
8735 GFP_KERNEL);
8736 if (!tnapi->tx_buffers)
8737 goto err_out;
8738
8739 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8740 TG3_TX_RING_BYTES,
8741 &tnapi->tx_desc_mapping,
8742 GFP_KERNEL);
8743 if (!tnapi->tx_ring)
8744 goto err_out;
8745 }
8746
8747 return 0;
8748
8749 err_out:
8750 tg3_mem_tx_release(tp);
8751 return -ENOMEM;
8752 }
8753
8754 static void tg3_mem_rx_release(struct tg3 *tp)
8755 {
8756 int i;
8757
8758 for (i = 0; i < tp->irq_max; i++) {
8759 struct tg3_napi *tnapi = &tp->napi[i];
8760
8761 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8762
8763 if (!tnapi->rx_rcb)
8764 continue;
8765
8766 dma_free_coherent(&tp->pdev->dev,
8767 TG3_RX_RCB_RING_BYTES(tp),
8768 tnapi->rx_rcb,
8769 tnapi->rx_rcb_mapping);
8770 tnapi->rx_rcb = NULL;
8771 }
8772 }
8773
8774 static int tg3_mem_rx_acquire(struct tg3 *tp)
8775 {
8776 unsigned int i, limit;
8777
8778 limit = tp->rxq_cnt;
8779
8780 /* If RSS is enabled, we need a (dummy) producer ring
8781 * set on vector zero. This is the true hw prodring.
8782 */
8783 if (tg3_flag(tp, ENABLE_RSS))
8784 limit++;
8785
8786 for (i = 0; i < limit; i++) {
8787 struct tg3_napi *tnapi = &tp->napi[i];
8788
8789 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8790 goto err_out;
8791
8792 /* If multivector RSS is enabled, vector 0
8793 * does not handle rx or tx interrupts.
8794 * Don't allocate any resources for it.
8795 */
8796 if (!i && tg3_flag(tp, ENABLE_RSS))
8797 continue;
8798
8799 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8800 TG3_RX_RCB_RING_BYTES(tp),
8801 &tnapi->rx_rcb_mapping,
8802 GFP_KERNEL);
8803 if (!tnapi->rx_rcb)
8804 goto err_out;
8805 }
8806
8807 return 0;
8808
8809 err_out:
8810 tg3_mem_rx_release(tp);
8811 return -ENOMEM;
8812 }
8813
8814 /*
8815 * Must not be invoked with interrupt sources disabled and
8816  * the hardware shut down.
8817 */
8818 static void tg3_free_consistent(struct tg3 *tp)
8819 {
8820 int i;
8821
8822 for (i = 0; i < tp->irq_cnt; i++) {
8823 struct tg3_napi *tnapi = &tp->napi[i];
8824
8825 if (tnapi->hw_status) {
8826 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8827 tnapi->hw_status,
8828 tnapi->status_mapping);
8829 tnapi->hw_status = NULL;
8830 }
8831 }
8832
8833 tg3_mem_rx_release(tp);
8834 tg3_mem_tx_release(tp);
8835
8836 /* tp->hw_stats can be referenced safely:
8837 * 1. under rtnl_lock
8838 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8839 */
8840 if (tp->hw_stats) {
8841 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8842 tp->hw_stats, tp->stats_mapping);
8843 tp->hw_stats = NULL;
8844 }
8845 }
8846
8847 /*
8848 * Must not be invoked with interrupt sources disabled and
8849  * the hardware shut down. Can sleep.
8850 */
8851 static int tg3_alloc_consistent(struct tg3 *tp)
8852 {
8853 int i;
8854
8855 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8856 sizeof(struct tg3_hw_stats),
8857 &tp->stats_mapping, GFP_KERNEL);
8858 if (!tp->hw_stats)
8859 goto err_out;
8860
8861 for (i = 0; i < tp->irq_cnt; i++) {
8862 struct tg3_napi *tnapi = &tp->napi[i];
8863 struct tg3_hw_status *sblk;
8864
8865 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8866 TG3_HW_STATUS_SIZE,
8867 &tnapi->status_mapping,
8868 GFP_KERNEL);
8869 if (!tnapi->hw_status)
8870 goto err_out;
8871
8872 sblk = tnapi->hw_status;
8873
8874 if (tg3_flag(tp, ENABLE_RSS)) {
8875 u16 *prodptr = NULL;
8876
8877 /*
8878 * When RSS is enabled, the status block format changes
8879 * slightly. The "rx_jumbo_consumer", "reserved",
8880 * and "rx_mini_consumer" members get mapped to the
8881 * other three rx return ring producer indexes.
8882 */
8883 switch (i) {
8884 case 1:
8885 prodptr = &sblk->idx[0].rx_producer;
8886 break;
8887 case 2:
8888 prodptr = &sblk->rx_jumbo_consumer;
8889 break;
8890 case 3:
8891 prodptr = &sblk->reserved;
8892 break;
8893 case 4:
8894 prodptr = &sblk->rx_mini_consumer;
8895 break;
8896 }
8897 tnapi->rx_rcb_prod_idx = prodptr;
8898 } else {
8899 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8900 }
8901 }
8902
8903 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8904 goto err_out;
8905
8906 return 0;
8907
8908 err_out:
8909 tg3_free_consistent(tp);
8910 return -ENOMEM;
8911 }
8912
8913 #define MAX_WAIT_CNT 1000
8914
8915 /* To stop a block, clear the enable bit and poll till it clears,
8916  * waiting up to MAX_WAIT_CNT * 100us (~100ms). tp->lock is held.
8917  */
8918 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8919 {
8920 unsigned int i;
8921 u32 val;
8922
8923 if (tg3_flag(tp, 5705_PLUS)) {
8924 switch (ofs) {
8925 case RCVLSC_MODE:
8926 case DMAC_MODE:
8927 case MBFREE_MODE:
8928 case BUFMGR_MODE:
8929 case MEMARB_MODE:
8930 /* We can't enable/disable these bits of the
8931 * 5705/5750, just say success.
8932 */
8933 return 0;
8934
8935 default:
8936 break;
8937 }
8938 }
8939
8940 val = tr32(ofs);
8941 val &= ~enable_bit;
8942 tw32_f(ofs, val);
8943
8944 for (i = 0; i < MAX_WAIT_CNT; i++) {
8945 if (pci_channel_offline(tp->pdev)) {
8946 dev_err(&tp->pdev->dev,
8947 "tg3_stop_block device offline, "
8948 "ofs=%lx enable_bit=%x\n",
8949 ofs, enable_bit);
8950 return -ENODEV;
8951 }
8952
8953 udelay(100);
8954 val = tr32(ofs);
8955 if ((val & enable_bit) == 0)
8956 break;
8957 }
8958
8959 if (i == MAX_WAIT_CNT && !silent) {
8960 dev_err(&tp->pdev->dev,
8961 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8962 ofs, enable_bit);
8963 return -ENODEV;
8964 }
8965
8966 return 0;
8967 }
8968
8969 /* tp->lock is held. */
8970 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8971 {
8972 int i, err;
8973
8974 tg3_disable_ints(tp);
8975
8976 if (pci_channel_offline(tp->pdev)) {
8977 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8978 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8979 err = -ENODEV;
8980 goto err_no_dev;
8981 }
8982
8983 tp->rx_mode &= ~RX_MODE_ENABLE;
8984 tw32_f(MAC_RX_MODE, tp->rx_mode);
8985 udelay(10);
8986
8987 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8988 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8989 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8990 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8991 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8992 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8993
8994 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8995 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8996 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8997 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8998 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8999 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
9000 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
9001
9002 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
9003 tw32_f(MAC_MODE, tp->mac_mode);
9004 udelay(40);
9005
9006 tp->tx_mode &= ~TX_MODE_ENABLE;
9007 tw32_f(MAC_TX_MODE, tp->tx_mode);
9008
9009 for (i = 0; i < MAX_WAIT_CNT; i++) {
9010 udelay(100);
9011 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
9012 break;
9013 }
9014 if (i >= MAX_WAIT_CNT) {
9015 dev_err(&tp->pdev->dev,
9016 "%s timed out, TX_MODE_ENABLE will not clear "
9017 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9018 err |= -ENODEV;
9019 }
9020
9021 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9022 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9023 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9024
9025 tw32(FTQ_RESET, 0xffffffff);
9026 tw32(FTQ_RESET, 0x00000000);
9027
9028 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9029 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9030
9031 err_no_dev:
9032 for (i = 0; i < tp->irq_cnt; i++) {
9033 struct tg3_napi *tnapi = &tp->napi[i];
9034 if (tnapi->hw_status)
9035 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9036 }
9037
9038 return err;
9039 }
9040
9041 /* Save PCI command register before chip reset */
9042 static void tg3_save_pci_state(struct tg3 *tp)
9043 {
9044 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9045 }
9046
9047 /* Restore PCI state after chip reset */
9048 static void tg3_restore_pci_state(struct tg3 *tp)
9049 {
9050 u32 val;
9051
9052 /* Re-enable indirect register accesses. */
9053 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9054 tp->misc_host_ctrl);
9055
9056 /* Set MAX PCI retry to zero. */
9057 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9058 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9059 tg3_flag(tp, PCIX_MODE))
9060 val |= PCISTATE_RETRY_SAME_DMA;
9061 /* Allow reads and writes to the APE register and memory space. */
9062 if (tg3_flag(tp, ENABLE_APE))
9063 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9064 PCISTATE_ALLOW_APE_SHMEM_WR |
9065 PCISTATE_ALLOW_APE_PSPACE_WR;
9066 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9067
9068 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9069
9070 if (!tg3_flag(tp, PCI_EXPRESS)) {
9071 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9072 tp->pci_cacheline_sz);
9073 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9074 tp->pci_lat_timer);
9075 }
9076
9077 /* Make sure PCI-X relaxed ordering bit is clear. */
9078 if (tg3_flag(tp, PCIX_MODE)) {
9079 u16 pcix_cmd;
9080
9081 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9082 &pcix_cmd);
9083 pcix_cmd &= ~PCI_X_CMD_ERO;
9084 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9085 pcix_cmd);
9086 }
9087
9088 if (tg3_flag(tp, 5780_CLASS)) {
9089
9090 /* Chip reset on 5780 will reset MSI enable bit,
9091 * so need to restore it.
9092 */
9093 if (tg3_flag(tp, USING_MSI)) {
9094 u16 ctrl;
9095
9096 pci_read_config_word(tp->pdev,
9097 tp->msi_cap + PCI_MSI_FLAGS,
9098 &ctrl);
9099 pci_write_config_word(tp->pdev,
9100 tp->msi_cap + PCI_MSI_FLAGS,
9101 ctrl | PCI_MSI_FLAGS_ENABLE);
9102 val = tr32(MSGINT_MODE);
9103 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9104 }
9105 }
9106 }
9107
9108 static void tg3_override_clk(struct tg3 *tp)
9109 {
9110 u32 val;
9111
9112 switch (tg3_asic_rev(tp)) {
9113 case ASIC_REV_5717:
9114 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9115 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9116 TG3_CPMU_MAC_ORIDE_ENABLE);
9117 break;
9118
9119 case ASIC_REV_5719:
9120 case ASIC_REV_5720:
9121 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9122 break;
9123
9124 default:
9125 return;
9126 }
9127 }
9128
9129 static void tg3_restore_clk(struct tg3 *tp)
9130 {
9131 u32 val;
9132
9133 switch (tg3_asic_rev(tp)) {
9134 case ASIC_REV_5717:
9135 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9136 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9137 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9138 break;
9139
9140 case ASIC_REV_5719:
9141 case ASIC_REV_5720:
9142 val = tr32(TG3_CPMU_CLCK_ORIDE);
9143 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9144 break;
9145
9146 default:
9147 return;
9148 }
9149 }
9150
9151 /* tp->lock is held. */
9152 static int tg3_chip_reset(struct tg3 *tp)
9153 __releases(tp->lock)
9154 __acquires(tp->lock)
9155 {
9156 u32 val;
9157 void (*write_op)(struct tg3 *, u32, u32);
9158 int i, err;
9159
9160 if (!pci_device_is_present(tp->pdev))
9161 return -ENODEV;
9162
9163 tg3_nvram_lock(tp);
9164
9165 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9166
9167 /* No matching tg3_nvram_unlock() after this because
9168 * chip reset below will undo the nvram lock.
9169 */
9170 tp->nvram_lock_cnt = 0;
9171
9172 /* GRC_MISC_CFG core clock reset will clear the memory
9173 * enable bit in PCI register 4 and the MSI enable bit
9174 * on some chips, so we save relevant registers here.
9175 */
9176 tg3_save_pci_state(tp);
9177
9178 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9179 tg3_flag(tp, 5755_PLUS))
9180 tw32(GRC_FASTBOOT_PC, 0);
9181
9182 /*
9183 * We must avoid the readl() that normally takes place.
9184 * It locks machines, causes machine checks, and other
9185 * fun things. So, temporarily disable the 5701
9186 * hardware workaround, while we do the reset.
9187 */
9188 write_op = tp->write32;
9189 if (write_op == tg3_write_flush_reg32)
9190 tp->write32 = tg3_write32;
9191
9192 /* Prevent the irq handler from reading or writing PCI registers
9193 * during chip reset when the memory enable bit in the PCI command
9194  * register may be cleared. The chip does not generate interrupts
9195 * at this time, but the irq handler may still be called due to irq
9196 * sharing or irqpoll.
9197 */
9198 tg3_flag_set(tp, CHIP_RESETTING);
9199 for (i = 0; i < tp->irq_cnt; i++) {
9200 struct tg3_napi *tnapi = &tp->napi[i];
9201 if (tnapi->hw_status) {
9202 tnapi->hw_status->status = 0;
9203 tnapi->hw_status->status_tag = 0;
9204 }
9205 tnapi->last_tag = 0;
9206 tnapi->last_irq_tag = 0;
9207 }
9208 smp_mb();
9209
9210 tg3_full_unlock(tp);
9211
9212 for (i = 0; i < tp->irq_cnt; i++)
9213 synchronize_irq(tp->napi[i].irq_vec);
9214
9215 tg3_full_lock(tp, 0);
9216
9217 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9218 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9219 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9220 }
9221
9222 /* do the reset */
9223 val = GRC_MISC_CFG_CORECLK_RESET;
9224
9225 if (tg3_flag(tp, PCI_EXPRESS)) {
9226 /* Force PCIe 1.0a mode */
9227 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9228 !tg3_flag(tp, 57765_PLUS) &&
9229 tr32(TG3_PCIE_PHY_TSTCTL) ==
9230 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9231 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9232
9233 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9234 tw32(GRC_MISC_CFG, (1 << 29));
9235 val |= (1 << 29);
9236 }
9237 }
9238
9239 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9240 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9241 tw32(GRC_VCPU_EXT_CTRL,
9242 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9243 }
9244
9245 /* Set the clock to the highest frequency to avoid timeouts. With link
9246 * aware mode, the clock speed could be slow and bootcode does not
9247 * complete within the expected time. Override the clock to allow the
9248 * bootcode to finish sooner and then restore it.
9249 */
9250 tg3_override_clk(tp);
9251
9252 /* Manage gphy power for all CPMU absent PCIe devices. */
9253 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9254 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9255
9256 tw32(GRC_MISC_CFG, val);
9257
9258 /* restore 5701 hardware bug workaround write method */
9259 tp->write32 = write_op;
9260
9261 /* Unfortunately, we have to delay before the PCI read back.
9262 	 * Some 575X chips will not even respond to a PCI cfg access
9263 * when the reset command is given to the chip.
9264 *
9265 * How do these hardware designers expect things to work
9266 * properly if the PCI write is posted for a long period
9267 * of time? It is always necessary to have some method by
9268 * which a register read back can occur to push the write
9269 * out which does the reset.
9270 *
9271 * For most tg3 variants the trick below was working.
9272 * Ho hum...
9273 */
9274 udelay(120);
9275
9276 /* Flush PCI posted writes. The normal MMIO registers
9277 * are inaccessible at this time so this is the only
9278 	 * way to do this reliably (actually, this is no longer
9279 * the case, see above). I tried to use indirect
9280 * register read/write but this upset some 5701 variants.
9281 */
9282 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9283
9284 udelay(120);
9285
9286 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9287 u16 val16;
9288
9289 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9290 int j;
9291 u32 cfg_val;
9292
9293 /* Wait for link training to complete. */
9294 for (j = 0; j < 5000; j++)
9295 udelay(100);
9296
9297 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9298 pci_write_config_dword(tp->pdev, 0xc4,
9299 cfg_val | (1 << 15));
9300 }
9301
9302 /* Clear the "no snoop" and "relaxed ordering" bits. */
9303 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9304 /*
9305 * Older PCIe devices only support the 128 byte
9306 * MPS setting. Enforce the restriction.
9307 */
9308 if (!tg3_flag(tp, CPMU_PRESENT))
9309 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9310 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9311
9312 /* Clear error status */
9313 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9314 PCI_EXP_DEVSTA_CED |
9315 PCI_EXP_DEVSTA_NFED |
9316 PCI_EXP_DEVSTA_FED |
9317 PCI_EXP_DEVSTA_URD);
9318 }
9319
9320 tg3_restore_pci_state(tp);
9321
9322 tg3_flag_clear(tp, CHIP_RESETTING);
9323 tg3_flag_clear(tp, ERROR_PROCESSED);
9324
9325 val = 0;
9326 if (tg3_flag(tp, 5780_CLASS))
9327 val = tr32(MEMARB_MODE);
9328 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9329
9330 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9331 tg3_stop_fw(tp);
9332 tw32(0x5000, 0x400);
9333 }
9334
9335 if (tg3_flag(tp, IS_SSB_CORE)) {
9336 /*
9337 * BCM4785: In order to avoid repercussions from using
9338 * potentially defective internal ROM, stop the Rx RISC CPU,
9339 		 * which is not required anyway.
9340 */
9341 tg3_stop_fw(tp);
9342 tg3_halt_cpu(tp, RX_CPU_BASE);
9343 }
9344
9345 err = tg3_poll_fw(tp);
9346 if (err)
9347 return err;
9348
9349 tw32(GRC_MODE, tp->grc_mode);
9350
9351 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9352 val = tr32(0xc4);
9353
9354 tw32(0xc4, val | (1 << 15));
9355 }
9356
9357 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9358 tg3_asic_rev(tp) == ASIC_REV_5705) {
9359 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9360 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9361 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9362 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9363 }
9364
9365 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9366 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9367 val = tp->mac_mode;
9368 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9369 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9370 val = tp->mac_mode;
9371 } else
9372 val = 0;
9373
9374 tw32_f(MAC_MODE, val);
9375 udelay(40);
9376
9377 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9378
9379 tg3_mdio_start(tp);
9380
9381 if (tg3_flag(tp, PCI_EXPRESS) &&
9382 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9383 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9384 !tg3_flag(tp, 57765_PLUS)) {
9385 val = tr32(0x7c00);
9386
9387 tw32(0x7c00, val | (1 << 25));
9388 }
9389
9390 tg3_restore_clk(tp);
9391
9392 /* Increase the core clock speed to fix tx timeout issue for 5762
9393 * with 100Mbps link speed.
9394 */
9395 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9396 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9397 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9398 TG3_CPMU_MAC_ORIDE_ENABLE);
9399 }
9400
9401 /* Reprobe ASF enable state. */
9402 tg3_flag_clear(tp, ENABLE_ASF);
9403 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9404 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9405
9406 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9407 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9408 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9409 u32 nic_cfg;
9410
9411 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9412 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9413 tg3_flag_set(tp, ENABLE_ASF);
9414 tp->last_event_jiffies = jiffies;
9415 if (tg3_flag(tp, 5750_PLUS))
9416 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9417
9418 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9419 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9420 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9421 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9422 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9423 }
9424 }
9425
9426 return 0;
9427 }
9428
9429 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9430 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9431 static void __tg3_set_rx_mode(struct net_device *);
9432
9433 /* tp->lock is held. */
9434 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9435 {
9436 int err, i;
9437
9438 tg3_stop_fw(tp);
9439
9440 tg3_write_sig_pre_reset(tp, kind);
9441
9442 tg3_abort_hw(tp, silent);
9443 err = tg3_chip_reset(tp);
9444
9445 __tg3_set_mac_addr(tp, false);
9446
9447 tg3_write_sig_legacy(tp, kind);
9448 tg3_write_sig_post_reset(tp, kind);
9449
9450 if (tp->hw_stats) {
9451 /* Save the stats across chip resets... */
9452 tg3_get_nstats(tp, &tp->net_stats_prev);
9453 tg3_get_estats(tp, &tp->estats_prev);
9454
9455 /* And make sure the next sample is new data */
9456 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9457
9458 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9459 struct tg3_napi *tnapi = &tp->napi[i];
9460
9461 tnapi->rx_dropped = 0;
9462 tnapi->tx_dropped = 0;
9463 }
9464 }
9465
9466 return err;
9467 }
9468
9469 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9470 {
9471 struct tg3 *tp = netdev_priv(dev);
9472 struct sockaddr *addr = p;
9473 int err = 0;
9474 bool skip_mac_1 = false;
9475
9476 if (!is_valid_ether_addr(addr->sa_data))
9477 return -EADDRNOTAVAIL;
9478
9479 eth_hw_addr_set(dev, addr->sa_data);
9480
9481 if (!netif_running(dev))
9482 return 0;
9483
9484 if (tg3_flag(tp, ENABLE_ASF)) {
9485 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9486
9487 addr0_high = tr32(MAC_ADDR_0_HIGH);
9488 addr0_low = tr32(MAC_ADDR_0_LOW);
9489 addr1_high = tr32(MAC_ADDR_1_HIGH);
9490 addr1_low = tr32(MAC_ADDR_1_LOW);
9491
9492 /* Skip MAC addr 1 if ASF is using it. */
9493 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9494 !(addr1_high == 0 && addr1_low == 0))
9495 skip_mac_1 = true;
9496 }
9497 spin_lock_bh(&tp->lock);
9498 __tg3_set_mac_addr(tp, skip_mac_1);
9499 __tg3_set_rx_mode(dev);
9500 spin_unlock_bh(&tp->lock);
9501
9502 return err;
9503 }
9504
9505 /* tp->lock is held. */
9506 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9507 dma_addr_t mapping, u32 maxlen_flags,
9508 u32 nic_addr)
9509 {
9510 tg3_write_mem(tp,
9511 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9512 ((u64) mapping >> 32));
9513 tg3_write_mem(tp,
9514 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9515 ((u64) mapping & 0xffffffff));
9516 tg3_write_mem(tp,
9517 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9518 maxlen_flags);
9519
9520 if (!tg3_flag(tp, 5705_PLUS))
9521 tg3_write_mem(tp,
9522 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9523 nic_addr);
9524 }
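/* Layout recap (as written above): each ring control block in NIC
 * SRAM holds the 64-bit host ring address split across two 32-bit
 * words, a combined maxlen/flags word and, on pre-5705 chips only,
 * a NIC-local ring address.
 */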
9525
9526
9527 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9528 {
9529 int i = 0;
9530
9531 if (!tg3_flag(tp, ENABLE_TSS)) {
9532 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9533 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9534 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9535 } else {
9536 tw32(HOSTCC_TXCOL_TICKS, 0);
9537 tw32(HOSTCC_TXMAX_FRAMES, 0);
9538 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9539
9540 for (; i < tp->txq_cnt; i++) {
9541 u32 reg;
9542
9543 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9544 tw32(reg, ec->tx_coalesce_usecs);
9545 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9546 tw32(reg, ec->tx_max_coalesced_frames);
9547 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9548 tw32(reg, ec->tx_max_coalesced_frames_irq);
9549 }
9550 }
9551
9552 for (; i < tp->irq_max - 1; i++) {
9553 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9554 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9555 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9556 }
9557 }
9558
9559 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9560 {
9561 int i = 0;
9562 u32 limit = tp->rxq_cnt;
9563
9564 if (!tg3_flag(tp, ENABLE_RSS)) {
9565 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9566 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9567 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9568 limit--;
9569 } else {
9570 tw32(HOSTCC_RXCOL_TICKS, 0);
9571 tw32(HOSTCC_RXMAX_FRAMES, 0);
9572 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9573 }
9574
9575 for (; i < limit; i++) {
9576 u32 reg;
9577
9578 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9579 tw32(reg, ec->rx_coalesce_usecs);
9580 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9581 tw32(reg, ec->rx_max_coalesced_frames);
9582 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9583 tw32(reg, ec->rx_max_coalesced_frames_irq);
9584 }
9585
9586 for (; i < tp->irq_max - 1; i++) {
9587 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9588 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9589 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9590 }
9591 }
9592
9593 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9594 {
9595 tg3_coal_tx_init(tp, ec);
9596 tg3_coal_rx_init(tp, ec);
9597
9598 if (!tg3_flag(tp, 5705_PLUS)) {
9599 u32 val = ec->stats_block_coalesce_usecs;
9600
9601 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9602 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9603
9604 if (!tp->link_up)
9605 val = 0;
9606
9607 tw32(HOSTCC_STAT_COAL_TICKS, val);
9608 }
9609 }
9610
9611 /* tp->lock is held. */
9612 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9613 {
9614 u32 txrcb, limit;
9615
9616 /* Disable all transmit rings but the first. */
9617 if (!tg3_flag(tp, 5705_PLUS))
9618 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9619 else if (tg3_flag(tp, 5717_PLUS))
9620 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9621 else if (tg3_flag(tp, 57765_CLASS) ||
9622 tg3_asic_rev(tp) == ASIC_REV_5762)
9623 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9624 else
9625 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9626
9627 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9628 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9629 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9630 BDINFO_FLAGS_DISABLED);
9631 }
9632
9633 /* tp->lock is held. */
9634 static void tg3_tx_rcbs_init(struct tg3 *tp)
9635 {
9636 int i = 0;
9637 u32 txrcb = NIC_SRAM_SEND_RCB;
9638
9639 if (tg3_flag(tp, ENABLE_TSS))
9640 i++;
9641
9642 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9643 struct tg3_napi *tnapi = &tp->napi[i];
9644
9645 if (!tnapi->tx_ring)
9646 continue;
9647
9648 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9649 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9650 NIC_SRAM_TX_BUFFER_DESC);
9651 }
9652 }
9653
9654 /* tp->lock is held. */
9655 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9656 {
9657 u32 rxrcb, limit;
9658
9659 /* Disable all receive return rings but the first. */
9660 if (tg3_flag(tp, 5717_PLUS))
9661 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9662 else if (!tg3_flag(tp, 5705_PLUS))
9663 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9664 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9665 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9666 tg3_flag(tp, 57765_CLASS))
9667 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9668 else
9669 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9670
9671 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9672 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9673 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9674 BDINFO_FLAGS_DISABLED);
9675 }
9676
9677 /* tp->lock is held. */
9678 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9679 {
9680 int i = 0;
9681 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9682
9683 if (tg3_flag(tp, ENABLE_RSS))
9684 i++;
9685
9686 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9687 struct tg3_napi *tnapi = &tp->napi[i];
9688
9689 if (!tnapi->rx_rcb)
9690 continue;
9691
9692 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9693 (tp->rx_ret_ring_mask + 1) <<
9694 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9695 }
9696 }
9697
9698 /* tp->lock is held. */
9699 static void tg3_rings_reset(struct tg3 *tp)
9700 {
9701 int i;
9702 u32 stblk;
9703 struct tg3_napi *tnapi = &tp->napi[0];
9704
9705 tg3_tx_rcbs_disable(tp);
9706
9707 tg3_rx_ret_rcbs_disable(tp);
9708
9709 /* Disable interrupts */
9710 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9711 tp->napi[0].chk_msi_cnt = 0;
9712 tp->napi[0].last_rx_cons = 0;
9713 tp->napi[0].last_tx_cons = 0;
9714
9715 /* Zero mailbox registers. */
9716 if (tg3_flag(tp, SUPPORT_MSIX)) {
9717 for (i = 1; i < tp->irq_max; i++) {
9718 tp->napi[i].tx_prod = 0;
9719 tp->napi[i].tx_cons = 0;
9720 if (tg3_flag(tp, ENABLE_TSS))
9721 tw32_mailbox(tp->napi[i].prodmbox, 0);
9722 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9723 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9724 tp->napi[i].chk_msi_cnt = 0;
9725 tp->napi[i].last_rx_cons = 0;
9726 tp->napi[i].last_tx_cons = 0;
9727 }
9728 if (!tg3_flag(tp, ENABLE_TSS))
9729 tw32_mailbox(tp->napi[0].prodmbox, 0);
9730 } else {
9731 tp->napi[0].tx_prod = 0;
9732 tp->napi[0].tx_cons = 0;
9733 tw32_mailbox(tp->napi[0].prodmbox, 0);
9734 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9735 }
9736
9737 /* Make sure the NIC-based send BD rings are disabled. */
9738 if (!tg3_flag(tp, 5705_PLUS)) {
9739 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9740 for (i = 0; i < 16; i++)
9741 tw32_tx_mbox(mbox + i * 8, 0);
9742 }
9743
9744 /* Clear status block in ram. */
9745 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9746
9747 /* Set status block DMA address */
9748 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9749 ((u64) tnapi->status_mapping >> 32));
9750 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9751 ((u64) tnapi->status_mapping & 0xffffffff));
9752
9753 stblk = HOSTCC_STATBLCK_RING1;
9754
9755 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9756 u64 mapping = (u64)tnapi->status_mapping;
9757 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9758 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9759 stblk += 8;
9760
9761 /* Clear status block in ram. */
9762 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9763 }
9764
9765 tg3_tx_rcbs_init(tp);
9766 tg3_rx_ret_rcbs_init(tp);
9767 }
9768
9769 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9770 {
9771 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9772
9773 if (!tg3_flag(tp, 5750_PLUS) ||
9774 tg3_flag(tp, 5780_CLASS) ||
9775 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9776 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9777 tg3_flag(tp, 57765_PLUS))
9778 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9779 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9780 tg3_asic_rev(tp) == ASIC_REV_5787)
9781 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9782 else
9783 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9784
9785 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9786 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9787
9788 val = min(nic_rep_thresh, host_rep_thresh);
9789 tw32(RCVBDI_STD_THRESH, val);
9790
9791 if (tg3_flag(tp, 57765_PLUS))
9792 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9793
9794 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9795 return;
9796
9797 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9798
9799 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9800
9801 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9802 tw32(RCVBDI_JUMBO_THRESH, val);
9803
9804 if (tg3_flag(tp, 57765_PLUS))
9805 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9806 }
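
/* Worked example with hypothetical numbers (the real BD cache sizes are
 * defined in tg3.h): with a 64-entry BD cache and rx_pending = 200, the
 * NIC-side replenish threshold is min(64 / 2, rx_std_max_post) and the
 * host-side threshold is max(200 / 8, 1) = 25; the smaller of the two
 * is programmed into RCVBDI_STD_THRESH, so replenishment is triggered
 * by whichever side runs low first.
 */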
9807
9808 static inline u32 calc_crc(unsigned char *buf, int len)
9809 {
9810 u32 reg;
9811 u32 tmp;
9812 int j, k;
9813
9814 reg = 0xffffffff;
9815
9816 for (j = 0; j < len; j++) {
9817 reg ^= buf[j];
9818
9819 for (k = 0; k < 8; k++) {
9820 tmp = reg & 0x01;
9821
9822 reg >>= 1;
9823
9824 if (tmp)
9825 reg ^= CRC32_POLY_LE;
9826 }
9827 }
9828
9829 return ~reg;
9830 }
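
/* Equivalence sketch (an assumption about the generic kernel CRC
 * helper, which the historical open-coded loop above predates):
 * calc_crc() computes a standard little-endian CRC-32 with a final
 * inversion, so it should match
 *
 *	#include <linux/crc32.h>
 *
 *	static inline u32 calc_crc_alt(const unsigned char *buf, int len)
 *	{
 *		return ~crc32_le(~0, buf, len);
 *	}
 */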
9831
9832 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9833 {
9834 /* accept or reject all multicast frames */
9835 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9836 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9837 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9838 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9839 }
9840
9841 static void __tg3_set_rx_mode(struct net_device *dev)
9842 {
9843 struct tg3 *tp = netdev_priv(dev);
9844 u32 rx_mode;
9845
9846 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9847 RX_MODE_KEEP_VLAN_TAG);
9848
9849 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9850 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9851 * flag clear.
9852 */
9853 if (!tg3_flag(tp, ENABLE_ASF))
9854 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9855 #endif
9856
9857 if (dev->flags & IFF_PROMISC) {
9858 /* Promiscuous mode. */
9859 rx_mode |= RX_MODE_PROMISC;
9860 } else if (dev->flags & IFF_ALLMULTI) {
9861 /* Accept all multicast. */
9862 tg3_set_multi(tp, 1);
9863 } else if (netdev_mc_empty(dev)) {
9864 /* Reject all multicast. */
9865 tg3_set_multi(tp, 0);
9866 } else {
9867 /* Accept one or more multicast(s). */
9868 struct netdev_hw_addr *ha;
9869 u32 mc_filter[4] = { 0, };
9870 u32 regidx;
9871 u32 bit;
9872 u32 crc;
9873
9874 netdev_for_each_mc_addr(ha, dev) {
9875 crc = calc_crc(ha->addr, ETH_ALEN);
9876 bit = ~crc & 0x7f;
9877 regidx = (bit & 0x60) >> 5;
9878 bit &= 0x1f;
9879 mc_filter[regidx] |= (1 << bit);
9880 }
9881
9882 tw32(MAC_HASH_REG_0, mc_filter[0]);
9883 tw32(MAC_HASH_REG_1, mc_filter[1]);
9884 tw32(MAC_HASH_REG_2, mc_filter[2]);
9885 tw32(MAC_HASH_REG_3, mc_filter[3]);
9886 }
9887
9888 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9889 rx_mode |= RX_MODE_PROMISC;
9890 } else if (!(dev->flags & IFF_PROMISC)) {
9891 /* Add all entries to the MAC addr filter list */
9892 int i = 0;
9893 struct netdev_hw_addr *ha;
9894
9895 netdev_for_each_uc_addr(ha, dev) {
9896 __tg3_set_one_mac_addr(tp, ha->addr,
9897 i + TG3_UCAST_ADDR_IDX(tp));
9898 i++;
9899 }
9900 }
9901
9902 if (rx_mode != tp->rx_mode) {
9903 tp->rx_mode = rx_mode;
9904 tw32_f(MAC_RX_MODE, rx_mode);
9905 udelay(10);
9906 }
9907 }
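
/* Worked example of the multicast hash mapping above: for a CRC of
 * 0xdeadbeef, ~crc & 0x7f = 0x10, so the hash bit is 16; regidx =
 * (0x10 & 0x60) >> 5 = 0 and bit &= 0x1f leaves 16, so the address
 * sets bit 16 of MAC_HASH_REG_0.  The 128 hash bits are spread across
 * the four 32-bit MAC_HASH_REG_* registers, bits 6:5 of the hash
 * selecting the register and bits 4:0 the bit within it.
 */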
9908
9909 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9910 {
9911 int i;
9912
9913 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9914 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9915 }
9916
9917 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9918 {
9919 int i;
9920
9921 if (!tg3_flag(tp, SUPPORT_MSIX))
9922 return;
9923
9924 if (tp->rxq_cnt == 1) {
9925 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9926 return;
9927 }
9928
9929 /* Validate table against current IRQ count */
9930 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9931 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9932 break;
9933 }
9934
9935 if (i != TG3_RSS_INDIR_TBL_SIZE)
9936 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9937 }
9938
9939 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9940 {
9941 int i = 0;
9942 u32 reg = MAC_RSS_INDIR_TBL_0;
9943
9944 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9945 u32 val = tp->rss_ind_tbl[i];
9946 i++;
9947 for (; i % 8; i++) {
9948 val <<= 4;
9949 val |= tp->rss_ind_tbl[i];
9950 }
9951 tw32(reg, val);
9952 reg += 4;
9953 }
9954 }
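
/* Worked example of the packing above (assuming the 128-entry
 * TG3_RSS_INDIR_TBL_SIZE from tg3.h): eight 4-bit ring indices are
 * packed per 32-bit register, first entry in the most significant
 * nibble, so a repeating pattern of {1, 2, 3, 0, 1, 2, 3, 0} is
 * written as
 *
 *	tw32(MAC_RSS_INDIR_TBL_0, 0x12301230);
 *
 * and 128 entries fill 16 consecutive registers, 4 bytes apart.
 */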
9955
9956 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9957 {
9958 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9959 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9960 else
9961 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9962 }
9963
9964 /* tp->lock is held. */
9965 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9966 {
9967 u32 val, rdmac_mode;
9968 int i, err, limit;
9969 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9970
9971 tg3_disable_ints(tp);
9972
9973 tg3_stop_fw(tp);
9974
9975 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9976
9977 if (tg3_flag(tp, INIT_COMPLETE))
9978 tg3_abort_hw(tp, 1);
9979
9980 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9981 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9982 tg3_phy_pull_config(tp);
9983 tg3_eee_pull_config(tp, NULL);
9984 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9985 }
9986
9987 /* Enable MAC control of LPI */
9988 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9989 tg3_setup_eee(tp);
9990
9991 if (reset_phy)
9992 tg3_phy_reset(tp);
9993
9994 err = tg3_chip_reset(tp);
9995 if (err)
9996 return err;
9997
9998 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9999
10000 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
10001 val = tr32(TG3_CPMU_CTRL);
10002 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
10003 tw32(TG3_CPMU_CTRL, val);
10004
10005 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10006 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10007 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10008 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10009
10010 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
10011 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
10012 val |= CPMU_LNK_AWARE_MACCLK_6_25;
10013 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
10014
10015 val = tr32(TG3_CPMU_HST_ACC);
10016 val &= ~CPMU_HST_ACC_MACCLK_MASK;
10017 val |= CPMU_HST_ACC_MACCLK_6_25;
10018 tw32(TG3_CPMU_HST_ACC, val);
10019 }
10020
10021 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10022 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10023 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10024 PCIE_PWR_MGMT_L1_THRESH_4MS;
10025 tw32(PCIE_PWR_MGMT_THRESH, val);
10026
10027 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10028 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10029
10030 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10031
10032 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10033 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10034 }
10035
10036 if (tg3_flag(tp, L1PLLPD_EN)) {
10037 u32 grc_mode = tr32(GRC_MODE);
10038
10039 /* Access the lower 1K of PL PCIE block registers. */
10040 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10041 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10042
10043 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10044 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10045 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10046
10047 tw32(GRC_MODE, grc_mode);
10048 }
10049
10050 if (tg3_flag(tp, 57765_CLASS)) {
10051 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10052 u32 grc_mode = tr32(GRC_MODE);
10053
10054 /* Access the lower 1K of PL PCIE block registers. */
10055 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10056 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10057
10058 val = tr32(TG3_PCIE_TLDLPL_PORT +
10059 TG3_PCIE_PL_LO_PHYCTL5);
10060 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10061 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10062
10063 tw32(GRC_MODE, grc_mode);
10064 }
10065
10066 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10067 u32 grc_mode;
10068
10069 /* Fix transmit hangs */
10070 val = tr32(TG3_CPMU_PADRNG_CTL);
10071 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10072 tw32(TG3_CPMU_PADRNG_CTL, val);
10073
10074 grc_mode = tr32(GRC_MODE);
10075
10076 /* Access the lower 1K of DL PCIE block registers. */
10077 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10078 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10079
10080 val = tr32(TG3_PCIE_TLDLPL_PORT +
10081 TG3_PCIE_DL_LO_FTSMAX);
10082 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10083 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10084 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10085
10086 tw32(GRC_MODE, grc_mode);
10087 }
10088
10089 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10090 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10091 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10092 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10093 }
10094
10095 /* This works around an issue with Athlon chipsets on
10096 * B3 tigon3 silicon. This bit has no effect on any
10097 * other revision. But do not set this on PCI Express
10098 * chips and don't even touch the clocks if the CPMU is present.
10099 */
10100 if (!tg3_flag(tp, CPMU_PRESENT)) {
10101 if (!tg3_flag(tp, PCI_EXPRESS))
10102 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10103 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10104 }
10105
10106 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10107 tg3_flag(tp, PCIX_MODE)) {
10108 val = tr32(TG3PCI_PCISTATE);
10109 val |= PCISTATE_RETRY_SAME_DMA;
10110 tw32(TG3PCI_PCISTATE, val);
10111 }
10112
10113 if (tg3_flag(tp, ENABLE_APE)) {
10114 /* Allow reads and writes to the
10115 * APE register and memory space.
10116 */
10117 val = tr32(TG3PCI_PCISTATE);
10118 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10119 PCISTATE_ALLOW_APE_SHMEM_WR |
10120 PCISTATE_ALLOW_APE_PSPACE_WR;
10121 tw32(TG3PCI_PCISTATE, val);
10122 }
10123
10124 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10125 /* Enable some hw fixes. */
10126 val = tr32(TG3PCI_MSI_DATA);
10127 val |= (1 << 26) | (1 << 28) | (1 << 29);
10128 tw32(TG3PCI_MSI_DATA, val);
10129 }
10130
10131 /* Descriptor ring init may make accesses to the
10132 * NIC SRAM area to setup the TX descriptors, so we
10133 * can only do this after the hardware has been
10134 * successfully reset.
10135 */
10136 err = tg3_init_rings(tp);
10137 if (err)
10138 return err;
10139
10140 if (tg3_flag(tp, 57765_PLUS)) {
10141 val = tr32(TG3PCI_DMA_RW_CTRL) &
10142 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10143 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10144 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10145 if (!tg3_flag(tp, 57765_CLASS) &&
10146 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10147 tg3_asic_rev(tp) != ASIC_REV_5762)
10148 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10149 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10150 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10151 tg3_asic_rev(tp) != ASIC_REV_5761) {
10152 /* This value is determined during the probe-time DMA
10153 * engine test, tg3_test_dma.
10154 */
10155 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10156 }
10157
10158 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10159 GRC_MODE_4X_NIC_SEND_RINGS |
10160 GRC_MODE_NO_TX_PHDR_CSUM |
10161 GRC_MODE_NO_RX_PHDR_CSUM);
10162 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10163
10164 /* Pseudo-header checksum is done by hardware logic and not
10165 * the offload processors, so make the chip do the pseudo-
10166 * header checksums on receive. For transmit it is more
10167 * convenient to do the pseudo-header checksum in software
10168 * as Linux does that on transmit for us in all cases.
10169 */
10170 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
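
/* (Illustrative, not driver code: on transmit the stack seeds the
 * TCP/UDP checksum field with the pseudo-header sum itself, along the
 * lines of ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, 0), so
 * the hardware only has to fold in the payload bytes.)
 */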
10171
10172 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10173 if (tp->rxptpctl)
10174 tw32(TG3_RX_PTP_CTL,
10175 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10176
10177 if (tg3_flag(tp, PTP_CAPABLE))
10178 val |= GRC_MODE_TIME_SYNC_ENABLE;
10179
10180 tw32(GRC_MODE, tp->grc_mode | val);
10181
10182 /* On one of the AMD platforms, MRRS is restricted to 4000 because of
10183 * a south bridge limitation. As a workaround, the driver sets MRRS
10184 * to 2048 instead of the default 4096.
10185 */
10186 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10187 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10188 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10189 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10190 }
10191
10192 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10193 val = tr32(GRC_MISC_CFG);
10194 val &= ~0xff;
10195 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10196 tw32(GRC_MISC_CFG, val);
10197
10198 /* Initialize MBUF/DESC pool. */
10199 if (tg3_flag(tp, 5750_PLUS)) {
10200 /* Do nothing. */
10201 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10202 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10203 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10204 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10205 else
10206 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10207 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10208 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10209 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10210 int fw_len;
10211
10212 fw_len = tp->fw_len;
10213 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10214 tw32(BUFMGR_MB_POOL_ADDR,
10215 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10216 tw32(BUFMGR_MB_POOL_SIZE,
10217 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10218 }
10219
10220 if (tp->dev->mtu <= ETH_DATA_LEN) {
10221 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10222 tp->bufmgr_config.mbuf_read_dma_low_water);
10223 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10224 tp->bufmgr_config.mbuf_mac_rx_low_water);
10225 tw32(BUFMGR_MB_HIGH_WATER,
10226 tp->bufmgr_config.mbuf_high_water);
10227 } else {
10228 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10229 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10230 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10231 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10232 tw32(BUFMGR_MB_HIGH_WATER,
10233 tp->bufmgr_config.mbuf_high_water_jumbo);
10234 }
10235 tw32(BUFMGR_DMA_LOW_WATER,
10236 tp->bufmgr_config.dma_low_water);
10237 tw32(BUFMGR_DMA_HIGH_WATER,
10238 tp->bufmgr_config.dma_high_water);
10239
10240 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10241 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10242 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10243 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10244 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10245 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10246 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10247 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10248 tw32(BUFMGR_MODE, val);
10249 for (i = 0; i < 2000; i++) {
10250 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10251 break;
10252 udelay(10);
10253 }
10254 if (i >= 2000) {
10255 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10256 return -ENODEV;
10257 }
10258
10259 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10260 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10261
10262 tg3_setup_rxbd_thresholds(tp);
10263
10264 /* Initialize TG3_BDINFO's at:
10265 * RCVDBDI_STD_BD: standard eth size rx ring
10266 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10267 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10268 *
10269 * like so:
10270 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10271 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10272 * ring attribute flags
10273 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10274 *
10275 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10276 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10277 *
10278 * The size of each ring is fixed in the firmware, but the location is
10279 * configurable.
10280 */
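/* Example of the MAXLEN_FLAGS encoding described above: a disabled ring
 * is written as just BDINFO_FLAGS_DISABLED, while an active standard
 * ring with (hypothetically) 1536-byte buffers carries
 * (1536 << BDINFO_FLAGS_MAXLEN_SHIFT) in the upper half of the word
 * and the ring attribute flags in the lower half.
 */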
10281 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10282 ((u64) tpr->rx_std_mapping >> 32));
10283 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10284 ((u64) tpr->rx_std_mapping & 0xffffffff));
10285 if (!tg3_flag(tp, 5717_PLUS))
10286 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10287 NIC_SRAM_RX_BUFFER_DESC);
10288
10289 /* Disable the mini ring */
10290 if (!tg3_flag(tp, 5705_PLUS))
10291 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10292 BDINFO_FLAGS_DISABLED);
10293
10294 /* Program the jumbo buffer descriptor ring control
10295 * blocks on those devices that have them.
10296 */
10297 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10298 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10299
10300 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10301 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10302 ((u64) tpr->rx_jmb_mapping >> 32));
10303 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10304 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10305 val = TG3_RX_JMB_RING_SIZE(tp) <<
10306 BDINFO_FLAGS_MAXLEN_SHIFT;
10307 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10308 val | BDINFO_FLAGS_USE_EXT_RECV);
10309 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10310 tg3_flag(tp, 57765_CLASS) ||
10311 tg3_asic_rev(tp) == ASIC_REV_5762)
10312 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10313 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10314 } else {
10315 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10316 BDINFO_FLAGS_DISABLED);
10317 }
10318
10319 if (tg3_flag(tp, 57765_PLUS)) {
10320 val = TG3_RX_STD_RING_SIZE(tp);
10321 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10322 val |= (TG3_RX_STD_DMA_SZ << 2);
10323 } else
10324 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10325 } else
10326 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10327
10328 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10329
10330 tpr->rx_std_prod_idx = tp->rx_pending;
10331 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10332
10333 tpr->rx_jmb_prod_idx =
10334 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10335 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10336
10337 tg3_rings_reset(tp);
10338
10339 /* Initialize MAC address and backoff seed. */
10340 __tg3_set_mac_addr(tp, false);
10341
10342 /* MTU + ethernet header + FCS + optional VLAN tag */
10343 tw32(MAC_RX_MTU_SIZE,
10344 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10345
10346 /* The slot time is changed by tg3_setup_phy if we
10347 * run at gigabit with half duplex.
10348 */
10349 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10350 (6 << TX_LENGTHS_IPG_SHIFT) |
10351 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10352
10353 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10354 tg3_asic_rev(tp) == ASIC_REV_5762)
10355 val |= tr32(MAC_TX_LENGTHS) &
10356 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10357 TX_LENGTHS_CNT_DWN_VAL_MSK);
10358
10359 tw32(MAC_TX_LENGTHS, val);
10360
10361 /* Receive rules. */
10362 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10363 tw32(RCVLPC_CONFIG, 0x0181);
10364
10365 /* Calculate RDMAC_MODE setting early, we need it to determine
10366 * the RCVLPC_STATE_ENABLE mask.
10367 */
10368 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10369 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10370 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10371 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10372 RDMAC_MODE_LNGREAD_ENAB);
10373
10374 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10375 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10376
10377 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10378 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10379 tg3_asic_rev(tp) == ASIC_REV_57780)
10380 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10381 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10382 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10383
10384 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10385 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10386 if (tg3_flag(tp, TSO_CAPABLE)) {
10387 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10388 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10389 !tg3_flag(tp, IS_5788)) {
10390 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10391 }
10392 }
10393
10394 if (tg3_flag(tp, PCI_EXPRESS))
10395 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10396
10397 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10398 tp->dma_limit = 0;
10399 if (tp->dev->mtu <= ETH_DATA_LEN) {
10400 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10401 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10402 }
10403 }
10404
10405 if (tg3_flag(tp, HW_TSO_1) ||
10406 tg3_flag(tp, HW_TSO_2) ||
10407 tg3_flag(tp, HW_TSO_3))
10408 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10409
10410 if (tg3_flag(tp, 57765_PLUS) ||
10411 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10412 tg3_asic_rev(tp) == ASIC_REV_57780)
10413 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10414
10415 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10416 tg3_asic_rev(tp) == ASIC_REV_5762)
10417 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10418
10419 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10420 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10421 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10422 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10423 tg3_flag(tp, 57765_PLUS)) {
10424 u32 tgtreg;
10425
10426 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10427 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10428 else
10429 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10430
10431 val = tr32(tgtreg);
10432 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10433 tg3_asic_rev(tp) == ASIC_REV_5762) {
10434 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10435 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10436 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10437 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10438 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10439 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10440 }
10441 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10442 }
10443
10444 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10445 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10446 tg3_asic_rev(tp) == ASIC_REV_5762) {
10447 u32 tgtreg;
10448
10449 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10450 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10451 else
10452 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10453
10454 val = tr32(tgtreg);
10455 tw32(tgtreg, val |
10456 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10457 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10458 }
10459
10460 /* Receive/send statistics. */
10461 if (tg3_flag(tp, 5750_PLUS)) {
10462 val = tr32(RCVLPC_STATS_ENABLE);
10463 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10464 tw32(RCVLPC_STATS_ENABLE, val);
10465 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10466 tg3_flag(tp, TSO_CAPABLE)) {
10467 val = tr32(RCVLPC_STATS_ENABLE);
10468 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10469 tw32(RCVLPC_STATS_ENABLE, val);
10470 } else {
10471 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10472 }
10473 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10474 tw32(SNDDATAI_STATSENAB, 0xffffff);
10475 tw32(SNDDATAI_STATSCTRL,
10476 (SNDDATAI_SCTRL_ENABLE |
10477 SNDDATAI_SCTRL_FASTUPD));
10478
10479 /* Setup host coalescing engine. */
10480 tw32(HOSTCC_MODE, 0);
10481 for (i = 0; i < 2000; i++) {
10482 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10483 break;
10484 udelay(10);
10485 }
10486
10487 __tg3_set_coalesce(tp, &tp->coal);
10488
10489 if (!tg3_flag(tp, 5705_PLUS)) {
10490 /* Status/statistics block address. See tg3_timer,
10491 * the tg3_periodic_fetch_stats call there, and
10492 * tg3_get_stats to see how this works for 5705/5750 chips.
10493 */
10494 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10495 ((u64) tp->stats_mapping >> 32));
10496 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10497 ((u64) tp->stats_mapping & 0xffffffff));
10498 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10499
10500 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10501
10502 /* Clear statistics and status block memory areas */
10503 for (i = NIC_SRAM_STATS_BLK;
10504 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10505 i += sizeof(u32)) {
10506 tg3_write_mem(tp, i, 0);
10507 udelay(40);
10508 }
10509 }
10510
10511 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10512
10513 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10514 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10515 if (!tg3_flag(tp, 5705_PLUS))
10516 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10517
10518 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10519 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10520 /* reset to prevent losing 1st rx packet intermittently */
10521 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10522 udelay(10);
10523 }
10524
10525 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10526 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10527 MAC_MODE_FHDE_ENABLE;
10528 if (tg3_flag(tp, ENABLE_APE))
10529 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10530 if (!tg3_flag(tp, 5705_PLUS) &&
10531 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10532 tg3_asic_rev(tp) != ASIC_REV_5700)
10533 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10534 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10535 udelay(40);
10536
10537 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10538 * If TG3_FLAG_IS_NIC is zero, we should read the
10539 * register to preserve the GPIO settings for LOMs. The GPIOs,
10540 * whether used as inputs or outputs, are set by boot code after
10541 * reset.
10542 */
10543 if (!tg3_flag(tp, IS_NIC)) {
10544 u32 gpio_mask;
10545
10546 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10547 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10548 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10549
10550 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10551 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10552 GRC_LCLCTRL_GPIO_OUTPUT3;
10553
10554 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10555 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10556
10557 tp->grc_local_ctrl &= ~gpio_mask;
10558 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10559
10560 /* GPIO1 must be driven high for eeprom write protect */
10561 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10562 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10563 GRC_LCLCTRL_GPIO_OUTPUT1);
10564 }
10565 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10566 udelay(100);
10567
10568 if (tg3_flag(tp, USING_MSIX)) {
10569 val = tr32(MSGINT_MODE);
10570 val |= MSGINT_MODE_ENABLE;
10571 if (tp->irq_cnt > 1)
10572 val |= MSGINT_MODE_MULTIVEC_EN;
10573 if (!tg3_flag(tp, 1SHOT_MSI))
10574 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10575 tw32(MSGINT_MODE, val);
10576 }
10577
10578 if (!tg3_flag(tp, 5705_PLUS)) {
10579 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10580 udelay(40);
10581 }
10582
10583 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10584 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10585 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10586 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10587 WDMAC_MODE_LNGREAD_ENAB);
10588
10589 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10590 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10591 if (tg3_flag(tp, TSO_CAPABLE) &&
10592 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10593 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10594 /* nothing */
10595 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10596 !tg3_flag(tp, IS_5788)) {
10597 val |= WDMAC_MODE_RX_ACCEL;
10598 }
10599 }
10600
10601 /* Enable host coalescing bug fix */
10602 if (tg3_flag(tp, 5755_PLUS))
10603 val |= WDMAC_MODE_STATUS_TAG_FIX;
10604
10605 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10606 val |= WDMAC_MODE_BURST_ALL_DATA;
10607
10608 tw32_f(WDMAC_MODE, val);
10609 udelay(40);
10610
10611 if (tg3_flag(tp, PCIX_MODE)) {
10612 u16 pcix_cmd;
10613
10614 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10615 &pcix_cmd);
10616 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10617 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10618 pcix_cmd |= PCI_X_CMD_READ_2K;
10619 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10620 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10621 pcix_cmd |= PCI_X_CMD_READ_2K;
10622 }
10623 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10624 pcix_cmd);
10625 }
10626
10627 tw32_f(RDMAC_MODE, rdmac_mode);
10628 udelay(40);
10629
10630 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10631 tg3_asic_rev(tp) == ASIC_REV_5720) {
10632 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10633 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10634 break;
10635 }
10636 if (i < TG3_NUM_RDMA_CHANNELS) {
10637 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10638 val |= tg3_lso_rd_dma_workaround_bit(tp);
10639 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10640 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10641 }
10642 }
10643
10644 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10645 if (!tg3_flag(tp, 5705_PLUS))
10646 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10647
10648 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10649 tw32(SNDDATAC_MODE,
10650 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10651 else
10652 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10653
10654 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10655 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10656 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10657 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10658 val |= RCVDBDI_MODE_LRG_RING_SZ;
10659 tw32(RCVDBDI_MODE, val);
10660 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10661 if (tg3_flag(tp, HW_TSO_1) ||
10662 tg3_flag(tp, HW_TSO_2) ||
10663 tg3_flag(tp, HW_TSO_3))
10664 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10665 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10666 if (tg3_flag(tp, ENABLE_TSS))
10667 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10668 tw32(SNDBDI_MODE, val);
10669 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10670
10671 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10672 err = tg3_load_5701_a0_firmware_fix(tp);
10673 if (err)
10674 return err;
10675 }
10676
10677 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10678 /* Ignore any errors for the firmware download. If download
10679 * fails, the device will operate with EEE disabled.
10680 */
10681 tg3_load_57766_firmware(tp);
10682 }
10683
10684 if (tg3_flag(tp, TSO_CAPABLE)) {
10685 err = tg3_load_tso_firmware(tp);
10686 if (err)
10687 return err;
10688 }
10689
10690 tp->tx_mode = TX_MODE_ENABLE;
10691
10692 if (tg3_flag(tp, 5755_PLUS) ||
10693 tg3_asic_rev(tp) == ASIC_REV_5906)
10694 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10695
10696 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10697 tg3_asic_rev(tp) == ASIC_REV_5762) {
10698 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10699 tp->tx_mode &= ~val;
10700 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10701 }
10702
10703 tw32_f(MAC_TX_MODE, tp->tx_mode);
10704 udelay(100);
10705
10706 if (tg3_flag(tp, ENABLE_RSS)) {
10707 u32 rss_key[10];
10708
10709 tg3_rss_write_indir_tbl(tp);
10710
10711 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10712
10713 for (i = 0; i < 10; i++)
10714 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10715 }
10716
10717 tp->rx_mode = RX_MODE_ENABLE;
10718 if (tg3_flag(tp, 5755_PLUS))
10719 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10720
10721 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10722 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10723
10724 if (tg3_flag(tp, ENABLE_RSS))
10725 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10726 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10727 RX_MODE_RSS_IPV6_HASH_EN |
10728 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10729 RX_MODE_RSS_IPV4_HASH_EN |
10730 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10731
10732 tw32_f(MAC_RX_MODE, tp->rx_mode);
10733 udelay(10);
10734
10735 tw32(MAC_LED_CTRL, tp->led_ctrl);
10736
10737 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10738 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10739 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10740 udelay(10);
10741 }
10742 tw32_f(MAC_RX_MODE, tp->rx_mode);
10743 udelay(10);
10744
10745 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10746 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10747 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10748 /* Set drive transmission level to 1.2V */
10749 /* only if the signal pre-emphasis bit is not set */
10750 val = tr32(MAC_SERDES_CFG);
10751 val &= 0xfffff000;
10752 val |= 0x880;
10753 tw32(MAC_SERDES_CFG, val);
10754 }
10755 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10756 tw32(MAC_SERDES_CFG, 0x616000);
10757 }
10758
10759 /* Prevent chip from dropping frames when flow control
10760 * is enabled.
10761 */
10762 if (tg3_flag(tp, 57765_CLASS))
10763 val = 1;
10764 else
10765 val = 2;
10766 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10767
10768 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10769 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10770 /* Use hardware link auto-negotiation */
10771 tg3_flag_set(tp, HW_AUTONEG);
10772 }
10773
10774 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10775 tg3_asic_rev(tp) == ASIC_REV_5714) {
10776 u32 tmp;
10777
10778 tmp = tr32(SERDES_RX_CTRL);
10779 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10780 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10781 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10782 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10783 }
10784
10785 if (!tg3_flag(tp, USE_PHYLIB)) {
10786 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10787 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10788
10789 err = tg3_setup_phy(tp, false);
10790 if (err)
10791 return err;
10792
10793 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10794 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10795 u32 tmp;
10796
10797 /* Clear CRC stats. */
10798 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10799 tg3_writephy(tp, MII_TG3_TEST1,
10800 tmp | MII_TG3_TEST1_CRC_EN);
10801 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10802 }
10803 }
10804 }
10805
10806 __tg3_set_rx_mode(tp->dev);
10807
10808 /* Initialize receive rules. */
10809 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10810 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10811 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10812 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10813
10814 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10815 limit = 8;
10816 else
10817 limit = 16;
10818 if (tg3_flag(tp, ENABLE_ASF))
10819 limit -= 4;
10820 switch (limit) {
10821 case 16:
10822 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10823 fallthrough;
10824 case 15:
10825 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10826 fallthrough;
10827 case 14:
10828 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10829 fallthrough;
10830 case 13:
10831 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10832 fallthrough;
10833 case 12:
10834 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10835 fallthrough;
10836 case 11:
10837 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10838 fallthrough;
10839 case 10:
10840 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10841 fallthrough;
10842 case 9:
10843 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10844 fallthrough;
10845 case 8:
10846 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10847 fallthrough;
10848 case 7:
10849 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10850 fallthrough;
10851 case 6:
10852 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10853 fallthrough;
10854 case 5:
10855 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10856 fallthrough;
10857 case 4:
10858 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10859 case 3:
10860 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10861 case 2:
10862 case 1:
10863
10864 default:
10865 break;
10866 }
10867
10868 if (tg3_flag(tp, ENABLE_APE))
10869 /* Write our heartbeat update interval to APE. */
10870 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10871 APE_HOST_HEARTBEAT_INT_5SEC);
10872
10873 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10874
10875 return 0;
10876 }
10877
10878 /* Called at device open time to get the chip ready for
10879 * packet processing. Invoked with tp->lock held.
10880 */
10881 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10882 {
10883 /* Chip may have been just powered on. If so, the boot code may still
10884 * be running initialization. Wait for it to finish to avoid races in
10885 * accessing the hardware.
10886 */
10887 tg3_enable_register_access(tp);
10888 tg3_poll_fw(tp);
10889
10890 tg3_switch_clocks(tp);
10891
10892 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10893
10894 return tg3_reset_hw(tp, reset_phy);
10895 }
10896
10897 #ifdef CONFIG_TIGON3_HWMON
10898 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10899 {
10900 u32 off, len = TG3_OCIR_LEN;
10901 int i;
10902
10903 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10904 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10905
10906 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10907 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10908 memset(ocir, 0, len);
10909 }
10910 }
10911
10912 /* sysfs attributes for hwmon */
10913 static ssize_t tg3_show_temp(struct device *dev,
10914 struct device_attribute *devattr, char *buf)
10915 {
10916 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10917 struct tg3 *tp = dev_get_drvdata(dev);
10918 u32 temperature;
10919
10920 spin_lock_bh(&tp->lock);
10921 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10922 sizeof(temperature));
10923 spin_unlock_bh(&tp->lock);
10924 return sprintf(buf, "%u\n", temperature * 1000);
10925 }
10926
10927
10928 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10929 TG3_TEMP_SENSOR_OFFSET);
10930 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10931 TG3_TEMP_CAUTION_OFFSET);
10932 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10933 TG3_TEMP_MAX_OFFSET);
10934
10935 static struct attribute *tg3_attrs[] = {
10936 &sensor_dev_attr_temp1_input.dev_attr.attr,
10937 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10938 &sensor_dev_attr_temp1_max.dev_attr.attr,
10939 NULL
10940 };
10941 ATTRIBUTE_GROUPS(tg3);
10942
10943 static void tg3_hwmon_close(struct tg3 *tp)
10944 {
10945 if (tp->hwmon_dev) {
10946 hwmon_device_unregister(tp->hwmon_dev);
10947 tp->hwmon_dev = NULL;
10948 }
10949 }
10950
10951 static void tg3_hwmon_open(struct tg3 *tp)
10952 {
10953 int i;
10954 u32 size = 0;
10955 struct pci_dev *pdev = tp->pdev;
10956 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10957
10958 tg3_sd_scan_scratchpad(tp, ocirs);
10959
10960 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10961 if (!ocirs[i].src_data_length)
10962 continue;
10963
10964 size += ocirs[i].src_hdr_length;
10965 size += ocirs[i].src_data_length;
10966 }
10967
10968 if (!size)
10969 return;
10970
10971 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10972 tp, tg3_groups);
10973 if (IS_ERR(tp->hwmon_dev)) {
10974 tp->hwmon_dev = NULL;
10975 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10976 }
10977 }
10978 #else
10979 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10980 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10981 #endif /* CONFIG_TIGON3_HWMON */
10982
10983
10984 #define TG3_STAT_ADD32(PSTAT, REG) \
10985 do { u32 __val = tr32(REG); \
10986 (PSTAT)->low += __val; \
10987 if ((PSTAT)->low < __val) \
10988 (PSTAT)->high += 1; \
10989 } while (0)
10990
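/* How the carry detection in TG3_STAT_ADD32 works: the chip exposes
 * 32-bit rolling counters and the macro widens them into 64-bit
 * tg3_stat64_t values using unsigned wraparound.  After low += __val,
 * low < __val can only be true if the addition wrapped past 2^32;
 * e.g. low = 0xfffffff0 plus __val = 0x20 leaves low = 0x10 < 0x20,
 * so the high word absorbs the carry.
 */
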
10991 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10992 {
10993 struct tg3_hw_stats *sp = tp->hw_stats;
10994
10995 if (!tp->link_up)
10996 return;
10997
10998 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10999 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
11000 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
11001 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
11002 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
11003 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
11004 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
11005 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
11006 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
11007 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
11008 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
11009 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
11010 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
11011 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
11012 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
11013 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
11014 u32 val;
11015
11016 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
11017 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
11018 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
11019 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11020 }
11021
11022 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11023 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11024 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11025 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11026 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11027 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11028 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11029 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11030 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11031 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11032 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11033 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11034 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11035 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11036
11037 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11038 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11039 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11040 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11041 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11042 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11043 } else {
11044 u32 val = tr32(HOSTCC_FLOW_ATTN);
11045 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11046 if (val) {
11047 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11048 sp->rx_discards.low += val;
11049 if (sp->rx_discards.low < val)
11050 sp->rx_discards.high += 1;
11051 }
11052 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11053 }
11054 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11055 }
11056
11057 static void tg3_chk_missed_msi(struct tg3 *tp)
11058 {
11059 u32 i;
11060
11061 for (i = 0; i < tp->irq_cnt; i++) {
11062 struct tg3_napi *tnapi = &tp->napi[i];
11063
11064 if (tg3_has_work(tnapi)) {
11065 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11066 tnapi->last_tx_cons == tnapi->tx_cons) {
11067 if (tnapi->chk_msi_cnt < 1) {
11068 tnapi->chk_msi_cnt++;
11069 return;
11070 }
11071 tg3_msi(0, tnapi);
11072 }
11073 }
11074 tnapi->chk_msi_cnt = 0;
11075 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11076 tnapi->last_tx_cons = tnapi->tx_cons;
11077 }
11078 }
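
/* The heuristic above in brief: if a vector has work pending but its
 * consumer indices have not moved since the previous timer tick, the
 * first tick only arms chk_msi_cnt; if nothing has moved by the next
 * tick either, the MSI is assumed lost and tg3_msi() is invoked by
 * hand to restart the NAPI machinery.
 */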
11079
11080 static void tg3_timer(struct timer_list *t)
11081 {
11082 struct tg3 *tp = from_timer(tp, t, timer);
11083
11084 spin_lock(&tp->lock);
11085
11086 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11087 spin_unlock(&tp->lock);
11088 goto restart_timer;
11089 }
11090
11091 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11092 tg3_flag(tp, 57765_CLASS))
11093 tg3_chk_missed_msi(tp);
11094
11095 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11096 /* BCM4785: Flush posted writes from GbE to host memory. */
11097 tr32(HOSTCC_MODE);
11098 }
11099
11100 if (!tg3_flag(tp, TAGGED_STATUS)) {
11101 /* All of this garbage is necessary because, when using
11102 * non-tagged IRQ status, the mailbox/status_block protocol
11103 * the chip uses with the CPU is race prone.
11104 */
11105 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11106 tw32(GRC_LOCAL_CTRL,
11107 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11108 } else {
11109 tw32(HOSTCC_MODE, tp->coalesce_mode |
11110 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11111 }
11112
11113 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11114 spin_unlock(&tp->lock);
11115 tg3_reset_task_schedule(tp);
11116 goto restart_timer;
11117 }
11118 }
11119
11120 /* This part only runs once per second. */
11121 if (!--tp->timer_counter) {
11122 if (tg3_flag(tp, 5705_PLUS))
11123 tg3_periodic_fetch_stats(tp);
11124
11125 if (tp->setlpicnt && !--tp->setlpicnt)
11126 tg3_phy_eee_enable(tp);
11127
11128 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11129 u32 mac_stat;
11130 int phy_event;
11131
11132 mac_stat = tr32(MAC_STATUS);
11133
11134 phy_event = 0;
11135 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11136 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11137 phy_event = 1;
11138 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11139 phy_event = 1;
11140
11141 if (phy_event)
11142 tg3_setup_phy(tp, false);
11143 } else if (tg3_flag(tp, POLL_SERDES)) {
11144 u32 mac_stat = tr32(MAC_STATUS);
11145 int need_setup = 0;
11146
11147 if (tp->link_up &&
11148 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11149 need_setup = 1;
11150 }
11151 if (!tp->link_up &&
11152 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11153 MAC_STATUS_SIGNAL_DET))) {
11154 need_setup = 1;
11155 }
11156 if (need_setup) {
11157 if (!tp->serdes_counter) {
11158 tw32_f(MAC_MODE,
11159 (tp->mac_mode &
11160 ~MAC_MODE_PORT_MODE_MASK));
11161 udelay(40);
11162 tw32_f(MAC_MODE, tp->mac_mode);
11163 udelay(40);
11164 }
11165 tg3_setup_phy(tp, false);
11166 }
11167 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11168 tg3_flag(tp, 5780_CLASS)) {
11169 tg3_serdes_parallel_detect(tp);
11170 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11171 u32 cpmu = tr32(TG3_CPMU_STATUS);
11172 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11173 TG3_CPMU_STATUS_LINK_MASK);
11174
11175 if (link_up != tp->link_up)
11176 tg3_setup_phy(tp, false);
11177 }
11178
11179 tp->timer_counter = tp->timer_multiplier;
11180 }
11181
11182 /* Heartbeat is only sent once every 2 seconds.
11183 *
11184 * The heartbeat is to tell the ASF firmware that the host
11185 * driver is still alive. In the event that the OS crashes,
11186 * ASF needs to reset the hardware to free up the FIFO space
11187 * that may be filled with rx packets destined for the host.
11188 * If the FIFO is full, ASF will no longer function properly.
11189 *
11190 * Unintended resets have been reported on real-time kernels
11191 * where the timer doesn't run on time. Netpoll will also have
11192 * the same problem.
11193 *
11194 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11195 * to check the ring condition when the heartbeat is expiring
11196 * before doing the reset. This will prevent most unintended
11197 * resets.
11198 */
11199 if (!--tp->asf_counter) {
11200 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11201 tg3_wait_for_event_ack(tp);
11202
11203 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11204 FWCMD_NICDRV_ALIVE3);
11205 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11206 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11207 TG3_FW_UPDATE_TIMEOUT_SEC);
11208
11209 tg3_generate_fw_event(tp);
11210 }
11211 tp->asf_counter = tp->asf_multiplier;
11212 }
11213
11214 /* Update the APE heartbeat every 5 seconds. */
11215 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11216
11217 spin_unlock(&tp->lock);
11218
11219 restart_timer:
11220 tp->timer.expires = jiffies + tp->timer_offset;
11221 add_timer(&tp->timer);
11222 }
11223
11224 static void tg3_timer_init(struct tg3 *tp)
11225 {
11226 if (tg3_flag(tp, TAGGED_STATUS) &&
11227 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11228 !tg3_flag(tp, 57765_CLASS))
11229 tp->timer_offset = HZ;
11230 else
11231 tp->timer_offset = HZ / 10;
11232
11233 BUG_ON(tp->timer_offset > HZ);
11234
11235 tp->timer_multiplier = (HZ / tp->timer_offset);
11236 tp->asf_multiplier = (HZ / tp->timer_offset) *
11237 TG3_FW_UPDATE_FREQ_SEC;
11238
11239 timer_setup(&tp->timer, tg3_timer, 0);
11240 }
11241
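/* Worked example (assuming HZ = 1000 and the HZ / 10 case above): the
 * timer fires every 100 jiffies, i.e. 10 times a second, so
 * timer_multiplier = 10 makes the once-per-second block in tg3_timer()
 * run on every 10th tick, and with TG3_FW_UPDATE_FREQ_SEC = 2 (from
 * tg3.h) asf_multiplier = 20 yields the 2-second ASF heartbeat cadence
 * described there.
 */
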
11242 static void tg3_timer_start(struct tg3 *tp)
11243 {
11244 tp->asf_counter = tp->asf_multiplier;
11245 tp->timer_counter = tp->timer_multiplier;
11246
11247 tp->timer.expires = jiffies + tp->timer_offset;
11248 add_timer(&tp->timer);
11249 }
11250
11251 static void tg3_timer_stop(struct tg3 *tp)
11252 {
11253 del_timer_sync(&tp->timer);
11254 }
11255
11256 /* Restart hardware after configuration changes, self-test, etc.
11257 * Invoked with tp->lock held.
11258 */
11259 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11260 __releases(tp->lock)
11261 __acquires(tp->lock)
11262 {
11263 int err;
11264
11265 err = tg3_init_hw(tp, reset_phy);
11266 if (err) {
11267 netdev_err(tp->dev,
11268 "Failed to re-initialize device, aborting\n");
11269 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11270 tg3_full_unlock(tp);
11271 tg3_timer_stop(tp);
11272 tp->irq_sync = 0;
11273 tg3_napi_enable(tp);
11274 dev_close(tp->dev);
11275 tg3_full_lock(tp, 0);
11276 }
11277 return err;
11278 }
11279
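/* Deferred reset handler, run from the workqueue (scheduled, e.g.,
 * from the TX timeout path). It bails out quietly if the device is
 * down or PCI error recovery is in progress; otherwise it halts and
 * re-initializes the hardware under the full lock.
 */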
11280 static void tg3_reset_task(struct work_struct *work)
11281 {
11282 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11283 int err;
11284
11285 rtnl_lock();
11286 tg3_full_lock(tp, 0);
11287
11288 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11289 tp->pdev->error_state != pci_channel_io_normal) {
11290 tg3_flag_clear(tp, RESET_TASK_PENDING);
11291 tg3_full_unlock(tp);
11292 rtnl_unlock();
11293 return;
11294 }
11295
11296 tg3_full_unlock(tp);
11297
11298 tg3_phy_stop(tp);
11299
11300 tg3_netif_stop(tp);
11301
11302 tg3_full_lock(tp, 1);
11303
11304 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11305 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11306 tp->write32_rx_mbox = tg3_write_flush_reg32;
11307 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11308 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11309 }
11310
11311 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11312 err = tg3_init_hw(tp, true);
11313 if (err) {
11314 tg3_full_unlock(tp);
11315 tp->irq_sync = 0;
11316 tg3_napi_enable(tp);
11317 /* Clear this flag so that tg3_reset_task_cancel() will not
11318 * call cancel_work_sync() and wait forever.
11319 */
11320 tg3_flag_clear(tp, RESET_TASK_PENDING);
11321 dev_close(tp->dev);
11322 goto out;
11323 }
11324
11325 tg3_netif_start(tp);
11326 tg3_full_unlock(tp);
11327 tg3_phy_start(tp);
11328 tg3_flag_clear(tp, RESET_TASK_PENDING);
11329 out:
11330 rtnl_unlock();
11331 }
11332
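/* Request the IRQ for one NAPI context. With multiple vectors, each
 * gets a "<dev>-txrx-<n>" style label describing the rings it
 * services; legacy interrupts use a shared handler instead.
 */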
11333 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11334 {
11335 irq_handler_t fn;
11336 unsigned long flags;
11337 char *name;
11338 struct tg3_napi *tnapi = &tp->napi[irq_num];
11339
11340 if (tp->irq_cnt == 1)
11341 name = tp->dev->name;
11342 else {
11343 name = &tnapi->irq_lbl[0];
11344 if (tnapi->tx_buffers && tnapi->rx_rcb)
11345 snprintf(name, sizeof(tnapi->irq_lbl),
11346 "%s-txrx-%d", tp->dev->name, irq_num);
11347 else if (tnapi->tx_buffers)
11348 snprintf(name, sizeof(tnapi->irq_lbl),
11349 "%s-tx-%d", tp->dev->name, irq_num);
11350 else if (tnapi->rx_rcb)
11351 snprintf(name, sizeof(tnapi->irq_lbl),
11352 "%s-rx-%d", tp->dev->name, irq_num);
11353 else
11354 snprintf(name, sizeof(tnapi->irq_lbl),
11355 "%s-%d", tp->dev->name, irq_num);
11356 }
11357
11358 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11359 fn = tg3_msi;
11360 if (tg3_flag(tp, 1SHOT_MSI))
11361 fn = tg3_msi_1shot;
11362 flags = 0;
11363 } else {
11364 fn = tg3_interrupt;
11365 if (tg3_flag(tp, TAGGED_STATUS))
11366 fn = tg3_interrupt_tagged;
11367 flags = IRQF_SHARED;
11368 }
11369
11370 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11371 }
11372
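/* Verify interrupt delivery: install a test ISR, force a coalescing
 * event, and poll the interrupt mailbox for up to ~50 ms. Returns 0
 * if an interrupt was observed, -EIO otherwise.
 */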
11373 static int tg3_test_interrupt(struct tg3 *tp)
11374 {
11375 struct tg3_napi *tnapi = &tp->napi[0];
11376 struct net_device *dev = tp->dev;
11377 int err, i, intr_ok = 0;
11378 u32 val;
11379
11380 if (!netif_running(dev))
11381 return -ENODEV;
11382
11383 tg3_disable_ints(tp);
11384
11385 free_irq(tnapi->irq_vec, tnapi);
11386
11387 /*
11388 * Turn off MSI one shot mode. Otherwise this test has no
11389 * observable way to know whether the interrupt was delivered.
11390 */
11391 if (tg3_flag(tp, 57765_PLUS)) {
11392 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11393 tw32(MSGINT_MODE, val);
11394 }
11395
11396 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11397 IRQF_SHARED, dev->name, tnapi);
11398 if (err)
11399 return err;
11400
11401 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11402 tg3_enable_ints(tp);
11403
11404 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11405 tnapi->coal_now);
11406
11407 for (i = 0; i < 5; i++) {
11408 u32 int_mbox, misc_host_ctrl;
11409
11410 int_mbox = tr32_mailbox(tnapi->int_mbox);
11411 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11412
11413 if ((int_mbox != 0) ||
11414 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11415 intr_ok = 1;
11416 break;
11417 }
11418
11419 if (tg3_flag(tp, 57765_PLUS) &&
11420 tnapi->hw_status->status_tag != tnapi->last_tag)
11421 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11422
11423 msleep(10);
11424 }
11425
11426 tg3_disable_ints(tp);
11427
11428 free_irq(tnapi->irq_vec, tnapi);
11429
11430 err = tg3_request_irq(tp, 0);
11431
11432 if (err)
11433 return err;
11434
11435 if (intr_ok) {
11436 /* Reenable MSI one shot mode. */
11437 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11438 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11439 tw32(MSGINT_MODE, val);
11440 }
11441 return 0;
11442 }
11443
11444 return -EIO;
11445 }
11446
11447 /* Returns 0 if the MSI test succeeds, or if it fails but INTx
11448 * mode is successfully restored.
11449 */
11450 static int tg3_test_msi(struct tg3 *tp)
11451 {
11452 int err;
11453 u16 pci_cmd;
11454
11455 if (!tg3_flag(tp, USING_MSI))
11456 return 0;
11457
11458 /* Turn off SERR reporting in case MSI terminates with Master
11459 * Abort.
11460 */
11461 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11462 pci_write_config_word(tp->pdev, PCI_COMMAND,
11463 pci_cmd & ~PCI_COMMAND_SERR);
11464
11465 err = tg3_test_interrupt(tp);
11466
11467 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11468
11469 if (!err)
11470 return 0;
11471
11472 /* other failures */
11473 if (err != -EIO)
11474 return err;
11475
11476 /* MSI test failed, go back to INTx mode */
11477 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11478 "to INTx mode. Please report this failure to the PCI "
11479 "maintainer and include system chipset information\n");
11480
11481 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11482
11483 pci_disable_msi(tp->pdev);
11484
11485 tg3_flag_clear(tp, USING_MSI);
11486 tp->napi[0].irq_vec = tp->pdev->irq;
11487
11488 err = tg3_request_irq(tp, 0);
11489 if (err)
11490 return err;
11491
11492 /* Need to reset the chip because the MSI cycle may have terminated
11493 * with Master Abort.
11494 */
11495 tg3_full_lock(tp, 1);
11496
11497 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11498 err = tg3_init_hw(tp, true);
11499
11500 tg3_full_unlock(tp);
11501
11502 if (err)
11503 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11504
11505 return err;
11506 }
11507
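/* Fetch the firmware image named in tp->fw_needed and sanity-check
 * the length recorded in its header against the blob size.
 */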
11508 static int tg3_request_firmware(struct tg3 *tp)
11509 {
11510 const struct tg3_firmware_hdr *fw_hdr;
11511
11512 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11513 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11514 tp->fw_needed);
11515 return -ENOENT;
11516 }
11517
11518 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11519
11520 /* Firmware blob starts with version numbers, followed by
11521 * start address and _full_ length including BSS sections
11522 * (which must be longer than the actual data, of course).
11523 */
11524
11525 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11526 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11527 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11528 tp->fw_len, tp->fw_needed);
11529 release_firmware(tp->fw);
11530 tp->fw = NULL;
11531 return -EINVAL;
11532 }
11533
11534 /* We no longer need firmware; we have it. */
11535 tp->fw_needed = NULL;
11536 return 0;
11537 }
11538
11539 static u32 tg3_irq_count(struct tg3 *tp)
11540 {
11541 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11542
11543 if (irq_cnt > 1) {
11544 /* We want as many rx rings enabled as there are cpus.
11545 * In multiqueue MSI-X mode, the first MSI-X vector
11546 * only deals with link interrupts, etc, so we add
11547 * one to the number of vectors we are requesting.
11548 */
11549 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11550 }
11551
11552 return irq_cnt;
11553 }
11554
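/* Attempt the switch to MSI-X. The RX queue count defaults to the
 * RSS queue heuristic (capped at rxq_max), and if the PCI core grants
 * fewer vectors than requested the ring counts are scaled down to
 * match.
 */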
11555 static bool tg3_enable_msix(struct tg3 *tp)
11556 {
11557 int i, rc;
11558 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11559
11560 tp->txq_cnt = tp->txq_req;
11561 tp->rxq_cnt = tp->rxq_req;
11562 if (!tp->rxq_cnt)
11563 tp->rxq_cnt = netif_get_num_default_rss_queues();
11564 if (tp->rxq_cnt > tp->rxq_max)
11565 tp->rxq_cnt = tp->rxq_max;
11566
11567 /* Disable multiple TX rings by default. Simple round-robin hardware
11568 * scheduling of the TX rings can cause starvation of rings with
11569 * small packets when other rings have TSO or jumbo packets.
11570 */
11571 if (!tp->txq_req)
11572 tp->txq_cnt = 1;
11573
11574 tp->irq_cnt = tg3_irq_count(tp);
11575
11576 for (i = 0; i < tp->irq_max; i++) {
11577 msix_ent[i].entry = i;
11578 msix_ent[i].vector = 0;
11579 }
11580
11581 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11582 if (rc < 0) {
11583 return false;
11584 } else if (rc < tp->irq_cnt) {
11585 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11586 tp->irq_cnt, rc);
11587 tp->irq_cnt = rc;
11588 tp->rxq_cnt = max(rc - 1, 1);
11589 if (tp->txq_cnt)
11590 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11591 }
11592
11593 for (i = 0; i < tp->irq_max; i++)
11594 tp->napi[i].irq_vec = msix_ent[i].vector;
11595
11596 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11597 pci_disable_msix(tp->pdev);
11598 return false;
11599 }
11600
11601 if (tp->irq_cnt == 1)
11602 return true;
11603
11604 tg3_flag_set(tp, ENABLE_RSS);
11605
11606 if (tp->txq_cnt > 1)
11607 tg3_flag_set(tp, ENABLE_TSS);
11608
11609 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11610
11611 return true;
11612 }
11613
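/* Choose the interrupt mode in order of preference: MSI-X, then MSI,
 * then legacy INTx, and program MSGINT_MODE (multivector enable,
 * one-shot disable) to match.
 */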
11614 static void tg3_ints_init(struct tg3 *tp)
11615 {
11616 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11617 !tg3_flag(tp, TAGGED_STATUS)) {
11618 /* All MSI supporting chips should support tagged
11619 * status. Assert that this is the case.
11620 */
11621 netdev_warn(tp->dev,
11622 "MSI without TAGGED_STATUS? Not using MSI\n");
11623 goto defcfg;
11624 }
11625
11626 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11627 tg3_flag_set(tp, USING_MSIX);
11628 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11629 tg3_flag_set(tp, USING_MSI);
11630
11631 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11632 u32 msi_mode = tr32(MSGINT_MODE);
11633 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11634 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11635 if (!tg3_flag(tp, 1SHOT_MSI))
11636 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11637 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11638 }
11639 defcfg:
11640 if (!tg3_flag(tp, USING_MSIX)) {
11641 tp->irq_cnt = 1;
11642 tp->napi[0].irq_vec = tp->pdev->irq;
11643 }
11644
11645 if (tp->irq_cnt == 1) {
11646 tp->txq_cnt = 1;
11647 tp->rxq_cnt = 1;
11648 netif_set_real_num_tx_queues(tp->dev, 1);
11649 netif_set_real_num_rx_queues(tp->dev, 1);
11650 }
11651 }
11652
11653 static void tg3_ints_fini(struct tg3 *tp)
11654 {
11655 if (tg3_flag(tp, USING_MSIX))
11656 pci_disable_msix(tp->pdev);
11657 else if (tg3_flag(tp, USING_MSI))
11658 pci_disable_msi(tp->pdev);
11659 tg3_flag_clear(tp, USING_MSI);
11660 tg3_flag_clear(tp, USING_MSIX);
11661 tg3_flag_clear(tp, ENABLE_RSS);
11662 tg3_flag_clear(tp, ENABLE_TSS);
11663 }
11664
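/* Bring the device up: configure interrupts, allocate rings and NAPI
 * contexts, request IRQs, initialize the hardware, optionally run the
 * MSI self-test, then start the service timer and TX queues.
 * Failures unwind in reverse order through the out_* labels.
 */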
11665 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11666 bool init)
11667 {
11668 struct net_device *dev = tp->dev;
11669 int i, err;
11670
11671 /*
11672 * Setup interrupts first so we know how
11673 * many NAPI resources to allocate
11674 */
11675 tg3_ints_init(tp);
11676
11677 tg3_rss_check_indir_tbl(tp);
11678
11679 /* The placement of this call is tied
11680 * to the setup and use of Host TX descriptors.
11681 */
11682 err = tg3_alloc_consistent(tp);
11683 if (err)
11684 goto out_ints_fini;
11685
11686 tg3_napi_init(tp);
11687
11688 tg3_napi_enable(tp);
11689
11690 for (i = 0; i < tp->irq_cnt; i++) {
11691 err = tg3_request_irq(tp, i);
11692 if (err) {
11693 for (i--; i >= 0; i--) {
11694 struct tg3_napi *tnapi = &tp->napi[i];
11695
11696 free_irq(tnapi->irq_vec, tnapi);
11697 }
11698 goto out_napi_fini;
11699 }
11700 }
11701
11702 tg3_full_lock(tp, 0);
11703
11704 if (init)
11705 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11706
11707 err = tg3_init_hw(tp, reset_phy);
11708 if (err) {
11709 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11710 tg3_free_rings(tp);
11711 }
11712
11713 tg3_full_unlock(tp);
11714
11715 if (err)
11716 goto out_free_irq;
11717
11718 if (test_irq && tg3_flag(tp, USING_MSI)) {
11719 err = tg3_test_msi(tp);
11720
11721 if (err) {
11722 tg3_full_lock(tp, 0);
11723 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11724 tg3_free_rings(tp);
11725 tg3_full_unlock(tp);
11726
11727 goto out_napi_fini;
11728 }
11729
11730 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11731 u32 val = tr32(PCIE_TRANSACTION_CFG);
11732
11733 tw32(PCIE_TRANSACTION_CFG,
11734 val | PCIE_TRANS_CFG_1SHOT_MSI);
11735 }
11736 }
11737
11738 tg3_phy_start(tp);
11739
11740 tg3_hwmon_open(tp);
11741
11742 tg3_full_lock(tp, 0);
11743
11744 tg3_timer_start(tp);
11745 tg3_flag_set(tp, INIT_COMPLETE);
11746 tg3_enable_ints(tp);
11747
11748 tg3_ptp_resume(tp);
11749
11750 tg3_full_unlock(tp);
11751
11752 netif_tx_start_all_queues(dev);
11753
11754 /*
11755 * Reset the loopback feature if it was turned on while the device was
11756 * down; make sure that it's installed properly now.
11757 */
11758 if (dev->features & NETIF_F_LOOPBACK)
11759 tg3_set_loopback(dev, dev->features);
11760
11761 return 0;
11762
11763 out_free_irq:
11764 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11765 struct tg3_napi *tnapi = &tp->napi[i];
11766 free_irq(tnapi->irq_vec, tnapi);
11767 }
11768
11769 out_napi_fini:
11770 tg3_napi_disable(tp);
11771 tg3_napi_fini(tp);
11772 tg3_free_consistent(tp);
11773
11774 out_ints_fini:
11775 tg3_ints_fini(tp);
11776
11777 return err;
11778 }
11779
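/* Tear down everything tg3_start() set up, in reverse order. */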
11780 static void tg3_stop(struct tg3 *tp)
11781 {
11782 int i;
11783
11784 tg3_reset_task_cancel(tp);
11785 tg3_netif_stop(tp);
11786
11787 tg3_timer_stop(tp);
11788
11789 tg3_hwmon_close(tp);
11790
11791 tg3_phy_stop(tp);
11792
11793 tg3_full_lock(tp, 1);
11794
11795 tg3_disable_ints(tp);
11796
11797 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11798 tg3_free_rings(tp);
11799 tg3_flag_clear(tp, INIT_COMPLETE);
11800
11801 tg3_full_unlock(tp);
11802
11803 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11804 struct tg3_napi *tnapi = &tp->napi[i];
11805 free_irq(tnapi->irq_vec, tnapi);
11806 }
11807
11808 tg3_ints_fini(tp);
11809
11810 tg3_napi_fini(tp);
11811
11812 tg3_free_consistent(tp);
11813 }
11814
11815 static int tg3_open(struct net_device *dev)
11816 {
11817 struct tg3 *tp = netdev_priv(dev);
11818 int err;
11819
11820 if (tp->pcierr_recovery) {
11821 netdev_err(dev, "Failed to open device. PCI error recovery "
11822 "in progress\n");
11823 return -EAGAIN;
11824 }
11825
11826 if (tp->fw_needed) {
11827 err = tg3_request_firmware(tp);
11828 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11829 if (err) {
11830 netdev_warn(tp->dev, "EEE capability disabled\n");
11831 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11832 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11833 netdev_warn(tp->dev, "EEE capability restored\n");
11834 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11835 }
11836 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11837 if (err)
11838 return err;
11839 } else if (err) {
11840 netdev_warn(tp->dev, "TSO capability disabled\n");
11841 tg3_flag_clear(tp, TSO_CAPABLE);
11842 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11843 netdev_notice(tp->dev, "TSO capability restored\n");
11844 tg3_flag_set(tp, TSO_CAPABLE);
11845 }
11846 }
11847
11848 tg3_carrier_off(tp);
11849
11850 err = tg3_power_up(tp);
11851 if (err)
11852 return err;
11853
11854 tg3_full_lock(tp, 0);
11855
11856 tg3_disable_ints(tp);
11857 tg3_flag_clear(tp, INIT_COMPLETE);
11858
11859 tg3_full_unlock(tp);
11860
11861 err = tg3_start(tp,
11862 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11863 true, true);
11864 if (err) {
11865 tg3_frob_aux_power(tp, false);
11866 pci_set_power_state(tp->pdev, PCI_D3hot);
11867 }
11868
11869 return err;
11870 }
11871
11872 static int tg3_close(struct net_device *dev)
11873 {
11874 struct tg3 *tp = netdev_priv(dev);
11875
11876 if (tp->pcierr_recovery) {
11877 netdev_err(dev, "Failed to close device. PCI error recovery "
11878 "in progress\n");
11879 return -EAGAIN;
11880 }
11881
11882 tg3_stop(tp);
11883
11884 if (pci_device_is_present(tp->pdev)) {
11885 tg3_power_down_prepare(tp);
11886
11887 tg3_carrier_off(tp);
11888 }
11889 return 0;
11890 }
11891
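/* Hardware statistics are maintained as 32-bit high/low register
 * pairs; fold one pair into a 64-bit value.
 */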
11892 static inline u64 get_stat64(tg3_stat64_t *val)
11893 {
11894 return ((u64)val->high << 32) | ((u64)val->low);
11895 }
11896
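/* On 5700/5701 with a copper PHY, CRC/FCS errors are counted by a PHY
 * test register and accumulated in tp->phy_crc_errors; all other
 * configurations report them in the hardware statistics block.
 */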
11897 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11898 {
11899 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11900
11901 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11902 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11903 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11904 u32 val;
11905
11906 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11907 tg3_writephy(tp, MII_TG3_TEST1,
11908 val | MII_TG3_TEST1_CRC_EN);
11909 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11910 } else
11911 val = 0;
11912
11913 tp->phy_crc_errors += val;
11914
11915 return tp->phy_crc_errors;
11916 }
11917
11918 return get_stat64(&hw_stats->rx_fcs_errors);
11919 }
11920
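/* Ethtool statistics are kept monotonic across chip resets by adding
 * the live hardware counters to the snapshot saved in tp->estats_prev.
 */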
11921 #define ESTAT_ADD(member) \
11922 estats->member = old_estats->member + \
11923 get_stat64(&hw_stats->member)
11924
11925 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11926 {
11927 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11928 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11929
11930 ESTAT_ADD(rx_octets);
11931 ESTAT_ADD(rx_fragments);
11932 ESTAT_ADD(rx_ucast_packets);
11933 ESTAT_ADD(rx_mcast_packets);
11934 ESTAT_ADD(rx_bcast_packets);
11935 ESTAT_ADD(rx_fcs_errors);
11936 ESTAT_ADD(rx_align_errors);
11937 ESTAT_ADD(rx_xon_pause_rcvd);
11938 ESTAT_ADD(rx_xoff_pause_rcvd);
11939 ESTAT_ADD(rx_mac_ctrl_rcvd);
11940 ESTAT_ADD(rx_xoff_entered);
11941 ESTAT_ADD(rx_frame_too_long_errors);
11942 ESTAT_ADD(rx_jabbers);
11943 ESTAT_ADD(rx_undersize_packets);
11944 ESTAT_ADD(rx_in_length_errors);
11945 ESTAT_ADD(rx_out_length_errors);
11946 ESTAT_ADD(rx_64_or_less_octet_packets);
11947 ESTAT_ADD(rx_65_to_127_octet_packets);
11948 ESTAT_ADD(rx_128_to_255_octet_packets);
11949 ESTAT_ADD(rx_256_to_511_octet_packets);
11950 ESTAT_ADD(rx_512_to_1023_octet_packets);
11951 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11952 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11953 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11954 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11955 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11956
11957 ESTAT_ADD(tx_octets);
11958 ESTAT_ADD(tx_collisions);
11959 ESTAT_ADD(tx_xon_sent);
11960 ESTAT_ADD(tx_xoff_sent);
11961 ESTAT_ADD(tx_flow_control);
11962 ESTAT_ADD(tx_mac_errors);
11963 ESTAT_ADD(tx_single_collisions);
11964 ESTAT_ADD(tx_mult_collisions);
11965 ESTAT_ADD(tx_deferred);
11966 ESTAT_ADD(tx_excessive_collisions);
11967 ESTAT_ADD(tx_late_collisions);
11968 ESTAT_ADD(tx_collide_2times);
11969 ESTAT_ADD(tx_collide_3times);
11970 ESTAT_ADD(tx_collide_4times);
11971 ESTAT_ADD(tx_collide_5times);
11972 ESTAT_ADD(tx_collide_6times);
11973 ESTAT_ADD(tx_collide_7times);
11974 ESTAT_ADD(tx_collide_8times);
11975 ESTAT_ADD(tx_collide_9times);
11976 ESTAT_ADD(tx_collide_10times);
11977 ESTAT_ADD(tx_collide_11times);
11978 ESTAT_ADD(tx_collide_12times);
11979 ESTAT_ADD(tx_collide_13times);
11980 ESTAT_ADD(tx_collide_14times);
11981 ESTAT_ADD(tx_collide_15times);
11982 ESTAT_ADD(tx_ucast_packets);
11983 ESTAT_ADD(tx_mcast_packets);
11984 ESTAT_ADD(tx_bcast_packets);
11985 ESTAT_ADD(tx_carrier_sense_errors);
11986 ESTAT_ADD(tx_discards);
11987 ESTAT_ADD(tx_errors);
11988
11989 ESTAT_ADD(dma_writeq_full);
11990 ESTAT_ADD(dma_write_prioq_full);
11991 ESTAT_ADD(rxbds_empty);
11992 ESTAT_ADD(rx_discards);
11993 ESTAT_ADD(rx_errors);
11994 ESTAT_ADD(rx_threshold_hit);
11995
11996 ESTAT_ADD(dma_readq_full);
11997 ESTAT_ADD(dma_read_prioq_full);
11998 ESTAT_ADD(tx_comp_queue_full);
11999
12000 ESTAT_ADD(ring_set_send_prod_index);
12001 ESTAT_ADD(ring_status_update);
12002 ESTAT_ADD(nic_irqs);
12003 ESTAT_ADD(nic_avoided_irqs);
12004 ESTAT_ADD(nic_tx_threshold_hit);
12005
12006 ESTAT_ADD(mbuf_lwm_thresh_hit);
12007 }
12008
12009 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
12010 {
12011 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12012 struct tg3_hw_stats *hw_stats = tp->hw_stats;
12013 unsigned long rx_dropped;
12014 unsigned long tx_dropped;
12015 int i;
12016
12017 stats->rx_packets = old_stats->rx_packets +
12018 get_stat64(&hw_stats->rx_ucast_packets) +
12019 get_stat64(&hw_stats->rx_mcast_packets) +
12020 get_stat64(&hw_stats->rx_bcast_packets);
12021
12022 stats->tx_packets = old_stats->tx_packets +
12023 get_stat64(&hw_stats->tx_ucast_packets) +
12024 get_stat64(&hw_stats->tx_mcast_packets) +
12025 get_stat64(&hw_stats->tx_bcast_packets);
12026
12027 stats->rx_bytes = old_stats->rx_bytes +
12028 get_stat64(&hw_stats->rx_octets);
12029 stats->tx_bytes = old_stats->tx_bytes +
12030 get_stat64(&hw_stats->tx_octets);
12031
12032 stats->rx_errors = old_stats->rx_errors +
12033 get_stat64(&hw_stats->rx_errors);
12034 stats->tx_errors = old_stats->tx_errors +
12035 get_stat64(&hw_stats->tx_errors) +
12036 get_stat64(&hw_stats->tx_mac_errors) +
12037 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12038 get_stat64(&hw_stats->tx_discards);
12039
12040 stats->multicast = old_stats->multicast +
12041 get_stat64(&hw_stats->rx_mcast_packets);
12042 stats->collisions = old_stats->collisions +
12043 get_stat64(&hw_stats->tx_collisions);
12044
12045 stats->rx_length_errors = old_stats->rx_length_errors +
12046 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12047 get_stat64(&hw_stats->rx_undersize_packets);
12048
12049 stats->rx_frame_errors = old_stats->rx_frame_errors +
12050 get_stat64(&hw_stats->rx_align_errors);
12051 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12052 get_stat64(&hw_stats->tx_discards);
12053 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12054 get_stat64(&hw_stats->tx_carrier_sense_errors);
12055
12056 stats->rx_crc_errors = old_stats->rx_crc_errors +
12057 tg3_calc_crc_errors(tp);
12058
12059 stats->rx_missed_errors = old_stats->rx_missed_errors +
12060 get_stat64(&hw_stats->rx_discards);
12061
12062 /* Aggregate per-queue counters. The per-queue counters are updated
12063 * by a single writer, race-free. The result computed by this loop
12064 * might not be 100% accurate (counters can be updated in the middle of
12065 * the loop) but the next tg3_get_nstats() will recompute the current
12066 * value so it is acceptable.
12067 *
12068 * Note that these counters wrap around at 4G on 32bit machines.
12069 */
12070 rx_dropped = (unsigned long)(old_stats->rx_dropped);
12071 tx_dropped = (unsigned long)(old_stats->tx_dropped);
12072
12073 for (i = 0; i < tp->irq_cnt; i++) {
12074 struct tg3_napi *tnapi = &tp->napi[i];
12075
12076 rx_dropped += tnapi->rx_dropped;
12077 tx_dropped += tnapi->tx_dropped;
12078 }
12079
12080 stats->rx_dropped = rx_dropped;
12081 stats->tx_dropped = tx_dropped;
12082 }
12083
12084 static int tg3_get_regs_len(struct net_device *dev)
12085 {
12086 return TG3_REG_BLK_SIZE;
12087 }
12088
12089 static void tg3_get_regs(struct net_device *dev,
12090 struct ethtool_regs *regs, void *_p)
12091 {
12092 struct tg3 *tp = netdev_priv(dev);
12093
12094 regs->version = 0;
12095
12096 memset(_p, 0, TG3_REG_BLK_SIZE);
12097
12098 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12099 return;
12100
12101 tg3_full_lock(tp, 0);
12102
12103 tg3_dump_legacy_regs(tp, (u32 *)_p);
12104
12105 tg3_full_unlock(tp);
12106 }
12107
12108 static int tg3_get_eeprom_len(struct net_device *dev)
12109 {
12110 struct tg3 *tp = netdev_priv(dev);
12111
12112 return tp->nvram_size;
12113 }
12114
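/* ethtool EEPROM read. NVRAM is accessed as big-endian 32-bit words,
 * so unaligned head and tail bytes are served by reading the enclosing
 * word and copying out only the requested slice.
 */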
12115 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12116 {
12117 struct tg3 *tp = netdev_priv(dev);
12118 int ret, cpmu_restore = 0;
12119 u8 *pd;
12120 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12121 __be32 val;
12122
12123 if (tg3_flag(tp, NO_NVRAM))
12124 return -EINVAL;
12125
12126 offset = eeprom->offset;
12127 len = eeprom->len;
12128 eeprom->len = 0;
12129
12130 eeprom->magic = TG3_EEPROM_MAGIC;
12131
12132 /* Override clock, link aware and link idle modes */
12133 if (tg3_flag(tp, CPMU_PRESENT)) {
12134 cpmu_val = tr32(TG3_CPMU_CTRL);
12135 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12136 CPMU_CTRL_LINK_IDLE_MODE)) {
12137 tw32(TG3_CPMU_CTRL, cpmu_val &
12138 ~(CPMU_CTRL_LINK_AWARE_MODE |
12139 CPMU_CTRL_LINK_IDLE_MODE));
12140 cpmu_restore = 1;
12141 }
12142 }
12143 tg3_override_clk(tp);
12144
12145 if (offset & 3) {
12146 /* adjustments to start on required 4 byte boundary */
12147 b_offset = offset & 3;
12148 b_count = 4 - b_offset;
12149 if (b_count > len) {
12150 /* i.e. offset=1 len=2 */
12151 b_count = len;
12152 }
12153 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12154 if (ret)
12155 goto eeprom_done;
12156 memcpy(data, ((char *)&val) + b_offset, b_count);
12157 len -= b_count;
12158 offset += b_count;
12159 eeprom->len += b_count;
12160 }
12161
12162 /* read bytes up to the last 4 byte boundary */
12163 pd = &data[eeprom->len];
12164 for (i = 0; i < (len - (len & 3)); i += 4) {
12165 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12166 if (ret) {
12167 if (i)
12168 i -= 4;
12169 eeprom->len += i;
12170 goto eeprom_done;
12171 }
12172 memcpy(pd + i, &val, 4);
12173 if (need_resched()) {
12174 if (signal_pending(current)) {
12175 eeprom->len += i;
12176 ret = -EINTR;
12177 goto eeprom_done;
12178 }
12179 cond_resched();
12180 }
12181 }
12182 eeprom->len += i;
12183
12184 if (len & 3) {
12185 /* read last bytes not ending on 4 byte boundary */
12186 pd = &data[eeprom->len];
12187 b_count = len & 3;
12188 b_offset = offset + len - b_count;
12189 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12190 if (ret)
12191 goto eeprom_done;
12192 memcpy(pd, &val, b_count);
12193 eeprom->len += b_count;
12194 }
12195 ret = 0;
12196
12197 eeprom_done:
12198 /* Restore clock, link aware and link idle modes */
12199 tg3_restore_clk(tp);
12200 if (cpmu_restore)
12201 tw32(TG3_CPMU_CTRL, cpmu_val);
12202
12203 return ret;
12204 }
12205
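/* ethtool EEPROM write. Unaligned requests are widened to 4-byte
 * boundaries by first reading back the neighboring words, using a
 * temporary bounce buffer when padding is needed.
 */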
12206 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12207 {
12208 struct tg3 *tp = netdev_priv(dev);
12209 int ret;
12210 u32 offset, len, b_offset, odd_len;
12211 u8 *buf;
12212 __be32 start = 0, end;
12213
12214 if (tg3_flag(tp, NO_NVRAM) ||
12215 eeprom->magic != TG3_EEPROM_MAGIC)
12216 return -EINVAL;
12217
12218 offset = eeprom->offset;
12219 len = eeprom->len;
12220
12221 if ((b_offset = (offset & 3))) {
12222 /* adjustments to start on required 4 byte boundary */
12223 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12224 if (ret)
12225 return ret;
12226 len += b_offset;
12227 offset &= ~3;
12228 if (len < 4)
12229 len = 4;
12230 }
12231
12232 odd_len = 0;
12233 if (len & 3) {
12234 /* adjustments to end on required 4 byte boundary */
12235 odd_len = 1;
12236 len = (len + 3) & ~3;
12237 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12238 if (ret)
12239 return ret;
12240 }
12241
12242 buf = data;
12243 if (b_offset || odd_len) {
12244 buf = kmalloc(len, GFP_KERNEL);
12245 if (!buf)
12246 return -ENOMEM;
12247 if (b_offset)
12248 memcpy(buf, &start, 4);
12249 if (odd_len)
12250 memcpy(buf+len-4, &end, 4);
12251 memcpy(buf + b_offset, data, eeprom->len);
12252 }
12253
12254 ret = tg3_nvram_write_block(tp, offset, len, buf);
12255
12256 if (buf != data)
12257 kfree(buf);
12258
12259 return ret;
12260 }
12261
12262 static int tg3_get_link_ksettings(struct net_device *dev,
12263 struct ethtool_link_ksettings *cmd)
12264 {
12265 struct tg3 *tp = netdev_priv(dev);
12266 u32 supported, advertising;
12267
12268 if (tg3_flag(tp, USE_PHYLIB)) {
12269 struct phy_device *phydev;
12270 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12271 return -EAGAIN;
12272 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12273 phy_ethtool_ksettings_get(phydev, cmd);
12274
12275 return 0;
12276 }
12277
12278 supported = (SUPPORTED_Autoneg);
12279
12280 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12281 supported |= (SUPPORTED_1000baseT_Half |
12282 SUPPORTED_1000baseT_Full);
12283
12284 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12285 supported |= (SUPPORTED_100baseT_Half |
12286 SUPPORTED_100baseT_Full |
12287 SUPPORTED_10baseT_Half |
12288 SUPPORTED_10baseT_Full |
12289 SUPPORTED_TP);
12290 cmd->base.port = PORT_TP;
12291 } else {
12292 supported |= SUPPORTED_FIBRE;
12293 cmd->base.port = PORT_FIBRE;
12294 }
12295 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12296 supported);
12297
12298 advertising = tp->link_config.advertising;
12299 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12300 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12301 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12302 advertising |= ADVERTISED_Pause;
12303 } else {
12304 advertising |= ADVERTISED_Pause |
12305 ADVERTISED_Asym_Pause;
12306 }
12307 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12308 advertising |= ADVERTISED_Asym_Pause;
12309 }
12310 }
12311 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12312 advertising);
12313
12314 if (netif_running(dev) && tp->link_up) {
12315 cmd->base.speed = tp->link_config.active_speed;
12316 cmd->base.duplex = tp->link_config.active_duplex;
12317 ethtool_convert_legacy_u32_to_link_mode(
12318 cmd->link_modes.lp_advertising,
12319 tp->link_config.rmt_adv);
12320
12321 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12322 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12323 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12324 else
12325 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12326 }
12327 } else {
12328 cmd->base.speed = SPEED_UNKNOWN;
12329 cmd->base.duplex = DUPLEX_UNKNOWN;
12330 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12331 }
12332 cmd->base.phy_address = tp->phy_addr;
12333 cmd->base.autoneg = tp->link_config.autoneg;
12334 return 0;
12335 }
12336
12337 static int tg3_set_link_ksettings(struct net_device *dev,
12338 const struct ethtool_link_ksettings *cmd)
12339 {
12340 struct tg3 *tp = netdev_priv(dev);
12341 u32 speed = cmd->base.speed;
12342 u32 advertising;
12343
12344 if (tg3_flag(tp, USE_PHYLIB)) {
12345 struct phy_device *phydev;
12346 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12347 return -EAGAIN;
12348 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12349 return phy_ethtool_ksettings_set(phydev, cmd);
12350 }
12351
12352 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12353 cmd->base.autoneg != AUTONEG_DISABLE)
12354 return -EINVAL;
12355
12356 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12357 cmd->base.duplex != DUPLEX_FULL &&
12358 cmd->base.duplex != DUPLEX_HALF)
12359 return -EINVAL;
12360
12361 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12362 cmd->link_modes.advertising);
12363
12364 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12365 u32 mask = ADVERTISED_Autoneg |
12366 ADVERTISED_Pause |
12367 ADVERTISED_Asym_Pause;
12368
12369 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12370 mask |= ADVERTISED_1000baseT_Half |
12371 ADVERTISED_1000baseT_Full;
12372
12373 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12374 mask |= ADVERTISED_100baseT_Half |
12375 ADVERTISED_100baseT_Full |
12376 ADVERTISED_10baseT_Half |
12377 ADVERTISED_10baseT_Full |
12378 ADVERTISED_TP;
12379 else
12380 mask |= ADVERTISED_FIBRE;
12381
12382 if (advertising & ~mask)
12383 return -EINVAL;
12384
12385 mask &= (ADVERTISED_1000baseT_Half |
12386 ADVERTISED_1000baseT_Full |
12387 ADVERTISED_100baseT_Half |
12388 ADVERTISED_100baseT_Full |
12389 ADVERTISED_10baseT_Half |
12390 ADVERTISED_10baseT_Full);
12391
12392 advertising &= mask;
12393 } else {
12394 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12395 if (speed != SPEED_1000)
12396 return -EINVAL;
12397
12398 if (cmd->base.duplex != DUPLEX_FULL)
12399 return -EINVAL;
12400 } else {
12401 if (speed != SPEED_100 &&
12402 speed != SPEED_10)
12403 return -EINVAL;
12404 }
12405 }
12406
12407 tg3_full_lock(tp, 0);
12408
12409 tp->link_config.autoneg = cmd->base.autoneg;
12410 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12411 tp->link_config.advertising = (advertising |
12412 ADVERTISED_Autoneg);
12413 tp->link_config.speed = SPEED_UNKNOWN;
12414 tp->link_config.duplex = DUPLEX_UNKNOWN;
12415 } else {
12416 tp->link_config.advertising = 0;
12417 tp->link_config.speed = speed;
12418 tp->link_config.duplex = cmd->base.duplex;
12419 }
12420
12421 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12422
12423 tg3_warn_mgmt_link_flap(tp);
12424
12425 if (netif_running(dev))
12426 tg3_setup_phy(tp, true);
12427
12428 tg3_full_unlock(tp);
12429
12430 return 0;
12431 }
12432
12433 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12434 {
12435 struct tg3 *tp = netdev_priv(dev);
12436
12437 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12438 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12439 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12440 }
12441
12442 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12443 {
12444 struct tg3 *tp = netdev_priv(dev);
12445
12446 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12447 wol->supported = WAKE_MAGIC;
12448 else
12449 wol->supported = 0;
12450 wol->wolopts = 0;
12451 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12452 wol->wolopts = WAKE_MAGIC;
12453 memset(&wol->sopass, 0, sizeof(wol->sopass));
12454 }
12455
12456 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12457 {
12458 struct tg3 *tp = netdev_priv(dev);
12459 struct device *dp = &tp->pdev->dev;
12460
12461 if (wol->wolopts & ~WAKE_MAGIC)
12462 return -EINVAL;
12463 if ((wol->wolopts & WAKE_MAGIC) &&
12464 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12465 return -EINVAL;
12466
12467 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12468
12469 if (device_may_wakeup(dp))
12470 tg3_flag_set(tp, WOL_ENABLE);
12471 else
12472 tg3_flag_clear(tp, WOL_ENABLE);
12473
12474 return 0;
12475 }
12476
12477 static u32 tg3_get_msglevel(struct net_device *dev)
12478 {
12479 struct tg3 *tp = netdev_priv(dev);
12480 return tp->msg_enable;
12481 }
12482
12483 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12484 {
12485 struct tg3 *tp = netdev_priv(dev);
12486 tp->msg_enable = value;
12487 }
12488
12489 static int tg3_nway_reset(struct net_device *dev)
12490 {
12491 struct tg3 *tp = netdev_priv(dev);
12492 int r;
12493
12494 if (!netif_running(dev))
12495 return -EAGAIN;
12496
12497 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12498 return -EINVAL;
12499
12500 tg3_warn_mgmt_link_flap(tp);
12501
12502 if (tg3_flag(tp, USE_PHYLIB)) {
12503 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12504 return -EAGAIN;
12505 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12506 } else {
12507 u32 bmcr;
12508
12509 spin_lock_bh(&tp->lock);
12510 r = -EINVAL;
12511 tg3_readphy(tp, MII_BMCR, &bmcr);
12512 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12513 ((bmcr & BMCR_ANENABLE) ||
12514 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12515 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12516 BMCR_ANENABLE);
12517 r = 0;
12518 }
12519 spin_unlock_bh(&tp->lock);
12520 }
12521
12522 return r;
12523 }
12524
12525 static void tg3_get_ringparam(struct net_device *dev,
12526 struct ethtool_ringparam *ering,
12527 struct kernel_ethtool_ringparam *kernel_ering,
12528 struct netlink_ext_ack *extack)
12529 {
12530 struct tg3 *tp = netdev_priv(dev);
12531
12532 ering->rx_max_pending = tp->rx_std_ring_mask;
12533 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12534 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12535 else
12536 ering->rx_jumbo_max_pending = 0;
12537
12538 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12539
12540 ering->rx_pending = tp->rx_pending;
12541 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12542 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12543 else
12544 ering->rx_jumbo_pending = 0;
12545
12546 ering->tx_pending = tp->napi[0].tx_pending;
12547 }
12548
12549 static int tg3_set_ringparam(struct net_device *dev,
12550 struct ethtool_ringparam *ering,
12551 struct kernel_ethtool_ringparam *kernel_ering,
12552 struct netlink_ext_ack *extack)
12553 {
12554 struct tg3 *tp = netdev_priv(dev);
12555 int i, irq_sync = 0, err = 0;
12556 bool reset_phy = false;
12557
12558 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12559 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12560 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12561 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12562 (tg3_flag(tp, TSO_BUG) &&
12563 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12564 return -EINVAL;
12565
12566 if (netif_running(dev)) {
12567 tg3_phy_stop(tp);
12568 tg3_netif_stop(tp);
12569 irq_sync = 1;
12570 }
12571
12572 tg3_full_lock(tp, irq_sync);
12573
12574 tp->rx_pending = ering->rx_pending;
12575
12576 if (tg3_flag(tp, MAX_RXPEND_64) &&
12577 tp->rx_pending > 63)
12578 tp->rx_pending = 63;
12579
12580 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12581 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12582
12583 for (i = 0; i < tp->irq_max; i++)
12584 tp->napi[i].tx_pending = ering->tx_pending;
12585
12586 if (netif_running(dev)) {
12587 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12588 /* Reset PHY to avoid PHY lock up */
12589 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12590 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12591 tg3_asic_rev(tp) == ASIC_REV_5720)
12592 reset_phy = true;
12593
12594 err = tg3_restart_hw(tp, reset_phy);
12595 if (!err)
12596 tg3_netif_start(tp);
12597 }
12598
12599 tg3_full_unlock(tp);
12600
12601 if (irq_sync && !err)
12602 tg3_phy_start(tp);
12603
12604 return err;
12605 }
12606
12607 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12608 {
12609 struct tg3 *tp = netdev_priv(dev);
12610
12611 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12612
12613 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12614 epause->rx_pause = 1;
12615 else
12616 epause->rx_pause = 0;
12617
12618 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12619 epause->tx_pause = 1;
12620 else
12621 epause->tx_pause = 0;
12622 }
12623
12624 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12625 {
12626 struct tg3 *tp = netdev_priv(dev);
12627 int err = 0;
12628 bool reset_phy = false;
12629
12630 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12631 tg3_warn_mgmt_link_flap(tp);
12632
12633 if (tg3_flag(tp, USE_PHYLIB)) {
12634 struct phy_device *phydev;
12635
12636 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12637
12638 if (!phy_validate_pause(phydev, epause))
12639 return -EINVAL;
12640
12641 tp->link_config.flowctrl = 0;
12642 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12643 if (epause->rx_pause) {
12644 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12645
12646 if (epause->tx_pause) {
12647 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12648 }
12649 } else if (epause->tx_pause) {
12650 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12651 }
12652
12653 if (epause->autoneg)
12654 tg3_flag_set(tp, PAUSE_AUTONEG);
12655 else
12656 tg3_flag_clear(tp, PAUSE_AUTONEG);
12657
12658 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12659 if (phydev->autoneg) {
12660 /* phy_set_asym_pause() will
12661 * renegotiate the link to inform our
12662 * link partner of our flow control
12663 * settings, even if the flow control
12664 * is forced. Let tg3_adjust_link()
12665 * do the final flow control setup.
12666 */
12667 return 0;
12668 }
12669
12670 if (!epause->autoneg)
12671 tg3_setup_flow_control(tp, 0, 0);
12672 }
12673 } else {
12674 int irq_sync = 0;
12675
12676 if (netif_running(dev)) {
12677 tg3_netif_stop(tp);
12678 irq_sync = 1;
12679 }
12680
12681 tg3_full_lock(tp, irq_sync);
12682
12683 if (epause->autoneg)
12684 tg3_flag_set(tp, PAUSE_AUTONEG);
12685 else
12686 tg3_flag_clear(tp, PAUSE_AUTONEG);
12687 if (epause->rx_pause)
12688 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12689 else
12690 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12691 if (epause->tx_pause)
12692 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12693 else
12694 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12695
12696 if (netif_running(dev)) {
12697 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12698 /* Reset PHY to avoid PHY lock up */
12699 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12700 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12701 tg3_asic_rev(tp) == ASIC_REV_5720)
12702 reset_phy = true;
12703
12704 err = tg3_restart_hw(tp, reset_phy);
12705 if (!err)
12706 tg3_netif_start(tp);
12707 }
12708
12709 tg3_full_unlock(tp);
12710 }
12711
12712 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12713
12714 return err;
12715 }
12716
12717 static int tg3_get_sset_count(struct net_device *dev, int sset)
12718 {
12719 switch (sset) {
12720 case ETH_SS_TEST:
12721 return TG3_NUM_TEST;
12722 case ETH_SS_STATS:
12723 return TG3_NUM_STATS;
12724 default:
12725 return -EOPNOTSUPP;
12726 }
12727 }
12728
12729 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12730 u32 *rules __always_unused)
12731 {
12732 struct tg3 *tp = netdev_priv(dev);
12733
12734 if (!tg3_flag(tp, SUPPORT_MSIX))
12735 return -EOPNOTSUPP;
12736
12737 switch (info->cmd) {
12738 case ETHTOOL_GRXRINGS:
12739 if (netif_running(tp->dev))
12740 info->data = tp->rxq_cnt;
12741 else {
12742 info->data = num_online_cpus();
12743 if (info->data > TG3_RSS_MAX_NUM_QS)
12744 info->data = TG3_RSS_MAX_NUM_QS;
12745 }
12746
12747 return 0;
12748
12749 default:
12750 return -EOPNOTSUPP;
12751 }
12752 }
12753
12754 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12755 {
12756 u32 size = 0;
12757 struct tg3 *tp = netdev_priv(dev);
12758
12759 if (tg3_flag(tp, SUPPORT_MSIX))
12760 size = TG3_RSS_INDIR_TBL_SIZE;
12761
12762 return size;
12763 }
12764
12765 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12766 {
12767 struct tg3 *tp = netdev_priv(dev);
12768 int i;
12769
12770 rxfh->hfunc = ETH_RSS_HASH_TOP;
12771 if (!rxfh->indir)
12772 return 0;
12773
12774 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12775 rxfh->indir[i] = tp->rss_ind_tbl[i];
12776
12777 return 0;
12778 }
12779
12780 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12781 struct netlink_ext_ack *extack)
12782 {
12783 struct tg3 *tp = netdev_priv(dev);
12784 size_t i;
12785
12786 /* We require at least one supported parameter to be changed and no
12787 * change in any of the unsupported parameters
12788 */
12789 if (rxfh->key ||
12790 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12791 rxfh->hfunc != ETH_RSS_HASH_TOP))
12792 return -EOPNOTSUPP;
12793
12794 if (!rxfh->indir)
12795 return 0;
12796
12797 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12798 tp->rss_ind_tbl[i] = rxfh->indir[i];
12799
12800 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12801 return 0;
12802
12803 /* It is legal to write the indirection
12804 * table while the device is running.
12805 */
12806 tg3_full_lock(tp, 0);
12807 tg3_rss_write_indir_tbl(tp);
12808 tg3_full_unlock(tp);
12809
12810 return 0;
12811 }
12812
12813 static void tg3_get_channels(struct net_device *dev,
12814 struct ethtool_channels *channel)
12815 {
12816 struct tg3 *tp = netdev_priv(dev);
12817 u32 deflt_qs = netif_get_num_default_rss_queues();
12818
12819 channel->max_rx = tp->rxq_max;
12820 channel->max_tx = tp->txq_max;
12821
12822 if (netif_running(dev)) {
12823 channel->rx_count = tp->rxq_cnt;
12824 channel->tx_count = tp->txq_cnt;
12825 } else {
12826 if (tp->rxq_req)
12827 channel->rx_count = tp->rxq_req;
12828 else
12829 channel->rx_count = min(deflt_qs, tp->rxq_max);
12830
12831 if (tp->txq_req)
12832 channel->tx_count = tp->txq_req;
12833 else
12834 channel->tx_count = min(deflt_qs, tp->txq_max);
12835 }
12836 }
12837
12838 static int tg3_set_channels(struct net_device *dev,
12839 struct ethtool_channels *channel)
12840 {
12841 struct tg3 *tp = netdev_priv(dev);
12842
12843 if (!tg3_flag(tp, SUPPORT_MSIX))
12844 return -EOPNOTSUPP;
12845
12846 if (channel->rx_count > tp->rxq_max ||
12847 channel->tx_count > tp->txq_max)
12848 return -EINVAL;
12849
12850 tp->rxq_req = channel->rx_count;
12851 tp->txq_req = channel->tx_count;
12852
12853 if (!netif_running(dev))
12854 return 0;
12855
12856 tg3_stop(tp);
12857
12858 tg3_carrier_off(tp);
12859
12860 tg3_start(tp, true, false, false);
12861
12862 return 0;
12863 }
12864
12865 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12866 {
12867 switch (stringset) {
12868 case ETH_SS_STATS:
12869 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12870 break;
12871 case ETH_SS_TEST:
12872 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12873 break;
12874 default:
12875 WARN_ON(1); /* we need a WARN() */
12876 break;
12877 }
12878 }
12879
12880 static int tg3_set_phys_id(struct net_device *dev,
12881 enum ethtool_phys_id_state state)
12882 {
12883 struct tg3 *tp = netdev_priv(dev);
12884
12885 switch (state) {
12886 case ETHTOOL_ID_ACTIVE:
12887 return 1; /* cycle on/off once per second */
12888
12889 case ETHTOOL_ID_ON:
12890 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12891 LED_CTRL_1000MBPS_ON |
12892 LED_CTRL_100MBPS_ON |
12893 LED_CTRL_10MBPS_ON |
12894 LED_CTRL_TRAFFIC_OVERRIDE |
12895 LED_CTRL_TRAFFIC_BLINK |
12896 LED_CTRL_TRAFFIC_LED);
12897 break;
12898
12899 case ETHTOOL_ID_OFF:
12900 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12901 LED_CTRL_TRAFFIC_OVERRIDE);
12902 break;
12903
12904 case ETHTOOL_ID_INACTIVE:
12905 tw32(MAC_LED_CTRL, tp->led_ctrl);
12906 break;
12907 }
12908
12909 return 0;
12910 }
12911
12912 static void tg3_get_ethtool_stats(struct net_device *dev,
12913 struct ethtool_stats *estats, u64 *tmp_stats)
12914 {
12915 struct tg3 *tp = netdev_priv(dev);
12916
12917 if (tp->hw_stats)
12918 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12919 else
12920 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12921 }
12922
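/* Return a kmalloc'd copy of the VPD data. Devices with standard
 * NVRAM read the extended VPD block named in the NVRAM directory (or
 * the default VPD region); others use the PCI VPD capability.
 * *vpdlen receives the length; the caller must kfree() the buffer.
 */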
12923 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12924 {
12925 int i;
12926 __be32 *buf;
12927 u32 offset = 0, len = 0;
12928 u32 magic, val;
12929
12930 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12931 return NULL;
12932
12933 if (magic == TG3_EEPROM_MAGIC) {
12934 for (offset = TG3_NVM_DIR_START;
12935 offset < TG3_NVM_DIR_END;
12936 offset += TG3_NVM_DIRENT_SIZE) {
12937 if (tg3_nvram_read(tp, offset, &val))
12938 return NULL;
12939
12940 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12941 TG3_NVM_DIRTYPE_EXTVPD)
12942 break;
12943 }
12944
12945 if (offset != TG3_NVM_DIR_END) {
12946 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12947 if (tg3_nvram_read(tp, offset + 4, &offset))
12948 return NULL;
12949
12950 offset = tg3_nvram_logical_addr(tp, offset);
12951 }
12952
12953 if (!offset || !len) {
12954 offset = TG3_NVM_VPD_OFF;
12955 len = TG3_NVM_VPD_LEN;
12956 }
12957
12958 buf = kmalloc(len, GFP_KERNEL);
12959 if (!buf)
12960 return NULL;
12961
12962 for (i = 0; i < len; i += 4) {
12963 /* The data is in little-endian format in NVRAM.
12964 * Use the big-endian read routines to preserve
12965 * the byte order as it exists in NVRAM.
12966 */
12967 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12968 goto error;
12969 }
12970 *vpdlen = len;
12971 } else {
12972 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12973 if (IS_ERR(buf))
12974 return NULL;
12975 }
12976
12977 return buf;
12978
12979 error:
12980 kfree(buf);
12981 return NULL;
12982 }
12983
12984 #define NVRAM_TEST_SIZE 0x100
12985 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12986 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12987 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12988 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12989 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12990 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12991 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12992 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12993
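/* ethtool self-test: validate NVRAM contents. Depending on the magic
 * value this verifies a simple byte checksum (selfboot firmware
 * format), per-byte odd parity (hardware selfboot format), or the
 * CRCs of the bootstrap and manufacturing blocks plus the VPD
 * checksum.
 */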
12994 static int tg3_test_nvram(struct tg3 *tp)
12995 {
12996 u32 csum, magic;
12997 __be32 *buf;
12998 int i, j, k, err = 0, size;
12999 unsigned int len;
13000
13001 if (tg3_flag(tp, NO_NVRAM))
13002 return 0;
13003
13004 if (tg3_nvram_read(tp, 0, &magic) != 0)
13005 return -EIO;
13006
13007 if (magic == TG3_EEPROM_MAGIC)
13008 size = NVRAM_TEST_SIZE;
13009 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
13010 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
13011 TG3_EEPROM_SB_FORMAT_1) {
13012 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
13013 case TG3_EEPROM_SB_REVISION_0:
13014 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
13015 break;
13016 case TG3_EEPROM_SB_REVISION_2:
13017 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13018 break;
13019 case TG3_EEPROM_SB_REVISION_3:
13020 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13021 break;
13022 case TG3_EEPROM_SB_REVISION_4:
13023 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13024 break;
13025 case TG3_EEPROM_SB_REVISION_5:
13026 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13027 break;
13028 case TG3_EEPROM_SB_REVISION_6:
13029 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13030 break;
13031 default:
13032 return -EIO;
13033 }
13034 } else
13035 return 0;
13036 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13037 size = NVRAM_SELFBOOT_HW_SIZE;
13038 else
13039 return -EIO;
13040
13041 buf = kmalloc(size, GFP_KERNEL);
13042 if (buf == NULL)
13043 return -ENOMEM;
13044
13045 err = -EIO;
13046 for (i = 0, j = 0; i < size; i += 4, j++) {
13047 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13048 if (err)
13049 break;
13050 }
13051 if (i < size)
13052 goto out;
13053
13054 /* Selfboot format */
13055 magic = be32_to_cpu(buf[0]);
13056 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13057 TG3_EEPROM_MAGIC_FW) {
13058 u8 *buf8 = (u8 *) buf, csum8 = 0;
13059
13060 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13061 TG3_EEPROM_SB_REVISION_2) {
13062 /* For rev 2, the csum doesn't include the MBA. */
13063 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13064 csum8 += buf8[i];
13065 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13066 csum8 += buf8[i];
13067 } else {
13068 for (i = 0; i < size; i++)
13069 csum8 += buf8[i];
13070 }
13071
13072 if (csum8 == 0) {
13073 err = 0;
13074 goto out;
13075 }
13076
13077 err = -EIO;
13078 goto out;
13079 }
13080
13081 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13082 TG3_EEPROM_MAGIC_HW) {
13083 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13084 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13085 u8 *buf8 = (u8 *) buf;
13086
13087 /* Separate the parity bits and the data bytes. */
13088 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13089 if ((i == 0) || (i == 8)) {
13090 int l;
13091 u8 msk;
13092
13093 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13094 parity[k++] = buf8[i] & msk;
13095 i++;
13096 } else if (i == 16) {
13097 int l;
13098 u8 msk;
13099
13100 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13101 parity[k++] = buf8[i] & msk;
13102 i++;
13103
13104 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13105 parity[k++] = buf8[i] & msk;
13106 i++;
13107 }
13108 data[j++] = buf8[i];
13109 }
13110
13111 err = -EIO;
13112 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13113 u8 hw8 = hweight8(data[i]);
13114
13115 if ((hw8 & 0x1) && parity[i])
13116 goto out;
13117 else if (!(hw8 & 0x1) && !parity[i])
13118 goto out;
13119 }
13120 err = 0;
13121 goto out;
13122 }
13123
13124 err = -EIO;
13125
13126 /* Bootstrap checksum at offset 0x10 */
13127 csum = calc_crc((unsigned char *) buf, 0x10);
13128
13129 /* The type of buf is __be32 *, but this value is __le32 */
13130 if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13131 goto out;
13132
13133 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13134 csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13135
13136 /* The type of buf is __be32 *, but this value is __le32 */
13137 if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13138 goto out;
13139
13140 kfree(buf);
13141
13142 buf = tg3_vpd_readblock(tp, &len);
13143 if (!buf)
13144 return -ENOMEM;
13145
13146 err = pci_vpd_check_csum(buf, len);
13147 /* go on if no checksum found */
13148 if (err == 1)
13149 err = 0;
13150 out:
13151 kfree(buf);
13152 return err;
13153 }
13154
13155 #define TG3_SERDES_TIMEOUT_SEC 2
13156 #define TG3_COPPER_TIMEOUT_SEC 6
13157
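/* ethtool self-test: wait for link-up, allowing copper links more
 * time to autonegotiate than SerDes links.
 */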
13158 static int tg3_test_link(struct tg3 *tp)
13159 {
13160 int i, max;
13161
13162 if (!netif_running(tp->dev))
13163 return -ENODEV;
13164
13165 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13166 max = TG3_SERDES_TIMEOUT_SEC;
13167 else
13168 max = TG3_COPPER_TIMEOUT_SEC;
13169
13170 for (i = 0; i < max; i++) {
13171 if (tp->link_up)
13172 return 0;
13173
13174 if (msleep_interruptible(1000))
13175 break;
13176 }
13177
13178 return -EIO;
13179 }
13180
13181 /* Only test the commonly used registers */
13182 static int tg3_test_registers(struct tg3 *tp)
13183 {
13184 int i, is_5705, is_5750;
13185 u32 offset, read_mask, write_mask, val, save_val, read_val;
13186 static struct {
13187 u16 offset;
13188 u16 flags;
13189 #define TG3_FL_5705 0x1
13190 #define TG3_FL_NOT_5705 0x2
13191 #define TG3_FL_NOT_5788 0x4
13192 #define TG3_FL_NOT_5750 0x8
13193 u32 read_mask;
13194 u32 write_mask;
13195 } reg_tbl[] = {
13196 /* MAC Control Registers */
13197 { MAC_MODE, TG3_FL_NOT_5705,
13198 0x00000000, 0x00ef6f8c },
13199 { MAC_MODE, TG3_FL_5705,
13200 0x00000000, 0x01ef6b8c },
13201 { MAC_STATUS, TG3_FL_NOT_5705,
13202 0x03800107, 0x00000000 },
13203 { MAC_STATUS, TG3_FL_5705,
13204 0x03800100, 0x00000000 },
13205 { MAC_ADDR_0_HIGH, 0x0000,
13206 0x00000000, 0x0000ffff },
13207 { MAC_ADDR_0_LOW, 0x0000,
13208 0x00000000, 0xffffffff },
13209 { MAC_RX_MTU_SIZE, 0x0000,
13210 0x00000000, 0x0000ffff },
13211 { MAC_TX_MODE, 0x0000,
13212 0x00000000, 0x00000070 },
13213 { MAC_TX_LENGTHS, 0x0000,
13214 0x00000000, 0x00003fff },
13215 { MAC_RX_MODE, TG3_FL_NOT_5705,
13216 0x00000000, 0x000007fc },
13217 { MAC_RX_MODE, TG3_FL_5705,
13218 0x00000000, 0x000007dc },
13219 { MAC_HASH_REG_0, 0x0000,
13220 0x00000000, 0xffffffff },
13221 { MAC_HASH_REG_1, 0x0000,
13222 0x00000000, 0xffffffff },
13223 { MAC_HASH_REG_2, 0x0000,
13224 0x00000000, 0xffffffff },
13225 { MAC_HASH_REG_3, 0x0000,
13226 0x00000000, 0xffffffff },
13227
13228 /* Receive Data and Receive BD Initiator Control Registers. */
13229 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13230 0x00000000, 0xffffffff },
13231 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13232 0x00000000, 0xffffffff },
13233 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13234 0x00000000, 0x00000003 },
13235 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13236 0x00000000, 0xffffffff },
13237 { RCVDBDI_STD_BD+0, 0x0000,
13238 0x00000000, 0xffffffff },
13239 { RCVDBDI_STD_BD+4, 0x0000,
13240 0x00000000, 0xffffffff },
13241 { RCVDBDI_STD_BD+8, 0x0000,
13242 0x00000000, 0xffff0002 },
13243 { RCVDBDI_STD_BD+0xc, 0x0000,
13244 0x00000000, 0xffffffff },
13245
13246 /* Receive BD Initiator Control Registers. */
13247 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13248 0x00000000, 0xffffffff },
13249 { RCVBDI_STD_THRESH, TG3_FL_5705,
13250 0x00000000, 0x000003ff },
13251 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13252 0x00000000, 0xffffffff },
13253
13254 /* Host Coalescing Control Registers. */
13255 { HOSTCC_MODE, TG3_FL_NOT_5705,
13256 0x00000000, 0x00000004 },
13257 { HOSTCC_MODE, TG3_FL_5705,
13258 0x00000000, 0x000000f6 },
13259 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13260 0x00000000, 0xffffffff },
13261 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13262 0x00000000, 0x000003ff },
13263 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13264 0x00000000, 0xffffffff },
13265 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13266 0x00000000, 0x000003ff },
13267 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13268 0x00000000, 0xffffffff },
13269 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13270 0x00000000, 0x000000ff },
13271 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13272 0x00000000, 0xffffffff },
13273 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13274 0x00000000, 0x000000ff },
13275 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13276 0x00000000, 0xffffffff },
13277 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13278 0x00000000, 0xffffffff },
13279 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13280 0x00000000, 0xffffffff },
13281 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13282 0x00000000, 0x000000ff },
13283 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13284 0x00000000, 0xffffffff },
13285 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13286 0x00000000, 0x000000ff },
13287 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13288 0x00000000, 0xffffffff },
13289 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13290 0x00000000, 0xffffffff },
13291 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13292 0x00000000, 0xffffffff },
13293 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13294 0x00000000, 0xffffffff },
13295 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13296 0x00000000, 0xffffffff },
13297 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13298 0xffffffff, 0x00000000 },
13299 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13300 0xffffffff, 0x00000000 },
13301
13302 /* Buffer Manager Control Registers. */
13303 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13304 0x00000000, 0x007fff80 },
13305 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13306 0x00000000, 0x007fffff },
13307 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13308 0x00000000, 0x0000003f },
13309 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13310 0x00000000, 0x000001ff },
13311 { BUFMGR_MB_HIGH_WATER, 0x0000,
13312 0x00000000, 0x000001ff },
13313 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13314 0xffffffff, 0x00000000 },
13315 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13316 0xffffffff, 0x00000000 },
13317
13318 /* Mailbox Registers */
13319 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13320 0x00000000, 0x000001ff },
13321 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13322 0x00000000, 0x000001ff },
13323 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13324 0x00000000, 0x000007ff },
13325 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13326 0x00000000, 0x000001ff },
13327
13328 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13329 };
13330
13331 is_5705 = is_5750 = 0;
13332 if (tg3_flag(tp, 5705_PLUS)) {
13333 is_5705 = 1;
13334 if (tg3_flag(tp, 5750_PLUS))
13335 is_5750 = 1;
13336 }
13337
13338 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13339 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13340 continue;
13341
13342 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13343 continue;
13344
13345 if (tg3_flag(tp, IS_5788) &&
13346 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13347 continue;
13348
13349 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13350 continue;
13351
13352 offset = (u32) reg_tbl[i].offset;
13353 read_mask = reg_tbl[i].read_mask;
13354 write_mask = reg_tbl[i].write_mask;
13355
13356 /* Save the original register content */
13357 save_val = tr32(offset);
13358
13359 /* Determine the read-only value. */
13360 read_val = save_val & read_mask;
13361
13362 /* Write zero to the register, then make sure the read-only bits
13363 * are not changed and the read/write bits are all zeros.
13364 */
13365 tw32(offset, 0);
13366
13367 val = tr32(offset);
13368
13369 /* Test the read-only and read/write bits. */
13370 if (((val & read_mask) != read_val) || (val & write_mask))
13371 goto out;
13372
13373 /* Write ones to all the bits defined by RdMask and WrMask, then
13374 * make sure the read-only bits are not changed and the
13375 * read/write bits are all ones.
13376 */
13377 tw32(offset, read_mask | write_mask);
13378
13379 val = tr32(offset);
13380
13381 /* Test the read-only bits. */
13382 if ((val & read_mask) != read_val)
13383 goto out;
13384
13385 /* Test the read/write bits. */
13386 if ((val & write_mask) != write_mask)
13387 goto out;
13388
13389 tw32(offset, save_val);
13390 }
13391
13392 return 0;
13393
13394 out:
13395 if (netif_msg_hw(tp))
13396 netdev_err(tp->dev,
13397 "Register test failed at offset %x\n", offset);
13398 tw32(offset, save_val);
13399 return -EIO;
13400 }
13401
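/* Write each test pattern to every 32-bit word in the given window of
 * device memory and read it back, failing on the first mismatch.
 */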
13402 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13403 {
13404 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13405 int i;
13406 u32 j;
13407
13408 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13409 for (j = 0; j < len; j += 4) {
13410 u32 val;
13411
13412 tg3_write_mem(tp, offset + j, test_pattern[i]);
13413 tg3_read_mem(tp, offset + j, &val);
13414 if (val != test_pattern[i])
13415 return -EIO;
13416 }
13417 }
13418 return 0;
13419 }
13420
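/* Select the memory map matching this ASIC generation and run the
 * pattern test over each region in it.
 */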
13421 static int tg3_test_memory(struct tg3 *tp)
13422 {
13423 static struct mem_entry {
13424 u32 offset;
13425 u32 len;
13426 } mem_tbl_570x[] = {
13427 { 0x00000000, 0x00b50},
13428 { 0x00002000, 0x1c000},
13429 { 0xffffffff, 0x00000}
13430 }, mem_tbl_5705[] = {
13431 { 0x00000100, 0x0000c},
13432 { 0x00000200, 0x00008},
13433 { 0x00004000, 0x00800},
13434 { 0x00006000, 0x01000},
13435 { 0x00008000, 0x02000},
13436 { 0x00010000, 0x0e000},
13437 { 0xffffffff, 0x00000}
13438 }, mem_tbl_5755[] = {
13439 { 0x00000200, 0x00008},
13440 { 0x00004000, 0x00800},
13441 { 0x00006000, 0x00800},
13442 { 0x00008000, 0x02000},
13443 { 0x00010000, 0x0c000},
13444 { 0xffffffff, 0x00000}
13445 }, mem_tbl_5906[] = {
13446 { 0x00000200, 0x00008},
13447 { 0x00004000, 0x00400},
13448 { 0x00006000, 0x00400},
13449 { 0x00008000, 0x01000},
13450 { 0x00010000, 0x01000},
13451 { 0xffffffff, 0x00000}
13452 }, mem_tbl_5717[] = {
13453 { 0x00000200, 0x00008},
13454 { 0x00010000, 0x0a000},
13455 { 0x00020000, 0x13c00},
13456 { 0xffffffff, 0x00000}
13457 }, mem_tbl_57765[] = {
13458 { 0x00000200, 0x00008},
13459 { 0x00004000, 0x00800},
13460 { 0x00006000, 0x09800},
13461 { 0x00010000, 0x0a000},
13462 { 0xffffffff, 0x00000}
13463 };
13464 struct mem_entry *mem_tbl;
13465 int err = 0;
13466 int i;
13467
13468 if (tg3_flag(tp, 5717_PLUS))
13469 mem_tbl = mem_tbl_5717;
13470 else if (tg3_flag(tp, 57765_CLASS) ||
13471 tg3_asic_rev(tp) == ASIC_REV_5762)
13472 mem_tbl = mem_tbl_57765;
13473 else if (tg3_flag(tp, 5755_PLUS))
13474 mem_tbl = mem_tbl_5755;
13475 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13476 mem_tbl = mem_tbl_5906;
13477 else if (tg3_flag(tp, 5705_PLUS))
13478 mem_tbl = mem_tbl_5705;
13479 else
13480 mem_tbl = mem_tbl_570x;
13481
13482 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13483 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13484 if (err)
13485 break;
13486 }
13487
13488 return err;
13489 }
13490
13491 #define TG3_TSO_MSS 500
13492
13493 #define TG3_TSO_IP_HDR_LEN 20
13494 #define TG3_TSO_TCP_HDR_LEN 20
13495 #define TG3_TSO_TCP_OPT_LEN 12
13496
13497 static const u8 tg3_tso_header[] = {
13498 0x08, 0x00,			/* ethertype: IPv4 */
13499 0x45, 0x00, 0x00, 0x00,	/* IP: version/IHL, TOS, tot_len (set at runtime) */
13500 0x00, 0x00, 0x40, 0x00,	/* IP: id, frag_off (DF) */
13501 0x40, 0x06, 0x00, 0x00,	/* IP: TTL 64, protocol TCP, checksum */
13502 0x0a, 0x00, 0x00, 0x01,	/* IP: saddr 10.0.0.1 */
13503 0x0a, 0x00, 0x00, 0x02,	/* IP: daddr 10.0.0.2 */
13504 0x0d, 0x00, 0xe0, 0x00,	/* TCP: source and destination ports */
13505 0x00, 0x00, 0x01, 0x00,	/* TCP: sequence number */
13506 0x00, 0x00, 0x02, 0x00,	/* TCP: ack number */
13507 0x80, 0x10, 0x10, 0x00,	/* TCP: data offset 8 (32 bytes), flags ACK, window */
13508 0x14, 0x09, 0x00, 0x00,	/* TCP: checksum, urgent pointer */
13509 0x01, 0x01, 0x08, 0x0a,	/* TCP options: NOP, NOP, timestamp (kind 8, len 10) */
13510 0x11, 0x11, 0x11, 0x11,	/* TCP options: timestamp value */
13511 0x11, 0x11, 0x11, 0x11,	/* TCP options: timestamp echo reply */
13512 };
13513
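/* Send one test frame (a TSO frame when tso_loopback is set) on the
 * TX ring and poll until it is consumed and shows up on the RX return
 * ring, then verify the payload byte-for-byte.  Returns 0 on success,
 * -EIO on any loss, corruption, or timeout.
 */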
13514 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13515 {
13516 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13517 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13518 u32 budget;
13519 struct sk_buff *skb;
13520 u8 *tx_data, *rx_data;
13521 dma_addr_t map;
13522 int num_pkts, tx_len, rx_len, i, err;
13523 struct tg3_rx_buffer_desc *desc;
13524 struct tg3_napi *tnapi, *rnapi;
13525 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13526
13527 tnapi = &tp->napi[0];
13528 rnapi = &tp->napi[0];
13529 if (tp->irq_cnt > 1) {
13530 if (tg3_flag(tp, ENABLE_RSS))
13531 rnapi = &tp->napi[1];
13532 if (tg3_flag(tp, ENABLE_TSS))
13533 tnapi = &tp->napi[1];
13534 }
13535 coal_now = tnapi->coal_now | rnapi->coal_now;
13536
13537 err = -EIO;
13538
13539 tx_len = pktsz;
13540 skb = netdev_alloc_skb(tp->dev, tx_len);
13541 if (!skb)
13542 return -ENOMEM;
13543
13544 tx_data = skb_put(skb, tx_len);
13545 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13546 memset(tx_data + ETH_ALEN, 0x0, 8);
13547
13548 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13549
13550 if (tso_loopback) {
13551 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13552
13553 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13554 TG3_TSO_TCP_OPT_LEN;
13555
13556 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13557 sizeof(tg3_tso_header));
13558 mss = TG3_TSO_MSS;
13559
13560 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13561 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13562
13563 /* Set the total length field in the IP header */
13564 iph->tot_len = htons((u16)(mss + hdr_len));
13565
13566 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13567 TXD_FLAG_CPU_POST_DMA);
13568
13569 if (tg3_flag(tp, HW_TSO_1) ||
13570 tg3_flag(tp, HW_TSO_2) ||
13571 tg3_flag(tp, HW_TSO_3)) {
13572 struct tcphdr *th;
13573 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13574 th = (struct tcphdr *)&tx_data[val];
13575 th->check = 0;
13576 } else
13577 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13578
13579 if (tg3_flag(tp, HW_TSO_3)) {
13580 mss |= (hdr_len & 0xc) << 12;
13581 if (hdr_len & 0x10)
13582 base_flags |= 0x00000010;
13583 base_flags |= (hdr_len & 0x3e0) << 5;
13584 } else if (tg3_flag(tp, HW_TSO_2))
13585 mss |= hdr_len << 9;
13586 else if (tg3_flag(tp, HW_TSO_1) ||
13587 tg3_asic_rev(tp) == ASIC_REV_5705) {
13588 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13589 } else {
13590 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13591 }
13592
13593 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13594 } else {
13595 num_pkts = 1;
13596 data_off = ETH_HLEN;
13597
13598 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13599 tx_len > VLAN_ETH_FRAME_LEN)
13600 base_flags |= TXD_FLAG_JMB_PKT;
13601 }
13602
13603 for (i = data_off; i < tx_len; i++)
13604 tx_data[i] = (u8) (i & 0xff);
13605
13606 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13607 if (dma_mapping_error(&tp->pdev->dev, map)) {
13608 dev_kfree_skb(skb);
13609 return -EIO;
13610 }
13611
13612 val = tnapi->tx_prod;
13613 tnapi->tx_buffers[val].skb = skb;
13614 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13615
13616 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13617 rnapi->coal_now);
13618
13619 udelay(10);
13620
13621 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13622
13623 budget = tg3_tx_avail(tnapi);
13624 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13625 base_flags | TXD_FLAG_END, mss, 0)) {
13626 tnapi->tx_buffers[val].skb = NULL;
13627 dev_kfree_skb(skb);
13628 return -EIO;
13629 }
13630
13631 tnapi->tx_prod++;
13632
13633 /* Sync BD data before updating mailbox */
13634 wmb();
13635
13636 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13637 tr32_mailbox(tnapi->prodmbox);
13638
13639 udelay(10);
13640
13641 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13642 for (i = 0; i < 35; i++) {
13643 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13644 coal_now);
13645
13646 udelay(10);
13647
13648 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13649 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13650 if ((tx_idx == tnapi->tx_prod) &&
13651 (rx_idx == (rx_start_idx + num_pkts)))
13652 break;
13653 }
13654
13655 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13656 dev_kfree_skb(skb);
13657
13658 if (tx_idx != tnapi->tx_prod)
13659 goto out;
13660
13661 if (rx_idx != rx_start_idx + num_pkts)
13662 goto out;
13663
13664 val = data_off;
13665 while (rx_idx != rx_start_idx) {
13666 desc = &rnapi->rx_rcb[rx_start_idx++];
13667 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13668 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13669
13670 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13671 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13672 goto out;
13673
13674 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13675 - ETH_FCS_LEN;
13676
13677 if (!tso_loopback) {
13678 if (rx_len != tx_len)
13679 goto out;
13680
13681 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13682 if (opaque_key != RXD_OPAQUE_RING_STD)
13683 goto out;
13684 } else {
13685 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13686 goto out;
13687 }
13688 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13689 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13690 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13691 goto out;
13692 }
13693
13694 if (opaque_key == RXD_OPAQUE_RING_STD) {
13695 rx_data = tpr->rx_std_buffers[desc_idx].data;
13696 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13697 mapping);
13698 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13699 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13700 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13701 mapping);
13702 } else
13703 goto out;
13704
13705 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13706 DMA_FROM_DEVICE);
13707
13708 rx_data += TG3_RX_OFFSET(tp);
13709 for (i = data_off; i < rx_len; i++, val++) {
13710 if (*(rx_data + i) != (u8) (val & 0xff))
13711 goto out;
13712 }
13713 }
13714
13715 err = 0;
13716
13717 /* tg3_free_rings will unmap and free the rx_data */
13718 out:
13719 return err;
13720 }
13721
13722 #define TG3_STD_LOOPBACK_FAILED 1
13723 #define TG3_JMB_LOOPBACK_FAILED 2
13724 #define TG3_TSO_LOOPBACK_FAILED 4
13725 #define TG3_LOOPBACK_FAILED \
13726 (TG3_STD_LOOPBACK_FAILED | \
13727 TG3_JMB_LOOPBACK_FAILED | \
13728 TG3_TSO_LOOPBACK_FAILED)
13729
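/* Run the internal MAC and PHY loopback tests, and the external
 * loopback test when requested, accumulating per-mode failure bits in
 * data[].  EEE is masked off for the duration so low-power idle
 * cannot disturb the results.
 */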
13730 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13731 {
13732 int err = -EIO;
13733 u32 eee_cap;
13734 u32 jmb_pkt_sz = 9000;
13735
13736 if (tp->dma_limit)
13737 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13738
13739 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13740 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13741
13742 if (!netif_running(tp->dev)) {
13743 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13744 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13745 if (do_extlpbk)
13746 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13747 goto done;
13748 }
13749
13750 err = tg3_reset_hw(tp, true);
13751 if (err) {
13752 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13753 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13754 if (do_extlpbk)
13755 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13756 goto done;
13757 }
13758
13759 if (tg3_flag(tp, ENABLE_RSS)) {
13760 int i;
13761
13762 /* Reroute all rx packets to the 1st queue */
13763 for (i = MAC_RSS_INDIR_TBL_0;
13764 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13765 tw32(i, 0x0);
13766 }
13767
13768 /* HW errata - mac loopback fails in some cases on 5780.
13769 * Normal traffic and PHY loopback are not affected by
13770 * errata. Also, the MAC loopback test is deprecated for
13771 * all newer ASIC revisions.
13772 */
13773 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13774 !tg3_flag(tp, CPMU_PRESENT)) {
13775 tg3_mac_loopback(tp, true);
13776
13777 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13778 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13779
13780 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13781 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13782 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13783
13784 tg3_mac_loopback(tp, false);
13785 }
13786
13787 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13788 !tg3_flag(tp, USE_PHYLIB)) {
13789 int i;
13790
13791 tg3_phy_lpbk_set(tp, 0, false);
13792
13793 /* Wait for link */
13794 for (i = 0; i < 100; i++) {
13795 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13796 break;
13797 mdelay(1);
13798 }
13799
13800 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13801 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13802 if (tg3_flag(tp, TSO_CAPABLE) &&
13803 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13804 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13805 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13806 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13807 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13808
13809 if (do_extlpbk) {
13810 tg3_phy_lpbk_set(tp, 0, true);
13811
13812 /* All link indications report up, but the hardware
13813 * isn't really ready for about 20 msec. Double it
13814 * to be sure.
13815 */
13816 mdelay(40);
13817
13818 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13819 data[TG3_EXT_LOOPB_TEST] |=
13820 TG3_STD_LOOPBACK_FAILED;
13821 if (tg3_flag(tp, TSO_CAPABLE) &&
13822 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13823 data[TG3_EXT_LOOPB_TEST] |=
13824 TG3_TSO_LOOPBACK_FAILED;
13825 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13826 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13827 data[TG3_EXT_LOOPB_TEST] |=
13828 TG3_JMB_LOOPBACK_FAILED;
13829 }
13830
13831 /* Re-enable gphy autopowerdown. */
13832 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13833 tg3_phy_toggle_apd(tp, true);
13834 }
13835
13836 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13837 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13838
13839 done:
13840 tp->phy_flags |= eee_cap;
13841
13842 return err;
13843 }
13844
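/* ethtool self-test entry point.  The offline tests reset the chip,
 * so the device is quiesced first and restarted afterwards.
 */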
13845 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13846 u64 *data)
13847 {
13848 struct tg3 *tp = netdev_priv(dev);
13849 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13850
13851 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13852 if (tg3_power_up(tp)) {
13853 etest->flags |= ETH_TEST_FL_FAILED;
13854 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13855 return;
13856 }
13857 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13858 }
13859
13860 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13861
13862 if (tg3_test_nvram(tp) != 0) {
13863 etest->flags |= ETH_TEST_FL_FAILED;
13864 data[TG3_NVRAM_TEST] = 1;
13865 }
13866 if (!doextlpbk && tg3_test_link(tp)) {
13867 etest->flags |= ETH_TEST_FL_FAILED;
13868 data[TG3_LINK_TEST] = 1;
13869 }
13870 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13871 int err, err2 = 0, irq_sync = 0;
13872
13873 if (netif_running(dev)) {
13874 tg3_phy_stop(tp);
13875 tg3_netif_stop(tp);
13876 irq_sync = 1;
13877 }
13878
13879 tg3_full_lock(tp, irq_sync);
13880 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13881 err = tg3_nvram_lock(tp);
13882 tg3_halt_cpu(tp, RX_CPU_BASE);
13883 if (!tg3_flag(tp, 5705_PLUS))
13884 tg3_halt_cpu(tp, TX_CPU_BASE);
13885 if (!err)
13886 tg3_nvram_unlock(tp);
13887
13888 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13889 tg3_phy_reset(tp);
13890
13891 if (tg3_test_registers(tp) != 0) {
13892 etest->flags |= ETH_TEST_FL_FAILED;
13893 data[TG3_REGISTER_TEST] = 1;
13894 }
13895
13896 if (tg3_test_memory(tp) != 0) {
13897 etest->flags |= ETH_TEST_FL_FAILED;
13898 data[TG3_MEMORY_TEST] = 1;
13899 }
13900
13901 if (doextlpbk)
13902 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13903
13904 if (tg3_test_loopback(tp, data, doextlpbk))
13905 etest->flags |= ETH_TEST_FL_FAILED;
13906
13907 tg3_full_unlock(tp);
13908
13909 if (tg3_test_interrupt(tp) != 0) {
13910 etest->flags |= ETH_TEST_FL_FAILED;
13911 data[TG3_INTERRUPT_TEST] = 1;
13912 }
13913
13914 tg3_full_lock(tp, 0);
13915
13916 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13917 if (netif_running(dev)) {
13918 tg3_flag_set(tp, INIT_COMPLETE);
13919 err2 = tg3_restart_hw(tp, true);
13920 if (!err2)
13921 tg3_netif_start(tp);
13922 }
13923
13924 tg3_full_unlock(tp);
13925
13926 if (irq_sync && !err2)
13927 tg3_phy_start(tp);
13928 }
13929 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13930 tg3_power_down_prepare(tp);
13931
13932 }
13933
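/* SIOCSHWTSTAMP handler: map the requested RX filter onto the chip's
 * RX PTP control bits and switch TX timestamping on or off.
 */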
13934 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13935 {
13936 struct tg3 *tp = netdev_priv(dev);
13937 struct hwtstamp_config stmpconf;
13938
13939 if (!tg3_flag(tp, PTP_CAPABLE))
13940 return -EOPNOTSUPP;
13941
13942 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13943 return -EFAULT;
13944
13945 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13946 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13947 return -ERANGE;
13948
13949 switch (stmpconf.rx_filter) {
13950 case HWTSTAMP_FILTER_NONE:
13951 tp->rxptpctl = 0;
13952 break;
13953 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13954 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13955 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13956 break;
13957 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13958 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13959 TG3_RX_PTP_CTL_SYNC_EVNT;
13960 break;
13961 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13962 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13963 TG3_RX_PTP_CTL_DELAY_REQ;
13964 break;
13965 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13966 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13967 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13968 break;
13969 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13970 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13971 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13972 break;
13973 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13974 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13975 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13976 break;
13977 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13978 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13979 TG3_RX_PTP_CTL_SYNC_EVNT;
13980 break;
13981 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13982 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13983 TG3_RX_PTP_CTL_SYNC_EVNT;
13984 break;
13985 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13986 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13987 TG3_RX_PTP_CTL_SYNC_EVNT;
13988 break;
13989 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13990 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13991 TG3_RX_PTP_CTL_DELAY_REQ;
13992 break;
13993 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13994 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13995 TG3_RX_PTP_CTL_DELAY_REQ;
13996 break;
13997 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13998 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13999 TG3_RX_PTP_CTL_DELAY_REQ;
14000 break;
14001 default:
14002 return -ERANGE;
14003 }
14004
14005 if (netif_running(dev) && tp->rxptpctl)
14006 tw32(TG3_RX_PTP_CTL,
14007 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14008
14009 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
14010 tg3_flag_set(tp, TX_TSTAMP_EN);
14011 else
14012 tg3_flag_clear(tp, TX_TSTAMP_EN);
14013
14014 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14015 -EFAULT : 0;
14016 }
14017
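/* SIOCGHWTSTAMP handler: rebuild the current hwtstamp_config from the
 * cached RX PTP control value and the TX timestamp flag.
 */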
14018 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
14019 {
14020 struct tg3 *tp = netdev_priv(dev);
14021 struct hwtstamp_config stmpconf;
14022
14023 if (!tg3_flag(tp, PTP_CAPABLE))
14024 return -EOPNOTSUPP;
14025
14026 stmpconf.flags = 0;
14027 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
14028 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
14029
14030 switch (tp->rxptpctl) {
14031 case 0:
14032 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
14033 break;
14034 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14035 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14036 break;
14037 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14038 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14039 break;
14040 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14041 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14042 break;
14043 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14044 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14045 break;
14046 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14047 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14048 break;
14049 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14050 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14051 break;
14052 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14053 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14054 break;
14055 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14056 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14057 break;
14058 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14059 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14060 break;
14061 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14062 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14063 break;
14064 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14065 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14066 break;
14067 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14068 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14069 break;
14070 default:
14071 WARN_ON_ONCE(1);
14072 return -ERANGE;
14073 }
14074
14075 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14076 -EFAULT : 0;
14077 }
14078
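/* ioctl handler: hand MII requests to phylib when it is in use,
 * otherwise service MII register reads/writes and the hardware
 * timestamping ioctls directly.
 */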
14079 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14080 {
14081 struct mii_ioctl_data *data = if_mii(ifr);
14082 struct tg3 *tp = netdev_priv(dev);
14083 int err;
14084
14085 if (tg3_flag(tp, USE_PHYLIB)) {
14086 struct phy_device *phydev;
14087 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14088 return -EAGAIN;
14089 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14090 return phy_mii_ioctl(phydev, ifr, cmd);
14091 }
14092
14093 switch (cmd) {
14094 case SIOCGMIIPHY:
14095 data->phy_id = tp->phy_addr;
14096
14097 fallthrough;
14098 case SIOCGMIIREG: {
14099 u32 mii_regval;
14100
14101 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14102 break; /* We have no PHY */
14103
14104 if (!netif_running(dev))
14105 return -EAGAIN;
14106
14107 spin_lock_bh(&tp->lock);
14108 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14109 data->reg_num & 0x1f, &mii_regval);
14110 spin_unlock_bh(&tp->lock);
14111
14112 data->val_out = mii_regval;
14113
14114 return err;
14115 }
14116
14117 case SIOCSMIIREG:
14118 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14119 break; /* We have no PHY */
14120
14121 if (!netif_running(dev))
14122 return -EAGAIN;
14123
14124 spin_lock_bh(&tp->lock);
14125 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14126 data->reg_num & 0x1f, data->val_in);
14127 spin_unlock_bh(&tp->lock);
14128
14129 return err;
14130
14131 case SIOCSHWTSTAMP:
14132 return tg3_hwtstamp_set(dev, ifr);
14133
14134 case SIOCGHWTSTAMP:
14135 return tg3_hwtstamp_get(dev, ifr);
14136
14137 default:
14138 /* do nothing */
14139 break;
14140 }
14141 return -EOPNOTSUPP;
14142 }
14143
14144 static int tg3_get_coalesce(struct net_device *dev,
14145 struct ethtool_coalesce *ec,
14146 struct kernel_ethtool_coalesce *kernel_coal,
14147 struct netlink_ext_ack *extack)
14148 {
14149 struct tg3 *tp = netdev_priv(dev);
14150
14151 memcpy(ec, &tp->coal, sizeof(*ec));
14152 return 0;
14153 }
14154
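/* Validate the requested coalescing parameters against the per-chip
 * limits before committing them.  The IRQ and statistics coalescing
 * knobs only exist before the 5705 generation; on newer chips their
 * limits stay zero, so any nonzero request for them is rejected.
 */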
14155 static int tg3_set_coalesce(struct net_device *dev,
14156 struct ethtool_coalesce *ec,
14157 struct kernel_ethtool_coalesce *kernel_coal,
14158 struct netlink_ext_ack *extack)
14159 {
14160 struct tg3 *tp = netdev_priv(dev);
14161 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14162 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14163
14164 if (!tg3_flag(tp, 5705_PLUS)) {
14165 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14166 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14167 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14168 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14169 }
14170
14171 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14172 (!ec->rx_coalesce_usecs) ||
14173 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14174 (!ec->tx_coalesce_usecs) ||
14175 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14176 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14177 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14178 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14179 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14180 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14181 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14182 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14183 return -EINVAL;
14184
14185 /* Only copy relevant parameters, ignore all others. */
14186 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14187 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14188 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14189 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14190 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14191 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14192 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14193 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14194 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14195
14196 if (netif_running(dev)) {
14197 tg3_full_lock(tp, 0);
14198 __tg3_set_coalesce(tp, &tp->coal);
14199 tg3_full_unlock(tp);
14200 }
14201 return 0;
14202 }
14203
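/* ethtool set_eee: changing the advertised link modes is not
 * supported, but the enable state and Tx LPI timer are applied, and
 * the PHY is reset to renegotiate while the device is running.
 */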
14204 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14205 {
14206 struct tg3 *tp = netdev_priv(dev);
14207
14208 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14209 netdev_warn(tp->dev, "Board does not support EEE!\n");
14210 return -EOPNOTSUPP;
14211 }
14212
14213 if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14214 netdev_warn(tp->dev,
14215 "Direct manipulation of EEE advertisement is not supported\n");
14216 return -EINVAL;
14217 }
14218
14219 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14220 netdev_warn(tp->dev,
14221 "Maximal Tx Lpi timer supported is %#x(u)\n",
14222 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14223 return -EINVAL;
14224 }
14225
14226 tp->eee.eee_enabled = edata->eee_enabled;
14227 tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14228 tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14229
14230 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14231 tg3_warn_mgmt_link_flap(tp);
14232
14233 if (netif_running(tp->dev)) {
14234 tg3_full_lock(tp, 0);
14235 tg3_setup_eee(tp);
14236 tg3_phy_reset(tp);
14237 tg3_full_unlock(tp);
14238 }
14239
14240 return 0;
14241 }
14242
14243 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14244 {
14245 struct tg3 *tp = netdev_priv(dev);
14246
14247 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14248 netdev_warn(tp->dev,
14249 "Board does not support EEE!\n");
14250 return -EOPNOTSUPP;
14251 }
14252
14253 *edata = tp->eee;
14254 return 0;
14255 }
14256
14257 static const struct ethtool_ops tg3_ethtool_ops = {
14258 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14259 ETHTOOL_COALESCE_MAX_FRAMES |
14260 ETHTOOL_COALESCE_USECS_IRQ |
14261 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14262 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14263 .get_drvinfo = tg3_get_drvinfo,
14264 .get_regs_len = tg3_get_regs_len,
14265 .get_regs = tg3_get_regs,
14266 .get_wol = tg3_get_wol,
14267 .set_wol = tg3_set_wol,
14268 .get_msglevel = tg3_get_msglevel,
14269 .set_msglevel = tg3_set_msglevel,
14270 .nway_reset = tg3_nway_reset,
14271 .get_link = ethtool_op_get_link,
14272 .get_eeprom_len = tg3_get_eeprom_len,
14273 .get_eeprom = tg3_get_eeprom,
14274 .set_eeprom = tg3_set_eeprom,
14275 .get_ringparam = tg3_get_ringparam,
14276 .set_ringparam = tg3_set_ringparam,
14277 .get_pauseparam = tg3_get_pauseparam,
14278 .set_pauseparam = tg3_set_pauseparam,
14279 .self_test = tg3_self_test,
14280 .get_strings = tg3_get_strings,
14281 .set_phys_id = tg3_set_phys_id,
14282 .get_ethtool_stats = tg3_get_ethtool_stats,
14283 .get_coalesce = tg3_get_coalesce,
14284 .set_coalesce = tg3_set_coalesce,
14285 .get_sset_count = tg3_get_sset_count,
14286 .get_rxnfc = tg3_get_rxnfc,
14287 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14288 .get_rxfh = tg3_get_rxfh,
14289 .set_rxfh = tg3_set_rxfh,
14290 .get_channels = tg3_get_channels,
14291 .set_channels = tg3_set_channels,
14292 .get_ts_info = tg3_get_ts_info,
14293 .get_eee = tg3_get_eee,
14294 .set_eee = tg3_set_eee,
14295 .get_link_ksettings = tg3_get_link_ksettings,
14296 .set_link_ksettings = tg3_set_link_ksettings,
14297 };
14298
14299 static void tg3_get_stats64(struct net_device *dev,
14300 struct rtnl_link_stats64 *stats)
14301 {
14302 struct tg3 *tp = netdev_priv(dev);
14303
14304 spin_lock_bh(&tp->lock);
14305 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14306 *stats = tp->net_stats_prev;
14307 spin_unlock_bh(&tp->lock);
14308 return;
14309 }
14310
14311 tg3_get_nstats(tp, stats);
14312 spin_unlock_bh(&tp->lock);
14313 }
14314
14315 static void tg3_set_rx_mode(struct net_device *dev)
14316 {
14317 struct tg3 *tp = netdev_priv(dev);
14318
14319 if (!netif_running(dev))
14320 return;
14321
14322 tg3_full_lock(tp, 0);
14323 __tg3_set_rx_mode(dev);
14324 tg3_full_unlock(tp);
14325 }
14326
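/* Record a new MTU and adjust ring/TSO flags: jumbo MTUs need the
 * jumbo RX ring, and 5780-class chips cannot do TSO while jumbo
 * frames are enabled.
 */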
14327 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14328 int new_mtu)
14329 {
14330 WRITE_ONCE(dev->mtu, new_mtu);
14331
14332 if (new_mtu > ETH_DATA_LEN) {
14333 if (tg3_flag(tp, 5780_CLASS)) {
14334 netdev_update_features(dev);
14335 tg3_flag_clear(tp, TSO_CAPABLE);
14336 } else {
14337 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14338 }
14339 } else {
14340 if (tg3_flag(tp, 5780_CLASS)) {
14341 tg3_flag_set(tp, TSO_CAPABLE);
14342 netdev_update_features(dev);
14343 }
14344 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14345 }
14346 }
14347
14348 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14349 {
14350 struct tg3 *tp = netdev_priv(dev);
14351 int err;
14352 bool reset_phy = false;
14353
14354 if (!netif_running(dev)) {
14355 /* We'll just catch it later when the
14356 * device is brought up.
14357 */
14358 tg3_set_mtu(dev, tp, new_mtu);
14359 return 0;
14360 }
14361
14362 tg3_phy_stop(tp);
14363
14364 tg3_netif_stop(tp);
14365
14366 tg3_set_mtu(dev, tp, new_mtu);
14367
14368 tg3_full_lock(tp, 1);
14369
14370 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14371
14372 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14373 * breaks all requests to 256 bytes.
14374 */
14375 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14376 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14377 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14378 tg3_asic_rev(tp) == ASIC_REV_5720)
14379 reset_phy = true;
14380
14381 err = tg3_restart_hw(tp, reset_phy);
14382
14383 if (!err)
14384 tg3_netif_start(tp);
14385
14386 tg3_full_unlock(tp);
14387
14388 if (!err)
14389 tg3_phy_start(tp);
14390
14391 return err;
14392 }
14393
14394 static const struct net_device_ops tg3_netdev_ops = {
14395 .ndo_open = tg3_open,
14396 .ndo_stop = tg3_close,
14397 .ndo_start_xmit = tg3_start_xmit,
14398 .ndo_get_stats64 = tg3_get_stats64,
14399 .ndo_validate_addr = eth_validate_addr,
14400 .ndo_set_rx_mode = tg3_set_rx_mode,
14401 .ndo_set_mac_address = tg3_set_mac_addr,
14402 .ndo_eth_ioctl = tg3_ioctl,
14403 .ndo_tx_timeout = tg3_tx_timeout,
14404 .ndo_change_mtu = tg3_change_mtu,
14405 .ndo_fix_features = tg3_fix_features,
14406 .ndo_set_features = tg3_set_features,
14407 #ifdef CONFIG_NET_POLL_CONTROLLER
14408 .ndo_poll_controller = tg3_poll_controller,
14409 #endif
14410 };
14411
14412 static void tg3_get_eeprom_size(struct tg3 *tp)
14413 {
14414 u32 cursize, val, magic;
14415
14416 tp->nvram_size = EEPROM_CHIP_SIZE;
14417
14418 if (tg3_nvram_read(tp, 0, &magic) != 0)
14419 return;
14420
14421 if ((magic != TG3_EEPROM_MAGIC) &&
14422 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14423 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14424 return;
14425
14426 /*
14427 * Size the chip by reading offsets at increasing powers of two.
14428 * When we encounter our validation signature, we know the addressing
14429 * has wrapped around, and thus have our chip size.
14430 */
14431 cursize = 0x10;
14432
14433 while (cursize < tp->nvram_size) {
14434 if (tg3_nvram_read(tp, cursize, &val) != 0)
14435 return;
14436
14437 if (val == magic)
14438 break;
14439
14440 cursize <<= 1;
14441 }
14442
14443 tp->nvram_size = cursize;
14444 }
14445
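/* Read the NVRAM size.  Selfboot images are sized by probing for
 * address wrap-around; standard images store the size, in kilobytes,
 * at offset 0xf2.
 */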
14446 static void tg3_get_nvram_size(struct tg3 *tp)
14447 {
14448 u32 val;
14449
14450 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14451 return;
14452
14453 /* Selfboot format */
14454 if (val != TG3_EEPROM_MAGIC) {
14455 tg3_get_eeprom_size(tp);
14456 return;
14457 }
14458
14459 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14460 if (val != 0) {
14461 /* This is confusing. We want to operate on the
14462 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14463 * call will read from NVRAM and byteswap the data
14464 * according to the byteswapping settings for all
14465 * other register accesses. This ensures the data we
14466 * want will always reside in the lower 16-bits.
14467 * However, the data in NVRAM is in LE format, which
14468 * means the data from the NVRAM read will always be
14469 * opposite the endianness of the CPU. The 16-bit
14470 * byteswap then brings the data to CPU endianness.
14471 */
14472 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14473 return;
14474 }
14475 }
14476 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14477 }
14478
14479 static void tg3_get_nvram_info(struct tg3 *tp)
14480 {
14481 u32 nvcfg1;
14482
14483 nvcfg1 = tr32(NVRAM_CFG1);
14484 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14485 tg3_flag_set(tp, FLASH);
14486 } else {
14487 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14488 tw32(NVRAM_CFG1, nvcfg1);
14489 }
14490
14491 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14492 tg3_flag(tp, 5780_CLASS)) {
14493 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14494 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14495 tp->nvram_jedecnum = JEDEC_ATMEL;
14496 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14497 tg3_flag_set(tp, NVRAM_BUFFERED);
14498 break;
14499 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14500 tp->nvram_jedecnum = JEDEC_ATMEL;
14501 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14502 break;
14503 case FLASH_VENDOR_ATMEL_EEPROM:
14504 tp->nvram_jedecnum = JEDEC_ATMEL;
14505 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14506 tg3_flag_set(tp, NVRAM_BUFFERED);
14507 break;
14508 case FLASH_VENDOR_ST:
14509 tp->nvram_jedecnum = JEDEC_ST;
14510 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14511 tg3_flag_set(tp, NVRAM_BUFFERED);
14512 break;
14513 case FLASH_VENDOR_SAIFUN:
14514 tp->nvram_jedecnum = JEDEC_SAIFUN;
14515 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14516 break;
14517 case FLASH_VENDOR_SST_SMALL:
14518 case FLASH_VENDOR_SST_LARGE:
14519 tp->nvram_jedecnum = JEDEC_SST;
14520 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14521 break;
14522 }
14523 } else {
14524 tp->nvram_jedecnum = JEDEC_ATMEL;
14525 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14526 tg3_flag_set(tp, NVRAM_BUFFERED);
14527 }
14528 }
14529
14530 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14531 {
14532 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14533 case FLASH_5752PAGE_SIZE_256:
14534 tp->nvram_pagesize = 256;
14535 break;
14536 case FLASH_5752PAGE_SIZE_512:
14537 tp->nvram_pagesize = 512;
14538 break;
14539 case FLASH_5752PAGE_SIZE_1K:
14540 tp->nvram_pagesize = 1024;
14541 break;
14542 case FLASH_5752PAGE_SIZE_2K:
14543 tp->nvram_pagesize = 2048;
14544 break;
14545 case FLASH_5752PAGE_SIZE_4K:
14546 tp->nvram_pagesize = 4096;
14547 break;
14548 case FLASH_5752PAGE_SIZE_264:
14549 tp->nvram_pagesize = 264;
14550 break;
14551 case FLASH_5752PAGE_SIZE_528:
14552 tp->nvram_pagesize = 528;
14553 break;
14554 }
14555 }
14556
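/* The tg3_get_*_nvram_info() helpers below decode the NVRAM_CFG1
 * strap bits for each ASIC family to identify the flash/EEPROM
 * vendor, the page size, and, where the strap fixes it, the total
 * size.
 */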
14557 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14558 {
14559 u32 nvcfg1;
14560
14561 nvcfg1 = tr32(NVRAM_CFG1);
14562
14563 /* NVRAM protection for TPM */
14564 if (nvcfg1 & (1 << 27))
14565 tg3_flag_set(tp, PROTECTED_NVRAM);
14566
14567 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14568 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14569 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14570 tp->nvram_jedecnum = JEDEC_ATMEL;
14571 tg3_flag_set(tp, NVRAM_BUFFERED);
14572 break;
14573 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14574 tp->nvram_jedecnum = JEDEC_ATMEL;
14575 tg3_flag_set(tp, NVRAM_BUFFERED);
14576 tg3_flag_set(tp, FLASH);
14577 break;
14578 case FLASH_5752VENDOR_ST_M45PE10:
14579 case FLASH_5752VENDOR_ST_M45PE20:
14580 case FLASH_5752VENDOR_ST_M45PE40:
14581 tp->nvram_jedecnum = JEDEC_ST;
14582 tg3_flag_set(tp, NVRAM_BUFFERED);
14583 tg3_flag_set(tp, FLASH);
14584 break;
14585 }
14586
14587 if (tg3_flag(tp, FLASH)) {
14588 tg3_nvram_get_pagesize(tp, nvcfg1);
14589 } else {
14590 /* For eeprom, set pagesize to maximum eeprom size */
14591 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14592
14593 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14594 tw32(NVRAM_CFG1, nvcfg1);
14595 }
14596 }
14597
14598 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14599 {
14600 u32 nvcfg1, protect = 0;
14601
14602 nvcfg1 = tr32(NVRAM_CFG1);
14603
14604 /* NVRAM protection for TPM */
14605 if (nvcfg1 & (1 << 27)) {
14606 tg3_flag_set(tp, PROTECTED_NVRAM);
14607 protect = 1;
14608 }
14609
14610 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14611 switch (nvcfg1) {
14612 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14613 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14614 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14615 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14616 tp->nvram_jedecnum = JEDEC_ATMEL;
14617 tg3_flag_set(tp, NVRAM_BUFFERED);
14618 tg3_flag_set(tp, FLASH);
14619 tp->nvram_pagesize = 264;
14620 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14621 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14622 tp->nvram_size = (protect ? 0x3e200 :
14623 TG3_NVRAM_SIZE_512KB);
14624 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14625 tp->nvram_size = (protect ? 0x1f200 :
14626 TG3_NVRAM_SIZE_256KB);
14627 else
14628 tp->nvram_size = (protect ? 0x1f200 :
14629 TG3_NVRAM_SIZE_128KB);
14630 break;
14631 case FLASH_5752VENDOR_ST_M45PE10:
14632 case FLASH_5752VENDOR_ST_M45PE20:
14633 case FLASH_5752VENDOR_ST_M45PE40:
14634 tp->nvram_jedecnum = JEDEC_ST;
14635 tg3_flag_set(tp, NVRAM_BUFFERED);
14636 tg3_flag_set(tp, FLASH);
14637 tp->nvram_pagesize = 256;
14638 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14639 tp->nvram_size = (protect ?
14640 TG3_NVRAM_SIZE_64KB :
14641 TG3_NVRAM_SIZE_128KB);
14642 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14643 tp->nvram_size = (protect ?
14644 TG3_NVRAM_SIZE_64KB :
14645 TG3_NVRAM_SIZE_256KB);
14646 else
14647 tp->nvram_size = (protect ?
14648 TG3_NVRAM_SIZE_128KB :
14649 TG3_NVRAM_SIZE_512KB);
14650 break;
14651 }
14652 }
14653
14654 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14655 {
14656 u32 nvcfg1;
14657
14658 nvcfg1 = tr32(NVRAM_CFG1);
14659
14660 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14661 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14662 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14663 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14664 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14665 tp->nvram_jedecnum = JEDEC_ATMEL;
14666 tg3_flag_set(tp, NVRAM_BUFFERED);
14667 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14668
14669 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14670 tw32(NVRAM_CFG1, nvcfg1);
14671 break;
14672 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14673 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14674 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14675 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14676 tp->nvram_jedecnum = JEDEC_ATMEL;
14677 tg3_flag_set(tp, NVRAM_BUFFERED);
14678 tg3_flag_set(tp, FLASH);
14679 tp->nvram_pagesize = 264;
14680 break;
14681 case FLASH_5752VENDOR_ST_M45PE10:
14682 case FLASH_5752VENDOR_ST_M45PE20:
14683 case FLASH_5752VENDOR_ST_M45PE40:
14684 tp->nvram_jedecnum = JEDEC_ST;
14685 tg3_flag_set(tp, NVRAM_BUFFERED);
14686 tg3_flag_set(tp, FLASH);
14687 tp->nvram_pagesize = 256;
14688 break;
14689 }
14690 }
14691
14692 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14693 {
14694 u32 nvcfg1, protect = 0;
14695
14696 nvcfg1 = tr32(NVRAM_CFG1);
14697
14698 /* NVRAM protection for TPM */
14699 if (nvcfg1 & (1 << 27)) {
14700 tg3_flag_set(tp, PROTECTED_NVRAM);
14701 protect = 1;
14702 }
14703
14704 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14705 switch (nvcfg1) {
14706 case FLASH_5761VENDOR_ATMEL_ADB021D:
14707 case FLASH_5761VENDOR_ATMEL_ADB041D:
14708 case FLASH_5761VENDOR_ATMEL_ADB081D:
14709 case FLASH_5761VENDOR_ATMEL_ADB161D:
14710 case FLASH_5761VENDOR_ATMEL_MDB021D:
14711 case FLASH_5761VENDOR_ATMEL_MDB041D:
14712 case FLASH_5761VENDOR_ATMEL_MDB081D:
14713 case FLASH_5761VENDOR_ATMEL_MDB161D:
14714 tp->nvram_jedecnum = JEDEC_ATMEL;
14715 tg3_flag_set(tp, NVRAM_BUFFERED);
14716 tg3_flag_set(tp, FLASH);
14717 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14718 tp->nvram_pagesize = 256;
14719 break;
14720 case FLASH_5761VENDOR_ST_A_M45PE20:
14721 case FLASH_5761VENDOR_ST_A_M45PE40:
14722 case FLASH_5761VENDOR_ST_A_M45PE80:
14723 case FLASH_5761VENDOR_ST_A_M45PE16:
14724 case FLASH_5761VENDOR_ST_M_M45PE20:
14725 case FLASH_5761VENDOR_ST_M_M45PE40:
14726 case FLASH_5761VENDOR_ST_M_M45PE80:
14727 case FLASH_5761VENDOR_ST_M_M45PE16:
14728 tp->nvram_jedecnum = JEDEC_ST;
14729 tg3_flag_set(tp, NVRAM_BUFFERED);
14730 tg3_flag_set(tp, FLASH);
14731 tp->nvram_pagesize = 256;
14732 break;
14733 }
14734
14735 if (protect) {
14736 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14737 } else {
14738 switch (nvcfg1) {
14739 case FLASH_5761VENDOR_ATMEL_ADB161D:
14740 case FLASH_5761VENDOR_ATMEL_MDB161D:
14741 case FLASH_5761VENDOR_ST_A_M45PE16:
14742 case FLASH_5761VENDOR_ST_M_M45PE16:
14743 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14744 break;
14745 case FLASH_5761VENDOR_ATMEL_ADB081D:
14746 case FLASH_5761VENDOR_ATMEL_MDB081D:
14747 case FLASH_5761VENDOR_ST_A_M45PE80:
14748 case FLASH_5761VENDOR_ST_M_M45PE80:
14749 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14750 break;
14751 case FLASH_5761VENDOR_ATMEL_ADB041D:
14752 case FLASH_5761VENDOR_ATMEL_MDB041D:
14753 case FLASH_5761VENDOR_ST_A_M45PE40:
14754 case FLASH_5761VENDOR_ST_M_M45PE40:
14755 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14756 break;
14757 case FLASH_5761VENDOR_ATMEL_ADB021D:
14758 case FLASH_5761VENDOR_ATMEL_MDB021D:
14759 case FLASH_5761VENDOR_ST_A_M45PE20:
14760 case FLASH_5761VENDOR_ST_M_M45PE20:
14761 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14762 break;
14763 }
14764 }
14765 }
14766
14767 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14768 {
14769 tp->nvram_jedecnum = JEDEC_ATMEL;
14770 tg3_flag_set(tp, NVRAM_BUFFERED);
14771 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14772 }
14773
14774 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14775 {
14776 u32 nvcfg1;
14777
14778 nvcfg1 = tr32(NVRAM_CFG1);
14779
14780 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14781 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14782 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14783 tp->nvram_jedecnum = JEDEC_ATMEL;
14784 tg3_flag_set(tp, NVRAM_BUFFERED);
14785 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14786
14787 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14788 tw32(NVRAM_CFG1, nvcfg1);
14789 return;
14790 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14791 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14792 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14793 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14794 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14795 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14796 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14797 tp->nvram_jedecnum = JEDEC_ATMEL;
14798 tg3_flag_set(tp, NVRAM_BUFFERED);
14799 tg3_flag_set(tp, FLASH);
14800
14801 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14802 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14803 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14804 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14805 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14806 break;
14807 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14808 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14809 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14810 break;
14811 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14812 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14813 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14814 break;
14815 }
14816 break;
14817 case FLASH_5752VENDOR_ST_M45PE10:
14818 case FLASH_5752VENDOR_ST_M45PE20:
14819 case FLASH_5752VENDOR_ST_M45PE40:
14820 tp->nvram_jedecnum = JEDEC_ST;
14821 tg3_flag_set(tp, NVRAM_BUFFERED);
14822 tg3_flag_set(tp, FLASH);
14823
14824 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14825 case FLASH_5752VENDOR_ST_M45PE10:
14826 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14827 break;
14828 case FLASH_5752VENDOR_ST_M45PE20:
14829 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14830 break;
14831 case FLASH_5752VENDOR_ST_M45PE40:
14832 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14833 break;
14834 }
14835 break;
14836 default:
14837 tg3_flag_set(tp, NO_NVRAM);
14838 return;
14839 }
14840
14841 tg3_nvram_get_pagesize(tp, nvcfg1);
14842 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14843 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14844 }
14845
14846
14847 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14848 {
14849 u32 nvcfg1;
14850
14851 nvcfg1 = tr32(NVRAM_CFG1);
14852
14853 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14854 case FLASH_5717VENDOR_ATMEL_EEPROM:
14855 case FLASH_5717VENDOR_MICRO_EEPROM:
14856 tp->nvram_jedecnum = JEDEC_ATMEL;
14857 tg3_flag_set(tp, NVRAM_BUFFERED);
14858 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14859
14860 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14861 tw32(NVRAM_CFG1, nvcfg1);
14862 return;
14863 case FLASH_5717VENDOR_ATMEL_MDB011D:
14864 case FLASH_5717VENDOR_ATMEL_ADB011B:
14865 case FLASH_5717VENDOR_ATMEL_ADB011D:
14866 case FLASH_5717VENDOR_ATMEL_MDB021D:
14867 case FLASH_5717VENDOR_ATMEL_ADB021B:
14868 case FLASH_5717VENDOR_ATMEL_ADB021D:
14869 case FLASH_5717VENDOR_ATMEL_45USPT:
14870 tp->nvram_jedecnum = JEDEC_ATMEL;
14871 tg3_flag_set(tp, NVRAM_BUFFERED);
14872 tg3_flag_set(tp, FLASH);
14873
14874 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14875 case FLASH_5717VENDOR_ATMEL_MDB021D:
14876 /* Detect size with tg3_nvram_get_size() */
14877 break;
14878 case FLASH_5717VENDOR_ATMEL_ADB021B:
14879 case FLASH_5717VENDOR_ATMEL_ADB021D:
14880 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14881 break;
14882 default:
14883 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14884 break;
14885 }
14886 break;
14887 case FLASH_5717VENDOR_ST_M_M25PE10:
14888 case FLASH_5717VENDOR_ST_A_M25PE10:
14889 case FLASH_5717VENDOR_ST_M_M45PE10:
14890 case FLASH_5717VENDOR_ST_A_M45PE10:
14891 case FLASH_5717VENDOR_ST_M_M25PE20:
14892 case FLASH_5717VENDOR_ST_A_M25PE20:
14893 case FLASH_5717VENDOR_ST_M_M45PE20:
14894 case FLASH_5717VENDOR_ST_A_M45PE20:
14895 case FLASH_5717VENDOR_ST_25USPT:
14896 case FLASH_5717VENDOR_ST_45USPT:
14897 tp->nvram_jedecnum = JEDEC_ST;
14898 tg3_flag_set(tp, NVRAM_BUFFERED);
14899 tg3_flag_set(tp, FLASH);
14900
14901 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14902 case FLASH_5717VENDOR_ST_M_M25PE20:
14903 case FLASH_5717VENDOR_ST_M_M45PE20:
14904 /* Detect size with tg3_nvram_get_size() */
14905 break;
14906 case FLASH_5717VENDOR_ST_A_M25PE20:
14907 case FLASH_5717VENDOR_ST_A_M45PE20:
14908 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14909 break;
14910 default:
14911 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14912 break;
14913 }
14914 break;
14915 default:
14916 tg3_flag_set(tp, NO_NVRAM);
14917 return;
14918 }
14919
14920 tg3_nvram_get_pagesize(tp, nvcfg1);
14921 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14922 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14923 }
14924
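/* 5720/5762 NVRAM decode.  The 5762 can autosense the flash size for
 * Macronix parts; other 5762 straps are remapped onto the common 5720
 * tables below.
 */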
14925 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14926 {
14927 u32 nvcfg1, nvmpinstrp, nv_status;
14928
14929 nvcfg1 = tr32(NVRAM_CFG1);
14930 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14931
14932 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14933 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14934 tg3_flag_set(tp, NO_NVRAM);
14935 return;
14936 }
14937
14938 switch (nvmpinstrp) {
14939 case FLASH_5762_MX25L_100:
14940 case FLASH_5762_MX25L_200:
14941 case FLASH_5762_MX25L_400:
14942 case FLASH_5762_MX25L_800:
14943 case FLASH_5762_MX25L_160_320:
14944 tp->nvram_pagesize = 4096;
14945 tp->nvram_jedecnum = JEDEC_MACRONIX;
14946 tg3_flag_set(tp, NVRAM_BUFFERED);
14947 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14948 tg3_flag_set(tp, FLASH);
14949 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14950 tp->nvram_size =
14951 (1 << (nv_status >> AUTOSENSE_DEVID &
14952 AUTOSENSE_DEVID_MASK)
14953 << AUTOSENSE_SIZE_IN_MB);
14954 return;
14955
14956 case FLASH_5762_EEPROM_HD:
14957 nvmpinstrp = FLASH_5720_EEPROM_HD;
14958 break;
14959 case FLASH_5762_EEPROM_LD:
14960 nvmpinstrp = FLASH_5720_EEPROM_LD;
14961 break;
14962 case FLASH_5720VENDOR_M_ST_M45PE20:
14963 /* This pinstrap supports multiple sizes, so force it
14964 * to read the actual size from location 0xf0.
14965 */
14966 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14967 break;
14968 }
14969 }
14970
14971 switch (nvmpinstrp) {
14972 case FLASH_5720_EEPROM_HD:
14973 case FLASH_5720_EEPROM_LD:
14974 tp->nvram_jedecnum = JEDEC_ATMEL;
14975 tg3_flag_set(tp, NVRAM_BUFFERED);
14976
14977 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14978 tw32(NVRAM_CFG1, nvcfg1);
14979 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14980 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14981 else
14982 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14983 return;
14984 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14985 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14986 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14987 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14988 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14989 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14990 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14991 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14992 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14993 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14994 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14995 case FLASH_5720VENDOR_ATMEL_45USPT:
14996 tp->nvram_jedecnum = JEDEC_ATMEL;
14997 tg3_flag_set(tp, NVRAM_BUFFERED);
14998 tg3_flag_set(tp, FLASH);
14999
15000 switch (nvmpinstrp) {
15001 case FLASH_5720VENDOR_M_ATMEL_DB021D:
15002 case FLASH_5720VENDOR_A_ATMEL_DB021B:
15003 case FLASH_5720VENDOR_A_ATMEL_DB021D:
15004 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15005 break;
15006 case FLASH_5720VENDOR_M_ATMEL_DB041D:
15007 case FLASH_5720VENDOR_A_ATMEL_DB041B:
15008 case FLASH_5720VENDOR_A_ATMEL_DB041D:
15009 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15010 break;
15011 case FLASH_5720VENDOR_M_ATMEL_DB081D:
15012 case FLASH_5720VENDOR_A_ATMEL_DB081D:
15013 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15014 break;
15015 default:
15016 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15017 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15018 break;
15019 }
15020 break;
15021 case FLASH_5720VENDOR_M_ST_M25PE10:
15022 case FLASH_5720VENDOR_M_ST_M45PE10:
15023 case FLASH_5720VENDOR_A_ST_M25PE10:
15024 case FLASH_5720VENDOR_A_ST_M45PE10:
15025 case FLASH_5720VENDOR_M_ST_M25PE20:
15026 case FLASH_5720VENDOR_M_ST_M45PE20:
15027 case FLASH_5720VENDOR_A_ST_M25PE20:
15028 case FLASH_5720VENDOR_A_ST_M45PE20:
15029 case FLASH_5720VENDOR_M_ST_M25PE40:
15030 case FLASH_5720VENDOR_M_ST_M45PE40:
15031 case FLASH_5720VENDOR_A_ST_M25PE40:
15032 case FLASH_5720VENDOR_A_ST_M45PE40:
15033 case FLASH_5720VENDOR_M_ST_M25PE80:
15034 case FLASH_5720VENDOR_M_ST_M45PE80:
15035 case FLASH_5720VENDOR_A_ST_M25PE80:
15036 case FLASH_5720VENDOR_A_ST_M45PE80:
15037 case FLASH_5720VENDOR_ST_25USPT:
15038 case FLASH_5720VENDOR_ST_45USPT:
15039 tp->nvram_jedecnum = JEDEC_ST;
15040 tg3_flag_set(tp, NVRAM_BUFFERED);
15041 tg3_flag_set(tp, FLASH);
15042
15043 switch (nvmpinstrp) {
15044 case FLASH_5720VENDOR_M_ST_M25PE20:
15045 case FLASH_5720VENDOR_M_ST_M45PE20:
15046 case FLASH_5720VENDOR_A_ST_M25PE20:
15047 case FLASH_5720VENDOR_A_ST_M45PE20:
15048 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15049 break;
15050 case FLASH_5720VENDOR_M_ST_M25PE40:
15051 case FLASH_5720VENDOR_M_ST_M45PE40:
15052 case FLASH_5720VENDOR_A_ST_M25PE40:
15053 case FLASH_5720VENDOR_A_ST_M45PE40:
15054 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15055 break;
15056 case FLASH_5720VENDOR_M_ST_M25PE80:
15057 case FLASH_5720VENDOR_M_ST_M45PE80:
15058 case FLASH_5720VENDOR_A_ST_M25PE80:
15059 case FLASH_5720VENDOR_A_ST_M45PE80:
15060 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15061 break;
15062 default:
15063 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15064 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15065 break;
15066 }
15067 break;
15068 default:
15069 tg3_flag_set(tp, NO_NVRAM);
15070 return;
15071 }
15072
15073 tg3_nvram_get_pagesize(tp, nvcfg1);
15074 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15075 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15076
15077 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15078 u32 val;
15079
15080 if (tg3_nvram_read(tp, 0, &val))
15081 return;
15082
15083 if (val != TG3_EEPROM_MAGIC &&
15084 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15085 tg3_flag_set(tp, NO_NVRAM);
15086 }
15087 }

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
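	/* Reset the EEPROM state machine and program the default clock
	 * period before making any NVRAM/EEPROM accesses.
	 */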

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
		u32 nic_phy_id, ver, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);

			if (tg3_flag(tp, 5717_PLUS) ||
			    tg3_asic_rev(tp) == ASIC_REV_5762)
				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
						LED_CTRL_BLINK_RATE_MASK;

			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by
		 * bootcode if bit 18 is set
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);

		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
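	/* The bootcode publishes its configuration in NIC SRAM and leaves
	 * a magic signature behind; the shared-memory config blocks below
	 * are only trusted when that signature is present.
	 */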
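			/* Repack the SRAM-provided PHY ID into the same
			 * internal layout that tg3_phy_probe() builds from
			 * the MII_PHYSID1/MII_PHYSID2 registers.
			 */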

static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}
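	/* The OTP region is apparently addressed in bits (the byte offset
	 * is scaled by 8 above).  Enable CPU access, issue a read command,
	 * and poll for up to 1 ms for completion.
	 */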

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
			adv |= ADVERTISED_1000baseT_Half;
		adv |= ADVERTISED_1000baseT_Full;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the hard-coded, table-based PHY_ID and, failing that,
		 * to the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the phy is supported when it is connected
				 * to an SSB core.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

		linkmode_zero(tp->eee.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 tp->eee.supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 tp->eee.supported);
		linkmode_copy(tp->eee.advertised, tp->eee.supported);

		tp->eee.eee_enabled = 1;
		tp->eee.tx_lpi_enabled = 1;
		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
	}

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
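		/* Advertise EEE for 100BASE-T and 1000BASE-T full duplex
		 * and default the LPI timer to roughly 2 ms of link idle.
		 */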

static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int len, vpdlen;
	int i;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
	if (i < 0)
		goto partno;

	if (len != 4 || memcmp(vpd_data + i, "1028", 4))
		goto partno;

	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
	if (i < 0)
		goto partno;

	memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
	snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);

partno:
	i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
					 PCI_VPD_RO_KEYWORD_PARTNO, &len);
	if (i < 0)
		goto out_not_found;

	if (len > TG3_BPN_SIZE)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
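	/* An MFR_ID of "1028" marks a Dell-branded board (Dell's PCI
	 * vendor ID is 0x1028); for those, the VENDOR0 keyword is used
	 * as the firmware version prefix below.
	 */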

static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
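	/* Newer bootcode images appear to begin with a word whose top bits
	 * are 0x0c000000 followed by a zero word (the same check made in
	 * tg3_fw_img_is_valid()); those images carry a 16-byte version
	 * string whose location is recorded at image offset 8.
	 */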

static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
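	/* Builds 1..26 are encoded as a single trailing letter, 'a' for
	 * build 1 through 'z' for build 26 (larger builds were rejected
	 * above).
	 */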

static void tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void tg3_probe_ncsi(struct tg3 *tp)
{
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
		tg3_flag_set(tp, APE_HAS_NCSI);
}

static void tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_flag(tp, APE_HAS_NCSI))
		fwtype = "NCSI";
	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
		fwtype = "SMASH";
	else
		fwtype = "DASH";

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void tg3_read_otp_ver(struct tg3 *tp)
{
	u32 val, val2;

	if (tg3_asic_rev(tp) != ASIC_REV_5762)
		return;

	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
	    TG3_OTP_MAGIC0_VALID(val)) {
		u64 val64 = (u64) val << 32 | val2;
		u32 ver = 0;
		int i, vlen;

		for (i = 0; i < 7; i++) {
			if ((val64 & 0xff) == 0)
				break;
			ver = val64 & 0xff;
			val64 >>= 8;
		}
		vlen = strlen(tp->fw_ver);
		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
	}
}

static void tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tg3_flag(tp, NO_NVRAM)) {
		strcat(tp->fw_ver, "sb");
		tg3_read_otp_ver(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);

	if (tg3_flag(tp, ENABLE_ASF)) {
		if (tg3_flag(tp, ENABLE_APE)) {
			tg3_probe_ncsi(tp);
			if (!vpd_vers)
				tg3_read_dash_ver(tp);
		} else if (!vpd_vers) {
			tg3_read_mgmtfw_ver(tp);
		}
	}

	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
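		/* The version is stored in the low bytes of the 64-bit
		 * OTP word; walk up to seven bytes and keep the last
		 * non-zero one as the version number.
		 */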

static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (tg3_flag(tp, LRG_PROD_RING_CAP))
		return TG3_RX_RET_MAX_SIZE_5717;
	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
		return TG3_RX_RET_MAX_SIZE_5700;
	else
		return TG3_RX_RET_MAX_SIZE_5705;
}

static const struct pci_device_id tg3_write_reorder_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

static struct pci_dev *tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode; set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other.
	 */
	pci_dev_put(peer);

	return peer;
}
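/* Host bridges known to reorder posted writes to the mailbox registers;
 * see the MBOX_WRITE_REORDER handling in tg3_get_invariants() below.
 */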

static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
{
	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
		u32 reg;

		/* All devices that use the alternate
		 * ASIC REV location have a CPMU.
		 */
		tg3_flag_set(tp, CPMU_PRESENT);

		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
			reg = TG3PCI_GEN2_PRODID_ASICREV;
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			reg = TG3PCI_GEN15_PRODID_ASICREV;
		else
			reg = TG3PCI_PRODID_ASICREV;

		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
	}

	/* Wrong chip ID in 5752 A0. This code can be removed later
	 * as A0 is not in production.
	 */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720)
		tg3_flag_set(tp, 5717_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
	    tg3_asic_rev(tp) == ASIC_REV_57766)
		tg3_flag_set(tp, 57765_CLASS);

	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, 57765_PLUS);

	/* Intentionally exclude ASIC_REV_5906 */
	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, 5755_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tg3_flag_set(tp, 5780_CLASS);

	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
	    tg3_flag(tp, 5755_PLUS) ||
	    tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, 5750_PLUS);

	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
	    tg3_flag(tp, 5750_PLUS))
		tg3_flag_set(tp, 5705_PLUS);
}

static bool tg3_10_100_only_device(struct tg3 *tp,
				   const struct pci_device_id *ent)
{
	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;

	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
		return true;

	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
{
	u32 misc_ctrl_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size.  The Broadcom driver has this
	 * workaround but turns MWI off all the time, so it never uses
	 * it.  This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* Important! -- Make sure register accesses are byteswapped
	 * correctly.  Also, for those chips that require it, make
	 * sure that indirect register accesses are enabled before
	 * the first operation.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	tg3_detect_asic_rev(tp, misc_ctrl_reg);

	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
	 * we need to disable memory and use config. cycles
	 * only to access all registers.  The 5702/03 chips
	 * can mistakenly decode the special cycles from the
	 * ICH chipsets as memory write cycles, causing corruption
	 * of register and memory space.  Only certain ICH bridges
	 * will drive special cycles with non-zero data during the
	 * address phase which can fall within the 5703's address
	 * range.  This is not an ICH bug as the PCI spec allows
	 * non-zero address during special cycles.  However, only
	 * these ICH bridges are known to drive non-zero addresses
	 * during special cycles.
	 *
	 * Since special cycles do not cross PCI bridges, we only
	 * enable this workaround if the 5703 is on the secondary
	 * bus of these ICH bridges.
	 */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
			u32 rev;
		} ich_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
			  PCI_ANY_ID },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
			  0xa },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
			  PCI_ANY_ID },
			{ },
		};
		struct tg3_dev_id *pci_id = &ich_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor, pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (pci_id->rev != PCI_ANY_ID) {
				if (bridge->revision > pci_id->rev)
					continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number ==
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, ICH_WORKAROUND);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
		static struct tg3_dev_id {
			u32 vendor;
			u32 device;
		} bridge_chipsets[] = {
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
			{ },
		};
		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
		struct pci_dev *bridge = NULL;

		while (pci_id->vendor != 0) {
			bridge = pci_get_device(pci_id->vendor,
						pci_id->device,
						bridge);
			if (!bridge) {
				pci_id++;
				continue;
			}
			if (bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 5701_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		}
	}

	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
	 * DMA addresses > 40-bit. This bridge may have other additional
	 * 57xx devices behind it in some 4-port NIC designs for example.
	 * Any tg3 device found behind the bridge will also need the 40-bit
	 * DMA workaround.
	 */
	if (tg3_flag(tp, 5780_CLASS)) {
		tg3_flag_set(tp, 40BIT_DMA_BUG);
		tp->msi_cap = tp->pdev->msi_cap;
	} else {
		struct pci_dev *bridge = NULL;

		do {
			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
						PCI_DEVICE_ID_SERVERWORKS_EPB,
						bridge);
			if (bridge && bridge->subordinate &&
			    (bridge->subordinate->number <=
			     tp->pdev->bus->number) &&
			    (bridge->subordinate->busn_res.end >=
			     tp->pdev->bus->number)) {
				tg3_flag_set(tp, 40BIT_DMA_BUG);
				pci_dev_put(bridge);
				break;
			}
		} while (bridge);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_asic_rev(tp) == ASIC_REV_5714)
		tp->pdev_peer = tg3_find_peer(tp);

	/* Determine TSO capabilities */
	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
		; /* Do nothing. HW bug. */
	else if (tg3_flag(tp, 57765_PLUS))
		tg3_flag_set(tp, HW_TSO_3);
	else if (tg3_flag(tp, 5755_PLUS) ||
		 tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, HW_TSO_2);
	else if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, HW_TSO_1);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
			tg3_flag_clear(tp, TSO_BUG);
	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
		tg3_flag_set(tp, FW_TSO);
		tg3_flag_set(tp, TSO_BUG);
		if (tg3_asic_rev(tp) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}
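	/* 57765_PLUS chips get the third-generation TSO engine, 5755_PLUS
	 * and 5906 the second, and other 5750_PLUS chips the first (with a
	 * known bug worked around below).  Older chips fall back to
	 * firmware-based TSO, except 5700/5701 and 5705 A0, which get no
	 * TSO at all; 5719 A0 is skipped entirely due to a HW bug.
	 */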

	/* Selectively allow TSO based on operating conditions */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3) ||
	    tg3_flag(tp, FW_TSO)) {
		/* For firmware TSO, assume ASF is disabled.
		 * We'll disable TSO later if we discover ASF
		 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;

	if (tg3_flag(tp, 5750_PLUS)) {
		tg3_flag_set(tp, SUPPORT_MSI);
		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
		     tp->pdev_peer == tp->pdev))
			tg3_flag_clear(tp, SUPPORT_MSI);

		if (tg3_flag(tp, 5755_PLUS) ||
		    tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}

		if (tg3_flag(tp, 57765_PLUS)) {
			tg3_flag_set(tp, SUPPORT_MSIX);
			tp->irq_max = TG3_IRQ_MAX_VECS;
		}
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
	if (tp->irq_max > 1) {
		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);

		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
		    tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}

	if (tg3_flag(tp, 5755_PLUS) ||
	    tg3_asic_rev(tp) == ASIC_REV_5906)
		tg3_flag_set(tp, SHORT_DMA_BUG);

	if (tg3_asic_rev(tp) == ASIC_REV_5719)
		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;

	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
	    tg3_asic_rev(tp) == ASIC_REV_5762)
		tg3_flag_set(tp, LRG_PROD_RING_CAP);

	if (tg3_flag(tp, 57765_PLUS) &&
	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
		tg3_flag_set(tp, USE_JUMBO_BDFLAG);

	if (!tg3_flag(tp, 5705_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    tg3_flag(tp, USE_JUMBO_BDFLAG))
		tg3_flag_set(tp, JUMBO_CAPABLE);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
				tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
				tg3_flag_set(tp, CLKREQ_BUG);
		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		/* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
		tg3_flag_set(tp, PCI_EXPRESS);
	} else if (!tg3_flag(tp, 5705_PLUS) ||
		   tg3_flag(tp, 5780_CLASS)) {
		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
		if (!tp->pcix_cap) {
			dev_err(&tp->pdev->dev,
				"Cannot find PCI-X capability, aborting\n");
			return -EIO;
		}

		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}

	/* If we have an AMD 762 or VIA K8T800 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles.  We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(tg3_write_reorder_chipsets) &&
	    !tg3_flag(tp, PCI_EXPRESS))
		tg3_flag_set(tp, MBOX_WRITE_REORDER);

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
			     &tp->pci_cacheline_sz);
	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
			     &tp->pci_lat_timer);
	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
		/* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
		tg3_flag_set(tp, TXD_MBOX_HWBUG);

		/* If we are in PCI-X mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

			tg3_flag_set(tp, PCIX_TARGET_HWBUG);

			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tg3_flag_set(tp, PCI_HIGH_SPEED);
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tg3_flag_set(tp, PCI_32BIT);

	/* Chip-specific fixup from Broadcom driver */
	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Default fast path register access methods */
	tp->read32 = tg3_read32;
	tp->write32 = tg3_write32;
	tp->read32_mbox = tg3_read32;
	tp->write32_mbox = tg3_write32;
	tp->write32_tx_mbox = tg3_write32;
	tp->write32_rx_mbox = tg3_write32;

	/* Various workaround register access methods */
	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
		tp->write32 = tg3_write_indirect_reg32;
	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
		 (tg3_flag(tp, PCI_EXPRESS) &&
		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
		 * chips, the workaround is to read back all reg writes
		 * except those to mailbox regs.
		 *
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	if (tg3_flag(tp, ICH_WORKAROUND)) {
		tp->read32 = tg3_read_indirect_reg32;
		tp->write32 = tg3_write_indirect_reg32;
		tp->read32_mbox = tg3_read_indirect_mbox;
		tp->write32_mbox = tg3_write_indirect_mbox;
		tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		tp->read32_mbox = tg3_read32_mbox_5906;
		tp->write32_mbox = tg3_write32_mbox_5906;
		tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}

	if (tp->write32 == tg3_write_indirect_reg32 ||
	    (tg3_flag(tp, PCIX_MODE) &&
	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	      tg3_asic_rev(tp) == ASIC_REV_5701)))
		tg3_flag_set(tp, SRAM_USE_CONFIG);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tg3_flag(tp, PCIX_MODE)) {
			pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
		   tg3_asic_rev(tp) == ASIC_REV_5720) {
		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
			val = tr32(TG3_CPMU_STATUS);

		if (tg3_asic_rev(tp) == ASIC_REV_5717)
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}

	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
		tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}

	/* Get eeprom hw config before calling tg3_set_power_state().
	 * In particular, the TG3_FLAG_IS_NIC flag must be
	 * determined before calling tg3_set_power_state() so that
	 * we know whether or not to switch out of Vaux power.
	 * When the flag is set, it means that GPIO1 is used for eeprom
	 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
	tg3_get_eeprom_hw_cfg(tp);

	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}

	if (tg3_flag(tp, ENABLE_APE)) {
		/* Allow reads and writes to the
		 * APE register and memory space.
		 */
		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
				 PCISTATE_ALLOW_APE_SHMEM_WR |
				 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
		tp->ape_hb_interval =
			msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
	}

	/* Set up tp->grc_local_ctrl before calling
	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
	 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_flag(tp, EEPROM_WRITE_PROT))
		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
				       GRC_LCLCTRL_GPIO_OUTPUT1);
	/* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;

	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
	    tg3_flag(tp, 57765_CLASS))
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;

	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* Turn off the debug UART. */
		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
		if (tg3_flag(tp, IS_NIC))
			/* Keep VMain power. */
			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					      GRC_LCLCTRL_GPIO_OUTPUT0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5762)
		tp->grc_local_ctrl |=
			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;

	/* Switch out of Vaux if it is a NIC */
	tg3_pwrsrc_switch_to_vmain(tp);

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
		tg3_flag_set(tp, JUMBO_RING_ENABLE);

	/* Determine WakeOnLan speed to use. */
	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
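	/* The function number derived from devfn is not reliable on all
	 * chips: 5704 and 5780-class parts in PCI-X mode report it in the
	 * PCI-X status register, while 5717/5719/5720 parts report it via
	 * NIC SRAM, falling back to the CPMU status register when the SRAM
	 * signature is absent.
	 */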
16819 tg3_flag_clear(tp, WOL_SPEED_100MB);
16820 } else {
16821 tg3_flag_set(tp, WOL_SPEED_100MB);
16822 }
16823
16824 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16825 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16826
16827 	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
16828 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16829 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16830 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16831 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16832 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16833 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16834 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16835
16836 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16837 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16838 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16839 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16840 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16841
16842 if (tg3_flag(tp, 5705_PLUS) &&
16843 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16844 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16845 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16846 !tg3_flag(tp, 57765_PLUS)) {
16847 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16848 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16849 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16850 tg3_asic_rev(tp) == ASIC_REV_5761) {
16851 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16852 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16853 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16854 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16855 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16856 } else
16857 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16858 }
16859
16860 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16861 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16862 tp->phy_otp = tg3_read_otp_phycfg(tp);
16863 if (tp->phy_otp == 0)
16864 tp->phy_otp = TG3_OTP_DEFAULT;
16865 }
16866
16867 if (tg3_flag(tp, CPMU_PRESENT))
16868 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16869 else
16870 tp->mi_mode = MAC_MI_MODE_BASE;
16871
16872 tp->coalesce_mode = 0;
16873 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16874 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16875 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16876
16877 	/* Set these bits to enable the statistics workaround. */
16878 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16879 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16880 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16881 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16882 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16883 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16884 }
16885
16886 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16887 tg3_asic_rev(tp) == ASIC_REV_57780)
16888 tg3_flag_set(tp, USE_PHYLIB);
16889
16890 err = tg3_mdio_init(tp);
16891 if (err)
16892 return err;
16893
16894 /* Initialize data/descriptor byte/word swapping. */
16895 val = tr32(GRC_MODE);
16896 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16897 tg3_asic_rev(tp) == ASIC_REV_5762)
16898 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16899 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16900 GRC_MODE_B2HRX_ENABLE |
16901 GRC_MODE_HTX2B_ENABLE |
16902 GRC_MODE_HOST_STACKUP);
16903 else
16904 val &= GRC_MODE_HOST_STACKUP;
16905
16906 tw32(GRC_MODE, val | tp->grc_mode);
16907
16908 tg3_switch_clocks(tp);
16909
16910 /* Clear this out for sanity. */
16911 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16912
16913 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16914 tw32(TG3PCI_REG_BASE_ADDR, 0);
16915
16916 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16917 &pci_state_reg);
16918 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16919 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16920 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16921 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16922 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16923 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16924 void __iomem *sram_base;
16925
16926 			/* Write some dummy words into the SRAM status block
16927 			 * area and see if they read back correctly. If the
16928 			 * read-back is bad, force-enable the PCI-X workaround.
16929 */
16930 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16931
16932 writel(0x00000000, sram_base);
16933 writel(0x00000000, sram_base + 4);
16934 writel(0xffffffff, sram_base + 4);
16935 if (readl(sram_base) != 0x00000000)
16936 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16937 }
16938 }
16939
16940 udelay(50);
16941 tg3_nvram_init(tp);
16942
16943 /* If the device has an NVRAM, no need to load patch firmware */
16944 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16945 !tg3_flag(tp, NO_NVRAM))
16946 tp->fw_needed = NULL;
16947
16948 grc_misc_cfg = tr32(GRC_MISC_CFG);
16949 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16950
16951 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16952 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16953 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16954 tg3_flag_set(tp, IS_5788);
16955
16956 if (!tg3_flag(tp, IS_5788) &&
16957 tg3_asic_rev(tp) != ASIC_REV_5700)
16958 tg3_flag_set(tp, TAGGED_STATUS);
16959 if (tg3_flag(tp, TAGGED_STATUS)) {
16960 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16961 HOSTCC_MODE_CLRTICK_TXBD);
16962
16963 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16964 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16965 tp->misc_host_ctrl);
16966 }
16967
16968 /* Preserve the APE MAC_MODE bits */
16969 if (tg3_flag(tp, ENABLE_APE))
16970 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16971 else
16972 tp->mac_mode = 0;
16973
16974 if (tg3_10_100_only_device(tp, ent))
16975 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16976
16977 err = tg3_phy_probe(tp);
16978 if (err) {
16979 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16980 /* ... but do not return immediately ... */
16981 tg3_mdio_fini(tp);
16982 }
16983
16984 tg3_read_vpd(tp);
16985 tg3_read_fw_ver(tp);
16986
16987 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16988 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16989 } else {
16990 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16991 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16992 else
16993 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16994 }
16995
16996 /* 5700 {AX,BX} chips have a broken status block link
16997 * change bit implementation, so we must use the
16998 * status register in those cases.
16999 */
17000 if (tg3_asic_rev(tp) == ASIC_REV_5700)
17001 tg3_flag_set(tp, USE_LINKCHG_REG);
17002 else
17003 tg3_flag_clear(tp, USE_LINKCHG_REG);
17004
17005 	/* The led_ctrl is set during tg3_phy_probe; here we may
17006 * have to force the link status polling mechanism based
17007 * upon subsystem IDs.
17008 */
17009 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
17010 tg3_asic_rev(tp) == ASIC_REV_5701 &&
17011 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
17012 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17013 tg3_flag_set(tp, USE_LINKCHG_REG);
17014 }
17015
17016 /* For all SERDES we poll the MAC status register. */
17017 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
17018 tg3_flag_set(tp, POLL_SERDES);
17019 else
17020 tg3_flag_clear(tp, POLL_SERDES);
17021
17022 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17023 tg3_flag_set(tp, POLL_CPMU_LINK);
17024
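	/* Reserve headroom so the IP header is 4-byte aligned. The 5701
	 * in PCI-X mode reportedly cannot DMA to 2-byte-aligned buffers,
	 * so skip the alignment pad there and, on architectures without
	 * efficient unaligned access, copy every received packet instead.
	 */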
17025 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17026 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17027 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17028 tg3_flag(tp, PCIX_MODE)) {
17029 tp->rx_offset = NET_SKB_PAD;
17030 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17031 tp->rx_copy_thresh = ~(u16)0;
17032 #endif
17033 }
17034
17035 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17036 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17037 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17038
17039 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17040
17041 /* Increment the rx prod index on the rx std ring by at most
17042 	 * 8 for these chips to work around hw errata.
17043 */
17044 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17045 tg3_asic_rev(tp) == ASIC_REV_5752 ||
17046 tg3_asic_rev(tp) == ASIC_REV_5755)
17047 tp->rx_std_max_post = 8;
17048
17049 if (tg3_flag(tp, ASPM_WORKAROUND))
17050 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17051 PCIE_PWR_MGMT_L1_THRESH_MSK;
17052
17053 return err;
17054 }
17055
17056 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17057 {
17058 u32 hi, lo, mac_offset;
17059 int addr_ok = 0;
17060 int err;
17061
17062 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17063 return 0;
17064
17065 if (tg3_flag(tp, IS_SSB_CORE)) {
17066 err = ssb_gige_get_macaddr(tp->pdev, addr);
17067 if (!err && is_valid_ether_addr(addr))
17068 return 0;
17069 }
17070
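	/* 0x7c is the default NVRAM offset of the MAC address; dual-MAC
	 * and multi-function parts keep per-port copies at the offsets
	 * handled below.
	 */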
17071 mac_offset = 0x7c;
17072 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17073 tg3_flag(tp, 5780_CLASS)) {
17074 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17075 mac_offset = 0xcc;
17076 if (tg3_nvram_lock(tp))
17077 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17078 else
17079 tg3_nvram_unlock(tp);
17080 } else if (tg3_flag(tp, 5717_PLUS)) {
17081 if (tp->pci_fn & 1)
17082 mac_offset = 0xcc;
17083 if (tp->pci_fn > 1)
17084 mac_offset += 0x18c;
17085 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17086 mac_offset = 0x10;
17087
17088 /* First try to get it from MAC address mailbox. */
17089 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
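	/* A value of 0x484b ("HK" in ASCII) in the upper 16 bits is the
	 * bootcode signature marking a valid MAC address in the mailbox.
	 */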
17090 if ((hi >> 16) == 0x484b) {
17091 addr[0] = (hi >> 8) & 0xff;
17092 addr[1] = (hi >> 0) & 0xff;
17093
17094 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17095 addr[2] = (lo >> 24) & 0xff;
17096 addr[3] = (lo >> 16) & 0xff;
17097 addr[4] = (lo >> 8) & 0xff;
17098 addr[5] = (lo >> 0) & 0xff;
17099
17100 /* Some old bootcode may report a 0 MAC address in SRAM */
17101 addr_ok = is_valid_ether_addr(addr);
17102 }
17103 if (!addr_ok) {
17104 __be32 be_hi, be_lo;
17105
17106 /* Next, try NVRAM. */
17107 if (!tg3_flag(tp, NO_NVRAM) &&
17108 !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17109 !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17110 memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17111 memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17112 }
17113 /* Finally just fetch it out of the MAC control regs. */
17114 else {
17115 hi = tr32(MAC_ADDR_0_HIGH);
17116 lo = tr32(MAC_ADDR_0_LOW);
17117
17118 addr[5] = lo & 0xff;
17119 addr[4] = (lo >> 8) & 0xff;
17120 addr[3] = (lo >> 16) & 0xff;
17121 addr[2] = (lo >> 24) & 0xff;
17122 addr[1] = hi & 0xff;
17123 addr[0] = (hi >> 8) & 0xff;
17124 }
17125 }
17126
17127 if (!is_valid_ether_addr(addr))
17128 return -EINVAL;
17129 return 0;
17130 }
17131
17132 #define BOUNDARY_SINGLE_CACHELINE 1
17133 #define BOUNDARY_MULTI_CACHELINE 2
17134
17135 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17136 {
17137 int cacheline_size;
17138 u8 byte;
17139 int goal;
17140
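	/* PCI_CACHE_LINE_SIZE is specified in 32-bit words, hence the
	 * multiply by four; a value of zero means it was never
	 * programmed, in which case assume a 1024-byte line.
	 */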
17141 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17142 if (byte == 0)
17143 cacheline_size = 1024;
17144 else
17145 cacheline_size = (int) byte * 4;
17146
17147 /* On 5703 and later chips, the boundary bits have no
17148 * effect.
17149 */
17150 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17151 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17152 !tg3_flag(tp, PCI_EXPRESS))
17153 goto out;
17154
17155 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17156 goal = BOUNDARY_MULTI_CACHELINE;
17157 #else
17158 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17159 goal = BOUNDARY_SINGLE_CACHELINE;
17160 #else
17161 goal = 0;
17162 #endif
17163 #endif
17164
17165 if (tg3_flag(tp, 57765_PLUS)) {
17166 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17167 goto out;
17168 }
17169
17170 if (!goal)
17171 goto out;
17172
17173 /* PCI controllers on most RISC systems tend to disconnect
17174 * when a device tries to burst across a cache-line boundary.
17175 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17176 *
17177 * Unfortunately, for PCI-E there are only limited
17178 * write-side controls for this, and thus for reads
17179 * we will still get the disconnects. We'll also waste
17180 * these PCI cycles for both read and write for chips
17181 * other than 5700 and 5701 which do not implement the
17182 * boundary bits.
17183 */
17184 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17185 switch (cacheline_size) {
17186 case 16:
17187 case 32:
17188 case 64:
17189 case 128:
17190 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17191 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17192 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17193 } else {
17194 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17195 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17196 }
17197 break;
17198
17199 case 256:
17200 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17201 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17202 break;
17203
17204 default:
17205 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17206 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17207 break;
17208 }
17209 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17210 switch (cacheline_size) {
17211 case 16:
17212 case 32:
17213 case 64:
17214 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17215 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17216 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17217 break;
17218 }
17219 fallthrough;
17220 case 128:
17221 default:
17222 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17223 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17224 break;
17225 }
17226 } else {
17227 switch (cacheline_size) {
17228 case 16:
17229 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17230 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17231 DMA_RWCTRL_WRITE_BNDRY_16);
17232 break;
17233 }
17234 fallthrough;
17235 case 32:
17236 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17237 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17238 DMA_RWCTRL_WRITE_BNDRY_32);
17239 break;
17240 }
17241 fallthrough;
17242 case 64:
17243 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17244 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17245 DMA_RWCTRL_WRITE_BNDRY_64);
17246 break;
17247 }
17248 fallthrough;
17249 case 128:
17250 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17251 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17252 DMA_RWCTRL_WRITE_BNDRY_128);
17253 break;
17254 }
17255 fallthrough;
17256 case 256:
17257 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17258 DMA_RWCTRL_WRITE_BNDRY_256);
17259 break;
17260 case 512:
17261 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17262 DMA_RWCTRL_WRITE_BNDRY_512);
17263 break;
17264 case 1024:
17265 default:
17266 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17267 DMA_RWCTRL_WRITE_BNDRY_1024);
17268 break;
17269 }
17270 }
17271
17272 out:
17273 return val;
17274 }
17275
17276 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17277 int size, bool to_device)
17278 {
17279 struct tg3_internal_buffer_desc test_desc;
17280 u32 sram_dma_descs;
17281 int i, ret;
17282
17283 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17284
17285 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17286 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17287 tw32(RDMAC_STATUS, 0);
17288 tw32(WDMAC_STATUS, 0);
17289
17290 tw32(BUFMGR_MODE, 0);
17291 tw32(FTQ_RESET, 0);
17292
17293 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17294 test_desc.addr_lo = buf_dma & 0xffffffff;
17295 test_desc.nic_mbuf = 0x00002100;
17296 test_desc.len = size;
17297
17298 /*
17299 	 * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
17300 	 * the *second* time the tg3 driver was loaded after an
17301 * initial scan.
17302 *
17303 * Broadcom tells me:
17304 * ...the DMA engine is connected to the GRC block and a DMA
17305 * reset may affect the GRC block in some unpredictable way...
17306 * The behavior of resets to individual blocks has not been tested.
17307 *
17308 * Broadcom noted the GRC reset will also reset all sub-components.
17309 */
17310 if (to_device) {
17311 test_desc.cqid_sqid = (13 << 8) | 2;
17312
17313 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17314 udelay(40);
17315 } else {
17316 test_desc.cqid_sqid = (16 << 8) | 7;
17317
17318 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17319 udelay(40);
17320 }
17321 test_desc.flags = 0x00000005;
17322
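	/* Copy the test descriptor into NIC SRAM one 32-bit word at a
	 * time through the PCI memory window in config space.
	 */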
17323 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17324 u32 val;
17325
17326 val = *(((u32 *)&test_desc) + i);
17327 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17328 sram_dma_descs + (i * sizeof(u32)));
17329 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17330 }
17331 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17332
17333 if (to_device)
17334 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17335 else
17336 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17337
17338 ret = -ENODEV;
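	/* Poll up to ~4 ms (40 * 100 us) for the descriptor index to
	 * appear in the completion FIFO.
	 */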
17339 for (i = 0; i < 40; i++) {
17340 u32 val;
17341
17342 if (to_device)
17343 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17344 else
17345 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17346 if ((val & 0xffff) == sram_dma_descs) {
17347 ret = 0;
17348 break;
17349 }
17350
17351 udelay(100);
17352 }
17353
17354 return ret;
17355 }
17356
17357 #define TEST_BUFFER_SIZE 0x2000
17358
17359 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17360 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17361 { },
17362 };
17363
17364 static int tg3_test_dma(struct tg3 *tp)
17365 {
17366 dma_addr_t buf_dma;
17367 u32 *buf, saved_dma_rwctrl;
17368 int ret = 0;
17369
17370 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17371 &buf_dma, GFP_KERNEL);
17372 if (!buf) {
17373 ret = -ENOMEM;
17374 goto out_nofree;
17375 }
17376
17377 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17378 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17379
17380 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17381
17382 if (tg3_flag(tp, 57765_PLUS))
17383 goto out;
17384
17385 if (tg3_flag(tp, PCI_EXPRESS)) {
17386 /* DMA read watermark not used on PCIE */
17387 tp->dma_rwctrl |= 0x00180000;
17388 } else if (!tg3_flag(tp, PCIX_MODE)) {
17389 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17390 tg3_asic_rev(tp) == ASIC_REV_5750)
17391 tp->dma_rwctrl |= 0x003f0000;
17392 else
17393 tp->dma_rwctrl |= 0x003f000f;
17394 } else {
17395 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17396 tg3_asic_rev(tp) == ASIC_REV_5704) {
17397 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17398 u32 read_water = 0x7;
17399
17400 /* If the 5704 is behind the EPB bridge, we can
17401 * do the less restrictive ONE_DMA workaround for
17402 * better performance.
17403 */
17404 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17405 tg3_asic_rev(tp) == ASIC_REV_5704)
17406 tp->dma_rwctrl |= 0x8000;
17407 else if (ccval == 0x6 || ccval == 0x7)
17408 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17409
17410 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17411 read_water = 4;
17412 /* Set bit 23 to enable PCIX hw bug fix */
17413 tp->dma_rwctrl |=
17414 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17415 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17416 (1 << 23);
17417 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17418 /* 5780 always in PCIX mode */
17419 tp->dma_rwctrl |= 0x00144000;
17420 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17421 /* 5714 always in PCIX mode */
17422 tp->dma_rwctrl |= 0x00148000;
17423 } else {
17424 tp->dma_rwctrl |= 0x001b000f;
17425 }
17426 }
17427 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17428 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17429
17430 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17431 tg3_asic_rev(tp) == ASIC_REV_5704)
17432 tp->dma_rwctrl &= 0xfffffff0;
17433
17434 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17435 tg3_asic_rev(tp) == ASIC_REV_5701) {
17436 /* Remove this if it causes problems for some boards. */
17437 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17438
17439 /* On 5700/5701 chips, we need to set this bit.
17440 * Otherwise the chip will issue cacheline transactions
17441 * to streamable DMA memory with not all the byte
17442 * enables turned on. This is an error on several
17443 * RISC PCI controllers, in particular sparc64.
17444 *
17445 * On 5703/5704 chips, this bit has been reassigned
17446 * a different meaning. In particular, it is used
17447 * on those chips to enable a PCI-X workaround.
17448 */
17449 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17450 }
17451
17452 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17453
17454
17455 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17456 tg3_asic_rev(tp) != ASIC_REV_5701)
17457 goto out;
17458
17459 	/* It is best to perform the DMA test with the maximum write burst size
17460 * to expose the 5700/5701 write DMA bug.
17461 */
17462 saved_dma_rwctrl = tp->dma_rwctrl;
17463 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17464 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17465
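	/* Write a test pattern, DMA it to the chip, read it back, and
	 * verify. On the first corruption, drop the write boundary to
	 * 16 bytes and retry; give up only if corruption persists at
	 * that smallest boundary.
	 */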
17466 while (1) {
17467 u32 *p = buf, i;
17468
17469 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17470 p[i] = i;
17471
17472 /* Send the buffer to the chip. */
17473 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17474 if (ret) {
17475 dev_err(&tp->pdev->dev,
17476 "%s: Buffer write failed. err = %d\n",
17477 __func__, ret);
17478 break;
17479 }
17480
17481 /* Now read it back. */
17482 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17483 if (ret) {
17484 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17485 "err = %d\n", __func__, ret);
17486 break;
17487 }
17488
17489 /* Verify it. */
17490 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17491 if (p[i] == i)
17492 continue;
17493
17494 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17495 DMA_RWCTRL_WRITE_BNDRY_16) {
17496 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17497 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17498 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17499 break;
17500 } else {
17501 dev_err(&tp->pdev->dev,
17502 "%s: Buffer corrupted on read back! "
17503 "(%d != %d)\n", __func__, p[i], i);
17504 ret = -ENODEV;
17505 goto out;
17506 }
17507 }
17508
17509 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17510 /* Success. */
17511 ret = 0;
17512 break;
17513 }
17514 }
17515 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17516 DMA_RWCTRL_WRITE_BNDRY_16) {
17517 		/* The DMA test passed without adjusting the DMA boundary;
17518 * now look for chipsets that are known to expose the
17519 * DMA bug without failing the test.
17520 */
17521 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17522 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17523 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17524 } else {
17525 /* Safe to use the calculated DMA boundary. */
17526 tp->dma_rwctrl = saved_dma_rwctrl;
17527 }
17528
17529 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17530 }
17531
17532 out:
17533 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17534 out_nofree:
17535 return ret;
17536 }
17537
17538 static void tg3_init_bufmgr_config(struct tg3 *tp)
17539 {
17540 if (tg3_flag(tp, 57765_PLUS)) {
17541 tp->bufmgr_config.mbuf_read_dma_low_water =
17542 DEFAULT_MB_RDMA_LOW_WATER_5705;
17543 tp->bufmgr_config.mbuf_mac_rx_low_water =
17544 DEFAULT_MB_MACRX_LOW_WATER_57765;
17545 tp->bufmgr_config.mbuf_high_water =
17546 DEFAULT_MB_HIGH_WATER_57765;
17547
17548 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17549 DEFAULT_MB_RDMA_LOW_WATER_5705;
17550 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17551 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17552 tp->bufmgr_config.mbuf_high_water_jumbo =
17553 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17554 } else if (tg3_flag(tp, 5705_PLUS)) {
17555 tp->bufmgr_config.mbuf_read_dma_low_water =
17556 DEFAULT_MB_RDMA_LOW_WATER_5705;
17557 tp->bufmgr_config.mbuf_mac_rx_low_water =
17558 DEFAULT_MB_MACRX_LOW_WATER_5705;
17559 tp->bufmgr_config.mbuf_high_water =
17560 DEFAULT_MB_HIGH_WATER_5705;
17561 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17562 tp->bufmgr_config.mbuf_mac_rx_low_water =
17563 DEFAULT_MB_MACRX_LOW_WATER_5906;
17564 tp->bufmgr_config.mbuf_high_water =
17565 DEFAULT_MB_HIGH_WATER_5906;
17566 }
17567
17568 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17569 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17570 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17571 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17572 tp->bufmgr_config.mbuf_high_water_jumbo =
17573 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17574 } else {
17575 tp->bufmgr_config.mbuf_read_dma_low_water =
17576 DEFAULT_MB_RDMA_LOW_WATER;
17577 tp->bufmgr_config.mbuf_mac_rx_low_water =
17578 DEFAULT_MB_MACRX_LOW_WATER;
17579 tp->bufmgr_config.mbuf_high_water =
17580 DEFAULT_MB_HIGH_WATER;
17581
17582 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17583 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17584 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17585 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17586 tp->bufmgr_config.mbuf_high_water_jumbo =
17587 DEFAULT_MB_HIGH_WATER_JUMBO;
17588 }
17589
17590 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17591 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17592 }
17593
17594 static char *tg3_phy_string(struct tg3 *tp)
17595 {
17596 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17597 case TG3_PHY_ID_BCM5400: return "5400";
17598 case TG3_PHY_ID_BCM5401: return "5401";
17599 case TG3_PHY_ID_BCM5411: return "5411";
17600 case TG3_PHY_ID_BCM5701: return "5701";
17601 case TG3_PHY_ID_BCM5703: return "5703";
17602 case TG3_PHY_ID_BCM5704: return "5704";
17603 case TG3_PHY_ID_BCM5705: return "5705";
17604 case TG3_PHY_ID_BCM5750: return "5750";
17605 case TG3_PHY_ID_BCM5752: return "5752";
17606 case TG3_PHY_ID_BCM5714: return "5714";
17607 case TG3_PHY_ID_BCM5780: return "5780";
17608 case TG3_PHY_ID_BCM5755: return "5755";
17609 case TG3_PHY_ID_BCM5787: return "5787";
17610 case TG3_PHY_ID_BCM5784: return "5784";
17611 case TG3_PHY_ID_BCM5756: return "5722/5756";
17612 case TG3_PHY_ID_BCM5906: return "5906";
17613 case TG3_PHY_ID_BCM5761: return "5761";
17614 case TG3_PHY_ID_BCM5718C: return "5718C";
17615 case TG3_PHY_ID_BCM5718S: return "5718S";
17616 case TG3_PHY_ID_BCM57765: return "57765";
17617 case TG3_PHY_ID_BCM5719C: return "5719C";
17618 case TG3_PHY_ID_BCM5720C: return "5720C";
17619 case TG3_PHY_ID_BCM5762: return "5762C";
17620 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17621 case 0: return "serdes";
17622 default: return "unknown";
17623 }
17624 }
17625
17626 static char *tg3_bus_string(struct tg3 *tp, char *str)
17627 {
17628 if (tg3_flag(tp, PCI_EXPRESS)) {
17629 strcpy(str, "PCI Express");
17630 return str;
17631 } else if (tg3_flag(tp, PCIX_MODE)) {
17632 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17633
17634 strcpy(str, "PCIX:");
17635
17636 if ((clock_ctrl == 7) ||
17637 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17638 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17639 strcat(str, "133MHz");
17640 else if (clock_ctrl == 0)
17641 strcat(str, "33MHz");
17642 else if (clock_ctrl == 2)
17643 strcat(str, "50MHz");
17644 else if (clock_ctrl == 4)
17645 strcat(str, "66MHz");
17646 else if (clock_ctrl == 6)
17647 strcat(str, "100MHz");
17648 } else {
17649 strcpy(str, "PCI:");
17650 if (tg3_flag(tp, PCI_HIGH_SPEED))
17651 strcat(str, "66MHz");
17652 else
17653 strcat(str, "33MHz");
17654 }
17655 if (tg3_flag(tp, PCI_32BIT))
17656 strcat(str, ":32-bit");
17657 else
17658 strcat(str, ":64-bit");
17659 return str;
17660 }
17661
17662 static void tg3_init_coal(struct tg3 *tp)
17663 {
17664 struct ethtool_coalesce *ec = &tp->coal;
17665
17666 memset(ec, 0, sizeof(*ec));
17667 ec->cmd = ETHTOOL_GCOALESCE;
17668 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17669 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17670 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17671 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17672 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17673 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17674 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17675 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17676 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17677
17678 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17679 HOSTCC_MODE_CLRTICK_TXBD)) {
17680 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17681 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17682 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17683 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17684 }
17685
17686 if (tg3_flag(tp, 5705_PLUS)) {
17687 ec->rx_coalesce_usecs_irq = 0;
17688 ec->tx_coalesce_usecs_irq = 0;
17689 ec->stats_block_coalesce_usecs = 0;
17690 }
17691 }
17692
17693 static int tg3_init_one(struct pci_dev *pdev,
17694 const struct pci_device_id *ent)
17695 {
17696 struct net_device *dev;
17697 struct tg3 *tp;
17698 int i, err;
17699 u32 sndmbx, rcvmbx, intmbx;
17700 char str[40];
17701 u64 dma_mask, persist_dma_mask;
17702 netdev_features_t features = 0;
17703 u8 addr[ETH_ALEN] __aligned(2);
17704
17705 err = pci_enable_device(pdev);
17706 if (err) {
17707 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17708 return err;
17709 }
17710
17711 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17712 if (err) {
17713 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17714 goto err_out_disable_pdev;
17715 }
17716
17717 pci_set_master(pdev);
17718
17719 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17720 if (!dev) {
17721 err = -ENOMEM;
17722 goto err_out_free_res;
17723 }
17724
17725 SET_NETDEV_DEV(dev, &pdev->dev);
17726
17727 tp = netdev_priv(dev);
17728 tp->pdev = pdev;
17729 tp->dev = dev;
17730 tp->rx_mode = TG3_DEF_RX_MODE;
17731 tp->tx_mode = TG3_DEF_TX_MODE;
17732 tp->irq_sync = 1;
17733 tp->pcierr_recovery = false;
17734
17735 if (tg3_debug > 0)
17736 tp->msg_enable = tg3_debug;
17737 else
17738 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17739
17740 if (pdev_is_ssb_gige_core(pdev)) {
17741 tg3_flag_set(tp, IS_SSB_CORE);
17742 if (ssb_gige_must_flush_posted_writes(pdev))
17743 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17744 if (ssb_gige_one_dma_at_once(pdev))
17745 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17746 if (ssb_gige_have_roboswitch(pdev)) {
17747 tg3_flag_set(tp, USE_PHYLIB);
17748 tg3_flag_set(tp, ROBOSWITCH);
17749 }
17750 if (ssb_gige_is_rgmii(pdev))
17751 tg3_flag_set(tp, RGMII_MODE);
17752 }
17753
17754 /* The word/byte swap controls here control register access byte
17755 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17756 * setting below.
17757 */
17758 tp->misc_host_ctrl =
17759 MISC_HOST_CTRL_MASK_PCI_INT |
17760 MISC_HOST_CTRL_WORD_SWAP |
17761 MISC_HOST_CTRL_INDIR_ACCESS |
17762 MISC_HOST_CTRL_PCISTATE_RW;
17763
17764 /* The NONFRM (non-frame) byte/word swap controls take effect
17765 * on descriptor entries, anything which isn't packet data.
17766 *
17767 * The StrongARM chips on the board (one for tx, one for rx)
17768 * are running in big-endian mode.
17769 */
17770 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17771 GRC_MODE_WSWAP_NONFRM_DATA);
17772 #ifdef __BIG_ENDIAN
17773 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17774 #endif
17775 spin_lock_init(&tp->lock);
17776 spin_lock_init(&tp->indirect_lock);
17777 INIT_WORK(&tp->reset_task, tg3_reset_task);
17778
17779 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17780 if (!tp->regs) {
17781 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17782 err = -ENOMEM;
17783 goto err_out_free_dev;
17784 }
17785
17786 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17787 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17790 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17791 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17792 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17793 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17794 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17795 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17796 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17797 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17798 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17799 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17800 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17801 tg3_flag_set(tp, ENABLE_APE);
17802 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17803 if (!tp->aperegs) {
17804 dev_err(&pdev->dev,
17805 "Cannot map APE registers, aborting\n");
17806 err = -ENOMEM;
17807 goto err_out_iounmap;
17808 }
17809 }
17810
17811 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17812 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17813
17814 dev->ethtool_ops = &tg3_ethtool_ops;
17815 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17816 dev->netdev_ops = &tg3_netdev_ops;
17817 dev->irq = pdev->irq;
17818
17819 err = tg3_get_invariants(tp, ent);
17820 if (err) {
17821 dev_err(&pdev->dev,
17822 "Problem fetching invariants of chip, aborting\n");
17823 goto err_out_apeunmap;
17824 }
17825
17826 /* The EPB bridge inside 5714, 5715, and 5780 and any
17827 * device behind the EPB cannot support DMA addresses > 40-bit.
17828 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17829 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17830 * do DMA address check in __tg3_start_xmit().
17831 */
17832 if (tg3_flag(tp, IS_5788))
17833 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17834 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17835 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17836 #ifdef CONFIG_HIGHMEM
17837 dma_mask = DMA_BIT_MASK(64);
17838 #endif
17839 } else
17840 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17841
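	/* Keep coherent (descriptor/status block) allocations below 2 GB
	 * on the 57766; hence the 31-bit persistent DMA mask.
	 */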
17842 if (tg3_asic_rev(tp) == ASIC_REV_57766)
17843 persist_dma_mask = DMA_BIT_MASK(31);
17844
17845 /* Configure DMA attributes. */
17846 if (dma_mask > DMA_BIT_MASK(32)) {
17847 err = dma_set_mask(&pdev->dev, dma_mask);
17848 if (!err) {
17849 features |= NETIF_F_HIGHDMA;
17850 err = dma_set_coherent_mask(&pdev->dev,
17851 persist_dma_mask);
17852 if (err < 0) {
17853 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17854 "DMA for consistent allocations\n");
17855 goto err_out_apeunmap;
17856 }
17857 }
17858 }
17859 if (err || dma_mask == DMA_BIT_MASK(32)) {
17860 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17861 if (err) {
17862 dev_err(&pdev->dev,
17863 "No usable DMA configuration, aborting\n");
17864 goto err_out_apeunmap;
17865 }
17866 }
17867
17868 tg3_init_bufmgr_config(tp);
17869
17870 /* 5700 B0 chips do not support checksumming correctly due
17871 * to hardware bugs.
17872 */
17873 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17874 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17875
17876 if (tg3_flag(tp, 5755_PLUS))
17877 features |= NETIF_F_IPV6_CSUM;
17878 }
17879
17880 /* TSO is on by default on chips that support hardware TSO.
17881 * Firmware TSO on older chips gives lower performance, so it
17882 * is off by default, but can be enabled using ethtool.
17883 */
17884 if ((tg3_flag(tp, HW_TSO_1) ||
17885 tg3_flag(tp, HW_TSO_2) ||
17886 tg3_flag(tp, HW_TSO_3)) &&
17887 (features & NETIF_F_IP_CSUM))
17888 features |= NETIF_F_TSO;
17889 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17890 if (features & NETIF_F_IPV6_CSUM)
17891 features |= NETIF_F_TSO6;
17892 if (tg3_flag(tp, HW_TSO_3) ||
17893 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17894 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17895 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17896 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17897 tg3_asic_rev(tp) == ASIC_REV_57780)
17898 features |= NETIF_F_TSO_ECN;
17899 }
17900
17901 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17902 NETIF_F_HW_VLAN_CTAG_RX;
17903 dev->vlan_features |= features;
17904
17905 /*
17906 * Add loopback capability only for a subset of devices that support
17907 	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17908 * loopback for the remaining devices.
17909 */
17910 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17911 !tg3_flag(tp, CPMU_PRESENT))
17912 /* Add the loopback capability */
17913 features |= NETIF_F_LOOPBACK;
17914
17915 dev->hw_features |= features;
17916 dev->priv_flags |= IFF_UNICAST_FLT;
17917
17918 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17919 dev->min_mtu = TG3_MIN_MTU;
17920 dev->max_mtu = TG3_MAX_MTU(tp);
17921
17922 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17923 !tg3_flag(tp, TSO_CAPABLE) &&
17924 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17925 tg3_flag_set(tp, MAX_RXPEND_64);
17926 tp->rx_pending = 63;
17927 }
17928
17929 err = tg3_get_device_address(tp, addr);
17930 if (err) {
17931 dev_err(&pdev->dev,
17932 "Could not obtain valid ethernet address, aborting\n");
17933 goto err_out_apeunmap;
17934 }
17935 eth_hw_addr_set(dev, addr);
17936
17937 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17938 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17939 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17940 for (i = 0; i < tp->irq_max; i++) {
17941 struct tg3_napi *tnapi = &tp->napi[i];
17942
17943 tnapi->tp = tp;
17944 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17945
17946 tnapi->int_mbox = intmbx;
17947 intmbx += 0x8;
17948
17949 tnapi->consmbox = rcvmbx;
17950 tnapi->prodmbox = sndmbx;
17951
17952 if (i)
17953 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17954 else
17955 tnapi->coal_now = HOSTCC_MODE_NOW;
17956
17957 if (!tg3_flag(tp, SUPPORT_MSIX))
17958 break;
17959
17960 /*
17961 * If we support MSIX, we'll be using RSS. If we're using
17962 * RSS, the first vector only handles link interrupts and the
17963 * remaining vectors handle rx and tx interrupts. Reuse the
17964 		 * mailbox values for the next iteration. The values we set up
17965 * above are still useful for the single vectored mode.
17966 */
17967 if (!i)
17968 continue;
17969
17970 rcvmbx += 0x8;
17971
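		/* The send producer mailboxes appear to alternate between
		 * the low and high halves of consecutive 64-bit mailbox
		 * registers, hence the -0x4/+0xc zig-zag below.
		 */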
17972 if (sndmbx & 0x4)
17973 sndmbx -= 0x4;
17974 else
17975 sndmbx += 0xc;
17976 }
17977
17978 /*
17979 	 * Reset chip in case UNDI or EFI driver did not shut down
17980 	 * nicely. The DMA self test will enable WDMAC and we'll see (spurious)
17981 * pending DMA on the PCI bus at that point.
17982 */
17983 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17984 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17985 tg3_full_lock(tp, 0);
17986 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17987 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17988 tg3_full_unlock(tp);
17989 }
17990
17991 err = tg3_test_dma(tp);
17992 if (err) {
17993 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17994 goto err_out_apeunmap;
17995 }
17996
17997 tg3_init_coal(tp);
17998
17999 pci_set_drvdata(pdev, dev);
18000
18001 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
18002 tg3_asic_rev(tp) == ASIC_REV_5720 ||
18003 tg3_asic_rev(tp) == ASIC_REV_5762)
18004 tg3_flag_set(tp, PTP_CAPABLE);
18005
18006 tg3_timer_init(tp);
18007
18008 tg3_carrier_off(tp);
18009
18010 err = register_netdev(dev);
18011 if (err) {
18012 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
18013 goto err_out_apeunmap;
18014 }
18015
18016 if (tg3_flag(tp, PTP_CAPABLE)) {
18017 tg3_ptp_init(tp);
18018 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
18019 &tp->pdev->dev);
18020 if (IS_ERR(tp->ptp_clock))
18021 tp->ptp_clock = NULL;
18022 }
18023
18024 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18025 tp->board_part_number,
18026 tg3_chip_rev_id(tp),
18027 tg3_bus_string(tp, str),
18028 dev->dev_addr);
18029
18030 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18031 char *ethtype;
18032
18033 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18034 ethtype = "10/100Base-TX";
18035 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18036 ethtype = "1000Base-SX";
18037 else
18038 ethtype = "10/100/1000Base-T";
18039
18040 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18041 "(WireSpeed[%d], EEE[%d])\n",
18042 tg3_phy_string(tp), ethtype,
18043 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18044 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18045 }
18046
18047 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18048 (dev->features & NETIF_F_RXCSUM) != 0,
18049 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18050 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18051 tg3_flag(tp, ENABLE_ASF) != 0,
18052 tg3_flag(tp, TSO_CAPABLE) != 0);
18053 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18054 tp->dma_rwctrl,
18055 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18056 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18057
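	/* Snapshot PCI config space so tg3_io_slot_reset() can restore
	 * it after an AER/EEH reset.
	 */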
18058 pci_save_state(pdev);
18059
18060 return 0;
18061
18062 err_out_apeunmap:
18063 if (tp->aperegs) {
18064 iounmap(tp->aperegs);
18065 tp->aperegs = NULL;
18066 }
18067
18068 err_out_iounmap:
18069 if (tp->regs) {
18070 iounmap(tp->regs);
18071 tp->regs = NULL;
18072 }
18073
18074 err_out_free_dev:
18075 free_netdev(dev);
18076
18077 err_out_free_res:
18078 pci_release_regions(pdev);
18079
18080 err_out_disable_pdev:
18081 if (pci_is_enabled(pdev))
18082 pci_disable_device(pdev);
18083 return err;
18084 }
18085
18086 static void tg3_remove_one(struct pci_dev *pdev)
18087 {
18088 struct net_device *dev = pci_get_drvdata(pdev);
18089
18090 if (dev) {
18091 struct tg3 *tp = netdev_priv(dev);
18092
18093 tg3_ptp_fini(tp);
18094
18095 release_firmware(tp->fw);
18096
18097 tg3_reset_task_cancel(tp);
18098
18099 if (tg3_flag(tp, USE_PHYLIB)) {
18100 tg3_phy_fini(tp);
18101 tg3_mdio_fini(tp);
18102 }
18103
18104 unregister_netdev(dev);
18105 if (tp->aperegs) {
18106 iounmap(tp->aperegs);
18107 tp->aperegs = NULL;
18108 }
18109 if (tp->regs) {
18110 iounmap(tp->regs);
18111 tp->regs = NULL;
18112 }
18113 free_netdev(dev);
18114 pci_release_regions(pdev);
18115 pci_disable_device(pdev);
18116 }
18117 }
18118
18119 #ifdef CONFIG_PM_SLEEP
18120 static int tg3_suspend(struct device *device)
18121 {
18122 struct net_device *dev = dev_get_drvdata(device);
18123 struct tg3 *tp = netdev_priv(dev);
18124
18125 rtnl_lock();
18126
18127 if (!netif_running(dev))
18128 goto unlock;
18129
18130 tg3_reset_task_cancel(tp);
18131 tg3_phy_stop(tp);
18132 tg3_netif_stop(tp);
18133
18134 tg3_timer_stop(tp);
18135
18136 tg3_full_lock(tp, 1);
18137 tg3_disable_ints(tp);
18138 tg3_full_unlock(tp);
18139
18140 netif_device_detach(dev);
18141
18142 tg3_full_lock(tp, 0);
18143 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18144 tg3_flag_clear(tp, INIT_COMPLETE);
18145 tg3_full_unlock(tp);
18146
18147 tg3_power_down_prepare(tp);
18148
18149 unlock:
18150 rtnl_unlock();
18151 return 0;
18152 }
18153
18154 static int tg3_resume(struct device *device)
18155 {
18156 struct net_device *dev = dev_get_drvdata(device);
18157 struct tg3 *tp = netdev_priv(dev);
18158 int err = 0;
18159
18160 rtnl_lock();
18161
18162 if (!netif_running(dev))
18163 goto unlock;
18164
18165 netif_device_attach(dev);
18166
18167 tg3_full_lock(tp, 0);
18168
18169 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18170
18171 tg3_flag_set(tp, INIT_COMPLETE);
18172 err = tg3_restart_hw(tp,
18173 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18174 if (err)
18175 goto out;
18176
18177 tg3_timer_start(tp);
18178
18179 tg3_netif_start(tp);
18180
18181 out:
18182 tg3_full_unlock(tp);
18183
18184 if (!err)
18185 tg3_phy_start(tp);
18186
18187 unlock:
18188 rtnl_unlock();
18189 return err;
18190 }
18191 #endif /* CONFIG_PM_SLEEP */
18192
18193 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18194
18195 static void tg3_shutdown(struct pci_dev *pdev)
18196 {
18197 struct net_device *dev = pci_get_drvdata(pdev);
18198 struct tg3 *tp = netdev_priv(dev);
18199
18200 tg3_reset_task_cancel(tp);
18201
18202 rtnl_lock();
18203
18204 netif_device_detach(dev);
18205
18206 if (netif_running(dev))
18207 dev_close(dev);
18208
18209 if (system_state == SYSTEM_POWER_OFF)
18210 tg3_power_down(tp);
18211
18212 rtnl_unlock();
18213
18214 pci_disable_device(pdev);
18215 }
18216
18217 /**
18218 * tg3_io_error_detected - called when PCI error is detected
18219 * @pdev: Pointer to PCI device
18220 * @state: The current pci connection state
18221 *
18222 * This function is called after a PCI bus error affecting
18223 * this device has been detected.
18224 */
18225 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18226 pci_channel_state_t state)
18227 {
18228 struct net_device *netdev = pci_get_drvdata(pdev);
18229 struct tg3 *tp = netdev_priv(netdev);
18230 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18231
18232 netdev_info(netdev, "PCI I/O error detected\n");
18233
18234 /* Want to make sure that the reset task doesn't run */
18235 tg3_reset_task_cancel(tp);
18236
18237 rtnl_lock();
18238
18239 	/* Could be a second call, or maybe we don't have netdev yet */
18240 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18241 goto done;
18242
18243 /* We needn't recover from permanent error */
18244 if (state == pci_channel_io_frozen)
18245 tp->pcierr_recovery = true;
18246
18247 tg3_phy_stop(tp);
18248
18249 tg3_netif_stop(tp);
18250
18251 tg3_timer_stop(tp);
18252
18253 netif_device_detach(netdev);
18254
18255 /* Clean up software state, even if MMIO is blocked */
18256 tg3_full_lock(tp, 0);
18257 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18258 tg3_full_unlock(tp);
18259
18260 done:
18261 if (state == pci_channel_io_perm_failure) {
18262 if (netdev) {
18263 tg3_napi_enable(tp);
18264 dev_close(netdev);
18265 }
18266 err = PCI_ERS_RESULT_DISCONNECT;
18267 } else {
18268 pci_disable_device(pdev);
18269 }
18270
18271 rtnl_unlock();
18272
18273 return err;
18274 }
18275
18276 /**
18277 * tg3_io_slot_reset - called after the pci bus has been reset.
18278 * @pdev: Pointer to PCI device
18279 *
18280 * Restart the card from scratch, as if from a cold-boot.
18281 * At this point, the card has experienced a hard reset,
18282 * followed by fixups by BIOS, and has its config space
18283 * set up identically to what it was at cold boot.
18284 */
18285 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18286 {
18287 struct net_device *netdev = pci_get_drvdata(pdev);
18288 struct tg3 *tp = netdev_priv(netdev);
18289 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18290 int err;
18291
18292 rtnl_lock();
18293
18294 if (pci_enable_device(pdev)) {
18295 dev_err(&pdev->dev,
18296 "Cannot re-enable PCI device after reset.\n");
18297 goto done;
18298 }
18299
18300 pci_set_master(pdev);
18301 pci_restore_state(pdev);
18302 pci_save_state(pdev);
18303
18304 if (!netdev || !netif_running(netdev)) {
18305 rc = PCI_ERS_RESULT_RECOVERED;
18306 goto done;
18307 }
18308
18309 err = tg3_power_up(tp);
18310 if (err)
18311 goto done;
18312
18313 rc = PCI_ERS_RESULT_RECOVERED;
18314
18315 done:
18316 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18317 tg3_napi_enable(tp);
18318 dev_close(netdev);
18319 }
18320 rtnl_unlock();
18321
18322 return rc;
18323 }
18324
18325 /**
18326 * tg3_io_resume - called when traffic can start flowing again.
18327 * @pdev: Pointer to PCI device
18328 *
18329 * This callback is called when the error recovery driver tells
18330  * us that it's OK to resume normal operation.
18331 */
18332 static void tg3_io_resume(struct pci_dev *pdev)
18333 {
18334 struct net_device *netdev = pci_get_drvdata(pdev);
18335 struct tg3 *tp = netdev_priv(netdev);
18336 int err;
18337
18338 rtnl_lock();
18339
18340 if (!netdev || !netif_running(netdev))
18341 goto done;
18342
18343 tg3_full_lock(tp, 0);
18344 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18345 tg3_flag_set(tp, INIT_COMPLETE);
18346 err = tg3_restart_hw(tp, true);
18347 if (err) {
18348 tg3_full_unlock(tp);
18349 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18350 goto done;
18351 }
18352
18353 netif_device_attach(netdev);
18354
18355 tg3_timer_start(tp);
18356
18357 tg3_netif_start(tp);
18358
18359 tg3_full_unlock(tp);
18360
18361 tg3_phy_start(tp);
18362
18363 done:
18364 tp->pcierr_recovery = false;
18365 rtnl_unlock();
18366 }
18367
18368 static const struct pci_error_handlers tg3_err_handler = {
18369 .error_detected = tg3_io_error_detected,
18370 .slot_reset = tg3_io_slot_reset,
18371 .resume = tg3_io_resume
18372 };
18373
18374 static struct pci_driver tg3_driver = {
18375 .name = DRV_MODULE_NAME,
18376 .id_table = tg3_pci_tbl,
18377 .probe = tg3_init_one,
18378 .remove = tg3_remove_one,
18379 .err_handler = &tg3_err_handler,
18380 .driver.pm = &tg3_pm_ops,
18381 .shutdown = tg3_shutdown,
18382 };
18383
18384 module_pci_driver(tg3_driver);
18385