/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

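/* Wrapper macros: callers name just the flag suffix (e.g.
 * tg3_flag(tp, ENABLE_APE)); the TG3_FLAG_ prefix and the
 * tp->tg3_flags bit array are filled in here.
 */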
#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
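/* Advance a tx ring index with wraparound.  TG3_TX_RING_SIZE is a power
 * of two, so the '& (size - 1)' mask is the cheap replacement for
 * '% size' promised in the comment above.
 */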
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

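/* TX mailbox writes need extra care: chips flagged TXD_MBOX_HWBUG can lose
 * the first doorbell write, and hosts that reorder posted writes need a
 * read back to make sure the doorbell has actually reached the chip.
 */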
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

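/* Shorthand register accessors.  The tp->write32/read32 function pointers
 * are filled in at probe time so each chip gets the direct, indirect, or
 * flush-on-write implementation its errata require.
 */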
#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

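/* Spin until the APE has no event pending, re-taking TG3_APE_LOCK_MEM on
 * each pass.  Returns 0 with the lock held, or -EBUSY if an event is
 * still pending when timeout_us expires.
 */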
static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
				APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Nothing to do until the heartbeat interval has elapsed. */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

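/* Read a PHY register by hand-cranking the MAC's MI_COM state machine.
 * MI auto-polling must be paused for the duration so the hardware does
 * not compete with us for the MDIO bus.
 */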
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

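/* Clause 45 register access tunneled through the Clause 22 MMD access
 * control/address registers (the standard IEEE 802.3 register 13/14
 * indirection).
 */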
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

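/* mii_bus accessors used when the PHY is managed through phylib; they
 * simply serialize against tp->lock around the raw MI_COM helpers above.
 */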
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

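/* Snapshot the standard MII registers into four 32-bit words for the
 * firmware link-update mailbox, two 16-bit registers packed per word.
 */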
/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

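/* Push the current PHY state to the management firmware (ASF/UMP) through
 * the SRAM command mailbox so it keeps an accurate view of the link.
 */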
1692 /* tp->lock is held. */
tg3_ump_link_report(struct tg3 * tp)1693 static void tg3_ump_link_report(struct tg3 *tp)
1694 {
1695 u32 data[4];
1696
1697 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1698 return;
1699
1700 tg3_phy_gather_ump_data(tp, data);
1701
1702 tg3_wait_for_event_ack(tp);
1703
1704 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1710
1711 tg3_generate_fw_event(tp);
1712 }
1713
1714 /* tp->lock is held. */
tg3_stop_fw(struct tg3 * tp)1715 static void tg3_stop_fw(struct tg3 *tp)
1716 {
1717 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1718 /* Wait for RX cpu to ACK the previous event. */
1719 tg3_wait_for_event_ack(tp);
1720
1721 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1722
1723 tg3_generate_fw_event(tp);
1724
1725 /* Wait for RX cpu to ACK this event. */
1726 tg3_wait_for_event_ack(tp);
1727 }
1728 }
1729
1730 /* tp->lock is held. */
tg3_write_sig_pre_reset(struct tg3 * tp,int kind)1731 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1732 {
1733 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1734 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1735
1736 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1737 switch (kind) {
1738 case RESET_KIND_INIT:
1739 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1740 DRV_STATE_START);
1741 break;
1742
1743 case RESET_KIND_SHUTDOWN:
1744 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745 DRV_STATE_UNLOAD);
1746 break;
1747
1748 case RESET_KIND_SUSPEND:
1749 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750 DRV_STATE_SUSPEND);
1751 break;
1752
1753 default:
1754 break;
1755 }
1756 }
1757 }
1758
1759 /* tp->lock is held. */
tg3_write_sig_post_reset(struct tg3 * tp,int kind)1760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1761 {
1762 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1763 switch (kind) {
1764 case RESET_KIND_INIT:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_START_DONE);
1767 break;
1768
1769 case RESET_KIND_SHUTDOWN:
1770 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771 DRV_STATE_UNLOAD_DONE);
1772 break;
1773
1774 default:
1775 break;
1776 }
1777 }
1778 }
1779
1780 /* tp->lock is held. */
1781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1782 {
1783 if (tg3_flag(tp, ENABLE_ASF)) {
1784 switch (kind) {
1785 case RESET_KIND_INIT:
1786 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1787 DRV_STATE_START);
1788 break;
1789
1790 case RESET_KIND_SHUTDOWN:
1791 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792 DRV_STATE_UNLOAD);
1793 break;
1794
1795 case RESET_KIND_SUSPEND:
1796 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797 DRV_STATE_SUSPEND);
1798 break;
1799
1800 default:
1801 break;
1802 }
1803 }
1804 }
1805
1806 static int tg3_poll_fw(struct tg3 *tp)
1807 {
1808 int i;
1809 u32 val;
1810
1811 if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 return 0;
1813
1814 if (tg3_flag(tp, IS_SSB_CORE)) {
1815 /* We don't use firmware. */
1816 return 0;
1817 }
1818
1819 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1820 /* Wait up to 20ms for init done. */
1821 for (i = 0; i < 200; i++) {
1822 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1823 return 0;
1824 if (pci_channel_offline(tp->pdev))
1825 return -ENODEV;
1826
1827 udelay(100);
1828 }
1829 return -ENODEV;
1830 }
1831
1832 /* Wait for firmware initialization to complete (up to ~1 s here). */
1833 for (i = 0; i < 100000; i++) {
1834 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1835 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1836 break;
1837 if (pci_channel_offline(tp->pdev)) {
1838 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1839 tg3_flag_set(tp, NO_FWARE_REPORTED);
1840 netdev_info(tp->dev, "No firmware running\n");
1841 }
1842
1843 break;
1844 }
1845
1846 udelay(10);
1847 }
1848
1849 /* Chip might not be fitted with firmware. Some Sun onboard
1850 * parts are configured like that. So don't signal the timeout
1851 * of the above loop as an error, but do report the lack of
1852 * running firmware once.
1853 */
1854 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1855 tg3_flag_set(tp, NO_FWARE_REPORTED);
1856
1857 netdev_info(tp->dev, "No firmware running\n");
1858 }
1859
1860 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1861 /* The 57765 A0 needs a little more
1862 * time to do some important work.
1863 */
1864 mdelay(10);
1865 }
1866
1867 return 0;
1868 }
1869
1870 static void tg3_link_report(struct tg3 *tp)
1871 {
1872 if (!netif_carrier_ok(tp->dev)) {
1873 netif_info(tp, link, tp->dev, "Link is down\n");
1874 tg3_ump_link_report(tp);
1875 } else if (netif_msg_link(tp)) {
1876 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1877 (tp->link_config.active_speed == SPEED_1000 ?
1878 1000 :
1879 (tp->link_config.active_speed == SPEED_100 ?
1880 100 : 10)),
1881 (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 "full" : "half"));
1883
1884 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1886 "on" : "off",
1887 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 "on" : "off");
1889
1890 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1891 netdev_info(tp->dev, "EEE is %s\n",
1892 tp->setlpicnt ? "enabled" : "disabled");
1893
1894 tg3_ump_link_report(tp);
1895 }
1896
1897 tp->link_up = netif_carrier_ok(tp->dev);
1898 }
1899
1900 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1901 {
1902 u32 flowctrl = 0;
1903
1904 if (adv & ADVERTISE_PAUSE_CAP) {
1905 flowctrl |= FLOW_CTRL_RX;
1906 if (!(adv & ADVERTISE_PAUSE_ASYM))
1907 flowctrl |= FLOW_CTRL_TX;
1908 } else if (adv & ADVERTISE_PAUSE_ASYM)
1909 flowctrl |= FLOW_CTRL_TX;
1910
1911 return flowctrl;
1912 }
1913
1914 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1915 {
1916 u16 miireg;
1917
1918 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1919 miireg = ADVERTISE_1000XPAUSE;
1920 else if (flow_ctrl & FLOW_CTRL_TX)
1921 miireg = ADVERTISE_1000XPSE_ASYM;
1922 else if (flow_ctrl & FLOW_CTRL_RX)
1923 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1924 else
1925 miireg = 0;
1926
1927 return miireg;
1928 }
1929
1930 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1931 {
1932 u32 flowctrl = 0;
1933
1934 if (adv & ADVERTISE_1000XPAUSE) {
1935 flowctrl |= FLOW_CTRL_RX;
1936 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1937 flowctrl |= FLOW_CTRL_TX;
1938 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1939 flowctrl |= FLOW_CTRL_TX;
1940
1941 return flowctrl;
1942 }
1943
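/* Resolve the TX/RX pause settings from the local and remote
 * 1000BASE-X advertisements (the usual IEEE 802.3 Annex 28B style
 * resolution). Summary of the cases handled below:
 *   both sides PAUSE                 -> TX and RX pause
 *   both sides ASYM and local PAUSE  -> RX pause only
 *   both sides ASYM and remote PAUSE -> TX pause only
 *   anything else                    -> no pause
 */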
1944 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1945 {
1946 u8 cap = 0;
1947
1948 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1949 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1950 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1951 if (lcladv & ADVERTISE_1000XPAUSE)
1952 cap = FLOW_CTRL_RX;
1953 if (rmtadv & ADVERTISE_1000XPAUSE)
1954 cap = FLOW_CTRL_TX;
1955 }
1956
1957 return cap;
1958 }
1959
1960 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1961 {
1962 u8 autoneg;
1963 u8 flowctrl = 0;
1964 u32 old_rx_mode = tp->rx_mode;
1965 u32 old_tx_mode = tp->tx_mode;
1966
1967 if (tg3_flag(tp, USE_PHYLIB))
1968 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1969 else
1970 autoneg = tp->link_config.autoneg;
1971
1972 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1973 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1974 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1975 else
1976 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1977 } else
1978 flowctrl = tp->link_config.flowctrl;
1979
1980 tp->link_config.active_flowctrl = flowctrl;
1981
1982 if (flowctrl & FLOW_CTRL_RX)
1983 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1984 else
1985 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1986
1987 if (old_rx_mode != tp->rx_mode)
1988 tw32_f(MAC_RX_MODE, tp->rx_mode);
1989
1990 if (flowctrl & FLOW_CTRL_TX)
1991 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1992 else
1993 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1994
1995 if (old_tx_mode != tp->tx_mode)
1996 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 }
1998
1999 static void tg3_adjust_link(struct net_device *dev)
2000 {
2001 u8 oldflowctrl, linkmesg = 0;
2002 u32 mac_mode, lcl_adv, rmt_adv;
2003 struct tg3 *tp = netdev_priv(dev);
2004 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2005
2006 spin_lock_bh(&tp->lock);
2007
2008 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2009 MAC_MODE_HALF_DUPLEX);
2010
2011 oldflowctrl = tp->link_config.active_flowctrl;
2012
2013 if (phydev->link) {
2014 lcl_adv = 0;
2015 rmt_adv = 0;
2016
2017 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2018 mac_mode |= MAC_MODE_PORT_MODE_MII;
2019 else if (phydev->speed == SPEED_1000 ||
2020 tg3_asic_rev(tp) != ASIC_REV_5785)
2021 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2022 else
2023 mac_mode |= MAC_MODE_PORT_MODE_MII;
2024
2025 if (phydev->duplex == DUPLEX_HALF)
2026 mac_mode |= MAC_MODE_HALF_DUPLEX;
2027 else {
2028 lcl_adv = mii_advertise_flowctrl(
2029 tp->link_config.flowctrl);
2030
2031 if (phydev->pause)
2032 rmt_adv = LPA_PAUSE_CAP;
2033 if (phydev->asym_pause)
2034 rmt_adv |= LPA_PAUSE_ASYM;
2035 }
2036
2037 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2038 } else
2039 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2040
2041 if (mac_mode != tp->mac_mode) {
2042 tp->mac_mode = mac_mode;
2043 tw32_f(MAC_MODE, tp->mac_mode);
2044 udelay(40);
2045 }
2046
2047 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2048 if (phydev->speed == SPEED_10)
2049 tw32(MAC_MI_STAT,
2050 MAC_MI_STAT_10MBPS_MODE |
2051 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052 else
2053 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 }
2055
2056 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2057 tw32(MAC_TX_LENGTHS,
2058 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2059 (6 << TX_LENGTHS_IPG_SHIFT) |
2060 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2061 else
2062 tw32(MAC_TX_LENGTHS,
2063 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064 (6 << TX_LENGTHS_IPG_SHIFT) |
2065 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066
2067 if (phydev->link != tp->old_link ||
2068 phydev->speed != tp->link_config.active_speed ||
2069 phydev->duplex != tp->link_config.active_duplex ||
2070 oldflowctrl != tp->link_config.active_flowctrl)
2071 linkmesg = 1;
2072
2073 tp->old_link = phydev->link;
2074 tp->link_config.active_speed = phydev->speed;
2075 tp->link_config.active_duplex = phydev->duplex;
2076
2077 spin_unlock_bh(&tp->lock);
2078
2079 if (linkmesg)
2080 tg3_link_report(tp);
2081 }
2082
2083 static int tg3_phy_init(struct tg3 *tp)
2084 {
2085 struct phy_device *phydev;
2086
2087 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088 return 0;
2089
2090 /* Bring the PHY back to a known state. */
2091 tg3_bmcr_reset(tp);
2092
2093 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2094
2095 /* Attach the MAC to the PHY. */
2096 phydev = phy_connect(tp->dev, phydev_name(phydev),
2097 tg3_adjust_link, phydev->interface);
2098 if (IS_ERR(phydev)) {
2099 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2100 return PTR_ERR(phydev);
2101 }
2102
2103 /* Mask with MAC supported features. */
2104 switch (phydev->interface) {
2105 case PHY_INTERFACE_MODE_GMII:
2106 case PHY_INTERFACE_MODE_RGMII:
2107 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2108 phy_set_max_speed(phydev, SPEED_1000);
2109 phy_support_asym_pause(phydev);
2110 break;
2111 }
2112 fallthrough;
2113 case PHY_INTERFACE_MODE_MII:
2114 phy_set_max_speed(phydev, SPEED_100);
2115 phy_support_asym_pause(phydev);
2116 break;
2117 default:
2118 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2119 return -EINVAL;
2120 }
2121
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124 phy_attached_info(phydev);
2125
2126 return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131 struct phy_device *phydev;
2132
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134 return;
2135
2136 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2137
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 ethtool_convert_legacy_u32_to_link_mode(
2144 phydev->advertising, tp->link_config.advertising);
2145 }
2146
2147 phy_start(phydev);
2148
2149 phy_start_aneg(phydev);
2150 }
2151
2152 static void tg3_phy_stop(struct tg3 *tp)
2153 {
2154 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2155 return;
2156
2157 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2158 }
2159
2160 static void tg3_phy_fini(struct tg3 *tp)
2161 {
2162 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2163 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2164 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2165 }
2166 }
2167
2168 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2169 {
2170 int err;
2171 u32 val;
2172
2173 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2174 return 0;
2175
2176 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2177 /* Cannot do read-modify-write on 5401 */
2178 err = tg3_phy_auxctl_write(tp,
2179 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2180 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2181 0x4c20);
2182 goto done;
2183 }
2184
2185 err = tg3_phy_auxctl_read(tp,
2186 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2187 if (err)
2188 return err;
2189
2190 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2191 err = tg3_phy_auxctl_write(tp,
2192 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2193
2194 done:
2195 return err;
2196 }
2197
2198 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2199 {
2200 u32 phytest;
2201
2202 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2203 u32 phy;
2204
2205 tg3_writephy(tp, MII_TG3_FET_TEST,
2206 phytest | MII_TG3_FET_SHADOW_EN);
2207 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 if (enable)
2209 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 else
2211 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2212 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 }
2214 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2215 }
2216 }
2217
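/* APD is the PHY's auto power-down mode. For non-FET PHYs the code
 * below programs the shadow SCR5 and APD registers directly; the
 * WKTM_84MS constant selects an 84 ms wake timer.
 */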
2218 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2219 {
2220 u32 reg;
2221
2222 if (!tg3_flag(tp, 5705_PLUS) ||
2223 (tg3_flag(tp, 5717_PLUS) &&
2224 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2225 return;
2226
2227 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2228 tg3_phy_fet_toggle_apd(tp, enable);
2229 return;
2230 }
2231
2232 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2233 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2234 MII_TG3_MISC_SHDW_SCR5_SDTL |
2235 MII_TG3_MISC_SHDW_SCR5_C125OE;
2236 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2237 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2238
2239 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2240
2241
2242 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2243 if (enable)
2244 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2245
2246 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2247 }
2248
2249 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2250 {
2251 u32 phy;
2252
2253 if (!tg3_flag(tp, 5705_PLUS) ||
2254 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2255 return;
2256
2257 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2258 u32 ephy;
2259
2260 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2261 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2262
2263 tg3_writephy(tp, MII_TG3_FET_TEST,
2264 ephy | MII_TG3_FET_SHADOW_EN);
2265 if (!tg3_readphy(tp, reg, &phy)) {
2266 if (enable)
2267 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2268 else
2269 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2270 tg3_writephy(tp, reg, phy);
2271 }
2272 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2273 }
2274 } else {
2275 int ret;
2276
2277 ret = tg3_phy_auxctl_read(tp,
2278 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2279 if (!ret) {
2280 if (enable)
2281 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2282 else
2283 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2284 tg3_phy_auxctl_write(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2286 }
2287 }
2288 }
2289
2290 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2291 {
2292 int ret;
2293 u32 val;
2294
2295 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2296 return;
2297
2298 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2299 if (!ret)
2300 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2301 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2302 }
2303
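/* The one-time-programmable (OTP) word packs several per-chip analog
 * tuning values. Each field (AGC target, HPF filter/override, LPF
 * disable, VDAC, 10BT amplitude, resistor offsets) is extracted below
 * and written to the matching PHY DSP register.
 */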
2304 static void tg3_phy_apply_otp(struct tg3 *tp)
2305 {
2306 u32 otp, phy;
2307
2308 if (!tp->phy_otp)
2309 return;
2310
2311 otp = tp->phy_otp;
2312
2313 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2314 return;
2315
2316 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2317 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2318 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2319
2320 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2321 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2322 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2323
2324 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2325 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2326 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2327
2328 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2330
2331 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2333
2334 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2335 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2337
2338 tg3_phy_toggle_auxctl_smdsp(tp, false);
2339 }
2340
2341 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2342 {
2343 u32 val;
2344 struct ethtool_keee *dest = &tp->eee;
2345
2346 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2347 return;
2348
2349 if (eee)
2350 dest = eee;
2351
2352 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2353 return;
2354
2355 /* Pull eee_active */
2356 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2357 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2358 dest->eee_active = 1;
2359 } else
2360 dest->eee_active = 0;
2361
2362 /* Pull lp advertised settings */
2363 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2364 return;
2365 mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2366
2367 /* Pull advertised and eee_enabled settings */
2368 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2369 return;
2370 dest->eee_enabled = !!val;
2371 mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2372
2373 /* Pull tx_lpi_enabled */
2374 val = tr32(TG3_CPMU_EEE_MODE);
2375 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2376
2377 /* Pull lpi timer value */
2378 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2379 }
2380
2381 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2382 {
2383 u32 val;
2384
2385 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2386 return;
2387
2388 tp->setlpicnt = 0;
2389
2390 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2391 current_link_up &&
2392 tp->link_config.active_duplex == DUPLEX_FULL &&
2393 (tp->link_config.active_speed == SPEED_100 ||
2394 tp->link_config.active_speed == SPEED_1000)) {
2395 u32 eeectl;
2396
2397 if (tp->link_config.active_speed == SPEED_1000)
2398 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2399 else
2400 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2401
2402 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2403
2404 tg3_eee_pull_config(tp, NULL);
2405 if (tp->eee.eee_active)
2406 tp->setlpicnt = 2;
2407 }
2408
2409 if (!tp->setlpicnt) {
2410 if (current_link_up &&
2411 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2412 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2413 tg3_phy_toggle_auxctl_smdsp(tp, false);
2414 }
2415
2416 val = tr32(TG3_CPMU_EEE_MODE);
2417 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2418 }
2419 }
2420
2421 static void tg3_phy_eee_enable(struct tg3 *tp)
2422 {
2423 u32 val;
2424
2425 if (tp->link_config.active_speed == SPEED_1000 &&
2426 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2427 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2428 tg3_flag(tp, 57765_CLASS)) &&
2429 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2430 val = MII_TG3_DSP_TAP26_ALNOKO |
2431 MII_TG3_DSP_TAP26_RMRXSTO;
2432 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2433 tg3_phy_toggle_auxctl_smdsp(tp, false);
2434 }
2435
2436 val = tr32(TG3_CPMU_EEE_MODE);
2437 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2438 }
2439
2440 static int tg3_wait_macro_done(struct tg3 *tp)
2441 {
2442 int limit = 100;
2443
2444 while (limit--) {
2445 u32 tmp32;
2446
2447 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2448 if ((tmp32 & 0x1000) == 0)
2449 break;
2450 }
2451 }
2452 if (limit < 0)
2453 return -EBUSY;
2454
2455 return 0;
2456 }
2457
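/* The self-test below writes a known 6-word pattern into each of the
 * four DSP channels (0x2000 apart), reads it back through the same
 * port, and compares the masked low/high words. Any mismatch or
 * macro-done timeout asks the caller (via *resetp) to reset the PHY
 * and retry.
 */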
2458 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2459 {
2460 static const u32 test_pat[4][6] = {
2461 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2462 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2463 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2464 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2465 };
2466 int chan;
2467
2468 for (chan = 0; chan < 4; chan++) {
2469 int i;
2470
2471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2472 (chan * 0x2000) | 0x0200);
2473 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2474
2475 for (i = 0; i < 6; i++)
2476 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2477 test_pat[chan][i]);
2478
2479 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2480 if (tg3_wait_macro_done(tp)) {
2481 *resetp = 1;
2482 return -EBUSY;
2483 }
2484
2485 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2486 (chan * 0x2000) | 0x0200);
2487 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2488 if (tg3_wait_macro_done(tp)) {
2489 *resetp = 1;
2490 return -EBUSY;
2491 }
2492
2493 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2494 if (tg3_wait_macro_done(tp)) {
2495 *resetp = 1;
2496 return -EBUSY;
2497 }
2498
2499 for (i = 0; i < 6; i += 2) {
2500 u32 low, high;
2501
2502 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2503 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2504 tg3_wait_macro_done(tp)) {
2505 *resetp = 1;
2506 return -EBUSY;
2507 }
2508 low &= 0x7fff;
2509 high &= 0x000f;
2510 if (low != test_pat[chan][i] ||
2511 high != test_pat[chan][i+1]) {
2512 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2513 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2515
2516 return -EBUSY;
2517 }
2518 }
2519 }
2520
2521 return 0;
2522 }
2523
2524 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2525 {
2526 int chan;
2527
2528 for (chan = 0; chan < 4; chan++) {
2529 int i;
2530
2531 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2532 (chan * 0x2000) | 0x0200);
2533 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2534 for (i = 0; i < 6; i++)
2535 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2536 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2537 if (tg3_wait_macro_done(tp))
2538 return -EBUSY;
2539 }
2540
2541 return 0;
2542 }
2543
2544 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2545 {
2546 u32 reg32, phy9_orig;
2547 int retries, do_phy_reset, err;
2548
2549 retries = 10;
2550 do_phy_reset = 1;
2551 do {
2552 if (do_phy_reset) {
2553 err = tg3_bmcr_reset(tp);
2554 if (err)
2555 return err;
2556 do_phy_reset = 0;
2557 }
2558
2559 /* Disable transmitter and interrupt. */
2560 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2561 continue;
2562
2563 reg32 |= 0x3000;
2564 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2565
2566 /* Set full-duplex, 1000 mbps. */
2567 tg3_writephy(tp, MII_BMCR,
2568 BMCR_FULLDPLX | BMCR_SPEED1000);
2569
2570 /* Set to master mode. */
2571 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2572 continue;
2573
2574 tg3_writephy(tp, MII_CTRL1000,
2575 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2576
2577 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2578 if (err)
2579 return err;
2580
2581 /* Block the PHY control access. */
2582 tg3_phydsp_write(tp, 0x8005, 0x0800);
2583
2584 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2585 if (!err)
2586 break;
2587 } while (--retries);
2588
2589 err = tg3_phy_reset_chanpat(tp);
2590 if (err)
2591 return err;
2592
2593 tg3_phydsp_write(tp, 0x8005, 0x0000);
2594
2595 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2596 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2597
2598 tg3_phy_toggle_auxctl_smdsp(tp, false);
2599
2600 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2601
2602 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2603 if (err)
2604 return err;
2605
2606 reg32 &= ~0x3000;
2607 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2608
2609 return 0;
2610 }
2611
2612 static void tg3_carrier_off(struct tg3 *tp)
2613 {
2614 netif_carrier_off(tp->dev);
2615 tp->link_up = false;
2616 }
2617
2618 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2619 {
2620 if (tg3_flag(tp, ENABLE_ASF))
2621 netdev_warn(tp->dev,
2622 "Management side-band traffic will be interrupted during phy settings change\n");
2623 }
2624
2625 /* This resets the tigon3 PHY unconditionally and returns it to a
2626 * known state. (There is no FORCE argument; the reset always runs.)
2627 */
2628 static int tg3_phy_reset(struct tg3 *tp)
2629 {
2630 u32 val, cpmuctrl;
2631 int err;
2632
2633 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2634 val = tr32(GRC_MISC_CFG);
2635 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2636 udelay(40);
2637 }
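/* MII_BMSR latches link-down events, so it is deliberately read
 * twice; the second read reflects the current link state.
 */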
2638 err = tg3_readphy(tp, MII_BMSR, &val);
2639 err |= tg3_readphy(tp, MII_BMSR, &val);
2640 if (err != 0)
2641 return -EBUSY;
2642
2643 if (netif_running(tp->dev) && tp->link_up) {
2644 netif_carrier_off(tp->dev);
2645 tg3_link_report(tp);
2646 }
2647
2648 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2649 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5705) {
2651 err = tg3_phy_reset_5703_4_5(tp);
2652 if (err)
2653 return err;
2654 goto out;
2655 }
2656
2657 cpmuctrl = 0;
2658 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2659 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2660 cpmuctrl = tr32(TG3_CPMU_CTRL);
2661 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2662 tw32(TG3_CPMU_CTRL,
2663 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2664 }
2665
2666 err = tg3_bmcr_reset(tp);
2667 if (err)
2668 return err;
2669
2670 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2671 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2672 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2673
2674 tw32(TG3_CPMU_CTRL, cpmuctrl);
2675 }
2676
2677 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2678 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2679 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2680 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2681 CPMU_LSPD_1000MB_MACCLK_12_5) {
2682 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2683 udelay(40);
2684 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2685 }
2686 }
2687
2688 if (tg3_flag(tp, 5717_PLUS) &&
2689 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2690 return 0;
2691
2692 tg3_phy_apply_otp(tp);
2693
2694 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2695 tg3_phy_toggle_apd(tp, true);
2696 else
2697 tg3_phy_toggle_apd(tp, false);
2698
2699 out:
2700 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2701 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2702 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2703 tg3_phydsp_write(tp, 0x000a, 0x0323);
2704 tg3_phy_toggle_auxctl_smdsp(tp, false);
2705 }
2706
2707 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2708 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 }
2711
2712 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2713 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2714 tg3_phydsp_write(tp, 0x000a, 0x310b);
2715 tg3_phydsp_write(tp, 0x201f, 0x9506);
2716 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2717 tg3_phy_toggle_auxctl_smdsp(tp, false);
2718 }
2719 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2720 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2722 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2723 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2724 tg3_writephy(tp, MII_TG3_TEST1,
2725 MII_TG3_TEST1_TRIM_EN | 0x4);
2726 } else
2727 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2728
2729 tg3_phy_toggle_auxctl_smdsp(tp, false);
2730 }
2731 }
2732
2733 /* Set the Extended packet length bit (bit 14) on all chips that
2734 * support jumbo frames. */
2735 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2736 /* Cannot do read-modify-write on 5401 */
2737 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2738 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2739 /* Set bit 14 with read-modify-write to preserve other bits */
2740 err = tg3_phy_auxctl_read(tp,
2741 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2742 if (!err)
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2744 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2745 }
2746
2747 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2748 * jumbo frames transmission.
2749 */
2750 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2751 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2752 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2753 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2754 }
2755
2756 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2757 /* adjust output voltage */
2758 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2759 }
2760
2761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2762 tg3_phydsp_write(tp, 0xffb, 0x4000);
2763
2764 tg3_phy_toggle_automdix(tp, true);
2765 tg3_phy_set_wirespeed(tp);
2766 return 0;
2767 }
2768
2769 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2770 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2771 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2772 TG3_GPIO_MSG_NEED_VAUX)
2773 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2774 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2775 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 12))
2778
2779 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2780 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2781 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 12))
2784
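/* Each PCI function owns a 4-bit field in the shared status word,
 * hence the shifts of 0/4/8/12 in the masks above and the
 * "4 * tp->pci_fn" shift computed below.
 */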
2785 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2786 {
2787 u32 status, shift;
2788
2789 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2790 tg3_asic_rev(tp) == ASIC_REV_5719)
2791 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2792 else
2793 status = tr32(TG3_CPMU_DRV_STATUS);
2794
2795 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2796 status &= ~(TG3_GPIO_MSG_MASK << shift);
2797 status |= (newstat << shift);
2798
2799 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2800 tg3_asic_rev(tp) == ASIC_REV_5719)
2801 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2802 else
2803 tw32(TG3_CPMU_DRV_STATUS, status);
2804
2805 return status >> TG3_APE_GPIO_MSG_SHIFT;
2806 }
2807
2808 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2809 {
2810 if (!tg3_flag(tp, IS_NIC))
2811 return 0;
2812
2813 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2814 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5720) {
2816 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2817 return -EIO;
2818
2819 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2820
2821 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2822 TG3_GRC_LCLCTL_PWRSW_DELAY);
2823
2824 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2825 } else {
2826 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827 TG3_GRC_LCLCTL_PWRSW_DELAY);
2828 }
2829
2830 return 0;
2831 }
2832
2833 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2834 {
2835 u32 grc_local_ctrl;
2836
2837 if (!tg3_flag(tp, IS_NIC) ||
2838 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2839 tg3_asic_rev(tp) == ASIC_REV_5701)
2840 return;
2841
2842 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2843
2844 tw32_wait_f(GRC_LOCAL_CTRL,
2845 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848 tw32_wait_f(GRC_LOCAL_CTRL,
2849 grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852 tw32_wait_f(GRC_LOCAL_CTRL,
2853 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2855 }
2856
2857 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2858 {
2859 if (!tg3_flag(tp, IS_NIC))
2860 return;
2861
2862 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2863 tg3_asic_rev(tp) == ASIC_REV_5701) {
2864 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2865 (GRC_LCLCTRL_GPIO_OE0 |
2866 GRC_LCLCTRL_GPIO_OE1 |
2867 GRC_LCLCTRL_GPIO_OE2 |
2868 GRC_LCLCTRL_GPIO_OUTPUT0 |
2869 GRC_LCLCTRL_GPIO_OUTPUT1),
2870 TG3_GRC_LCLCTL_PWRSW_DELAY);
2871 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2873 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2874 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2875 GRC_LCLCTRL_GPIO_OE1 |
2876 GRC_LCLCTRL_GPIO_OE2 |
2877 GRC_LCLCTRL_GPIO_OUTPUT0 |
2878 GRC_LCLCTRL_GPIO_OUTPUT1 |
2879 tp->grc_local_ctrl;
2880 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2881 TG3_GRC_LCLCTL_PWRSW_DELAY);
2882
2883 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2884 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885 TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2888 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889 TG3_GRC_LCLCTL_PWRSW_DELAY);
2890 } else {
2891 u32 no_gpio2;
2892 u32 grc_local_ctrl = 0;
2893
2894 /* Workaround to prevent overdrawing Amps. */
2895 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2896 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2897 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2898 grc_local_ctrl,
2899 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 }
2901
2902 /* On 5753 and variants, GPIO2 cannot be used. */
2903 no_gpio2 = tp->nic_sram_data_cfg &
2904 NIC_SRAM_DATA_CFG_NO_GPIO2;
2905
2906 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2907 GRC_LCLCTRL_GPIO_OE1 |
2908 GRC_LCLCTRL_GPIO_OE2 |
2909 GRC_LCLCTRL_GPIO_OUTPUT1 |
2910 GRC_LCLCTRL_GPIO_OUTPUT2;
2911 if (no_gpio2) {
2912 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2913 GRC_LCLCTRL_GPIO_OUTPUT2);
2914 }
2915 tw32_wait_f(GRC_LOCAL_CTRL,
2916 tp->grc_local_ctrl | grc_local_ctrl,
2917 TG3_GRC_LCLCTL_PWRSW_DELAY);
2918
2919 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2920
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2924
2925 if (!no_gpio2) {
2926 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2930 }
2931 }
2932 }
2933
2934 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2935 {
2936 u32 msg = 0;
2937
2938 /* Serialize power state transitions */
2939 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2940 return;
2941
2942 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2943 msg = TG3_GPIO_MSG_NEED_VAUX;
2944
2945 msg = tg3_set_function_status(tp, msg);
2946
2947 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2948 goto done;
2949
2950 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2951 tg3_pwrsrc_switch_to_vaux(tp);
2952 else
2953 tg3_pwrsrc_die_with_vmain(tp);
2954
2955 done:
2956 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2957 }
2958
2959 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2960 {
2961 bool need_vaux = false;
2962
2963 /* The GPIOs do something completely different on 57765. */
2964 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2965 return;
2966
2967 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2968 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5720) {
2970 tg3_frob_aux_power_5717(tp, include_wol ?
2971 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2972 return;
2973 }
2974
2975 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2976 struct net_device *dev_peer;
2977
2978 dev_peer = pci_get_drvdata(tp->pdev_peer);
2979
2980 /* remove_one() may have been run on the peer. */
2981 if (dev_peer) {
2982 struct tg3 *tp_peer = netdev_priv(dev_peer);
2983
2984 if (tg3_flag(tp_peer, INIT_COMPLETE))
2985 return;
2986
2987 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2988 tg3_flag(tp_peer, ENABLE_ASF))
2989 need_vaux = true;
2990 }
2991 }
2992
2993 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2994 tg3_flag(tp, ENABLE_ASF))
2995 need_vaux = true;
2996
2997 if (need_vaux)
2998 tg3_pwrsrc_switch_to_vaux(tp);
2999 else
3000 tg3_pwrsrc_die_with_vmain(tp);
3001 }
3002
3003 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3004 {
3005 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3006 return 1;
3007 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3008 if (speed != SPEED_10)
3009 return 1;
3010 } else if (speed == SPEED_10)
3011 return 1;
3012
3013 return 0;
3014 }
3015
3016 static bool tg3_phy_power_bug(struct tg3 *tp)
3017 {
3018 switch (tg3_asic_rev(tp)) {
3019 case ASIC_REV_5700:
3020 case ASIC_REV_5704:
3021 return true;
3022 case ASIC_REV_5780:
3023 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3024 return true;
3025 return false;
3026 case ASIC_REV_5717:
3027 if (!tp->pci_fn)
3028 return true;
3029 return false;
3030 case ASIC_REV_5719:
3031 case ASIC_REV_5720:
3032 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3033 !tp->pci_fn)
3034 return true;
3035 return false;
3036 }
3037
3038 return false;
3039 }
3040
3041 static bool tg3_phy_led_bug(struct tg3 *tp)
3042 {
3043 switch (tg3_asic_rev(tp)) {
3044 case ASIC_REV_5719:
3045 case ASIC_REV_5720:
3046 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3047 !tp->pci_fn)
3048 return true;
3049 return false;
3050 }
3051
3052 return false;
3053 }
3054
3055 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3056 {
3057 u32 val;
3058
3059 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3060 return;
3061
3062 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3063 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3064 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3065 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3066
3067 sg_dig_ctrl |=
3068 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3069 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3070 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3071 }
3072 return;
3073 }
3074
3075 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3076 tg3_bmcr_reset(tp);
3077 val = tr32(GRC_MISC_CFG);
3078 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3079 udelay(40);
3080 return;
3081 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3082 u32 phytest;
3083 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3084 u32 phy;
3085
3086 tg3_writephy(tp, MII_ADVERTISE, 0);
3087 tg3_writephy(tp, MII_BMCR,
3088 BMCR_ANENABLE | BMCR_ANRESTART);
3089
3090 tg3_writephy(tp, MII_TG3_FET_TEST,
3091 phytest | MII_TG3_FET_SHADOW_EN);
3092 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3093 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3094 tg3_writephy(tp,
3095 MII_TG3_FET_SHDW_AUXMODE4,
3096 phy);
3097 }
3098 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3099 }
3100 return;
3101 } else if (do_low_power) {
3102 if (!tg3_phy_led_bug(tp))
3103 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3104 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3105
3106 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3107 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3108 MII_TG3_AUXCTL_PCTL_VREG_11V;
3109 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3110 }
3111
3112 /* The PHY should not be powered down on some chips because
3113 * of bugs.
3114 */
3115 if (tg3_phy_power_bug(tp))
3116 return;
3117
3118 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3119 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3120 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3121 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3122 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3123 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3124 }
3125
3126 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3127 }
3128
3129 /* tp->lock is held. */
3130 static int tg3_nvram_lock(struct tg3 *tp)
3131 {
3132 if (tg3_flag(tp, NVRAM)) {
3133 int i;
3134
3135 if (tp->nvram_lock_cnt == 0) {
3136 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3137 for (i = 0; i < 8000; i++) {
3138 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3139 break;
3140 udelay(20);
3141 }
3142 if (i == 8000) {
3143 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3144 return -ENODEV;
3145 }
3146 }
3147 tp->nvram_lock_cnt++;
3148 }
3149 return 0;
3150 }
3151
3152 /* tp->lock is held. */
3153 static void tg3_nvram_unlock(struct tg3 *tp)
3154 {
3155 if (tg3_flag(tp, NVRAM)) {
3156 if (tp->nvram_lock_cnt > 0)
3157 tp->nvram_lock_cnt--;
3158 if (tp->nvram_lock_cnt == 0)
3159 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3160 }
3161 }
3162
3163 /* tp->lock is held. */
3164 static void tg3_enable_nvram_access(struct tg3 *tp)
3165 {
3166 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3167 u32 nvaccess = tr32(NVRAM_ACCESS);
3168
3169 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3170 }
3171 }
3172
3173 /* tp->lock is held. */
3174 static void tg3_disable_nvram_access(struct tg3 *tp)
3175 {
3176 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3177 u32 nvaccess = tr32(NVRAM_ACCESS);
3178
3179 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3180 }
3181 }
3182
3183 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3184 u32 offset, u32 *val)
3185 {
3186 u32 tmp;
3187 int i;
3188
3189 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3190 return -EINVAL;
3191
3192 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3193 EEPROM_ADDR_DEVID_MASK |
3194 EEPROM_ADDR_READ);
3195 tw32(GRC_EEPROM_ADDR,
3196 tmp |
3197 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3198 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3199 EEPROM_ADDR_ADDR_MASK) |
3200 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3201
3202 for (i = 0; i < 1000; i++) {
3203 tmp = tr32(GRC_EEPROM_ADDR);
3204
3205 if (tmp & EEPROM_ADDR_COMPLETE)
3206 break;
3207 msleep(1);
3208 }
3209 if (!(tmp & EEPROM_ADDR_COMPLETE))
3210 return -EBUSY;
3211
3212 tmp = tr32(GRC_EEPROM_DATA);
3213
3214 /*
3215 * The data will always be opposite the native endian
3216 * format. Perform a blind byteswap to compensate.
3217 */
3218 *val = swab32(tmp);
3219
3220 return 0;
3221 }
3222
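/* tg3_nvram_exec_cmd() sleeps 10-40 us per poll, so this bounds the
 * wait for NVRAM_CMD_DONE to roughly 0.1-0.4 seconds.
 */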
3223 #define NVRAM_CMD_TIMEOUT 10000
3224
3225 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3226 {
3227 int i;
3228
3229 tw32(NVRAM_CMD, nvram_cmd);
3230 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3231 usleep_range(10, 40);
3232 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3233 udelay(10);
3234 break;
3235 }
3236 }
3237
3238 if (i == NVRAM_CMD_TIMEOUT)
3239 return -EBUSY;
3240
3241 return 0;
3242 }
3243
3244 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3245 {
3246 if (tg3_flag(tp, NVRAM) &&
3247 tg3_flag(tp, NVRAM_BUFFERED) &&
3248 tg3_flag(tp, FLASH) &&
3249 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3250 (tp->nvram_jedecnum == JEDEC_ATMEL))
3251
3252 addr = ((addr / tp->nvram_pagesize) <<
3253 ATMEL_AT45DB0X1B_PAGE_POS) +
3254 (addr % tp->nvram_pagesize);
3255
3256 return addr;
3257 }
3258
3259 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3260 {
3261 if (tg3_flag(tp, NVRAM) &&
3262 tg3_flag(tp, NVRAM_BUFFERED) &&
3263 tg3_flag(tp, FLASH) &&
3264 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3265 (tp->nvram_jedecnum == JEDEC_ATMEL))
3266
3267 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3268 tp->nvram_pagesize) +
3269 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3270
3271 return addr;
3272 }
3273
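/* Worked example of the translation above, assuming an Atmel part
 * with 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS == 9:
 *
 *   logical 0x10A (page 1, byte 2) -> physical (1 << 9) + 2 = 0x202
 *   physical 0x202                 -> logical  1 * 264 + 2 = 0x10A
 *
 * The page index is moved above PAGE_POS because the page size (264)
 * is not a power of two, so the low address bits alone cannot span a
 * page boundary correctly.
 */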
3274 /* NOTE: Data read in from NVRAM is byteswapped according to
3275 * the byteswapping settings for all other register accesses.
3276 * tg3 devices are BE devices, so on a BE machine, the data
3277 * returned will be exactly as it is seen in NVRAM. On a LE
3278 * machine, the 32-bit value will be byteswapped.
3279 */
3280 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3281 {
3282 int ret;
3283
3284 if (!tg3_flag(tp, NVRAM))
3285 return tg3_nvram_read_using_eeprom(tp, offset, val);
3286
3287 offset = tg3_nvram_phys_addr(tp, offset);
3288
3289 if (offset > NVRAM_ADDR_MSK)
3290 return -EINVAL;
3291
3292 ret = tg3_nvram_lock(tp);
3293 if (ret)
3294 return ret;
3295
3296 tg3_enable_nvram_access(tp);
3297
3298 tw32(NVRAM_ADDR, offset);
3299 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3300 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3301
3302 if (ret == 0)
3303 *val = tr32(NVRAM_RDDATA);
3304
3305 tg3_disable_nvram_access(tp);
3306
3307 tg3_nvram_unlock(tp);
3308
3309 return ret;
3310 }
3311
3312 /* Ensures NVRAM data is in bytestream format. */
3313 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3314 {
3315 u32 v;
3316 int res = tg3_nvram_read(tp, offset, &v);
3317 if (!res)
3318 *val = cpu_to_be32(v);
3319 return res;
3320 }
3321
3322 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3323 u32 offset, u32 len, u8 *buf)
3324 {
3325 int i, j, rc = 0;
3326 u32 val;
3327
3328 for (i = 0; i < len; i += 4) {
3329 u32 addr;
3330 __be32 data;
3331
3332 addr = offset + i;
3333
3334 memcpy(&data, buf + i, 4);
3335
3336 /*
3337 * The SEEPROM interface expects the data to always be opposite
3338 * the native endian format. We accomplish this by reversing
3339 * all the operations that would have been performed on the
3340 * data from a call to tg3_nvram_read_be32().
3341 */
3342 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3343
3344 val = tr32(GRC_EEPROM_ADDR);
3345 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3346
3347 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3348 EEPROM_ADDR_READ);
3349 tw32(GRC_EEPROM_ADDR, val |
3350 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3351 (addr & EEPROM_ADDR_ADDR_MASK) |
3352 EEPROM_ADDR_START |
3353 EEPROM_ADDR_WRITE);
3354
3355 for (j = 0; j < 1000; j++) {
3356 val = tr32(GRC_EEPROM_ADDR);
3357
3358 if (val & EEPROM_ADDR_COMPLETE)
3359 break;
3360 msleep(1);
3361 }
3362 if (!(val & EEPROM_ADDR_COMPLETE)) {
3363 rc = -EBUSY;
3364 break;
3365 }
3366 }
3367
3368 return rc;
3369 }
3370
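/* Unbuffered flash parts can only be programmed a whole page at a
 * time, so the routine below performs a read-modify-write: read the
 * enclosing page into a bounce buffer, merge in the new bytes, issue
 * a write-enable plus page erase, then program the page back one
 * dword at a time with FIRST/LAST framing.
 */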
3371 /* offset and length are dword aligned */
3372 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3373 u8 *buf)
3374 {
3375 int ret = 0;
3376 u32 pagesize = tp->nvram_pagesize;
3377 u32 pagemask = pagesize - 1;
3378 u32 nvram_cmd;
3379 u8 *tmp;
3380
3381 tmp = kmalloc(pagesize, GFP_KERNEL);
3382 if (tmp == NULL)
3383 return -ENOMEM;
3384
3385 while (len) {
3386 int j;
3387 u32 phy_addr, page_off, size;
3388
3389 phy_addr = offset & ~pagemask;
3390
3391 for (j = 0; j < pagesize; j += 4) {
3392 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3393 (__be32 *) (tmp + j));
3394 if (ret)
3395 break;
3396 }
3397 if (ret)
3398 break;
3399
3400 page_off = offset & pagemask;
3401 size = pagesize;
3402 if (len < size)
3403 size = len;
3404
3405 len -= size;
3406
3407 memcpy(tmp + page_off, buf, size);
3408
3409 offset = offset + (pagesize - page_off);
3410
3411 tg3_enable_nvram_access(tp);
3412
3413 /*
3414 * Before we can erase the flash page, we need
3415 * to issue a special "write enable" command.
3416 */
3417 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3418
3419 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3420 break;
3421
3422 /* Erase the target page */
3423 tw32(NVRAM_ADDR, phy_addr);
3424
3425 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3426 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3427
3428 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3429 break;
3430
3431 /* Issue another write enable to start the write. */
3432 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3433
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3435 break;
3436
3437 for (j = 0; j < pagesize; j += 4) {
3438 __be32 data;
3439
3440 data = *((__be32 *) (tmp + j));
3441
3442 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3443
3444 tw32(NVRAM_ADDR, phy_addr + j);
3445
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3447 NVRAM_CMD_WR;
3448
3449 if (j == 0)
3450 nvram_cmd |= NVRAM_CMD_FIRST;
3451 else if (j == (pagesize - 4))
3452 nvram_cmd |= NVRAM_CMD_LAST;
3453
3454 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3455 if (ret)
3456 break;
3457 }
3458 if (ret)
3459 break;
3460 }
3461
3462 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3463 tg3_nvram_exec_cmd(tp, nvram_cmd);
3464
3465 kfree(tmp);
3466
3467 return ret;
3468 }
3469
3470 /* offset and length are dword aligned */
3471 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3472 u8 *buf)
3473 {
3474 int i, ret = 0;
3475
3476 for (i = 0; i < len; i += 4, offset += 4) {
3477 u32 page_off, phy_addr, nvram_cmd;
3478 __be32 data;
3479
3480 memcpy(&data, buf + i, 4);
3481 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3482
3483 page_off = offset % tp->nvram_pagesize;
3484
3485 phy_addr = tg3_nvram_phys_addr(tp, offset);
3486
3487 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3488
3489 if (page_off == 0 || i == 0)
3490 nvram_cmd |= NVRAM_CMD_FIRST;
3491 if (page_off == (tp->nvram_pagesize - 4))
3492 nvram_cmd |= NVRAM_CMD_LAST;
3493
3494 if (i == (len - 4))
3495 nvram_cmd |= NVRAM_CMD_LAST;
3496
3497 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3498 !tg3_flag(tp, FLASH) ||
3499 !tg3_flag(tp, 57765_PLUS))
3500 tw32(NVRAM_ADDR, phy_addr);
3501
3502 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3503 !tg3_flag(tp, 5755_PLUS) &&
3504 (tp->nvram_jedecnum == JEDEC_ST) &&
3505 (nvram_cmd & NVRAM_CMD_FIRST)) {
3506 u32 cmd;
3507
3508 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3509 ret = tg3_nvram_exec_cmd(tp, cmd);
3510 if (ret)
3511 break;
3512 }
3513 if (!tg3_flag(tp, FLASH)) {
3514 /* We always do complete word writes to eeprom. */
3515 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3516 }
3517
3518 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3519 if (ret)
3520 break;
3521 }
3522 return ret;
3523 }
3524
3525 /* offset and length are dword aligned */
3526 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3527 {
3528 int ret;
3529
3530 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3531 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3532 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3533 udelay(40);
3534 }
3535
3536 if (!tg3_flag(tp, NVRAM)) {
3537 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3538 } else {
3539 u32 grc_mode;
3540
3541 ret = tg3_nvram_lock(tp);
3542 if (ret)
3543 return ret;
3544
3545 tg3_enable_nvram_access(tp);
3546 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3547 tw32(NVRAM_WRITE1, 0x406);
3548
3549 grc_mode = tr32(GRC_MODE);
3550 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3551
3552 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3553 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3554 buf);
3555 } else {
3556 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3557 buf);
3558 }
3559
3560 grc_mode = tr32(GRC_MODE);
3561 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3562
3563 tg3_disable_nvram_access(tp);
3564 tg3_nvram_unlock(tp);
3565 }
3566
3567 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3568 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3569 udelay(40);
3570 }
3571
3572 return ret;
3573 }
3574
3575 #define RX_CPU_SCRATCH_BASE 0x30000
3576 #define RX_CPU_SCRATCH_SIZE 0x04000
3577 #define TX_CPU_SCRATCH_BASE 0x34000
3578 #define TX_CPU_SCRATCH_SIZE 0x04000
3579
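/* Halting a CPU may take several attempts: the HALT bit is re-armed
 * on every iteration below until the CPU actually reports the halted
 * state, with up to 10000 tries before giving up.
 */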
3580 /* tp->lock is held. */
3581 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583 int i;
3584 const int iters = 10000;
3585
3586 for (i = 0; i < iters; i++) {
3587 tw32(cpu_base + CPU_STATE, 0xffffffff);
3588 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3589 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3590 break;
3591 if (pci_channel_offline(tp->pdev))
3592 return -EBUSY;
3593 }
3594
3595 return (i == iters) ? -EBUSY : 0;
3596 }
3597
3598 /* tp->lock is held. */
3599 static int tg3_rxcpu_pause(struct tg3 *tp)
3600 {
3601 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3602
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3604 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3605 udelay(10);
3606
3607 return rc;
3608 }
3609
3610 /* tp->lock is held. */
3611 static int tg3_txcpu_pause(struct tg3 *tp)
3612 {
3613 return tg3_pause_cpu(tp, TX_CPU_BASE);
3614 }
3615
3616 /* tp->lock is held. */
3617 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3618 {
3619 tw32(cpu_base + CPU_STATE, 0xffffffff);
3620 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3621 }
3622
3623 /* tp->lock is held. */
3624 static void tg3_rxcpu_resume(struct tg3 *tp)
3625 {
3626 tg3_resume_cpu(tp, RX_CPU_BASE);
3627 }
3628
3629 /* tp->lock is held. */
3630 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3631 {
3632 int rc;
3633
3634 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3635
3636 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3637 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3638
3639 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3640 return 0;
3641 }
3642 if (cpu_base == RX_CPU_BASE) {
3643 rc = tg3_rxcpu_pause(tp);
3644 } else {
3645 /*
3646 * There is only an Rx CPU for the 5750 derivative in the
3647 * BCM4785.
3648 */
3649 if (tg3_flag(tp, IS_SSB_CORE))
3650 return 0;
3651
3652 rc = tg3_txcpu_pause(tp);
3653 }
3654
3655 if (rc) {
3656 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3657 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3658 return -ENODEV;
3659 }
3660
3661 /* Clear firmware's nvram arbitration. */
3662 if (tg3_flag(tp, NVRAM))
3663 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3664 return 0;
3665 }
3666
3667 static int tg3_fw_data_len(struct tg3 *tp,
3668 const struct tg3_firmware_hdr *fw_hdr)
3669 {
3670 int fw_len;
3671
3672 /* Non-fragmented firmware has one firmware header followed by a
3673 * contiguous chunk of data to be written. The length field in that
3674 * header is not the length of data to be written but the complete
3675 * length of the bss. The data length is determined based on
3676 * tp->fw->size minus headers.
3677 *
3678 * Fragmented firmware has a main header followed by multiple
3679 * fragments. Each fragment is identical to non-fragmented firmware
3680 * with a firmware header followed by a contiguous chunk of data. In
3681 * the main header, the length field is unused and set to 0xffffffff.
3682 * In each fragment header the length is the entire size of that
3683 * fragment i.e. fragment data + header length. The data length is
3684 * therefore the length field in the header minus TG3_FW_HDR_LEN.
3685 */
3686 if (tp->fw_len == 0xffffffff)
3687 fw_len = be32_to_cpu(fw_hdr->len);
3688 else
3689 fw_len = tp->fw->size;
3690
3691 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3692 }
3693
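/* Illustrative image layouts implied by the logic above (a sketch,
 * not taken from a firmware spec):
 *
 *   non-fragmented: [hdr: version, base_addr, bss_len][data ...]
 *   fragmented:     [main hdr, len = 0xffffffff]
 *                   [frag hdr, len = N1][N1 - TG3_FW_HDR_LEN data]
 *                   [frag hdr, len = N2][N2 - TG3_FW_HDR_LEN data] ...
 */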
3694 /* tp->lock is held. */
3695 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3696 u32 cpu_scratch_base, int cpu_scratch_size,
3697 const struct tg3_firmware_hdr *fw_hdr)
3698 {
3699 int err, i;
3700 void (*write_op)(struct tg3 *, u32, u32);
3701 int total_len = tp->fw->size;
3702
3703 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3704 netdev_err(tp->dev,
3705 "%s: Trying to load TX cpu firmware which is 5705\n",
3706 __func__);
3707 return -EINVAL;
3708 }
3709
3710 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3711 write_op = tg3_write_mem;
3712 else
3713 write_op = tg3_write_indirect_reg32;
3714
3715 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3716 /* It is possible that bootcode is still loading at this point.
3717 * Get the nvram lock first before halting the cpu.
3718 */
3719 int lock_err = tg3_nvram_lock(tp);
3720 err = tg3_halt_cpu(tp, cpu_base);
3721 if (!lock_err)
3722 tg3_nvram_unlock(tp);
3723 if (err)
3724 goto out;
3725
3726 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3727 write_op(tp, cpu_scratch_base + i, 0);
3728 tw32(cpu_base + CPU_STATE, 0xffffffff);
3729 tw32(cpu_base + CPU_MODE,
3730 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3731 } else {
3732 /* Subtract additional main header for fragmented firmware and
3733 * advance to the first fragment
3734 */
3735 total_len -= TG3_FW_HDR_LEN;
3736 fw_hdr++;
3737 }
3738
3739 do {
3740 u32 *fw_data = (u32 *)(fw_hdr + 1);
3741 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3742 write_op(tp, cpu_scratch_base +
3743 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3744 (i * sizeof(u32)),
3745 be32_to_cpu(fw_data[i]));
3746
3747 total_len -= be32_to_cpu(fw_hdr->len);
3748
3749 /* Advance to next fragment */
3750 fw_hdr = (struct tg3_firmware_hdr *)
3751 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3752 } while (total_len > 0);
3753
3754 err = 0;
3755
3756 out:
3757 return err;
3758 }
3759
3760 /* tp->lock is held. */
3761 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3762 {
3763 int i;
3764 const int iters = 5;
3765
3766 tw32(cpu_base + CPU_STATE, 0xffffffff);
3767 tw32_f(cpu_base + CPU_PC, pc);
3768
3769 for (i = 0; i < iters; i++) {
3770 if (tr32(cpu_base + CPU_PC) == pc)
3771 break;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3774 tw32_f(cpu_base + CPU_PC, pc);
3775 udelay(1000);
3776 }
3777
3778 return (i == iters) ? -EBUSY : 0;
3779 }
3780
3781 /* tp->lock is held. */
3782 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3783 {
3784 const struct tg3_firmware_hdr *fw_hdr;
3785 int err;
3786
3787 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3788
3789 /* The firmware blob starts with version numbers, followed by
3790 * start address and length. We are setting the complete length:
3791 * length = end_address_of_bss - start_address_of_text.
3792 * The remainder is the blob to be loaded contiguously
3793 * from the start address. */
3794
3795 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3796 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3797 fw_hdr);
3798 if (err)
3799 return err;
3800
3801 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3802 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3803 fw_hdr);
3804 if (err)
3805 return err;
3806
3807 /* Now startup only the RX cpu. */
3808 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3809 be32_to_cpu(fw_hdr->base_addr));
3810 if (err) {
3811 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3812 "should be %08x\n", __func__,
3813 tr32(RX_CPU_BASE + CPU_PC),
3814 be32_to_cpu(fw_hdr->base_addr));
3815 return -ENODEV;
3816 }
3817
3818 tg3_rxcpu_resume(tp);
3819
3820 return 0;
3821 }
3822
3823 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3824 {
3825 const int iters = 1000;
3826 int i;
3827 u32 val;
3828
3829 /* Wait for the boot code to complete initialization and enter the
3830 * service loop. It is then safe to download service patches.
3831 */
3832 for (i = 0; i < iters; i++) {
3833 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3834 break;
3835
3836 udelay(10);
3837 }
3838
3839 if (i == iters) {
3840 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3841 return -EBUSY;
3842 }
3843
3844 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3845 if (val & 0xff) {
3846 netdev_warn(tp->dev,
3847 "Other patches exist. Not downloading EEE patch\n");
3848 return -EEXIST;
3849 }
3850
3851 return 0;
3852 }
3853
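/* The wait loop above is this driver's usual bounded-poll idiom: a fixed
 * iteration budget around a register read, with -EBUSY on exhaustion. A
 * minimal sketch of the same pattern follows; EXAMPLE_STATUS_REG and
 * EXAMPLE_READY_BIT are hypothetical, not real tg3 registers.
 */
static int tg3_poll_ready_example(struct tg3 *tp)
{
	const int iters = 1000;
	int i;

	for (i = 0; i < iters; i++) {
		if (tr32(EXAMPLE_STATUS_REG) & EXAMPLE_READY_BIT)
			return 0;		/* became ready in time */
		udelay(10);			/* 1000 * 10us = 10ms cap */
	}

	return -EBUSY;				/* caller chooses recovery */
}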
3854 /* tp->lock is held. */
3855 static void tg3_load_57766_firmware(struct tg3 *tp)
3856 {
3857 struct tg3_firmware_hdr *fw_hdr;
3858
3859 if (!tg3_flag(tp, NO_NVRAM))
3860 return;
3861
3862 if (tg3_validate_rxcpu_state(tp))
3863 return;
3864
3865 if (!tp->fw)
3866 return;
3867
3868 /* This firmware blob has a different format than older firmware
3869 * releases, as described below. The main difference is that the data
3870 * to be written is fragmented and destined for non-contiguous
3871 * locations (see the layout sketch after this function).
3872 *
3873 * At the beginning there is a firmware header identical to other
3874 * firmware images, consisting of version, base addr and length; the
3875 * length here is unused and set to 0xffffffff.
3876 *
3877 * This is followed by a series of firmware fragments, each laid out
3878 * like a complete older-style image: a firmware header followed by
3879 * the data for that fragment. The version field of each fragment
3880 * header is unused. */
3881
3882 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3883 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3884 return;
3885
3886 if (tg3_rxcpu_pause(tp))
3887 return;
3888
3889 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3890 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3891
3892 tg3_rxcpu_resume(tp);
3893 }
3894
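/* Byte layout of the 57766 service-patch blob handled above (fragment
 * sizes are illustrative; only TG3_FW_HDR_LEN is real):
 *
 *	+----------------------+  main header; len = 0xffffffff (unused)
 *	| tg3_firmware_hdr     |
 *	+----------------------+  fragment 1
 *	| tg3_firmware_hdr     |  len = TG3_FW_HDR_LEN + data bytes
 *	| data ...             |  written at (base_addr & 0xffff)
 *	+----------------------+  fragment 2
 *	| tg3_firmware_hdr     |
 *	| data ...             |
 *	+----------------------+  ... until tp->fw->size is consumed
 */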
3895 /* tp->lock is held. */
3896 static int tg3_load_tso_firmware(struct tg3 *tp)
3897 {
3898 const struct tg3_firmware_hdr *fw_hdr;
3899 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3900 int err;
3901
3902 if (!tg3_flag(tp, FW_TSO))
3903 return 0;
3904
3905 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3906
3907 /* The firmware blob starts with version numbers, followed by
3908 * start address and length. We are setting the complete length:
3909 * length = end_address_of_bss - start_address_of_text.
3910 * The remainder is the blob to be loaded contiguously
3911 * from the start address. */
3912
3913 cpu_scratch_size = tp->fw_len;
3914
3915 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3916 cpu_base = RX_CPU_BASE;
3917 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3918 } else {
3919 cpu_base = TX_CPU_BASE;
3920 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3921 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3922 }
3923
3924 err = tg3_load_firmware_cpu(tp, cpu_base,
3925 cpu_scratch_base, cpu_scratch_size,
3926 fw_hdr);
3927 if (err)
3928 return err;
3929
3930 /* Now startup the cpu. */
3931 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3932 be32_to_cpu(fw_hdr->base_addr));
3933 if (err) {
3934 netdev_err(tp->dev,
3935 "%s fails to set CPU PC, is %08x should be %08x\n",
3936 __func__, tr32(cpu_base + CPU_PC),
3937 be32_to_cpu(fw_hdr->base_addr));
3938 return -ENODEV;
3939 }
3940
3941 tg3_resume_cpu(tp, cpu_base);
3942 return 0;
3943 }
3944
3945 /* tp->lock is held. */
3946 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3947 int index)
3948 {
3949 u32 addr_high, addr_low;
3950
3951 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3952 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3953 (mac_addr[4] << 8) | mac_addr[5]);
3954
3955 if (index < 4) {
3956 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3957 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3958 } else {
3959 index -= 4;
3960 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3961 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3962 }
3963 }
3964
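/* Worked example for the packing above, using the illustrative address
 * 00:10:18:ab:cd:ef:
 *
 *	addr_high = (0x00 << 8) | 0x10                          = 0x00000010
 *	addr_low  = (0x18 << 24) | (0xab << 16) | (0xcd << 8) | 0xef
 *	                                                        = 0x18abcdef
 *
 * Indexes 0-3 land in MAC_ADDR_0_HIGH/LOW, indexes 4 and up in
 * MAC_EXTADDR_0_HIGH/LOW, with each register pair 8 bytes apart.
 */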
3965 /* tp->lock is held. */
3966 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3967 {
3968 u32 addr_high;
3969 int i;
3970
3971 for (i = 0; i < 4; i++) {
3972 if (i == 1 && skip_mac_1)
3973 continue;
3974 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3975 }
3976
3977 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3978 tg3_asic_rev(tp) == ASIC_REV_5704) {
3979 for (i = 4; i < 16; i++)
3980 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3981 }
3982
3983 addr_high = (tp->dev->dev_addr[0] +
3984 tp->dev->dev_addr[1] +
3985 tp->dev->dev_addr[2] +
3986 tp->dev->dev_addr[3] +
3987 tp->dev->dev_addr[4] +
3988 tp->dev->dev_addr[5]) &
3989 TX_BACKOFF_SEED_MASK;
3990 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3991 }
3992
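/* Worked example for the backoff seed above, with the same illustrative
 * address 00:10:18:ab:cd:ef:
 *
 *	0x00 + 0x10 + 0x18 + 0xab + 0xcd + 0xef = 0x28f
 *
 * which is then masked with TX_BACKOFF_SEED_MASK, so every byte of the
 * station address perturbs the half-duplex retransmit backoff.
 */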
3993 static void tg3_enable_register_access(struct tg3 *tp)
3994 {
3995 /*
3996 * Make sure register accesses (indirect or otherwise) will function
3997 * correctly.
3998 */
3999 pci_write_config_dword(tp->pdev,
4000 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4001 }
4002
4003 static int tg3_power_up(struct tg3 *tp)
4004 {
4005 int err;
4006
4007 tg3_enable_register_access(tp);
4008
4009 err = pci_set_power_state(tp->pdev, PCI_D0);
4010 if (!err) {
4011 /* Switch out of Vaux if it is a NIC */
4012 tg3_pwrsrc_switch_to_vmain(tp);
4013 } else {
4014 netdev_err(tp->dev, "Transition to D0 failed\n");
4015 }
4016
4017 return err;
4018 }
4019
4020 static int tg3_setup_phy(struct tg3 *, bool);
4021
4022 static void tg3_power_down_prepare(struct tg3 *tp)
4023 {
4024 u32 misc_host_ctrl;
4025 bool device_should_wake, do_low_power;
4026
4027 tg3_enable_register_access(tp);
4028
4029 /* Restore the CLKREQ setting. */
4030 if (tg3_flag(tp, CLKREQ_BUG))
4031 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4032 PCI_EXP_LNKCTL_CLKREQ_EN);
4033
4034 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4035 tw32(TG3PCI_MISC_HOST_CTRL,
4036 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4037
4038 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4039 tg3_flag(tp, WOL_ENABLE);
4040
4041 if (tg3_flag(tp, USE_PHYLIB)) {
4042 do_low_power = false;
4043 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4044 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4045 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4046 struct phy_device *phydev;
4047 u32 phyid;
4048
4049 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4050
4051 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4052
4053 tp->link_config.speed = phydev->speed;
4054 tp->link_config.duplex = phydev->duplex;
4055 tp->link_config.autoneg = phydev->autoneg;
4056 ethtool_convert_link_mode_to_legacy_u32(
4057 &tp->link_config.advertising,
4058 phydev->advertising);
4059
4060 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4062 advertising);
4063 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4064 advertising);
4065 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4066 advertising);
4067
4068 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4069 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4070 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4071 advertising);
4072 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4073 advertising);
4074 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4075 advertising);
4076 } else {
4077 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4078 advertising);
4079 }
4080 }
4081
4082 linkmode_copy(phydev->advertising, advertising);
4083 phy_start_aneg(phydev);
4084
4085 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4086 if (phyid != PHY_ID_BCMAC131) {
4087 phyid &= PHY_BCM_OUI_MASK;
4088 if (phyid == PHY_BCM_OUI_1 ||
4089 phyid == PHY_BCM_OUI_2 ||
4090 phyid == PHY_BCM_OUI_3)
4091 do_low_power = true;
4092 }
4093 }
4094 } else {
4095 do_low_power = true;
4096
4097 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4098 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4099
4100 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4101 tg3_setup_phy(tp, false);
4102 }
4103
4104 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4105 u32 val;
4106
4107 val = tr32(GRC_VCPU_EXT_CTRL);
4108 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4109 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4110 int i;
4111 u32 val;
4112
4113 for (i = 0; i < 200; i++) {
4114 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4115 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 break;
4117 msleep(1);
4118 }
4119 }
4120 if (tg3_flag(tp, WOL_CAP))
4121 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4122 WOL_DRV_STATE_SHUTDOWN |
4123 WOL_DRV_WOL |
4124 WOL_SET_MAGIC_PKT);
4125
4126 if (device_should_wake) {
4127 u32 mac_mode;
4128
4129 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4130 if (do_low_power &&
4131 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4132 tg3_phy_auxctl_write(tp,
4133 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4134 MII_TG3_AUXCTL_PCTL_WOL_EN |
4135 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4136 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4137 udelay(40);
4138 }
4139
4140 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4142 else if (tp->phy_flags &
4143 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4144 if (tp->link_config.active_speed == SPEED_1000)
4145 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146 else
4147 mac_mode = MAC_MODE_PORT_MODE_MII;
4148 } else
4149 mac_mode = MAC_MODE_PORT_MODE_MII;
4150
4151 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4152 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4153 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4154 SPEED_100 : SPEED_10;
4155 if (tg3_5700_link_polarity(tp, speed))
4156 mac_mode |= MAC_MODE_LINK_POLARITY;
4157 else
4158 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4159 }
4160 } else {
4161 mac_mode = MAC_MODE_PORT_MODE_TBI;
4162 }
4163
4164 if (!tg3_flag(tp, 5750_PLUS))
4165 tw32(MAC_LED_CTRL, tp->led_ctrl);
4166
4167 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4168 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4169 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4170 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4171
4172 if (tg3_flag(tp, ENABLE_APE))
4173 mac_mode |= MAC_MODE_APE_TX_EN |
4174 MAC_MODE_APE_RX_EN |
4175 MAC_MODE_TDE_ENABLE;
4176
4177 tw32_f(MAC_MODE, mac_mode);
4178 udelay(100);
4179
4180 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4181 udelay(10);
4182 }
4183
4184 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4185 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4186 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4187 u32 base_val;
4188
4189 base_val = tp->pci_clock_ctrl;
4190 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4191 CLOCK_CTRL_TXCLK_DISABLE);
4192
4193 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4194 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4195 } else if (tg3_flag(tp, 5780_CLASS) ||
4196 tg3_flag(tp, CPMU_PRESENT) ||
4197 tg3_asic_rev(tp) == ASIC_REV_5906) {
4198 /* do nothing */
4199 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4200 u32 newbits1, newbits2;
4201
4202 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4203 tg3_asic_rev(tp) == ASIC_REV_5701) {
4204 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4205 CLOCK_CTRL_TXCLK_DISABLE |
4206 CLOCK_CTRL_ALTCLK);
4207 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208 } else if (tg3_flag(tp, 5705_PLUS)) {
4209 newbits1 = CLOCK_CTRL_625_CORE;
4210 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4211 } else {
4212 newbits1 = CLOCK_CTRL_ALTCLK;
4213 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4214 }
4215
4216 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4217 40);
4218
4219 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4220 40);
4221
4222 if (!tg3_flag(tp, 5705_PLUS)) {
4223 u32 newbits3;
4224
4225 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4226 tg3_asic_rev(tp) == ASIC_REV_5701) {
4227 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4228 CLOCK_CTRL_TXCLK_DISABLE |
4229 CLOCK_CTRL_44MHZ_CORE);
4230 } else {
4231 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4232 }
4233
4234 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4235 tp->pci_clock_ctrl | newbits3, 40);
4236 }
4237 }
4238
4239 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4240 tg3_power_down_phy(tp, do_low_power);
4241
4242 tg3_frob_aux_power(tp, true);
4243
4244 /* Workaround for unstable PLL clock */
4245 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4246 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4247 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4248 u32 val = tr32(0x7d00);
4249
4250 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4251 tw32(0x7d00, val);
4252 if (!tg3_flag(tp, ENABLE_ASF)) {
4253 int err;
4254
4255 err = tg3_nvram_lock(tp);
4256 tg3_halt_cpu(tp, RX_CPU_BASE);
4257 if (!err)
4258 tg3_nvram_unlock(tp);
4259 }
4260 }
4261
4262 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4263
4264 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265
4266 return;
4267 }
4268
4269 static void tg3_power_down(struct tg3 *tp)
4270 {
4271 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4272 pci_set_power_state(tp->pdev, PCI_D3hot);
4273 }
4274
4275 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4276 {
4277 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4278 case MII_TG3_AUX_STAT_10HALF:
4279 *speed = SPEED_10;
4280 *duplex = DUPLEX_HALF;
4281 break;
4282
4283 case MII_TG3_AUX_STAT_10FULL:
4284 *speed = SPEED_10;
4285 *duplex = DUPLEX_FULL;
4286 break;
4287
4288 case MII_TG3_AUX_STAT_100HALF:
4289 *speed = SPEED_100;
4290 *duplex = DUPLEX_HALF;
4291 break;
4292
4293 case MII_TG3_AUX_STAT_100FULL:
4294 *speed = SPEED_100;
4295 *duplex = DUPLEX_FULL;
4296 break;
4297
4298 case MII_TG3_AUX_STAT_1000HALF:
4299 *speed = SPEED_1000;
4300 *duplex = DUPLEX_HALF;
4301 break;
4302
4303 case MII_TG3_AUX_STAT_1000FULL:
4304 *speed = SPEED_1000;
4305 *duplex = DUPLEX_FULL;
4306 break;
4307
4308 default:
4309 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4310 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4311 SPEED_10;
4312 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4313 DUPLEX_HALF;
4314 break;
4315 }
4316 *speed = SPEED_UNKNOWN;
4317 *duplex = DUPLEX_UNKNOWN;
4318 break;
4319 }
4320 }
4321
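/* Minimal caller sketch for the decoder above, assuming a prior
 * successful tg3_readphy() of MII_TG3_AUX_STAT (compare the real use in
 * tg3_setup_copper_phy() below); the function name is hypothetical.
 */
static void tg3_report_aux_stat_example(struct tg3 *tp, u32 aux_stat)
{
	u32 speed;
	u8 duplex;

	tg3_aux_stat_to_speed_duplex(tp, aux_stat, &speed, &duplex);
	if (speed != SPEED_UNKNOWN)
		netdev_info(tp->dev, "resolved %u Mb/s, %s duplex\n",
			    speed,
			    duplex == DUPLEX_FULL ? "full" : "half");
}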
4322 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 {
4324 int err = 0;
4325 u32 val, new_adv;
4326
4327 new_adv = ADVERTISE_CSMA;
4328 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4329 new_adv |= mii_advertise_flowctrl(flowctrl);
4330
4331 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4332 if (err)
4333 goto done;
4334
4335 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4336 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4337
4338 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4339 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4340 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4341
4342 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (err)
4344 goto done;
4345 }
4346
4347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4348 goto done;
4349
4350 tw32(TG3_CPMU_EEE_MODE,
4351 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4352
4353 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 if (!err) {
4355 u32 err2;
4356
4357 if (!tp->eee.eee_enabled)
4358 val = 0;
4359 else
4360 val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4361
4362 mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4363 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4364 if (err)
4365 val = 0;
4366
4367 switch (tg3_asic_rev(tp)) {
4368 case ASIC_REV_5717:
4369 case ASIC_REV_57765:
4370 case ASIC_REV_57766:
4371 case ASIC_REV_5719:
4372 /* If we advertised any EEE modes above... */
4373 if (val)
4374 val = MII_TG3_DSP_TAP26_ALNOKO |
4375 MII_TG3_DSP_TAP26_RMRXSTO |
4376 MII_TG3_DSP_TAP26_OPCSINPT;
4377 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4378 fallthrough;
4379 case ASIC_REV_5720:
4380 case ASIC_REV_5762:
4381 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4382 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4383 MII_TG3_DSP_CH34TP2_HIBW01);
4384 }
4385
4386 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4387 if (!err)
4388 err = err2;
4389 }
4390
4391 done:
4392 return err;
4393 }
4394
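/* Worked example for the conversion above: with
 * advertise = ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Full and
 * flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX, MII_ADVERTISE is written as
 * ADVERTISE_CSMA | ADVERTISE_10FULL | ADVERTISE_100FULL |
 * ADVERTISE_PAUSE_CAP (symmetric pause collapses to PAUSE_CAP alone),
 * and MII_CTRL1000 carries no speed bits since no gigabit mode was
 * requested (only the 5701 A0/B0 master workaround bits could remain).
 */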
4395 static void tg3_phy_copper_begin(struct tg3 *tp)
4396 {
4397 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4398 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4399 u32 adv, fc;
4400
4401 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4402 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4403 adv = ADVERTISED_10baseT_Half |
4404 ADVERTISED_10baseT_Full;
4405 if (tg3_flag(tp, WOL_SPEED_100MB))
4406 adv |= ADVERTISED_100baseT_Half |
4407 ADVERTISED_100baseT_Full;
4408 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4409 if (!(tp->phy_flags &
4410 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4411 adv |= ADVERTISED_1000baseT_Half;
4412 adv |= ADVERTISED_1000baseT_Full;
4413 }
4414
4415 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4416 } else {
4417 adv = tp->link_config.advertising;
4418 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4419 adv &= ~(ADVERTISED_1000baseT_Half |
4420 ADVERTISED_1000baseT_Full);
4421
4422 fc = tp->link_config.flowctrl;
4423 }
4424
4425 tg3_phy_autoneg_cfg(tp, adv, fc);
4426
4427 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4428 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4429 /* Normally during power down we want to autonegotiate
4430 * the lowest possible speed for WOL. However, to avoid
4431 * link flap, we leave it untouched.
4432 */
4433 return;
4434 }
4435
4436 tg3_writephy(tp, MII_BMCR,
4437 BMCR_ANENABLE | BMCR_ANRESTART);
4438 } else {
4439 int i;
4440 u32 bmcr, orig_bmcr;
4441
4442 tp->link_config.active_speed = tp->link_config.speed;
4443 tp->link_config.active_duplex = tp->link_config.duplex;
4444
4445 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4446 /* With autoneg disabled, 5715 only links up when the
4447 * advertisement register has the configured speed
4448 * enabled.
4449 */
4450 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4451 }
4452
4453 bmcr = 0;
4454 switch (tp->link_config.speed) {
4455 default:
4456 case SPEED_10:
4457 break;
4458
4459 case SPEED_100:
4460 bmcr |= BMCR_SPEED100;
4461 break;
4462
4463 case SPEED_1000:
4464 bmcr |= BMCR_SPEED1000;
4465 break;
4466 }
4467
4468 if (tp->link_config.duplex == DUPLEX_FULL)
4469 bmcr |= BMCR_FULLDPLX;
4470
4471 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4472 (bmcr != orig_bmcr)) {
4473 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4474 for (i = 0; i < 1500; i++) {
4475 u32 tmp;
4476
4477 udelay(10);
4478 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4479 tg3_readphy(tp, MII_BMSR, &tmp))
4480 continue;
4481 if (!(tmp & BMSR_LSTATUS)) {
4482 udelay(40);
4483 break;
4484 }
4485 }
4486 tg3_writephy(tp, MII_BMCR, bmcr);
4487 udelay(40);
4488 }
4489 }
4490 }
4491
4492 static int tg3_phy_pull_config(struct tg3 *tp)
4493 {
4494 int err;
4495 u32 val;
4496
4497 err = tg3_readphy(tp, MII_BMCR, &val);
4498 if (err)
4499 goto done;
4500
4501 if (!(val & BMCR_ANENABLE)) {
4502 tp->link_config.autoneg = AUTONEG_DISABLE;
4503 tp->link_config.advertising = 0;
4504 tg3_flag_clear(tp, PAUSE_AUTONEG);
4505
4506 err = -EIO;
4507
4508 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4509 case 0:
4510 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4511 goto done;
4512
4513 tp->link_config.speed = SPEED_10;
4514 break;
4515 case BMCR_SPEED100:
4516 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517 goto done;
4518
4519 tp->link_config.speed = SPEED_100;
4520 break;
4521 case BMCR_SPEED1000:
4522 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4523 tp->link_config.speed = SPEED_1000;
4524 break;
4525 }
4526 fallthrough;
4527 default:
4528 goto done;
4529 }
4530
4531 if (val & BMCR_FULLDPLX)
4532 tp->link_config.duplex = DUPLEX_FULL;
4533 else
4534 tp->link_config.duplex = DUPLEX_HALF;
4535
4536 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4537
4538 err = 0;
4539 goto done;
4540 }
4541
4542 tp->link_config.autoneg = AUTONEG_ENABLE;
4543 tp->link_config.advertising = ADVERTISED_Autoneg;
4544 tg3_flag_set(tp, PAUSE_AUTONEG);
4545
4546 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4547 u32 adv;
4548
4549 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4550 if (err)
4551 goto done;
4552
4553 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4554 tp->link_config.advertising |= adv | ADVERTISED_TP;
4555
4556 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4557 } else {
4558 tp->link_config.advertising |= ADVERTISED_FIBRE;
4559 }
4560
4561 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4562 u32 adv;
4563
4564 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4565 err = tg3_readphy(tp, MII_CTRL1000, &val);
4566 if (err)
4567 goto done;
4568
4569 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4570 } else {
4571 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4572 if (err)
4573 goto done;
4574
4575 adv = tg3_decode_flowctrl_1000X(val);
4576 tp->link_config.flowctrl = adv;
4577
4578 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4579 adv = mii_adv_to_ethtool_adv_x(val);
4580 }
4581
4582 tp->link_config.advertising |= adv;
4583 }
4584
4585 done:
4586 return err;
4587 }
4588
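/* Worked example for the forced-mode decode above: a copper PHY with
 * BMCR = BMCR_SPEED100 | BMCR_FULLDPLX (BMCR_ANENABLE clear) pulls in
 * as autoneg = AUTONEG_DISABLE, speed = SPEED_100,
 * duplex = DUPLEX_FULL, and flowctrl forced to
 * FLOW_CTRL_RX | FLOW_CTRL_TX.
 */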
4589 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4590 {
4591 int err;
4592
4593 /* Turn off tap power management. */
4594 /* Set Extended packet length bit */
4595 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4596
4597 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4598 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4599 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4600 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4601 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4602
4603 udelay(40);
4604
4605 return err;
4606 }
4607
4608 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4609 {
4610 struct ethtool_keee eee = {};
4611
4612 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4613 return true;
4614
4615 tg3_eee_pull_config(tp, &eee);
4616
4617 if (tp->eee.eee_enabled) {
4618 if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4619 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4620 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4621 return false;
4622 } else {
4623 /* EEE is disabled but we're advertising */
4624 if (!linkmode_empty(eee.advertised))
4625 return false;
4626 }
4627
4628 return true;
4629 }
4630
4631 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4632 {
4633 u32 advmsk, tgtadv, advertising;
4634
4635 advertising = tp->link_config.advertising;
4636 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4637
4638 advmsk = ADVERTISE_ALL;
4639 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4640 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4641 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4642 }
4643
4644 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4645 return false;
4646
4647 if ((*lcladv & advmsk) != tgtadv)
4648 return false;
4649
4650 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4651 u32 tg3_ctrl;
4652
4653 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4654
4655 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4656 return false;
4657
4658 if (tgtadv &&
4659 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4660 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4661 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4662 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4663 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4664 } else {
4665 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4666 }
4667
4668 if (tg3_ctrl != tgtadv)
4669 return false;
4670 }
4671
4672 return true;
4673 }
4674
4675 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4676 {
4677 u32 lpeth = 0;
4678
4679 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4680 u32 val;
4681
4682 if (tg3_readphy(tp, MII_STAT1000, &val))
4683 return false;
4684
4685 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4686 }
4687
4688 if (tg3_readphy(tp, MII_LPA, rmtadv))
4689 return false;
4690
4691 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4692 tp->link_config.rmt_adv = lpeth;
4693
4694 return true;
4695 }
4696
4697 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4698 {
4699 if (curr_link_up != tp->link_up) {
4700 if (curr_link_up) {
4701 netif_carrier_on(tp->dev);
4702 } else {
4703 netif_carrier_off(tp->dev);
4704 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4705 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706 }
4707
4708 tg3_link_report(tp);
4709 return true;
4710 }
4711
4712 return false;
4713 }
4714
4715 static void tg3_clear_mac_status(struct tg3 *tp)
4716 {
4717 tw32(MAC_EVENT, 0);
4718
4719 tw32_f(MAC_STATUS,
4720 MAC_STATUS_SYNC_CHANGED |
4721 MAC_STATUS_CFG_CHANGED |
4722 MAC_STATUS_MI_COMPLETION |
4723 MAC_STATUS_LNKSTATE_CHANGED);
4724 udelay(40);
4725 }
4726
4727 static void tg3_setup_eee(struct tg3 *tp)
4728 {
4729 u32 val;
4730
4731 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4732 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4733 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4734 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4735
4736 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4737
4738 tw32_f(TG3_CPMU_EEE_CTRL,
4739 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4740
4741 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4742 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4743 TG3_CPMU_EEEMD_LPI_IN_RX |
4744 TG3_CPMU_EEEMD_EEE_ENABLE;
4745
4746 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4747 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4748
4749 if (tg3_flag(tp, ENABLE_APE))
4750 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4751
4752 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4753
4754 tw32_f(TG3_CPMU_EEE_DBTMR1,
4755 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4756 (tp->eee.tx_lpi_timer & 0xffff));
4757
4758 tw32_f(TG3_CPMU_EEE_DBTMR2,
4759 TG3_CPMU_DBTMR2_APE_TX_2047US |
4760 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4761 }
4762
4763 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4764 {
4765 bool current_link_up;
4766 u32 bmsr, val;
4767 u32 lcl_adv, rmt_adv;
4768 u32 current_speed;
4769 u8 current_duplex;
4770 int i, err;
4771
4772 tg3_clear_mac_status(tp);
4773
4774 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4775 tw32_f(MAC_MI_MODE,
4776 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4777 udelay(80);
4778 }
4779
4780 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4781
4782 /* Some third-party PHYs need to be reset on link going
4783 * down.
4784 */
4785 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4786 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4787 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4788 tp->link_up) {
4789 tg3_readphy(tp, MII_BMSR, &bmsr);
4790 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4791 !(bmsr & BMSR_LSTATUS))
4792 force_reset = true;
4793 }
4794 if (force_reset)
4795 tg3_phy_reset(tp);
4796
4797 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4798 tg3_readphy(tp, MII_BMSR, &bmsr);
4799 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4800 !tg3_flag(tp, INIT_COMPLETE))
4801 bmsr = 0;
4802
4803 if (!(bmsr & BMSR_LSTATUS)) {
4804 err = tg3_init_5401phy_dsp(tp);
4805 if (err)
4806 return err;
4807
4808 tg3_readphy(tp, MII_BMSR, &bmsr);
4809 for (i = 0; i < 1000; i++) {
4810 udelay(10);
4811 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4812 (bmsr & BMSR_LSTATUS)) {
4813 udelay(40);
4814 break;
4815 }
4816 }
4817
4818 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4819 TG3_PHY_REV_BCM5401_B0 &&
4820 !(bmsr & BMSR_LSTATUS) &&
4821 tp->link_config.active_speed == SPEED_1000) {
4822 err = tg3_phy_reset(tp);
4823 if (!err)
4824 err = tg3_init_5401phy_dsp(tp);
4825 if (err)
4826 return err;
4827 }
4828 }
4829 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4830 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4831 /* 5701 {A0,B0} CRC bug workaround */
4832 tg3_writephy(tp, 0x15, 0x0a75);
4833 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4834 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4835 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4836 }
4837
4838 /* Clear pending interrupts... */
4839 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4840 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841
4842 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4843 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4844 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4845 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4846
4847 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4848 tg3_asic_rev(tp) == ASIC_REV_5701) {
4849 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4850 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4851 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4852 else
4853 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4854 }
4855
4856 current_link_up = false;
4857 current_speed = SPEED_UNKNOWN;
4858 current_duplex = DUPLEX_UNKNOWN;
4859 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4860 tp->link_config.rmt_adv = 0;
4861
4862 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4863 err = tg3_phy_auxctl_read(tp,
4864 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4865 &val);
4866 if (!err && !(val & (1 << 10))) {
4867 tg3_phy_auxctl_write(tp,
4868 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4869 val | (1 << 10));
4870 goto relink;
4871 }
4872 }
4873
4874 bmsr = 0;
4875 for (i = 0; i < 100; i++) {
4876 tg3_readphy(tp, MII_BMSR, &bmsr);
4877 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4878 (bmsr & BMSR_LSTATUS))
4879 break;
4880 udelay(40);
4881 }
4882
4883 if (bmsr & BMSR_LSTATUS) {
4884 u32 aux_stat, bmcr;
4885
4886 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4887 for (i = 0; i < 2000; i++) {
4888 udelay(10);
4889 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4890 aux_stat)
4891 break;
4892 }
4893
4894 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4895 &current_speed,
4896 &current_duplex);
4897
4898 bmcr = 0;
4899 for (i = 0; i < 200; i++) {
4900 tg3_readphy(tp, MII_BMCR, &bmcr);
4901 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4902 continue;
4903 if (bmcr && bmcr != 0x7fff)
4904 break;
4905 udelay(10);
4906 }
4907
4908 lcl_adv = 0;
4909 rmt_adv = 0;
4910
4911 tp->link_config.active_speed = current_speed;
4912 tp->link_config.active_duplex = current_duplex;
4913
4914 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4915 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4916
4917 if ((bmcr & BMCR_ANENABLE) &&
4918 eee_config_ok &&
4919 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4920 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4921 current_link_up = true;
4922
4923 /* Changes to EEE settings take effect only after a phy
4924 * reset. If we have skipped a reset due to Link Flap
4925 * Avoidance being enabled, do it now.
4926 */
4927 if (!eee_config_ok &&
4928 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4929 !force_reset) {
4930 tg3_setup_eee(tp);
4931 tg3_phy_reset(tp);
4932 }
4933 } else {
4934 if (!(bmcr & BMCR_ANENABLE) &&
4935 tp->link_config.speed == current_speed &&
4936 tp->link_config.duplex == current_duplex) {
4937 current_link_up = true;
4938 }
4939 }
4940
4941 if (current_link_up &&
4942 tp->link_config.active_duplex == DUPLEX_FULL) {
4943 u32 reg, bit;
4944
4945 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4946 reg = MII_TG3_FET_GEN_STAT;
4947 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4948 } else {
4949 reg = MII_TG3_EXT_STAT;
4950 bit = MII_TG3_EXT_STAT_MDIX;
4951 }
4952
4953 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4954 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4955
4956 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4957 }
4958 }
4959
4960 relink:
4961 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4962 tg3_phy_copper_begin(tp);
4963
4964 if (tg3_flag(tp, ROBOSWITCH)) {
4965 current_link_up = true;
4966 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4967 current_speed = SPEED_1000;
4968 current_duplex = DUPLEX_FULL;
4969 tp->link_config.active_speed = current_speed;
4970 tp->link_config.active_duplex = current_duplex;
4971 }
4972
4973 tg3_readphy(tp, MII_BMSR, &bmsr);
4974 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4975 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4976 current_link_up = true;
4977 }
4978
4979 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4980 if (current_link_up) {
4981 if (tp->link_config.active_speed == SPEED_100 ||
4982 tp->link_config.active_speed == SPEED_10)
4983 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4984 else
4985 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4986 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4987 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4988 else
4989 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4990
4991 /* In order for the 5750 core in the BCM4785 chip to work properly
4992 * in RGMII mode, the LED Control Register must be set up.
4993 */
4994 if (tg3_flag(tp, RGMII_MODE)) {
4995 u32 led_ctrl = tr32(MAC_LED_CTRL);
4996 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4997
4998 if (tp->link_config.active_speed == SPEED_10)
4999 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5000 else if (tp->link_config.active_speed == SPEED_100)
5001 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5002 LED_CTRL_100MBPS_ON);
5003 else if (tp->link_config.active_speed == SPEED_1000)
5004 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5005 LED_CTRL_1000MBPS_ON);
5006
5007 tw32(MAC_LED_CTRL, led_ctrl);
5008 udelay(40);
5009 }
5010
5011 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5012 if (tp->link_config.active_duplex == DUPLEX_HALF)
5013 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5014
5015 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5016 if (current_link_up &&
5017 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5018 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5019 else
5020 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5021 }
5022
5023 /* ??? Without this setting Netgear GA302T PHY does not
5024 * ??? send/receive packets...
5025 */
5026 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5027 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5028 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5029 tw32_f(MAC_MI_MODE, tp->mi_mode);
5030 udelay(80);
5031 }
5032
5033 tw32_f(MAC_MODE, tp->mac_mode);
5034 udelay(40);
5035
5036 tg3_phy_eee_adjust(tp, current_link_up);
5037
5038 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5039 /* Polled via timer. */
5040 tw32_f(MAC_EVENT, 0);
5041 } else {
5042 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5043 }
5044 udelay(40);
5045
5046 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5047 current_link_up &&
5048 tp->link_config.active_speed == SPEED_1000 &&
5049 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5050 udelay(120);
5051 tw32_f(MAC_STATUS,
5052 (MAC_STATUS_SYNC_CHANGED |
5053 MAC_STATUS_CFG_CHANGED));
5054 udelay(40);
5055 tg3_write_mem(tp,
5056 NIC_SRAM_FIRMWARE_MBOX,
5057 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5058 }
5059
5060 /* Prevent send BD corruption. */
5061 if (tg3_flag(tp, CLKREQ_BUG)) {
5062 if (tp->link_config.active_speed == SPEED_100 ||
5063 tp->link_config.active_speed == SPEED_10)
5064 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5065 PCI_EXP_LNKCTL_CLKREQ_EN);
5066 else
5067 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5068 PCI_EXP_LNKCTL_CLKREQ_EN);
5069 }
5070
5071 tg3_test_and_report_link_chg(tp, current_link_up);
5072
5073 return 0;
5074 }
5075
5076 struct tg3_fiber_aneginfo {
5077 int state;
5078 #define ANEG_STATE_UNKNOWN 0
5079 #define ANEG_STATE_AN_ENABLE 1
5080 #define ANEG_STATE_RESTART_INIT 2
5081 #define ANEG_STATE_RESTART 3
5082 #define ANEG_STATE_DISABLE_LINK_OK 4
5083 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5084 #define ANEG_STATE_ABILITY_DETECT 6
5085 #define ANEG_STATE_ACK_DETECT_INIT 7
5086 #define ANEG_STATE_ACK_DETECT 8
5087 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5088 #define ANEG_STATE_COMPLETE_ACK 10
5089 #define ANEG_STATE_IDLE_DETECT_INIT 11
5090 #define ANEG_STATE_IDLE_DETECT 12
5091 #define ANEG_STATE_LINK_OK 13
5092 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5093 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5094
5095 u32 flags;
5096 #define MR_AN_ENABLE 0x00000001
5097 #define MR_RESTART_AN 0x00000002
5098 #define MR_AN_COMPLETE 0x00000004
5099 #define MR_PAGE_RX 0x00000008
5100 #define MR_NP_LOADED 0x00000010
5101 #define MR_TOGGLE_TX 0x00000020
5102 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5103 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5104 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5105 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5106 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5107 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5108 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5109 #define MR_TOGGLE_RX 0x00002000
5110 #define MR_NP_RX 0x00004000
5111
5112 #define MR_LINK_OK 0x80000000
5113
5114 unsigned long link_time, cur_time;
5115
5116 u32 ability_match_cfg;
5117 int ability_match_count;
5118
5119 char ability_match, idle_match, ack_match;
5120
5121 u32 txconfig, rxconfig;
5122 #define ANEG_CFG_NP 0x00000080
5123 #define ANEG_CFG_ACK 0x00000040
5124 #define ANEG_CFG_RF2 0x00000020
5125 #define ANEG_CFG_RF1 0x00000010
5126 #define ANEG_CFG_PS2 0x00000001
5127 #define ANEG_CFG_PS1 0x00008000
5128 #define ANEG_CFG_HD 0x00004000
5129 #define ANEG_CFG_FD 0x00002000
5130 #define ANEG_CFG_INVAL 0x00001f06
5131
5132 };
5133 #define ANEG_OK 0
5134 #define ANEG_DONE 1
5135 #define ANEG_TIMER_ENAB 2
5136 #define ANEG_FAILED -1
5137
5138 #define ANEG_STATE_SETTLE_TIME 10000
5139
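/* Typical happy-path progression through the state machine below:
 *
 *	AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *	ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *	COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *	IDLE_DETECT -> LINK_OK
 *
 * Seeing the partner's config words go to zero in the later states
 * restarts the machine at AN_ENABLE; the NEXT_PAGE_WAIT states are
 * unimplemented.
 */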
5140 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5141 struct tg3_fiber_aneginfo *ap)
5142 {
5143 u16 flowctrl;
5144 unsigned long delta;
5145 u32 rx_cfg_reg;
5146 int ret;
5147
5148 if (ap->state == ANEG_STATE_UNKNOWN) {
5149 ap->rxconfig = 0;
5150 ap->link_time = 0;
5151 ap->cur_time = 0;
5152 ap->ability_match_cfg = 0;
5153 ap->ability_match_count = 0;
5154 ap->ability_match = 0;
5155 ap->idle_match = 0;
5156 ap->ack_match = 0;
5157 }
5158 ap->cur_time++;
5159
5160 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5161 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5162
5163 if (rx_cfg_reg != ap->ability_match_cfg) {
5164 ap->ability_match_cfg = rx_cfg_reg;
5165 ap->ability_match = 0;
5166 ap->ability_match_count = 0;
5167 } else {
5168 if (++ap->ability_match_count > 1) {
5169 ap->ability_match = 1;
5170 ap->ability_match_cfg = rx_cfg_reg;
5171 }
5172 }
5173 if (rx_cfg_reg & ANEG_CFG_ACK)
5174 ap->ack_match = 1;
5175 else
5176 ap->ack_match = 0;
5177
5178 ap->idle_match = 0;
5179 } else {
5180 ap->idle_match = 1;
5181 ap->ability_match_cfg = 0;
5182 ap->ability_match_count = 0;
5183 ap->ability_match = 0;
5184 ap->ack_match = 0;
5185
5186 rx_cfg_reg = 0;
5187 }
5188
5189 ap->rxconfig = rx_cfg_reg;
5190 ret = ANEG_OK;
5191
5192 switch (ap->state) {
5193 case ANEG_STATE_UNKNOWN:
5194 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5195 ap->state = ANEG_STATE_AN_ENABLE;
5196
5197 fallthrough;
5198 case ANEG_STATE_AN_ENABLE:
5199 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5200 if (ap->flags & MR_AN_ENABLE) {
5201 ap->link_time = 0;
5202 ap->cur_time = 0;
5203 ap->ability_match_cfg = 0;
5204 ap->ability_match_count = 0;
5205 ap->ability_match = 0;
5206 ap->idle_match = 0;
5207 ap->ack_match = 0;
5208
5209 ap->state = ANEG_STATE_RESTART_INIT;
5210 } else {
5211 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5212 }
5213 break;
5214
5215 case ANEG_STATE_RESTART_INIT:
5216 ap->link_time = ap->cur_time;
5217 ap->flags &= ~(MR_NP_LOADED);
5218 ap->txconfig = 0;
5219 tw32(MAC_TX_AUTO_NEG, 0);
5220 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5221 tw32_f(MAC_MODE, tp->mac_mode);
5222 udelay(40);
5223
5224 ret = ANEG_TIMER_ENAB;
5225 ap->state = ANEG_STATE_RESTART;
5226
5227 fallthrough;
5228 case ANEG_STATE_RESTART:
5229 delta = ap->cur_time - ap->link_time;
5230 if (delta > ANEG_STATE_SETTLE_TIME)
5231 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5232 else
5233 ret = ANEG_TIMER_ENAB;
5234 break;
5235
5236 case ANEG_STATE_DISABLE_LINK_OK:
5237 ret = ANEG_DONE;
5238 break;
5239
5240 case ANEG_STATE_ABILITY_DETECT_INIT:
5241 ap->flags &= ~(MR_TOGGLE_TX);
5242 ap->txconfig = ANEG_CFG_FD;
5243 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5244 if (flowctrl & ADVERTISE_1000XPAUSE)
5245 ap->txconfig |= ANEG_CFG_PS1;
5246 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5247 ap->txconfig |= ANEG_CFG_PS2;
5248 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5249 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5250 tw32_f(MAC_MODE, tp->mac_mode);
5251 udelay(40);
5252
5253 ap->state = ANEG_STATE_ABILITY_DETECT;
5254 break;
5255
5256 case ANEG_STATE_ABILITY_DETECT:
5257 if (ap->ability_match != 0 && ap->rxconfig != 0)
5258 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5259 break;
5260
5261 case ANEG_STATE_ACK_DETECT_INIT:
5262 ap->txconfig |= ANEG_CFG_ACK;
5263 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265 tw32_f(MAC_MODE, tp->mac_mode);
5266 udelay(40);
5267
5268 ap->state = ANEG_STATE_ACK_DETECT;
5269
5270 fallthrough;
5271 case ANEG_STATE_ACK_DETECT:
5272 if (ap->ack_match != 0) {
5273 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5274 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5275 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5276 } else {
5277 ap->state = ANEG_STATE_AN_ENABLE;
5278 }
5279 } else if (ap->ability_match != 0 &&
5280 ap->rxconfig == 0) {
5281 ap->state = ANEG_STATE_AN_ENABLE;
5282 }
5283 break;
5284
5285 case ANEG_STATE_COMPLETE_ACK_INIT:
5286 if (ap->rxconfig & ANEG_CFG_INVAL) {
5287 ret = ANEG_FAILED;
5288 break;
5289 }
5290 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5291 MR_LP_ADV_HALF_DUPLEX |
5292 MR_LP_ADV_SYM_PAUSE |
5293 MR_LP_ADV_ASYM_PAUSE |
5294 MR_LP_ADV_REMOTE_FAULT1 |
5295 MR_LP_ADV_REMOTE_FAULT2 |
5296 MR_LP_ADV_NEXT_PAGE |
5297 MR_TOGGLE_RX |
5298 MR_NP_RX);
5299 if (ap->rxconfig & ANEG_CFG_FD)
5300 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5301 if (ap->rxconfig & ANEG_CFG_HD)
5302 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5303 if (ap->rxconfig & ANEG_CFG_PS1)
5304 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5305 if (ap->rxconfig & ANEG_CFG_PS2)
5306 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5307 if (ap->rxconfig & ANEG_CFG_RF1)
5308 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5309 if (ap->rxconfig & ANEG_CFG_RF2)
5310 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5311 if (ap->rxconfig & ANEG_CFG_NP)
5312 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5313
5314 ap->link_time = ap->cur_time;
5315
5316 ap->flags ^= (MR_TOGGLE_TX);
5317 if (ap->rxconfig & 0x0008)
5318 ap->flags |= MR_TOGGLE_RX;
5319 if (ap->rxconfig & ANEG_CFG_NP)
5320 ap->flags |= MR_NP_RX;
5321 ap->flags |= MR_PAGE_RX;
5322
5323 ap->state = ANEG_STATE_COMPLETE_ACK;
5324 ret = ANEG_TIMER_ENAB;
5325 break;
5326
5327 case ANEG_STATE_COMPLETE_ACK:
5328 if (ap->ability_match != 0 &&
5329 ap->rxconfig == 0) {
5330 ap->state = ANEG_STATE_AN_ENABLE;
5331 break;
5332 }
5333 delta = ap->cur_time - ap->link_time;
5334 if (delta > ANEG_STATE_SETTLE_TIME) {
5335 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5336 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5337 } else {
5338 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5339 !(ap->flags & MR_NP_RX)) {
5340 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5341 } else {
5342 ret = ANEG_FAILED;
5343 }
5344 }
5345 }
5346 break;
5347
5348 case ANEG_STATE_IDLE_DETECT_INIT:
5349 ap->link_time = ap->cur_time;
5350 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5351 tw32_f(MAC_MODE, tp->mac_mode);
5352 udelay(40);
5353
5354 ap->state = ANEG_STATE_IDLE_DETECT;
5355 ret = ANEG_TIMER_ENAB;
5356 break;
5357
5358 case ANEG_STATE_IDLE_DETECT:
5359 if (ap->ability_match != 0 &&
5360 ap->rxconfig == 0) {
5361 ap->state = ANEG_STATE_AN_ENABLE;
5362 break;
5363 }
5364 delta = ap->cur_time - ap->link_time;
5365 if (delta > ANEG_STATE_SETTLE_TIME) {
5366 /* XXX another gem from the Broadcom driver :( */
5367 ap->state = ANEG_STATE_LINK_OK;
5368 }
5369 break;
5370
5371 case ANEG_STATE_LINK_OK:
5372 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5373 ret = ANEG_DONE;
5374 break;
5375
5376 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5377 /* ??? unimplemented */
5378 break;
5379
5380 case ANEG_STATE_NEXT_PAGE_WAIT:
5381 /* ??? unimplemented */
5382 break;
5383
5384 default:
5385 ret = ANEG_FAILED;
5386 break;
5387 }
5388
5389 return ret;
5390 }
5391
5392 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5393 {
5394 int res = 0;
5395 struct tg3_fiber_aneginfo aninfo;
5396 int status = ANEG_FAILED;
5397 unsigned int tick;
5398 u32 tmp;
5399
5400 tw32_f(MAC_TX_AUTO_NEG, 0);
5401
5402 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5403 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5404 udelay(40);
5405
5406 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5407 udelay(40);
5408
5409 memset(&aninfo, 0, sizeof(aninfo));
5410 aninfo.flags |= MR_AN_ENABLE;
5411 aninfo.state = ANEG_STATE_UNKNOWN;
5412 aninfo.cur_time = 0;
5413 tick = 0;
5414 while (++tick < 195000) {
5415 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5416 if (status == ANEG_DONE || status == ANEG_FAILED)
5417 break;
5418
5419 udelay(1);
5420 }
5421
5422 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5423 tw32_f(MAC_MODE, tp->mac_mode);
5424 udelay(40);
5425
5426 *txflags = aninfo.txconfig;
5427 *rxflags = aninfo.flags;
5428
5429 if (status == ANEG_DONE &&
5430 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5431 MR_LP_ADV_FULL_DUPLEX)))
5432 res = 1;
5433
5434 return res;
5435 }
5436
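/* Timing note for the loop above: up to 195000 iterations of udelay(1)
 * bound software autonegotiation at roughly 195 ms before SEND_CONFIGS
 * is cleared, whether or not the state machine reached ANEG_DONE.
 */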
5437 static void tg3_init_bcm8002(struct tg3 *tp)
5438 {
5439 u32 mac_status = tr32(MAC_STATUS);
5440 int i;
5441
5442 /* Reset when initializing for the first time or when we have a link. */
5443 if (tg3_flag(tp, INIT_COMPLETE) &&
5444 !(mac_status & MAC_STATUS_PCS_SYNCED))
5445 return;
5446
5447 /* Set PLL lock range. */
5448 tg3_writephy(tp, 0x16, 0x8007);
5449
5450 /* SW reset */
5451 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5452
5453 /* Wait for reset to complete. */
5454 /* XXX schedule_timeout() ... */
5455 for (i = 0; i < 500; i++)
5456 udelay(10);
5457
5458 /* Config mode; select PMA/Ch 1 regs. */
5459 tg3_writephy(tp, 0x10, 0x8411);
5460
5461 /* Enable auto-lock and comdet, select txclk for tx. */
5462 tg3_writephy(tp, 0x11, 0x0a10);
5463
5464 tg3_writephy(tp, 0x18, 0x00a0);
5465 tg3_writephy(tp, 0x16, 0x41ff);
5466
5467 /* Assert and deassert POR. */
5468 tg3_writephy(tp, 0x13, 0x0400);
5469 udelay(40);
5470 tg3_writephy(tp, 0x13, 0x0000);
5471
5472 tg3_writephy(tp, 0x11, 0x0a50);
5473 udelay(40);
5474 tg3_writephy(tp, 0x11, 0x0a10);
5475
5476 /* Wait for signal to stabilize */
5477 /* XXX schedule_timeout() ... */
5478 for (i = 0; i < 15000; i++)
5479 udelay(10);
5480
5481 /* Deselect the channel register so we can read the PHYID
5482 * later.
5483 */
5484 tg3_writephy(tp, 0x10, 0x8011);
5485 }
5486
5487 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5488 {
5489 u16 flowctrl;
5490 bool current_link_up;
5491 u32 sg_dig_ctrl, sg_dig_status;
5492 u32 serdes_cfg, expected_sg_dig_ctrl;
5493 int workaround, port_a;
5494
5495 serdes_cfg = 0;
5496 workaround = 0;
5497 port_a = 1;
5498 current_link_up = false;
5499
5500 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5501 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5502 workaround = 1;
5503 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5504 port_a = 0;
5505
5506 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5507 /* preserve bits 20-23 for voltage regulator */
5508 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5509 }
5510
5511 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5512
5513 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5514 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5515 if (workaround) {
5516 u32 val = serdes_cfg;
5517
5518 if (port_a)
5519 val |= 0xc010000;
5520 else
5521 val |= 0x4010000;
5522 tw32_f(MAC_SERDES_CFG, val);
5523 }
5524
5525 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5526 }
5527 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5528 tg3_setup_flow_control(tp, 0, 0);
5529 current_link_up = true;
5530 }
5531 goto out;
5532 }
5533
5534 /* Want auto-negotiation. */
5535 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5536
5537 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5538 if (flowctrl & ADVERTISE_1000XPAUSE)
5539 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5540 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5541 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5542
5543 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5544 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5545 tp->serdes_counter &&
5546 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5547 MAC_STATUS_RCVD_CFG)) ==
5548 MAC_STATUS_PCS_SYNCED)) {
5549 tp->serdes_counter--;
5550 current_link_up = true;
5551 goto out;
5552 }
5553 restart_autoneg:
5554 if (workaround)
5555 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5556 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5557 udelay(5);
5558 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5559
5560 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5561 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5562 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5563 MAC_STATUS_SIGNAL_DET)) {
5564 sg_dig_status = tr32(SG_DIG_STATUS);
5565 mac_status = tr32(MAC_STATUS);
5566
5567 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5568 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5569 u32 local_adv = 0, remote_adv = 0;
5570
5571 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5572 local_adv |= ADVERTISE_1000XPAUSE;
5573 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5574 local_adv |= ADVERTISE_1000XPSE_ASYM;
5575
5576 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5577 remote_adv |= LPA_1000XPAUSE;
5578 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5579 remote_adv |= LPA_1000XPAUSE_ASYM;
5580
5581 tp->link_config.rmt_adv =
5582 mii_adv_to_ethtool_adv_x(remote_adv);
5583
5584 tg3_setup_flow_control(tp, local_adv, remote_adv);
5585 current_link_up = true;
5586 tp->serdes_counter = 0;
5587 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5588 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5589 if (tp->serdes_counter)
5590 tp->serdes_counter--;
5591 else {
5592 if (workaround) {
5593 u32 val = serdes_cfg;
5594
5595 if (port_a)
5596 val |= 0xc010000;
5597 else
5598 val |= 0x4010000;
5599
5600 tw32_f(MAC_SERDES_CFG, val);
5601 }
5602
5603 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5604 udelay(40);
5605
5606 /* Parallel detection: declare link up only if we have
5607 * PCS_SYNC and are not receiving config code words.
5608 */
5609 mac_status = tr32(MAC_STATUS);
5610 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5611 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5612 tg3_setup_flow_control(tp, 0, 0);
5613 current_link_up = true;
5614 tp->phy_flags |=
5615 TG3_PHYFLG_PARALLEL_DETECT;
5616 tp->serdes_counter =
5617 SERDES_PARALLEL_DET_TIMEOUT;
5618 } else
5619 goto restart_autoneg;
5620 }
5621 }
5622 } else {
5623 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5624 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5625 }
5626
5627 out:
5628 return current_link_up;
5629 }
5630
5631 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5632 {
5633 bool current_link_up = false;
5634
5635 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5636 goto out;
5637
5638 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5639 u32 txflags, rxflags;
5640 int i;
5641
5642 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5643 u32 local_adv = 0, remote_adv = 0;
5644
5645 if (txflags & ANEG_CFG_PS1)
5646 local_adv |= ADVERTISE_1000XPAUSE;
5647 if (txflags & ANEG_CFG_PS2)
5648 local_adv |= ADVERTISE_1000XPSE_ASYM;
5649
5650 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5651 remote_adv |= LPA_1000XPAUSE;
5652 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5653 remote_adv |= LPA_1000XPAUSE_ASYM;
5654
5655 tp->link_config.rmt_adv =
5656 mii_adv_to_ethtool_adv_x(remote_adv);
5657
5658 tg3_setup_flow_control(tp, local_adv, remote_adv);
5659
5660 current_link_up = true;
5661 }
5662 for (i = 0; i < 30; i++) {
5663 udelay(20);
5664 tw32_f(MAC_STATUS,
5665 (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED));
5667 udelay(40);
5668 if ((tr32(MAC_STATUS) &
5669 (MAC_STATUS_SYNC_CHANGED |
5670 MAC_STATUS_CFG_CHANGED)) == 0)
5671 break;
5672 }
5673
5674 mac_status = tr32(MAC_STATUS);
5675 if (!current_link_up &&
5676 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5677 !(mac_status & MAC_STATUS_RCVD_CFG))
5678 current_link_up = true;
5679 } else {
5680 tg3_setup_flow_control(tp, 0, 0);
5681
5682 /* Forcing 1000FD link up. */
5683 current_link_up = true;
5684
5685 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5686 udelay(40);
5687
5688 tw32_f(MAC_MODE, tp->mac_mode);
5689 udelay(40);
5690 }
5691
5692 out:
5693 return current_link_up;
5694 }
5695
5696 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5697 {
5698 u32 orig_pause_cfg;
5699 u32 orig_active_speed;
5700 u8 orig_active_duplex;
5701 u32 mac_status;
5702 bool current_link_up;
5703 int i;
5704
5705 orig_pause_cfg = tp->link_config.active_flowctrl;
5706 orig_active_speed = tp->link_config.active_speed;
5707 orig_active_duplex = tp->link_config.active_duplex;
5708
5709 if (!tg3_flag(tp, HW_AUTONEG) &&
5710 tp->link_up &&
5711 tg3_flag(tp, INIT_COMPLETE)) {
5712 mac_status = tr32(MAC_STATUS);
5713 mac_status &= (MAC_STATUS_PCS_SYNCED |
5714 MAC_STATUS_SIGNAL_DET |
5715 MAC_STATUS_CFG_CHANGED |
5716 MAC_STATUS_RCVD_CFG);
5717 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5718 MAC_STATUS_SIGNAL_DET)) {
5719 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5720 MAC_STATUS_CFG_CHANGED));
5721 return 0;
5722 }
5723 }
5724
5725 tw32_f(MAC_TX_AUTO_NEG, 0);
5726
5727 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5729 tw32_f(MAC_MODE, tp->mac_mode);
5730 udelay(40);
5731
5732 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5733 tg3_init_bcm8002(tp);
5734
5735 /* Enable link change event even when serdes polling. */
5736 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5737 udelay(40);
5738
5739 tp->link_config.rmt_adv = 0;
5740 mac_status = tr32(MAC_STATUS);
5741
5742 if (tg3_flag(tp, HW_AUTONEG))
5743 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5744 else
5745 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5746
5747 tp->napi[0].hw_status->status =
5748 (SD_STATUS_UPDATED |
5749 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5750
5751 for (i = 0; i < 100; i++) {
5752 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5753 MAC_STATUS_CFG_CHANGED));
5754 udelay(5);
5755 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5756 MAC_STATUS_CFG_CHANGED |
5757 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5758 break;
5759 }
5760
5761 mac_status = tr32(MAC_STATUS);
5762 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5763 current_link_up = false;
5764 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5765 tp->serdes_counter == 0) {
5766 tw32_f(MAC_MODE, (tp->mac_mode |
5767 MAC_MODE_SEND_CONFIGS));
5768 udelay(1);
5769 tw32_f(MAC_MODE, tp->mac_mode);
5770 }
5771 }
5772
5773 if (current_link_up) {
5774 tp->link_config.active_speed = SPEED_1000;
5775 tp->link_config.active_duplex = DUPLEX_FULL;
5776 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5777 LED_CTRL_LNKLED_OVERRIDE |
5778 LED_CTRL_1000MBPS_ON));
5779 } else {
5780 tp->link_config.active_speed = SPEED_UNKNOWN;
5781 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5782 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5783 LED_CTRL_LNKLED_OVERRIDE |
5784 LED_CTRL_TRAFFIC_OVERRIDE));
5785 }
5786
5787 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5788 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5789 if (orig_pause_cfg != now_pause_cfg ||
5790 orig_active_speed != tp->link_config.active_speed ||
5791 orig_active_duplex != tp->link_config.active_duplex)
5792 tg3_link_report(tp);
5793 }
5794
5795 return 0;
5796 }
5797
5798 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5799 {
5800 int err = 0;
5801 u32 bmsr, bmcr;
5802 u32 current_speed = SPEED_UNKNOWN;
5803 u8 current_duplex = DUPLEX_UNKNOWN;
5804 bool current_link_up = false;
5805 u32 local_adv, remote_adv, sgsr;
5806
5807 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5808 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5809 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5810 (sgsr & SERDES_TG3_SGMII_MODE)) {
5811
5812 if (force_reset)
5813 tg3_phy_reset(tp);
5814
5815 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5816
5817 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5818 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5819 } else {
5820 current_link_up = true;
5821 if (sgsr & SERDES_TG3_SPEED_1000) {
5822 current_speed = SPEED_1000;
5823 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5824 } else if (sgsr & SERDES_TG3_SPEED_100) {
5825 current_speed = SPEED_100;
5826 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5827 } else {
5828 current_speed = SPEED_10;
5829 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5830 }
5831
5832 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5833 current_duplex = DUPLEX_FULL;
5834 else
5835 current_duplex = DUPLEX_HALF;
5836 }
5837
5838 tw32_f(MAC_MODE, tp->mac_mode);
5839 udelay(40);
5840
5841 tg3_clear_mac_status(tp);
5842
5843 goto fiber_setup_done;
5844 }
5845
5846 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5847 tw32_f(MAC_MODE, tp->mac_mode);
5848 udelay(40);
5849
5850 tg3_clear_mac_status(tp);
5851
5852 if (force_reset)
5853 tg3_phy_reset(tp);
5854
5855 tp->link_config.rmt_adv = 0;
5856
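	/* The BMSR latches link-down events; read it twice so the
	 * second read reports the current link state.
	 */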
5857 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5858 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5860 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5861 bmsr |= BMSR_LSTATUS;
5862 else
5863 bmsr &= ~BMSR_LSTATUS;
5864 }
5865
5866 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5867
5868 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5869 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5870 /* do nothing, just check for link up at the end */
5871 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5872 u32 adv, newadv;
5873
5874 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5875 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5876 ADVERTISE_1000XPAUSE |
5877 ADVERTISE_1000XPSE_ASYM |
5878 ADVERTISE_SLCT);
5879
5880 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5881 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5882
5883 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5884 tg3_writephy(tp, MII_ADVERTISE, newadv);
5885 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5886 tg3_writephy(tp, MII_BMCR, bmcr);
5887
5888 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5889 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5890 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5891
5892 return err;
5893 }
5894 } else {
5895 u32 new_bmcr;
5896
5897 bmcr &= ~BMCR_SPEED1000;
5898 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5899
5900 if (tp->link_config.duplex == DUPLEX_FULL)
5901 new_bmcr |= BMCR_FULLDPLX;
5902
5903 if (new_bmcr != bmcr) {
5904 /* BMCR_SPEED1000 is a reserved bit that needs
5905 * to be set on write.
5906 */
5907 new_bmcr |= BMCR_SPEED1000;
5908
5909 /* Force a linkdown */
5910 if (tp->link_up) {
5911 u32 adv;
5912
5913 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5914 adv &= ~(ADVERTISE_1000XFULL |
5915 ADVERTISE_1000XHALF |
5916 ADVERTISE_SLCT);
5917 tg3_writephy(tp, MII_ADVERTISE, adv);
5918 tg3_writephy(tp, MII_BMCR, bmcr |
5919 BMCR_ANRESTART |
5920 BMCR_ANENABLE);
5921 udelay(10);
5922 tg3_carrier_off(tp);
5923 }
5924 tg3_writephy(tp, MII_BMCR, new_bmcr);
5925 bmcr = new_bmcr;
5926 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5927 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5929 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5930 bmsr |= BMSR_LSTATUS;
5931 else
5932 bmsr &= ~BMSR_LSTATUS;
5933 }
5934 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5935 }
5936 }
5937
5938 if (bmsr & BMSR_LSTATUS) {
5939 current_speed = SPEED_1000;
5940 current_link_up = true;
5941 if (bmcr & BMCR_FULLDPLX)
5942 current_duplex = DUPLEX_FULL;
5943 else
5944 current_duplex = DUPLEX_HALF;
5945
5946 local_adv = 0;
5947 remote_adv = 0;
5948
5949 if (bmcr & BMCR_ANENABLE) {
5950 u32 common;
5951
5952 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5953 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5954 common = local_adv & remote_adv;
5955 if (common & (ADVERTISE_1000XHALF |
5956 ADVERTISE_1000XFULL)) {
5957 if (common & ADVERTISE_1000XFULL)
5958 current_duplex = DUPLEX_FULL;
5959 else
5960 current_duplex = DUPLEX_HALF;
5961
5962 tp->link_config.rmt_adv =
5963 mii_adv_to_ethtool_adv_x(remote_adv);
5964 } else if (!tg3_flag(tp, 5780_CLASS)) {
5965 /* Link is up via parallel detect */
5966 } else {
5967 current_link_up = false;
5968 }
5969 }
5970 }
5971
5972 fiber_setup_done:
5973 if (current_link_up && current_duplex == DUPLEX_FULL)
5974 tg3_setup_flow_control(tp, local_adv, remote_adv);
5975
5976 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5977 if (tp->link_config.active_duplex == DUPLEX_HALF)
5978 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5979
5980 tw32_f(MAC_MODE, tp->mac_mode);
5981 udelay(40);
5982
5983 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5984
5985 tp->link_config.active_speed = current_speed;
5986 tp->link_config.active_duplex = current_duplex;
5987
5988 tg3_test_and_report_link_chg(tp, current_link_up);
5989 return err;
5990 }
5991
5992 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5993 {
5994 if (tp->serdes_counter) {
5995 /* Give autoneg time to complete. */
5996 tp->serdes_counter--;
5997 return;
5998 }
5999
6000 if (!tp->link_up &&
6001 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6002 u32 bmcr;
6003
6004 tg3_readphy(tp, MII_BMCR, &bmcr);
6005 if (bmcr & BMCR_ANENABLE) {
6006 u32 phy1, phy2;
6007
6008 /* Select shadow register 0x1f */
6009 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6010 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6011
6012 /* Select expansion interrupt status register */
6013 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6014 MII_TG3_DSP_EXP1_INT_STAT);
6015 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6016 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6017
6018 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6019 /* We have signal detect and are not receiving
6020 * config code words, so the link is up by
6021 * parallel detection.
6022 */
6023
6024 bmcr &= ~BMCR_ANENABLE;
6025 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6026 tg3_writephy(tp, MII_BMCR, bmcr);
6027 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6028 }
6029 }
6030 } else if (tp->link_up &&
6031 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6032 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6033 u32 phy2;
6034
6035 /* Select expansion interrupt status register */
6036 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6037 MII_TG3_DSP_EXP1_INT_STAT);
6038 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6039 if (phy2 & 0x20) {
6040 u32 bmcr;
6041
6042 /* Config code words received, turn on autoneg. */
6043 tg3_readphy(tp, MII_BMCR, &bmcr);
6044 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6045
6046 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6047
6048 }
6049 }
6050 }
6051
6052 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6053 {
6054 u32 val;
6055 int err;
6056
6057 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6058 err = tg3_setup_fiber_phy(tp, force_reset);
6059 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6060 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6061 else
6062 err = tg3_setup_copper_phy(tp, force_reset);
6063
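	/* On 5784 A-step chips, match the GRC timer prescaler to the
	 * MAC clock frequency reported by the CPMU.
	 */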
6064 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6065 u32 scale;
6066
6067 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6068 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6069 scale = 65;
6070 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6071 scale = 6;
6072 else
6073 scale = 12;
6074
6075 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6076 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6077 tw32(GRC_MISC_CFG, val);
6078 }
6079
6080 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6081 (6 << TX_LENGTHS_IPG_SHIFT);
6082 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6083 tg3_asic_rev(tp) == ASIC_REV_5762)
6084 val |= tr32(MAC_TX_LENGTHS) &
6085 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6086 TX_LENGTHS_CNT_DWN_VAL_MSK);
6087
6088 if (tp->link_config.active_speed == SPEED_1000 &&
6089 tp->link_config.active_duplex == DUPLEX_HALF)
6090 tw32(MAC_TX_LENGTHS, val |
6091 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6092 else
6093 tw32(MAC_TX_LENGTHS, val |
6094 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6095
6096 if (!tg3_flag(tp, 5705_PLUS)) {
6097 if (tp->link_up) {
6098 tw32(HOSTCC_STAT_COAL_TICKS,
6099 tp->coal.stats_block_coalesce_usecs);
6100 } else {
6101 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6102 }
6103 }
6104
6105 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6106 val = tr32(PCIE_PWR_MGMT_THRESH);
6107 if (!tp->link_up)
6108 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6109 tp->pwrmgmt_thresh;
6110 else
6111 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6112 tw32(PCIE_PWR_MGMT_THRESH, val);
6113 }
6114
6115 return err;
6116 }
6117
6118 /* tp->lock must be held */
6119 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6120 {
6121 u64 stamp;
6122
6123 ptp_read_system_prets(sts);
6124 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6125 ptp_read_system_postts(sts);
6126 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6127
6128 return stamp;
6129 }
6130
6131 /* tp->lock must be held */
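/* Stop the reference clock before loading the new value so the two
 * 32-bit halves take effect atomically, then resume counting.
 */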
6132 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6133 {
6134 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6135
6136 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6137 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6138 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6139 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6140 }
6141
6142 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6143 static inline void tg3_full_unlock(struct tg3 *tp);
6144 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6145 {
6146 struct tg3 *tp = netdev_priv(dev);
6147
6148 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6149
6150 if (tg3_flag(tp, PTP_CAPABLE)) {
6151 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6152 SOF_TIMESTAMPING_RX_HARDWARE |
6153 SOF_TIMESTAMPING_RAW_HARDWARE;
6154 }
6155
6156 if (tp->ptp_clock)
6157 info->phc_index = ptp_clock_index(tp->ptp_clock);
6158
6159 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6160
6161 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6162 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6163 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6164 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6165 return 0;
6166 }
6167
6168 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6169 {
6170 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6171 u64 correction;
6172 bool neg_adj;
6173
6174 /* Frequency adjustment is performed using hardware with a 24 bit
6175 * accumulator and a programmable correction value. On each clk, the
6176 * correction value gets added to the accumulator and when it
6177 * overflows, the time counter is incremented/decremented.
6178 */
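	/* scaled_ppm is parts-per-million with a 16-bit fractional
	 * field, so with a 1 << 24 base the programmed correction
	 * works out to (1 << 24) * |scaled_ppm| / (10^6 * 2^16).
	 */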
6179 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6180
6181 tg3_full_lock(tp, 0);
6182
6183 if (correction)
6184 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6185 TG3_EAV_REF_CLK_CORRECT_EN |
6186 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6187 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6188 else
6189 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6190
6191 tg3_full_unlock(tp);
6192
6193 return 0;
6194 }
6195
6196 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6197 {
6198 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6199
6200 tg3_full_lock(tp, 0);
6201 tp->ptp_adjust += delta;
6202 tg3_full_unlock(tp);
6203
6204 return 0;
6205 }
6206
6207 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6208 struct ptp_system_timestamp *sts)
6209 {
6210 u64 ns;
6211 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212
6213 tg3_full_lock(tp, 0);
6214 ns = tg3_refclk_read(tp, sts);
6215 ns += tp->ptp_adjust;
6216 tg3_full_unlock(tp);
6217
6218 *ts = ns_to_timespec64(ns);
6219
6220 return 0;
6221 }
6222
6223 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6224 const struct timespec64 *ts)
6225 {
6226 u64 ns;
6227 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229 ns = timespec64_to_ns(ts);
6230
6231 tg3_full_lock(tp, 0);
6232 tg3_refclk_write(tp, ns);
6233 tp->ptp_adjust = 0;
6234 tg3_full_unlock(tp);
6235
6236 return 0;
6237 }
6238
6239 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6240 struct ptp_clock_request *rq, int on)
6241 {
6242 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6243 u32 clock_ctl;
6244 int rval = 0;
6245
6246 switch (rq->type) {
6247 case PTP_CLK_REQ_PEROUT:
6248 /* Reject requests with unsupported flags */
6249 if (rq->perout.flags)
6250 return -EOPNOTSUPP;
6251
6252 if (rq->perout.index != 0)
6253 return -EINVAL;
6254
6255 tg3_full_lock(tp, 0);
6256 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6257 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6258
6259 if (on) {
6260 u64 nsec;
6261
6262 nsec = rq->perout.start.sec * 1000000000ULL +
6263 rq->perout.start.nsec;
6264
6265 if (rq->perout.period.sec || rq->perout.period.nsec) {
6266 netdev_warn(tp->dev,
6267 "Device supports only a one-shot timesync output, period must be 0\n");
6268 rval = -EINVAL;
6269 goto err_out;
6270 }
6271
6272 if (nsec & (1ULL << 63)) {
6273 netdev_warn(tp->dev,
6274 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6275 rval = -EINVAL;
6276 goto err_out;
6277 }
6278
6279 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6280 tw32(TG3_EAV_WATCHDOG0_MSB,
6281 TG3_EAV_WATCHDOG0_EN |
6282 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6283
6284 tw32(TG3_EAV_REF_CLCK_CTL,
6285 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6286 } else {
6287 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6288 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6289 }
6290
6291 err_out:
6292 tg3_full_unlock(tp);
6293 return rval;
6294
6295 default:
6296 break;
6297 }
6298
6299 return -EOPNOTSUPP;
6300 }
6301
6302 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6303 struct skb_shared_hwtstamps *timestamp)
6304 {
6305 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6306 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6307 tp->ptp_adjust);
6308 }
6309
6310 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6311 {
6312 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6313 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6314 }
6315
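/* PTP aux worker: poll for a TX timestamp that was not yet available
 * when the packet completed. Retry up to three times, HZ / 10 jiffies
 * apart; returning -1 tells the PTP core not to reschedule.
 */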
6316 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6317 {
6318 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6319 struct skb_shared_hwtstamps timestamp;
6320 u64 hwclock;
6321
6322 if (tp->ptp_txts_retrycnt > 2)
6323 goto done;
6324
6325 tg3_read_tx_tstamp(tp, &hwclock);
6326
6327 if (hwclock != tp->pre_tx_ts) {
6328 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6329 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6330 goto done;
6331 }
6332 tp->ptp_txts_retrycnt++;
6333 return HZ / 10;
6334 done:
6335 dev_consume_skb_any(tp->tx_tstamp_skb);
6336 tp->tx_tstamp_skb = NULL;
6337 tp->ptp_txts_retrycnt = 0;
6338 tp->pre_tx_ts = 0;
6339 return -1;
6340 }
6341
6342 static const struct ptp_clock_info tg3_ptp_caps = {
6343 .owner = THIS_MODULE,
6344 .name = "tg3 clock",
6345 .max_adj = 250000000,
6346 .n_alarm = 0,
6347 .n_ext_ts = 0,
6348 .n_per_out = 1,
6349 .n_pins = 0,
6350 .pps = 0,
6351 .adjfine = tg3_ptp_adjfine,
6352 .adjtime = tg3_ptp_adjtime,
6353 .do_aux_work = tg3_ptp_ts_aux_work,
6354 .gettimex64 = tg3_ptp_gettimex,
6355 .settime64 = tg3_ptp_settime,
6356 .enable = tg3_ptp_enable,
6357 };
6358
6359 /* tp->lock must be held */
6360 static void tg3_ptp_init(struct tg3 *tp)
6361 {
6362 if (!tg3_flag(tp, PTP_CAPABLE))
6363 return;
6364
6365 /* Initialize the hardware clock to the system time. */
6366 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6367 tp->ptp_adjust = 0;
6368 tp->ptp_info = tg3_ptp_caps;
6369 }
6370
6371 /* tp->lock must be held */
6372 static void tg3_ptp_resume(struct tg3 *tp)
6373 {
6374 if (!tg3_flag(tp, PTP_CAPABLE))
6375 return;
6376
6377 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6378 tp->ptp_adjust = 0;
6379 }
6380
6381 static void tg3_ptp_fini(struct tg3 *tp)
6382 {
6383 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6384 return;
6385
6386 ptp_clock_unregister(tp->ptp_clock);
6387 tp->ptp_clock = NULL;
6388 tp->ptp_adjust = 0;
6389 dev_consume_skb_any(tp->tx_tstamp_skb);
6390 tp->tx_tstamp_skb = NULL;
6391 }
6392
6393 static inline int tg3_irq_sync(struct tg3 *tp)
6394 {
6395 return tp->irq_sync;
6396 }
6397
6398 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6399 {
6400 int i;
6401
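	/* Bias the destination pointer so each register lands at the
	 * same offset in the dump buffer as it has in register space.
	 */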
6402 dst = (u32 *)((u8 *)dst + off);
6403 for (i = 0; i < len; i += sizeof(u32))
6404 *dst++ = tr32(off + i);
6405 }
6406
6407 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6408 {
6409 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6410 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6411 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6412 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6413 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6414 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6415 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6416 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6417 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6418 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6419 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6420 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6421 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6422 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6423 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6424 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6425 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6426 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6427 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6428
6429 if (tg3_flag(tp, SUPPORT_MSIX))
6430 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6431
6432 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6433 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6434 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6435 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6436 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6437 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6438 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6439 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6440
6441 if (!tg3_flag(tp, 5705_PLUS)) {
6442 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6443 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6444 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6445 }
6446
6447 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6448 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6449 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6450 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6451 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6452
6453 if (tg3_flag(tp, NVRAM))
6454 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6455 }
6456
6457 static void tg3_dump_state(struct tg3 *tp)
6458 {
6459 int i;
6460 u32 *regs;
6461
6462 /* If there is a PCI error, all registers will read 0xffffffff;
6463 * don't dump them out, just report the error and return.
6464 */
6465 if (tp->pdev->error_state != pci_channel_io_normal) {
6466 netdev_err(tp->dev, "PCI channel ERROR!\n");
6467 return;
6468 }
6469
6470 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6471 if (!regs)
6472 return;
6473
6474 if (tg3_flag(tp, PCI_EXPRESS)) {
6475 /* Read up to but not including private PCI registers */
6476 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6477 regs[i / sizeof(u32)] = tr32(i);
6478 } else
6479 tg3_dump_legacy_regs(tp, regs);
6480
6481 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6482 if (!regs[i + 0] && !regs[i + 1] &&
6483 !regs[i + 2] && !regs[i + 3])
6484 continue;
6485
6486 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6487 i * 4,
6488 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6489 }
6490
6491 kfree(regs);
6492
6493 for (i = 0; i < tp->irq_cnt; i++) {
6494 struct tg3_napi *tnapi = &tp->napi[i];
6495
6496 /* SW status block */
6497 netdev_err(tp->dev,
6498 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6499 i,
6500 tnapi->hw_status->status,
6501 tnapi->hw_status->status_tag,
6502 tnapi->hw_status->rx_jumbo_consumer,
6503 tnapi->hw_status->rx_consumer,
6504 tnapi->hw_status->rx_mini_consumer,
6505 tnapi->hw_status->idx[0].rx_producer,
6506 tnapi->hw_status->idx[0].tx_consumer);
6507
6508 netdev_err(tp->dev,
6509 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6510 i,
6511 tnapi->last_tag, tnapi->last_irq_tag,
6512 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6513 tnapi->rx_rcb_ptr,
6514 tnapi->prodring.rx_std_prod_idx,
6515 tnapi->prodring.rx_std_cons_idx,
6516 tnapi->prodring.rx_jmb_prod_idx,
6517 tnapi->prodring.rx_jmb_cons_idx);
6518 }
6519 }
6520
6521 /* This is called whenever we suspect that the system chipset is re-
6522 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6523 * is bogus tx completions. We try to recover by setting the
6524 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6525 * in the workqueue.
6526 */
6527 static void tg3_tx_recover(struct tg3 *tp)
6528 {
6529 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6530 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6531
6532 netdev_warn(tp->dev,
6533 "The system may be re-ordering memory-mapped I/O "
6534 "cycles to the network device, attempting to recover. "
6535 "Please report the problem to the driver maintainer "
6536 "and include system chipset information.\n");
6537
6538 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6539 }
6540
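/* Number of free TX descriptors: tx_pending minus the descriptors
 * still in flight, with (prod - cons) wrapping modulo the ring size.
 */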
6541 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6542 {
6543 /* Tell compiler to fetch tx indices from memory. */
6544 barrier();
6545 return tnapi->tx_pending -
6546 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6547 }
6548
6549 /* Tigon3 never reports partial packet sends. So we do not
6550 * need special logic to handle SKBs that have not had all
6551 * of their frags sent yet, like SunGEM does.
6552 */
6553 static void tg3_tx(struct tg3_napi *tnapi)
6554 {
6555 struct tg3 *tp = tnapi->tp;
6556 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6557 u32 sw_idx = tnapi->tx_cons;
6558 struct netdev_queue *txq;
6559 int index = tnapi - tp->napi;
6560 unsigned int pkts_compl = 0, bytes_compl = 0;
6561
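	/* With TSS, the first vector carries no TX ring, so vector i
	 * services TX queue i - 1.
	 */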
6562 if (tg3_flag(tp, ENABLE_TSS))
6563 index--;
6564
6565 txq = netdev_get_tx_queue(tp->dev, index);
6566
6567 while (sw_idx != hw_idx) {
6568 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6569 bool complete_skb_later = false;
6570 struct sk_buff *skb = ri->skb;
6571 int i, tx_bug = 0;
6572
6573 if (unlikely(skb == NULL)) {
6574 tg3_tx_recover(tp);
6575 return;
6576 }
6577
6578 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6579 struct skb_shared_hwtstamps timestamp;
6580 u64 hwclock;
6581
6582 tg3_read_tx_tstamp(tp, &hwclock);
6583 if (hwclock != tp->pre_tx_ts) {
6584 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6585 skb_tstamp_tx(skb, &timestamp);
6586 tp->pre_tx_ts = 0;
6587 } else {
6588 tp->tx_tstamp_skb = skb;
6589 complete_skb_later = true;
6590 }
6591 }
6592
6593 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6594 skb_headlen(skb), DMA_TO_DEVICE);
6595
6596 ri->skb = NULL;
6597
6598 while (ri->fragmented) {
6599 ri->fragmented = false;
6600 sw_idx = NEXT_TX(sw_idx);
6601 ri = &tnapi->tx_buffers[sw_idx];
6602 }
6603
6604 sw_idx = NEXT_TX(sw_idx);
6605
6606 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6607 ri = &tnapi->tx_buffers[sw_idx];
6608 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6609 tx_bug = 1;
6610
6611 dma_unmap_page(&tp->pdev->dev,
6612 dma_unmap_addr(ri, mapping),
6613 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6614 DMA_TO_DEVICE);
6615
6616 while (ri->fragmented) {
6617 ri->fragmented = false;
6618 sw_idx = NEXT_TX(sw_idx);
6619 ri = &tnapi->tx_buffers[sw_idx];
6620 }
6621
6622 sw_idx = NEXT_TX(sw_idx);
6623 }
6624
6625 pkts_compl++;
6626 bytes_compl += skb->len;
6627
6628 if (!complete_skb_later)
6629 dev_consume_skb_any(skb);
6630 else
6631 ptp_schedule_worker(tp->ptp_clock, 0);
6632
6633 if (unlikely(tx_bug)) {
6634 tg3_tx_recover(tp);
6635 return;
6636 }
6637 }
6638
6639 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6640
6641 tnapi->tx_cons = sw_idx;
6642
6643 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6644 * before checking for netif_queue_stopped(). Without the
6645 * memory barrier, there is a small possibility that __tg3_start_xmit()
6646 * will miss it and cause the queue to be stopped forever.
6647 */
6648 smp_mb();
6649
6650 if (unlikely(netif_tx_queue_stopped(txq) &&
6651 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6652 __netif_tx_lock(txq, smp_processor_id());
6653 if (netif_tx_queue_stopped(txq) &&
6654 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6655 netif_tx_wake_queue(txq);
6656 __netif_tx_unlock(txq);
6657 }
6658 }
6659
6660 static void tg3_frag_free(bool is_frag, void *data)
6661 {
6662 if (is_frag)
6663 skb_free_frag(data);
6664 else
6665 kfree(data);
6666 }
6667
6668 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6669 {
6670 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6671 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6672
6673 if (!ri->data)
6674 return;
6675
6676 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6677 DMA_FROM_DEVICE);
6678 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6679 ri->data = NULL;
6680 }
6681
6682
6683 /* Returns size of skb allocated or < 0 on error.
6684 *
6685 * We only need to fill in the address because the other members
6686 * of the RX descriptor are invariant, see tg3_init_rings.
6687 *
6688 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6689 * posting buffers we only dirty the first cache line of the RX
6690 * descriptor (containing the address). Whereas for the RX status
6691 * buffers the cpu only reads the last cacheline of the RX descriptor
6692 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6693 */
6694 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6695 u32 opaque_key, u32 dest_idx_unmasked,
6696 unsigned int *frag_size)
6697 {
6698 struct tg3_rx_buffer_desc *desc;
6699 struct ring_info *map;
6700 u8 *data;
6701 dma_addr_t mapping;
6702 int skb_size, data_size, dest_idx;
6703
6704 switch (opaque_key) {
6705 case RXD_OPAQUE_RING_STD:
6706 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6707 desc = &tpr->rx_std[dest_idx];
6708 map = &tpr->rx_std_buffers[dest_idx];
6709 data_size = tp->rx_pkt_map_sz;
6710 break;
6711
6712 case RXD_OPAQUE_RING_JUMBO:
6713 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6714 desc = &tpr->rx_jmb[dest_idx].std;
6715 map = &tpr->rx_jmb_buffers[dest_idx];
6716 data_size = TG3_RX_JMB_MAP_SZ;
6717 break;
6718
6719 default:
6720 return -EINVAL;
6721 }
6722
6723 /* Do not overwrite any of the map or rp information
6724 * until we are sure we can commit to a new buffer.
6725 *
6726 * Callers depend upon this behavior and assume that
6727 * we leave everything unchanged if we fail.
6728 */
6729 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6730 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
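	/* Buffers that fit in a page come from the page-fragment
	 * allocator so build_skb() can wrap them; larger buffers fall
	 * back to kmalloc() and slab_build_skb().
	 */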
6731 if (skb_size <= PAGE_SIZE) {
6732 data = napi_alloc_frag(skb_size);
6733 *frag_size = skb_size;
6734 } else {
6735 data = kmalloc(skb_size, GFP_ATOMIC);
6736 *frag_size = 0;
6737 }
6738 if (!data)
6739 return -ENOMEM;
6740
6741 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6742 data_size, DMA_FROM_DEVICE);
6743 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6744 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6745 return -EIO;
6746 }
6747
6748 map->data = data;
6749 dma_unmap_addr_set(map, mapping, mapping);
6750
6751 desc->addr_hi = ((u64)mapping >> 32);
6752 desc->addr_lo = ((u64)mapping & 0xffffffff);
6753
6754 return data_size;
6755 }
6756
6757 /* We only need to move over in the address because the other
6758 * members of the RX descriptor are invariant. See notes above
6759 * tg3_alloc_rx_data for full details.
6760 */
6761 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6762 struct tg3_rx_prodring_set *dpr,
6763 u32 opaque_key, int src_idx,
6764 u32 dest_idx_unmasked)
6765 {
6766 struct tg3 *tp = tnapi->tp;
6767 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6768 struct ring_info *src_map, *dest_map;
6769 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6770 int dest_idx;
6771
6772 switch (opaque_key) {
6773 case RXD_OPAQUE_RING_STD:
6774 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6775 dest_desc = &dpr->rx_std[dest_idx];
6776 dest_map = &dpr->rx_std_buffers[dest_idx];
6777 src_desc = &spr->rx_std[src_idx];
6778 src_map = &spr->rx_std_buffers[src_idx];
6779 break;
6780
6781 case RXD_OPAQUE_RING_JUMBO:
6782 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6783 dest_desc = &dpr->rx_jmb[dest_idx].std;
6784 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6785 src_desc = &spr->rx_jmb[src_idx].std;
6786 src_map = &spr->rx_jmb_buffers[src_idx];
6787 break;
6788
6789 default:
6790 return;
6791 }
6792
6793 dest_map->data = src_map->data;
6794 dma_unmap_addr_set(dest_map, mapping,
6795 dma_unmap_addr(src_map, mapping));
6796 dest_desc->addr_hi = src_desc->addr_hi;
6797 dest_desc->addr_lo = src_desc->addr_lo;
6798
6799 /* Ensure that the update to the skb happens after the physical
6800 * addresses have been transferred to the new BD location.
6801 */
6802 smp_wmb();
6803
6804 src_map->data = NULL;
6805 }
6806
6807 /* The RX ring scheme is composed of multiple rings which post fresh
6808 * buffers to the chip, and one special ring the chip uses to report
6809 * status back to the host.
6810 *
6811 * The special ring reports the status of received packets to the
6812 * host. The chip does not write into the original descriptor the
6813 * RX buffer was obtained from. The chip simply takes the original
6814 * descriptor as provided by the host, updates the status and length
6815 * field, then writes this into the next status ring entry.
6816 *
6817 * Each ring the host uses to post buffers to the chip is described
6818 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6819 * it is first placed into the on-chip ram. When the packet's length
6820 * is known, it walks down the TG3_BDINFO entries to select the ring.
6821 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6822 * which is within the range of the new packet's length is chosen.
6823 *
6824 * The "separate ring for rx status" scheme may sound queer, but it makes
6825 * sense from a cache coherency perspective. If only the host writes
6826 * to the buffer post rings, and only the chip writes to the rx status
6827 * rings, then cache lines never move beyond shared-modified state.
6828 * If both the host and chip were to write into the same ring, cache line
6829 * eviction could occur since both entities want it in an exclusive state.
6830 */
6831 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6832 {
6833 struct tg3 *tp = tnapi->tp;
6834 u32 work_mask, rx_std_posted = 0;
6835 u32 std_prod_idx, jmb_prod_idx;
6836 u32 sw_idx = tnapi->rx_rcb_ptr;
6837 u16 hw_idx;
6838 int received;
6839 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6840
6841 hw_idx = *(tnapi->rx_rcb_prod_idx);
6842 /*
6843 * We need to order the read of hw_idx and the read of
6844 * the opaque cookie.
6845 */
6846 rmb();
6847 work_mask = 0;
6848 received = 0;
6849 std_prod_idx = tpr->rx_std_prod_idx;
6850 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6851 while (sw_idx != hw_idx && budget > 0) {
6852 struct ring_info *ri;
6853 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6854 unsigned int len;
6855 struct sk_buff *skb;
6856 dma_addr_t dma_addr;
6857 u32 opaque_key, desc_idx, *post_ptr;
6858 u8 *data;
6859 u64 tstamp = 0;
6860
6861 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6862 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6863 if (opaque_key == RXD_OPAQUE_RING_STD) {
6864 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6865 dma_addr = dma_unmap_addr(ri, mapping);
6866 data = ri->data;
6867 post_ptr = &std_prod_idx;
6868 rx_std_posted++;
6869 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6870 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6871 dma_addr = dma_unmap_addr(ri, mapping);
6872 data = ri->data;
6873 post_ptr = &jmb_prod_idx;
6874 } else
6875 goto next_pkt_nopost;
6876
6877 work_mask |= opaque_key;
6878
6879 if (desc->err_vlan & RXD_ERR_MASK) {
6880 drop_it:
6881 tg3_recycle_rx(tnapi, tpr, opaque_key,
6882 desc_idx, *post_ptr);
6883 drop_it_no_recycle:
6884 /* The card keeps track of the other statistics. */
6885 tnapi->rx_dropped++;
6886 goto next_pkt;
6887 }
6888
6889 prefetch(data + TG3_RX_OFFSET(tp));
6890 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6891 ETH_FCS_LEN;
6892
6893 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6894 RXD_FLAG_PTPSTAT_PTPV1 ||
6895 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6896 RXD_FLAG_PTPSTAT_PTPV2) {
6897 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6898 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6899 }
6900
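		/* Two receive paths: packets above the copy threshold keep
		 * their DMA buffer (wrapped into an skb) and a fresh buffer
		 * is posted in its place; smaller packets are copied into a
		 * new skb and the original buffer is recycled.
		 */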
6901 if (len > TG3_RX_COPY_THRESH(tp)) {
6902 int skb_size;
6903 unsigned int frag_size;
6904
6905 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6906 *post_ptr, &frag_size);
6907 if (skb_size < 0)
6908 goto drop_it;
6909
6910 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6911 DMA_FROM_DEVICE);
6912
6913 /* Ensure that the update to the data happens
6914 * after the usage of the old DMA mapping.
6915 */
6916 smp_wmb();
6917
6918 ri->data = NULL;
6919
6920 if (frag_size)
6921 skb = build_skb(data, frag_size);
6922 else
6923 skb = slab_build_skb(data);
6924 if (!skb) {
6925 tg3_frag_free(frag_size != 0, data);
6926 goto drop_it_no_recycle;
6927 }
6928 skb_reserve(skb, TG3_RX_OFFSET(tp));
6929 } else {
6930 tg3_recycle_rx(tnapi, tpr, opaque_key,
6931 desc_idx, *post_ptr);
6932
6933 skb = netdev_alloc_skb(tp->dev,
6934 len + TG3_RAW_IP_ALIGN);
6935 if (skb == NULL)
6936 goto drop_it_no_recycle;
6937
6938 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6939 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6940 DMA_FROM_DEVICE);
6941 memcpy(skb->data,
6942 data + TG3_RX_OFFSET(tp),
6943 len);
6944 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6945 len, DMA_FROM_DEVICE);
6946 }
6947
6948 skb_put(skb, len);
6949 if (tstamp)
6950 tg3_hwclock_to_timestamp(tp, tstamp,
6951 skb_hwtstamps(skb));
6952
6953 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6954 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6955 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6956 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6957 skb->ip_summed = CHECKSUM_UNNECESSARY;
6958 else
6959 skb_checksum_none_assert(skb);
6960
6961 skb->protocol = eth_type_trans(skb, tp->dev);
6962
6963 if (len > (tp->dev->mtu + ETH_HLEN) &&
6964 skb->protocol != htons(ETH_P_8021Q) &&
6965 skb->protocol != htons(ETH_P_8021AD)) {
6966 dev_kfree_skb_any(skb);
6967 goto drop_it_no_recycle;
6968 }
6969
6970 if (desc->type_flags & RXD_FLAG_VLAN &&
6971 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6972 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6973 desc->err_vlan & RXD_VLAN_MASK);
6974
6975 napi_gro_receive(&tnapi->napi, skb);
6976
6977 received++;
6978 budget--;
6979
6980 next_pkt:
6981 (*post_ptr)++;
6982
6983 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6984 tpr->rx_std_prod_idx = std_prod_idx &
6985 tp->rx_std_ring_mask;
6986 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6987 tpr->rx_std_prod_idx);
6988 work_mask &= ~RXD_OPAQUE_RING_STD;
6989 rx_std_posted = 0;
6990 }
6991 next_pkt_nopost:
6992 sw_idx++;
6993 sw_idx &= tp->rx_ret_ring_mask;
6994
6995 /* Refresh hw_idx to see if there is new work */
6996 if (sw_idx == hw_idx) {
6997 hw_idx = *(tnapi->rx_rcb_prod_idx);
6998 rmb();
6999 }
7000 }
7001
7002 /* ACK the status ring. */
7003 tnapi->rx_rcb_ptr = sw_idx;
7004 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7005
7006 /* Refill RX ring(s). */
7007 if (!tg3_flag(tp, ENABLE_RSS)) {
7008 /* Sync BD data before updating mailbox */
7009 wmb();
7010
7011 if (work_mask & RXD_OPAQUE_RING_STD) {
7012 tpr->rx_std_prod_idx = std_prod_idx &
7013 tp->rx_std_ring_mask;
7014 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7015 tpr->rx_std_prod_idx);
7016 }
7017 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7018 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7019 tp->rx_jmb_ring_mask;
7020 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7021 tpr->rx_jmb_prod_idx);
7022 }
7023 } else if (work_mask) {
7024 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7025 * updated before the producer indices can be updated.
7026 */
7027 smp_wmb();
7028
7029 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7030 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7031
7032 if (tnapi != &tp->napi[1]) {
7033 tp->rx_refill = true;
7034 napi_schedule(&tp->napi[1].napi);
7035 }
7036 }
7037
7038 return received;
7039 }
7040
7041 static void tg3_poll_link(struct tg3 *tp)
7042 {
7043 /* handle link change and other phy events */
7044 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7045 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7046
7047 if (sblk->status & SD_STATUS_LINK_CHG) {
7048 sblk->status = SD_STATUS_UPDATED |
7049 (sblk->status & ~SD_STATUS_LINK_CHG);
7050 spin_lock(&tp->lock);
7051 if (tg3_flag(tp, USE_PHYLIB)) {
7052 tw32_f(MAC_STATUS,
7053 (MAC_STATUS_SYNC_CHANGED |
7054 MAC_STATUS_CFG_CHANGED |
7055 MAC_STATUS_MI_COMPLETION |
7056 MAC_STATUS_LNKSTATE_CHANGED));
7057 udelay(40);
7058 } else
7059 tg3_setup_phy(tp, false);
7060 spin_unlock(&tp->lock);
7061 }
7062 }
7063 }
7064
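/* With RSS, each vector refills its own producer rings, but only the
 * rings of tp->napi[0] are posted to the hardware. Move freshly
 * allocated buffers from a per-vector source ring (spr) over to the
 * hardware-visible destination ring (dpr), standard and jumbo alike.
 */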
7065 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7066 struct tg3_rx_prodring_set *dpr,
7067 struct tg3_rx_prodring_set *spr)
7068 {
7069 u32 si, di, cpycnt, src_prod_idx;
7070 int i, err = 0;
7071
7072 while (1) {
7073 src_prod_idx = spr->rx_std_prod_idx;
7074
7075 /* Make sure updates to the rx_std_buffers[] entries and the
7076 * standard producer index are seen in the correct order.
7077 */
7078 smp_rmb();
7079
7080 if (spr->rx_std_cons_idx == src_prod_idx)
7081 break;
7082
7083 if (spr->rx_std_cons_idx < src_prod_idx)
7084 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7085 else
7086 cpycnt = tp->rx_std_ring_mask + 1 -
7087 spr->rx_std_cons_idx;
7088
7089 cpycnt = min(cpycnt,
7090 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7091
7092 si = spr->rx_std_cons_idx;
7093 di = dpr->rx_std_prod_idx;
7094
7095 for (i = di; i < di + cpycnt; i++) {
7096 if (dpr->rx_std_buffers[i].data) {
7097 cpycnt = i - di;
7098 err = -ENOSPC;
7099 break;
7100 }
7101 }
7102
7103 if (!cpycnt)
7104 break;
7105
7106 /* Ensure that updates to the rx_std_buffers ring and the
7107 * shadowed hardware producer ring from tg3_recycle_skb() are
7108 * ordered correctly WRT the skb check above.
7109 */
7110 smp_rmb();
7111
7112 memcpy(&dpr->rx_std_buffers[di],
7113 &spr->rx_std_buffers[si],
7114 cpycnt * sizeof(struct ring_info));
7115
7116 for (i = 0; i < cpycnt; i++, di++, si++) {
7117 struct tg3_rx_buffer_desc *sbd, *dbd;
7118 sbd = &spr->rx_std[si];
7119 dbd = &dpr->rx_std[di];
7120 dbd->addr_hi = sbd->addr_hi;
7121 dbd->addr_lo = sbd->addr_lo;
7122 }
7123
7124 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7125 tp->rx_std_ring_mask;
7126 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7127 tp->rx_std_ring_mask;
7128 }
7129
7130 while (1) {
7131 src_prod_idx = spr->rx_jmb_prod_idx;
7132
7133 /* Make sure updates to the rx_jmb_buffers[] entries and
7134 * the jumbo producer index are seen in the correct order.
7135 */
7136 smp_rmb();
7137
7138 if (spr->rx_jmb_cons_idx == src_prod_idx)
7139 break;
7140
7141 if (spr->rx_jmb_cons_idx < src_prod_idx)
7142 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7143 else
7144 cpycnt = tp->rx_jmb_ring_mask + 1 -
7145 spr->rx_jmb_cons_idx;
7146
7147 cpycnt = min(cpycnt,
7148 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7149
7150 si = spr->rx_jmb_cons_idx;
7151 di = dpr->rx_jmb_prod_idx;
7152
7153 for (i = di; i < di + cpycnt; i++) {
7154 if (dpr->rx_jmb_buffers[i].data) {
7155 cpycnt = i - di;
7156 err = -ENOSPC;
7157 break;
7158 }
7159 }
7160
7161 if (!cpycnt)
7162 break;
7163
7164 /* Ensure that updates to the rx_jmb_buffers ring and the
7165 * shadowed hardware producer ring from tg3_recycle_skb() are
7166 * ordered correctly WRT the skb check above.
7167 */
7168 smp_rmb();
7169
7170 memcpy(&dpr->rx_jmb_buffers[di],
7171 &spr->rx_jmb_buffers[si],
7172 cpycnt * sizeof(struct ring_info));
7173
7174 for (i = 0; i < cpycnt; i++, di++, si++) {
7175 struct tg3_rx_buffer_desc *sbd, *dbd;
7176 sbd = &spr->rx_jmb[si].std;
7177 dbd = &dpr->rx_jmb[di].std;
7178 dbd->addr_hi = sbd->addr_hi;
7179 dbd->addr_lo = sbd->addr_lo;
7180 }
7181
7182 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7183 tp->rx_jmb_ring_mask;
7184 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7185 tp->rx_jmb_ring_mask;
7186 }
7187
7188 return err;
7189 }
7190
7191 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7192 {
7193 struct tg3 *tp = tnapi->tp;
7194
7195 /* run TX completion thread */
7196 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7197 tg3_tx(tnapi);
7198 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7199 return work_done;
7200 }
7201
7202 if (!tnapi->rx_rcb_prod_idx)
7203 return work_done;
7204
7205 /* run RX thread, within the bounds set by NAPI.
7206 * All RX "locking" is done by ensuring outside
7207 * code synchronizes with tg3->napi.poll()
7208 */
7209 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7210 work_done += tg3_rx(tnapi, budget - work_done);
7211
7212 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7213 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7214 int i, err = 0;
7215 u32 std_prod_idx = dpr->rx_std_prod_idx;
7216 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7217
7218 tp->rx_refill = false;
7219 for (i = 1; i <= tp->rxq_cnt; i++)
7220 err |= tg3_rx_prodring_xfer(tp, dpr,
7221 &tp->napi[i].prodring);
7222
7223 wmb();
7224
7225 if (std_prod_idx != dpr->rx_std_prod_idx)
7226 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7227 dpr->rx_std_prod_idx);
7228
7229 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7230 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7231 dpr->rx_jmb_prod_idx);
7232
7233 if (err)
7234 tw32_f(HOSTCC_MODE, tp->coal_now);
7235 }
7236
7237 return work_done;
7238 }
7239
7240 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7241 {
7242 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7243 schedule_work(&tp->reset_task);
7244 }
7245
7246 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7247 {
7248 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7249 cancel_work_sync(&tp->reset_task);
7250 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7251 }
7252
7253 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7254 {
7255 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7256 struct tg3 *tp = tnapi->tp;
7257 int work_done = 0;
7258 struct tg3_hw_status *sblk = tnapi->hw_status;
7259
7260 while (1) {
7261 work_done = tg3_poll_work(tnapi, work_done, budget);
7262
7263 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7264 goto tx_recovery;
7265
7266 if (unlikely(work_done >= budget))
7267 break;
7268
7269 /* tp->last_tag is used in tg3_int_reenable() below
7270 * to tell the hw how much work has been processed,
7271 * so we must read it before checking for more work.
7272 */
7273 tnapi->last_tag = sblk->status_tag;
7274 tnapi->last_irq_tag = tnapi->last_tag;
7275 rmb();
7276
7277 /* check for RX/TX work to do */
7278 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7279 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7280
7281 /* This test here is not race free, but will reduce
7282 * the number of interrupts by looping again.
7283 */
7284 if (tnapi == &tp->napi[1] && tp->rx_refill)
7285 continue;
7286
7287 napi_complete_done(napi, work_done);
7288 /* Reenable interrupts. */
7289 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7290
7291 /* This test here is synchronized by napi_schedule()
7292 * and napi_complete() to close the race condition.
7293 */
7294 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7295 tw32(HOSTCC_MODE, tp->coalesce_mode |
7296 HOSTCC_MODE_ENABLE |
7297 tnapi->coal_now);
7298 }
7299 break;
7300 }
7301 }
7302
7303 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7304 return work_done;
7305
7306 tx_recovery:
7307 /* work_done is guaranteed to be less than budget. */
7308 napi_complete(napi);
7309 tg3_reset_task_schedule(tp);
7310 return work_done;
7311 }
7312
7313 static void tg3_process_error(struct tg3 *tp)
7314 {
7315 u32 val;
7316 bool real_error = false;
7317
7318 if (tg3_flag(tp, ERROR_PROCESSED))
7319 return;
7320
7321 /* Check Flow Attention register */
7322 val = tr32(HOSTCC_FLOW_ATTN);
7323 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7324 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7325 real_error = true;
7326 }
7327
7328 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7329 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7330 real_error = true;
7331 }
7332
7333 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7334 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7335 real_error = true;
7336 }
7337
7338 if (!real_error)
7339 return;
7340
7341 tg3_dump_state(tp);
7342
7343 tg3_flag_set(tp, ERROR_PROCESSED);
7344 tg3_reset_task_schedule(tp);
7345 }
7346
7347 static int tg3_poll(struct napi_struct *napi, int budget)
7348 {
7349 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7350 struct tg3 *tp = tnapi->tp;
7351 int work_done = 0;
7352 struct tg3_hw_status *sblk = tnapi->hw_status;
7353
7354 while (1) {
7355 if (sblk->status & SD_STATUS_ERROR)
7356 tg3_process_error(tp);
7357
7358 tg3_poll_link(tp);
7359
7360 work_done = tg3_poll_work(tnapi, work_done, budget);
7361
7362 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7363 goto tx_recovery;
7364
7365 if (unlikely(work_done >= budget))
7366 break;
7367
7368 if (tg3_flag(tp, TAGGED_STATUS)) {
7369 /* tp->last_tag is used in tg3_int_reenable() below
7370 * to tell the hw how much work has been processed,
7371 * so we must read it before checking for more work.
7372 */
7373 tnapi->last_tag = sblk->status_tag;
7374 tnapi->last_irq_tag = tnapi->last_tag;
7375 rmb();
7376 } else
7377 sblk->status &= ~SD_STATUS_UPDATED;
7378
7379 if (likely(!tg3_has_work(tnapi))) {
7380 napi_complete_done(napi, work_done);
7381 tg3_int_reenable(tnapi);
7382 break;
7383 }
7384 }
7385
7386 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7387 return work_done;
7388
7389 tx_recovery:
7390 /* work_done is guaranteed to be less than budget. */
7391 napi_complete(napi);
7392 tg3_reset_task_schedule(tp);
7393 return work_done;
7394 }
7395
7396 static void tg3_napi_disable(struct tg3 *tp)
7397 {
7398 int i;
7399
7400 for (i = tp->irq_cnt - 1; i >= 0; i--)
7401 napi_disable(&tp->napi[i].napi);
7402 }
7403
7404 static void tg3_napi_enable(struct tg3 *tp)
7405 {
7406 int i;
7407
7408 for (i = 0; i < tp->irq_cnt; i++)
7409 napi_enable(&tp->napi[i].napi);
7410 }
7411
7412 static void tg3_napi_init(struct tg3 *tp)
7413 {
7414 int i;
7415
7416 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
7417 for (i = 1; i < tp->irq_cnt; i++)
7418 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
7419 }
7420
7421 static void tg3_napi_fini(struct tg3 *tp)
7422 {
7423 int i;
7424
7425 for (i = 0; i < tp->irq_cnt; i++)
7426 netif_napi_del(&tp->napi[i].napi);
7427 }
7428
7429 static inline void tg3_netif_stop(struct tg3 *tp)
7430 {
7431 netif_trans_update(tp->dev); /* prevent tx timeout */
7432 tg3_napi_disable(tp);
7433 netif_carrier_off(tp->dev);
7434 netif_tx_disable(tp->dev);
7435 }
7436
7437 /* tp->lock must be held */
7438 static inline void tg3_netif_start(struct tg3 *tp)
7439 {
7440 tg3_ptp_resume(tp);
7441
7442 /* NOTE: unconditional netif_tx_wake_all_queues is only
7443 * appropriate so long as all callers are assured to
7444 * have free tx slots (such as after tg3_init_hw)
7445 */
7446 netif_tx_wake_all_queues(tp->dev);
7447
7448 if (tp->link_up)
7449 netif_carrier_on(tp->dev);
7450
7451 tg3_napi_enable(tp);
7452 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7453 tg3_enable_ints(tp);
7454 }
7455
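/* Wait for all in-flight interrupt handlers to finish. tp->lock is
 * dropped while the IRQs are synchronized because synchronize_irq()
 * can sleep.
 */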
7456 static void tg3_irq_quiesce(struct tg3 *tp)
7457 __releases(tp->lock)
7458 __acquires(tp->lock)
7459 {
7460 int i;
7461
7462 BUG_ON(tp->irq_sync);
7463
7464 tp->irq_sync = 1;
7465 smp_mb();
7466
7467 spin_unlock_bh(&tp->lock);
7468
7469 for (i = 0; i < tp->irq_cnt; i++)
7470 synchronize_irq(tp->napi[i].irq_vec);
7471
7472 spin_lock_bh(&tp->lock);
7473 }
7474
7475 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7476 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7477 * with as well. Most of the time, this is not necessary except when
7478 * shutting down the device.
7479 */
7480 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7481 {
7482 spin_lock_bh(&tp->lock);
7483 if (irq_sync)
7484 tg3_irq_quiesce(tp);
7485 }
7486
7487 static inline void tg3_full_unlock(struct tg3 *tp)
7488 {
7489 spin_unlock_bh(&tp->lock);
7490 }
7491
7492 /* One-shot MSI handler - Chip automatically disables interrupt
7493 * after sending MSI so driver doesn't have to do it.
7494 */
7495 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7496 {
7497 struct tg3_napi *tnapi = dev_id;
7498 struct tg3 *tp = tnapi->tp;
7499
7500 prefetch(tnapi->hw_status);
7501 if (tnapi->rx_rcb)
7502 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7503
7504 if (likely(!tg3_irq_sync(tp)))
7505 napi_schedule(&tnapi->napi);
7506
7507 return IRQ_HANDLED;
7508 }
7509
7510 /* MSI ISR - No need to check for interrupt sharing and no need to
7511 * flush status block and interrupt mailbox. PCI ordering rules
7512 * guarantee that MSI will arrive after the status block.
7513 */
7514 static irqreturn_t tg3_msi(int irq, void *dev_id)
7515 {
7516 struct tg3_napi *tnapi = dev_id;
7517 struct tg3 *tp = tnapi->tp;
7518
7519 prefetch(tnapi->hw_status);
7520 if (tnapi->rx_rcb)
7521 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7522 /*
7523 * Writing any value to intr-mbox-0 clears PCI INTA# and
7524 * chip-internal interrupt pending events.
7525 * Writing non-zero to intr-mbox-0 additionally tells the
7526 * NIC to stop sending us irqs, engaging "in-intr-handler"
7527 * event coalescing.
7528 */
7529 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7530 if (likely(!tg3_irq_sync(tp)))
7531 napi_schedule(&tnapi->napi);
7532
7533 return IRQ_RETVAL(1);
7534 }
7535
7536 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7537 {
7538 struct tg3_napi *tnapi = dev_id;
7539 struct tg3 *tp = tnapi->tp;
7540 struct tg3_hw_status *sblk = tnapi->hw_status;
7541 unsigned int handled = 1;
7542
7543 /* In INTx mode, it is possible for the interrupt to arrive at
7544 * the CPU before the status block that was posted prior to the interrupt.
7545 * Reading the PCI State register will confirm whether the
7546 * interrupt is ours and will flush the status block.
7547 */
7548 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7549 if (tg3_flag(tp, CHIP_RESETTING) ||
7550 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7551 handled = 0;
7552 goto out;
7553 }
7554 }
7555
7556 /*
7557 * Writing any value to intr-mbox-0 clears PCI INTA# and
7558 * chip-internal interrupt pending events.
7559 * Writing non-zero to intr-mbox-0 additionally tells the
7560 * NIC to stop sending us irqs, engaging "in-intr-handler"
7561 * event coalescing.
7562 *
7563 * Flush the mailbox to de-assert the IRQ immediately to prevent
7564 * spurious interrupts. The flush impacts performance but
7565 * excessive spurious interrupts can be worse in some cases.
7566 */
7567 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7568 if (tg3_irq_sync(tp))
7569 goto out;
7570 sblk->status &= ~SD_STATUS_UPDATED;
7571 if (likely(tg3_has_work(tnapi))) {
7572 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7573 napi_schedule(&tnapi->napi);
7574 } else {
7575 /* No work, shared interrupt perhaps? re-enable
7576 * interrupts, and flush that PCI write
7577 */
7578 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7579 0x00000000);
7580 }
7581 out:
7582 return IRQ_RETVAL(handled);
7583 }
7584
7585 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7586 {
7587 struct tg3_napi *tnapi = dev_id;
7588 struct tg3 *tp = tnapi->tp;
7589 struct tg3_hw_status *sblk = tnapi->hw_status;
7590 unsigned int handled = 1;
7591
7592 /* In INTx mode, it is possible for the interrupt to arrive at
7593 * the CPU before the status block that was posted prior to the interrupt.
7594 * Reading the PCI State register will confirm whether the
7595 * interrupt is ours and will flush the status block.
7596 */
7597 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7598 if (tg3_flag(tp, CHIP_RESETTING) ||
7599 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7600 handled = 0;
7601 goto out;
7602 }
7603 }
7604
7605 /*
7606 * Writing any value to intr-mbox-0 clears PCI INTA# and
7607 * chip-internal interrupt pending events.
7608 * Writing non-zero to intr-mbox-0 additionally tells the
7609 * NIC to stop sending us irqs, engaging "in-intr-handler"
7610 * event coalescing.
7611 *
7612 * Flush the mailbox to de-assert the IRQ immediately to prevent
7613 * spurious interrupts. The flush impacts performance but
7614 * excessive spurious interrupts can be worse in some cases.
7615 */
7616 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7617
7618 /*
7619 * In a shared interrupt configuration, sometimes other devices'
7620 * interrupts will scream. We record the current status tag here
7621 * so that the above check can report that the screaming interrupts
7622 * are unhandled. Eventually they will be silenced.
7623 */
7624 tnapi->last_irq_tag = sblk->status_tag;
7625
7626 if (tg3_irq_sync(tp))
7627 goto out;
7628
7629 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7630
7631 napi_schedule(&tnapi->napi);
7632
7633 out:
7634 return IRQ_RETVAL(handled);
7635 }
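/* A worked example of the status_tag scheme above (the tag values are
 * hypothetical; the flow matches the code):
 *
 *   1. The NIC DMAs a status block with status_tag = 5 and asserts
 *      the IRQ.
 *   2. tg3_interrupt_tagged() sees 5 != last_irq_tag, acks the
 *      mailbox, records last_irq_tag = 5 and schedules NAPI.
 *   3. Another device screams on the shared line before the NIC
 *      posts new work; status_tag is still 5 == last_irq_tag, so the
 *      handler returns IRQ_RETVAL(0) and the screamer is reported as
 *      unhandled, letting the core eventually quiesce it.
 */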
7636
7637 /* ISR for interrupt test */
7638 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7639 {
7640 struct tg3_napi *tnapi = dev_id;
7641 struct tg3 *tp = tnapi->tp;
7642 struct tg3_hw_status *sblk = tnapi->hw_status;
7643
7644 if ((sblk->status & SD_STATUS_UPDATED) ||
7645 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7646 tg3_disable_ints(tp);
7647 return IRQ_RETVAL(1);
7648 }
7649 return IRQ_RETVAL(0);
7650 }
7651
7652 #ifdef CONFIG_NET_POLL_CONTROLLER
7653 static void tg3_poll_controller(struct net_device *dev)
7654 {
7655 int i;
7656 struct tg3 *tp = netdev_priv(dev);
7657
7658 if (tg3_irq_sync(tp))
7659 return;
7660
7661 for (i = 0; i < tp->irq_cnt; i++)
7662 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7663 }
7664 #endif
7665
7666 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7667 {
7668 struct tg3 *tp = netdev_priv(dev);
7669
7670 if (netif_msg_tx_err(tp)) {
7671 netdev_err(dev, "transmit timed out, resetting\n");
7672 tg3_dump_state(tp);
7673 }
7674
7675 tg3_reset_task_schedule(tp);
7676 }
7677
7678 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7679 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7680 {
7681 u32 base = (u32) mapping & 0xffffffff;
7682
7683 return base + len + 8 < base;
7684 }
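/* Worked example of the wraparound test above: the sum is evaluated in
 * 32-bit arithmetic, so it compares below 'base' exactly when
 * base + len + 8 crosses a 4GB multiple (illustrative values):
 *
 *   mapping = 0xfffff000, len = 0x2000:
 *       base + len + 8 == 0x1008 (mod 2^32) <  base  -> overflow
 *   mapping = 0x00001000, len = 0x2000:
 *       base + len + 8 == 0x3008            >= base  -> ok
 *
 * The extra 8 bytes look like a conservative slack margin for the DMA
 * engine; that reading is an assumption, not documented chip behavior.
 */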
7685
7686 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7687 * of any 4GB boundaries: 4G, 8G, etc
7688 */
7689 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7690 u32 len, u32 mss)
7691 {
7692 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7693 u32 base = (u32) mapping & 0xffffffff;
7694
7695 return ((base + len + (mss & 0x3fff)) < base);
7696 }
7697 return 0;
7698 }
7699
7700 /* Test for DMA addresses > 40-bit */
7701 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7702 int len)
7703 {
7704 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7705 if (tg3_flag(tp, 40BIT_DMA_BUG))
7706 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7707 return 0;
7708 #else
7709 return 0;
7710 #endif
7711 }
7712
7713 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7714 dma_addr_t mapping, u32 len, u32 flags,
7715 u32 mss, u32 vlan)
7716 {
7717 txbd->addr_hi = ((u64) mapping >> 32);
7718 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7719 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7720 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7721 }
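/* Packing example for the BD layout above (illustrative values,
 * assuming TXD_LEN_SHIFT == 16 as the 0x0000ffff flags mask suggests):
 * a mapping of 0x00000001_23456789 splits into addr_hi = 0x00000001
 * and addr_lo = 0x23456789, and a 1514-byte frame with TXD_FLAG_END
 * packs as
 *
 *   len_flags = (1514 << 16) | TXD_FLAG_END;
 *
 * with mss and the VLAN tag sharing the final 32-bit word the same
 * way.
 */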
7722
7723 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7724 dma_addr_t map, u32 len, u32 flags,
7725 u32 mss, u32 vlan)
7726 {
7727 struct tg3 *tp = tnapi->tp;
7728 bool hwbug = false;
7729
7730 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7731 hwbug = true;
7732
7733 if (tg3_4g_overflow_test(map, len))
7734 hwbug = true;
7735
7736 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7737 hwbug = true;
7738
7739 if (tg3_40bit_overflow_test(tp, map, len))
7740 hwbug = true;
7741
7742 if (tp->dma_limit) {
7743 u32 prvidx = *entry;
7744 u32 tmp_flag = flags & ~TXD_FLAG_END;
7745 while (len > tp->dma_limit && *budget) {
7746 u32 frag_len = tp->dma_limit;
7747 len -= tp->dma_limit;
7748
7749 /* Avoid the 8-byte DMA problem */
7750 if (len <= 8) {
7751 len += tp->dma_limit / 2;
7752 frag_len = tp->dma_limit / 2;
7753 }
7754
7755 tnapi->tx_buffers[*entry].fragmented = true;
7756
7757 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7758 frag_len, tmp_flag, mss, vlan);
7759 *budget -= 1;
7760 prvidx = *entry;
7761 *entry = NEXT_TX(*entry);
7762
7763 map += frag_len;
7764 }
7765
7766 if (len) {
7767 if (*budget) {
7768 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7769 len, flags, mss, vlan);
7770 *budget -= 1;
7771 *entry = NEXT_TX(*entry);
7772 } else {
7773 hwbug = true;
7774 tnapi->tx_buffers[prvidx].fragmented = false;
7775 }
7776 }
7777 } else {
7778 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7779 len, flags, mss, vlan);
7780 *entry = NEXT_TX(*entry);
7781 }
7782
7783 return hwbug;
7784 }
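/* Worked example of the dma_limit splitting above, assuming
 * tp->dma_limit = 4096 and a single 8196-byte mapping (hypothetical
 * numbers, chosen so a naive split would leave a 4-byte tail):
 *
 *   pass 1: frag_len = 4096, remaining len = 4100    -> BD of 4096
 *   pass 2: remainder would be 4 (<= 8), so borrow
 *           dma_limit/2: frag_len = 2048, len = 2052 -> BD of 2048
 *   tail:   len = 2052 <= dma_limit                  -> BD of 2052
 *
 * 4096 + 2048 + 2052 = 8196 bytes total and no BD is 8 bytes or
 * smaller, which sidesteps the SHORT_DMA_BUG case checked above. All
 * but the final BD are emitted with TXD_FLAG_END cleared and marked
 * ->fragmented so tg3_tx_skb_unmap() can skip over them later.
 */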
7785
7786 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7787 {
7788 int i;
7789 struct sk_buff *skb;
7790 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7791
7792 skb = txb->skb;
7793 txb->skb = NULL;
7794
7795 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7796 skb_headlen(skb), DMA_TO_DEVICE);
7797
7798 while (txb->fragmented) {
7799 txb->fragmented = false;
7800 entry = NEXT_TX(entry);
7801 txb = &tnapi->tx_buffers[entry];
7802 }
7803
7804 for (i = 0; i <= last; i++) {
7805 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7806
7807 entry = NEXT_TX(entry);
7808 txb = &tnapi->tx_buffers[entry];
7809
7810 dma_unmap_page(&tnapi->tp->pdev->dev,
7811 dma_unmap_addr(txb, mapping),
7812 skb_frag_size(frag), DMA_TO_DEVICE);
7813
7814 while (txb->fragmented) {
7815 txb->fragmented = false;
7816 entry = NEXT_TX(entry);
7817 txb = &tnapi->tx_buffers[entry];
7818 }
7819 }
7820 }
7821
7822 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7823 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7824 struct sk_buff **pskb,
7825 u32 *entry, u32 *budget,
7826 u32 base_flags, u32 mss, u32 vlan)
7827 {
7828 struct tg3 *tp = tnapi->tp;
7829 struct sk_buff *new_skb, *skb = *pskb;
7830 dma_addr_t new_addr = 0;
7831 int ret = 0;
7832
7833 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7834 new_skb = skb_copy(skb, GFP_ATOMIC);
7835 else {
7836 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7837
7838 new_skb = skb_copy_expand(skb,
7839 skb_headroom(skb) + more_headroom,
7840 skb_tailroom(skb), GFP_ATOMIC);
7841 }
7842
7843 if (!new_skb) {
7844 ret = -1;
7845 } else {
7846 /* New SKB is guaranteed to be linear. */
7847 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7848 new_skb->len, DMA_TO_DEVICE);
7849 /* Make sure the mapping succeeded */
7850 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7851 dev_kfree_skb_any(new_skb);
7852 ret = -1;
7853 } else {
7854 u32 save_entry = *entry;
7855
7856 base_flags |= TXD_FLAG_END;
7857
7858 tnapi->tx_buffers[*entry].skb = new_skb;
7859 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7860 mapping, new_addr);
7861
7862 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7863 new_skb->len, base_flags,
7864 mss, vlan)) {
7865 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7866 dev_kfree_skb_any(new_skb);
7867 ret = -1;
7868 }
7869 }
7870 }
7871
7872 dev_consume_skb_any(skb);
7873 *pskb = new_skb;
7874 return ret;
7875 }
7876
7877 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7878 {
7879 /* Check whether we could ever have enough descriptors,
7880 * as gso_segs can exceed the current ring size.
7881 */
7882 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7883 }
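/* Illustrative numbers for the heuristic above: with tx_pending = 511
 * the threshold is 170 segments, so a TSO skb that GSO would cut into
 * 45 segments passes (45 * 3 = 135 descriptors worst case, matching
 * the frag_cnt_est computation in tg3_tso_bug() below), while a
 * 200-segment skb could never fit even a freshly drained ring and is
 * rejected up front.
 */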
7884
7885 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7886
7887 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7888 * indicated in tg3_tx_frag_set()
7889 */
7890 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7891 struct netdev_queue *txq, struct sk_buff *skb)
7892 {
7893 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7894 struct sk_buff *segs, *seg, *next;
7895
7896 /* Estimate the number of fragments in the worst case */
7897 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7898 netif_tx_stop_queue(txq);
7899
7900 /* netif_tx_stop_queue() must be done before checking
7901 * tx index in tg3_tx_avail() below, because in
7902 * tg3_tx(), we update tx index before checking for
7903 * netif_tx_queue_stopped().
7904 */
7905 smp_mb();
7906 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7907 return NETDEV_TX_BUSY;
7908
7909 netif_tx_wake_queue(txq);
7910 }
7911
7912 segs = skb_gso_segment(skb, tp->dev->features &
7913 ~(NETIF_F_TSO | NETIF_F_TSO6));
7914 if (IS_ERR(segs) || !segs) {
7915 tnapi->tx_dropped++;
7916 goto tg3_tso_bug_end;
7917 }
7918
7919 skb_list_walk_safe(segs, seg, next) {
7920 skb_mark_not_on_list(seg);
7921 __tg3_start_xmit(seg, tp->dev);
7922 }
7923
7924 tg3_tso_bug_end:
7925 dev_consume_skb_any(skb);
7926
7927 return NETDEV_TX_OK;
7928 }
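/* A sketch of the barrier pairing the comment above relies on
 * (assumed interleaving; CPU0 runs this function, CPU1 runs the
 * completion path in tg3_tx()):
 *
 *   CPU0 (xmit)                       CPU1 (tx completion)
 *   netif_tx_stop_queue(txq);         update tx consumer index;
 *   smp_mb();                         barrier / ordering pairing;
 *   re-read tg3_tx_avail();           if (queue stopped &&
 *   if enough room now:                   enough room)
 *       netif_tx_wake_queue(txq);             wake queue;
 *
 * Whichever side observes the other's update last performs the wake,
 * so the queue cannot stay stopped while the ring sits drained.
 */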
7929
7930 /* hard_start_xmit for all devices */
7931 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7932 {
7933 struct tg3 *tp = netdev_priv(dev);
7934 u32 len, entry, base_flags, mss, vlan = 0;
7935 u32 budget;
7936 int i = -1, would_hit_hwbug;
7937 dma_addr_t mapping;
7938 struct tg3_napi *tnapi;
7939 struct netdev_queue *txq;
7940 unsigned int last;
7941 struct iphdr *iph = NULL;
7942 struct tcphdr *tcph = NULL;
7943 __sum16 tcp_csum = 0, ip_csum = 0;
7944 __be16 ip_tot_len = 0;
7945
7946 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7947 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7948 if (tg3_flag(tp, ENABLE_TSS))
7949 tnapi++;
7950
7951 budget = tg3_tx_avail(tnapi);
7952
7953 /* We are running in BH disabled context with netif_tx_lock
7954 * and TX reclaim runs via tp->napi.poll inside of a software
7955 * interrupt. Furthermore, IRQ processing runs lockless so we have
7956 * no IRQ context deadlocks to worry about either. Rejoice!
7957 */
7958 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7959 if (!netif_tx_queue_stopped(txq)) {
7960 netif_tx_stop_queue(txq);
7961
7962 /* This is a hard error, log it. */
7963 netdev_err(dev,
7964 "BUG! Tx Ring full when queue awake!\n");
7965 }
7966 return NETDEV_TX_BUSY;
7967 }
7968
7969 entry = tnapi->tx_prod;
7970 base_flags = 0;
7971
7972 mss = skb_shinfo(skb)->gso_size;
7973 if (mss) {
7974 u32 tcp_opt_len, hdr_len;
7975
7976 if (skb_cow_head(skb, 0))
7977 goto drop;
7978
7979 iph = ip_hdr(skb);
7980 tcp_opt_len = tcp_optlen(skb);
7981
7982 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
7983
7984 /* HW/FW cannot correctly segment packets that have been
7985 * vlan encapsulated.
7986 */
7987 if (skb->protocol == htons(ETH_P_8021Q) ||
7988 skb->protocol == htons(ETH_P_8021AD)) {
7989 if (tg3_tso_bug_gso_check(tnapi, skb))
7990 return tg3_tso_bug(tp, tnapi, txq, skb);
7991 goto drop;
7992 }
7993
7994 if (!skb_is_gso_v6(skb)) {
7995 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7996 tg3_flag(tp, TSO_BUG)) {
7997 if (tg3_tso_bug_gso_check(tnapi, skb))
7998 return tg3_tso_bug(tp, tnapi, txq, skb);
7999 goto drop;
8000 }
8001 ip_csum = iph->check;
8002 ip_tot_len = iph->tot_len;
8003 iph->check = 0;
8004 iph->tot_len = htons(mss + hdr_len);
8005 }
8006
8007 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8008 TXD_FLAG_CPU_POST_DMA);
8009
8010 tcph = tcp_hdr(skb);
8011 tcp_csum = tcph->check;
8012
8013 if (tg3_flag(tp, HW_TSO_1) ||
8014 tg3_flag(tp, HW_TSO_2) ||
8015 tg3_flag(tp, HW_TSO_3)) {
8016 tcph->check = 0;
8017 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8018 } else {
8019 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8020 0, IPPROTO_TCP, 0);
8021 }
8022
8023 if (tg3_flag(tp, HW_TSO_3)) {
8024 mss |= (hdr_len & 0xc) << 12;
8025 if (hdr_len & 0x10)
8026 base_flags |= 0x00000010;
8027 base_flags |= (hdr_len & 0x3e0) << 5;
8028 } else if (tg3_flag(tp, HW_TSO_2))
8029 mss |= hdr_len << 9;
8030 else if (tg3_flag(tp, HW_TSO_1) ||
8031 tg3_asic_rev(tp) == ASIC_REV_5705) {
8032 if (tcp_opt_len || iph->ihl > 5) {
8033 int tsflags;
8034
8035 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8036 mss |= (tsflags << 11);
8037 }
8038 } else {
8039 if (tcp_opt_len || iph->ihl > 5) {
8040 int tsflags;
8041
8042 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8043 base_flags |= tsflags << 12;
8044 }
8045 }
8046 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8047 /* HW/FW cannot correctly checksum packets that have been
8048 * vlan encapsulated.
8049 */
8050 if (skb->protocol == htons(ETH_P_8021Q) ||
8051 skb->protocol == htons(ETH_P_8021AD)) {
8052 if (skb_checksum_help(skb))
8053 goto drop;
8054 } else {
8055 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8056 }
8057 }
8058
8059 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8060 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8061 base_flags |= TXD_FLAG_JMB_PKT;
8062
8063 if (skb_vlan_tag_present(skb)) {
8064 base_flags |= TXD_FLAG_VLAN;
8065 vlan = skb_vlan_tag_get(skb);
8066 }
8067
8068 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8069 tg3_flag(tp, TX_TSTAMP_EN)) {
8070 tg3_full_lock(tp, 0);
8071 if (!tp->pre_tx_ts) {
8072 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8073 base_flags |= TXD_FLAG_HWTSTAMP;
8074 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8075 }
8076 tg3_full_unlock(tp);
8077 }
8078
8079 len = skb_headlen(skb);
8080
8081 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8082 DMA_TO_DEVICE);
8083 if (dma_mapping_error(&tp->pdev->dev, mapping))
8084 goto drop;
8085
8087 tnapi->tx_buffers[entry].skb = skb;
8088 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8089
8090 would_hit_hwbug = 0;
8091
8092 if (tg3_flag(tp, 5701_DMA_BUG))
8093 would_hit_hwbug = 1;
8094
8095 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8096 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8097 mss, vlan)) {
8098 would_hit_hwbug = 1;
8099 } else if (skb_shinfo(skb)->nr_frags > 0) {
8100 u32 tmp_mss = mss;
8101
8102 if (!tg3_flag(tp, HW_TSO_1) &&
8103 !tg3_flag(tp, HW_TSO_2) &&
8104 !tg3_flag(tp, HW_TSO_3))
8105 tmp_mss = 0;
8106
8107 /* Now loop through additional data
8108 * fragments, and queue them.
8109 */
8110 last = skb_shinfo(skb)->nr_frags - 1;
8111 for (i = 0; i <= last; i++) {
8112 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8113
8114 len = skb_frag_size(frag);
8115 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8116 len, DMA_TO_DEVICE);
8117
8118 tnapi->tx_buffers[entry].skb = NULL;
8119 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8120 mapping);
8121 if (dma_mapping_error(&tp->pdev->dev, mapping))
8122 goto dma_error;
8123
8124 if (!budget ||
8125 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8126 len, base_flags |
8127 ((i == last) ? TXD_FLAG_END : 0),
8128 tmp_mss, vlan)) {
8129 would_hit_hwbug = 1;
8130 break;
8131 }
8132 }
8133 }
8134
8135 if (would_hit_hwbug) {
8136 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8137
8138 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8139 /* If it's a TSO packet, do GSO instead of
8140 * allocating and copying to a large linear SKB
8141 */
8142 if (ip_tot_len) {
8143 iph->check = ip_csum;
8144 iph->tot_len = ip_tot_len;
8145 }
8146 tcph->check = tcp_csum;
8147 return tg3_tso_bug(tp, tnapi, txq, skb);
8148 }
8149
8150 /* If the workaround fails due to memory/mapping
8151 * failure, silently drop this packet.
8152 */
8153 entry = tnapi->tx_prod;
8154 budget = tg3_tx_avail(tnapi);
8155 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8156 base_flags, mss, vlan))
8157 goto drop_nofree;
8158 }
8159
8160 skb_tx_timestamp(skb);
8161 netdev_tx_sent_queue(txq, skb->len);
8162
8163 /* Sync BD data before updating mailbox */
8164 wmb();
8165
8166 tnapi->tx_prod = entry;
8167 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8168 netif_tx_stop_queue(txq);
8169
8170 /* netif_tx_stop_queue() must be done before checking
8171 * tx index in tg3_tx_avail() below, because in
8172 * tg3_tx(), we update tx index before checking for
8173 * netif_tx_queue_stopped().
8174 */
8175 smp_mb();
8176 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8177 netif_tx_wake_queue(txq);
8178 }
8179
8180 return NETDEV_TX_OK;
8181
8182 dma_error:
8183 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8184 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8185 drop:
8186 dev_kfree_skb_any(skb);
8187 drop_nofree:
8188 tnapi->tx_dropped++;
8189 return NETDEV_TX_OK;
8190 }
8191
8192 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8193 {
8194 struct netdev_queue *txq;
8195 u16 skb_queue_mapping;
8196 netdev_tx_t ret;
8197
8198 skb_queue_mapping = skb_get_queue_mapping(skb);
8199 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8200
8201 ret = __tg3_start_xmit(skb, dev);
8202
8203 /* Notify the hardware that packets are ready by updating the TX ring
8204 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8205 * the hardware for every packet. To guarantee forward progress the TX
8206 * ring must be drained when it is full as indicated by
8207 * netif_xmit_stopped(). This needs to happen even when the current
8208 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8209 * queued by previous __tg3_start_xmit() calls might get stuck in
8210 * the queue forever.
8211 */
8212 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8213 struct tg3_napi *tnapi;
8214 struct tg3 *tp;
8215
8216 tp = netdev_priv(dev);
8217 tnapi = &tp->napi[skb_queue_mapping];
8218
8219 if (tg3_flag(tp, ENABLE_TSS))
8220 tnapi++;
8221
8222 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8223 }
8224
8225 return ret;
8226 }
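/* Batching sketch for the doorbell logic above (a hypothetical
 * three-skb burst submitted with xmit_more set on the first two):
 *
 *   __tg3_start_xmit(skb0);  netdev_xmit_more() == true  -> no mailbox
 *   __tg3_start_xmit(skb1);  netdev_xmit_more() == true  -> no mailbox
 *   __tg3_start_xmit(skb2);  netdev_xmit_more() == false -> one
 *           tw32_tx_mbox() posts all three sets of BDs at once
 *
 * The netif_xmit_stopped() test is the escape hatch: a full queue gets
 * its doorbell immediately even mid-burst, so earlier BDs cannot be
 * stranded waiting for a final skb that never arrives.
 */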
8227
8228 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8229 {
8230 if (enable) {
8231 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8232 MAC_MODE_PORT_MODE_MASK);
8233
8234 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8235
8236 if (!tg3_flag(tp, 5705_PLUS))
8237 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8238
8239 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8240 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8241 else
8242 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8243 } else {
8244 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8245
8246 if (tg3_flag(tp, 5705_PLUS) ||
8247 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8248 tg3_asic_rev(tp) == ASIC_REV_5700)
8249 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8250 }
8251
8252 tw32(MAC_MODE, tp->mac_mode);
8253 udelay(40);
8254 }
8255
8256 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8257 {
8258 u32 val, bmcr, mac_mode, ptest = 0;
8259
8260 tg3_phy_toggle_apd(tp, false);
8261 tg3_phy_toggle_automdix(tp, false);
8262
8263 if (extlpbk && tg3_phy_set_extloopbk(tp))
8264 return -EIO;
8265
8266 bmcr = BMCR_FULLDPLX;
8267 switch (speed) {
8268 case SPEED_10:
8269 break;
8270 case SPEED_100:
8271 bmcr |= BMCR_SPEED100;
8272 break;
8273 case SPEED_1000:
8274 default:
8275 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8276 speed = SPEED_100;
8277 bmcr |= BMCR_SPEED100;
8278 } else {
8279 speed = SPEED_1000;
8280 bmcr |= BMCR_SPEED1000;
8281 }
8282 }
8283
8284 if (extlpbk) {
8285 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8286 tg3_readphy(tp, MII_CTRL1000, &val);
8287 val |= CTL1000_AS_MASTER |
8288 CTL1000_ENABLE_MASTER;
8289 tg3_writephy(tp, MII_CTRL1000, val);
8290 } else {
8291 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8292 MII_TG3_FET_PTEST_TRIM_2;
8293 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8294 }
8295 } else
8296 bmcr |= BMCR_LOOPBACK;
8297
8298 tg3_writephy(tp, MII_BMCR, bmcr);
8299
8300 /* The write needs to be flushed for the FETs */
8301 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8302 tg3_readphy(tp, MII_BMCR, &bmcr);
8303
8304 udelay(40);
8305
8306 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8307 tg3_asic_rev(tp) == ASIC_REV_5785) {
8308 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8309 MII_TG3_FET_PTEST_FRC_TX_LINK |
8310 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8311
8312 /* The write needs to be flushed for the AC131 */
8313 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8314 }
8315
8316 /* Reset to prevent losing 1st rx packet intermittently */
8317 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8318 tg3_flag(tp, 5780_CLASS)) {
8319 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8320 udelay(10);
8321 tw32_f(MAC_RX_MODE, tp->rx_mode);
8322 }
8323
8324 mac_mode = tp->mac_mode &
8325 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8326 if (speed == SPEED_1000)
8327 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8328 else
8329 mac_mode |= MAC_MODE_PORT_MODE_MII;
8330
8331 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8332 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8333
8334 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8335 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8336 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8337 mac_mode |= MAC_MODE_LINK_POLARITY;
8338
8339 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8340 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8341 }
8342
8343 tw32(MAC_MODE, mac_mode);
8344 udelay(40);
8345
8346 return 0;
8347 }
8348
8349 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8350 {
8351 struct tg3 *tp = netdev_priv(dev);
8352
8353 if (features & NETIF_F_LOOPBACK) {
8354 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8355 return;
8356
8357 spin_lock_bh(&tp->lock);
8358 tg3_mac_loopback(tp, true);
8359 netif_carrier_on(tp->dev);
8360 spin_unlock_bh(&tp->lock);
8361 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8362 } else {
8363 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8364 return;
8365
8366 spin_lock_bh(&tp->lock);
8367 tg3_mac_loopback(tp, false);
8368 /* Force link status check */
8369 tg3_setup_phy(tp, true);
8370 spin_unlock_bh(&tp->lock);
8371 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8372 }
8373 }
8374
8375 static netdev_features_t tg3_fix_features(struct net_device *dev,
8376 netdev_features_t features)
8377 {
8378 struct tg3 *tp = netdev_priv(dev);
8379
8380 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8381 features &= ~NETIF_F_ALL_TSO;
8382
8383 return features;
8384 }
8385
8386 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8387 {
8388 netdev_features_t changed = dev->features ^ features;
8389
8390 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8391 tg3_set_loopback(dev, features);
8392
8393 return 0;
8394 }
8395
8396 static void tg3_rx_prodring_free(struct tg3 *tp,
8397 struct tg3_rx_prodring_set *tpr)
8398 {
8399 int i;
8400
8401 if (tpr != &tp->napi[0].prodring) {
8402 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8403 i = (i + 1) & tp->rx_std_ring_mask)
8404 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8405 tp->rx_pkt_map_sz);
8406
8407 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8408 for (i = tpr->rx_jmb_cons_idx;
8409 i != tpr->rx_jmb_prod_idx;
8410 i = (i + 1) & tp->rx_jmb_ring_mask) {
8411 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8412 TG3_RX_JMB_MAP_SZ);
8413 }
8414 }
8415
8416 return;
8417 }
8418
8419 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8420 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8421 tp->rx_pkt_map_sz);
8422
8423 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8424 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8425 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8426 TG3_RX_JMB_MAP_SZ);
8427 }
8428 }
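/* The (i + 1) & mask stepping above implies the ring sizes are powers
 * of two: e.g. with tp->rx_std_ring_mask = 511, index 511 advances to
 * (512 & 511) = 0, giving free wraparound without a modulo. The same
 * convention applies to the jumbo ring mask.
 */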
8429
8430 /* Initialize rx rings for packet processing.
8431 *
8432 * The chip has been shut down and the driver detached from
8433 * the networking stack, so no interrupts or new tx packets will
8434 * end up in the driver. tp->{tx,}lock are held and thus
8435 * we may not sleep.
8436 */
8437 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8438 struct tg3_rx_prodring_set *tpr)
8439 {
8440 u32 i, rx_pkt_dma_sz;
8441
8442 tpr->rx_std_cons_idx = 0;
8443 tpr->rx_std_prod_idx = 0;
8444 tpr->rx_jmb_cons_idx = 0;
8445 tpr->rx_jmb_prod_idx = 0;
8446
8447 if (tpr != &tp->napi[0].prodring) {
8448 memset(&tpr->rx_std_buffers[0], 0,
8449 TG3_RX_STD_BUFF_RING_SIZE(tp));
8450 if (tpr->rx_jmb_buffers)
8451 memset(&tpr->rx_jmb_buffers[0], 0,
8452 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8453 goto done;
8454 }
8455
8456 /* Zero out all descriptors. */
8457 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8458
8459 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8460 if (tg3_flag(tp, 5780_CLASS) &&
8461 tp->dev->mtu > ETH_DATA_LEN)
8462 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8463 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8464
8465 /* Initialize invariants of the rings; we only set this
8466 * stuff once. This works because the card does not
8467 * write into the rx buffer posting rings.
8468 */
8469 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8470 struct tg3_rx_buffer_desc *rxd;
8471
8472 rxd = &tpr->rx_std[i];
8473 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8474 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8475 rxd->opaque = (RXD_OPAQUE_RING_STD |
8476 (i << RXD_OPAQUE_INDEX_SHIFT));
8477 }
8478
8479 /* Now allocate fresh SKBs for each rx ring. */
8480 for (i = 0; i < tp->rx_pending; i++) {
8481 unsigned int frag_size;
8482
8483 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8484 &frag_size) < 0) {
8485 netdev_warn(tp->dev,
8486 "Using a smaller RX standard ring. Only "
8487 "%d out of %d buffers were allocated "
8488 "successfully\n", i, tp->rx_pending);
8489 if (i == 0)
8490 goto initfail;
8491 tp->rx_pending = i;
8492 break;
8493 }
8494 }
8495
8496 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8497 goto done;
8498
8499 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8500
8501 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8502 goto done;
8503
8504 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8505 struct tg3_rx_buffer_desc *rxd;
8506
8507 rxd = &tpr->rx_jmb[i].std;
8508 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8509 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8510 RXD_FLAG_JUMBO;
8511 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8512 (i << RXD_OPAQUE_INDEX_SHIFT));
8513 }
8514
8515 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8516 unsigned int frag_size;
8517
8518 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8519 &frag_size) < 0) {
8520 netdev_warn(tp->dev,
8521 "Using a smaller RX jumbo ring. Only %d "
8522 "out of %d buffers were allocated "
8523 "successfully\n", i, tp->rx_jumbo_pending);
8524 if (i == 0)
8525 goto initfail;
8526 tp->rx_jumbo_pending = i;
8527 break;
8528 }
8529 }
8530
8531 done:
8532 return 0;
8533
8534 initfail:
8535 tg3_rx_prodring_free(tp, tpr);
8536 return -ENOMEM;
8537 }
8538
8539 static void tg3_rx_prodring_fini(struct tg3 *tp,
8540 struct tg3_rx_prodring_set *tpr)
8541 {
8542 kfree(tpr->rx_std_buffers);
8543 tpr->rx_std_buffers = NULL;
8544 kfree(tpr->rx_jmb_buffers);
8545 tpr->rx_jmb_buffers = NULL;
8546 if (tpr->rx_std) {
8547 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8548 tpr->rx_std, tpr->rx_std_mapping);
8549 tpr->rx_std = NULL;
8550 }
8551 if (tpr->rx_jmb) {
8552 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8553 tpr->rx_jmb, tpr->rx_jmb_mapping);
8554 tpr->rx_jmb = NULL;
8555 }
8556 }
8557
8558 static int tg3_rx_prodring_init(struct tg3 *tp,
8559 struct tg3_rx_prodring_set *tpr)
8560 {
8561 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8562 GFP_KERNEL);
8563 if (!tpr->rx_std_buffers)
8564 return -ENOMEM;
8565
8566 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8567 TG3_RX_STD_RING_BYTES(tp),
8568 &tpr->rx_std_mapping,
8569 GFP_KERNEL);
8570 if (!tpr->rx_std)
8571 goto err_out;
8572
8573 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8574 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8575 GFP_KERNEL);
8576 if (!tpr->rx_jmb_buffers)
8577 goto err_out;
8578
8579 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8580 TG3_RX_JMB_RING_BYTES(tp),
8581 &tpr->rx_jmb_mapping,
8582 GFP_KERNEL);
8583 if (!tpr->rx_jmb)
8584 goto err_out;
8585 }
8586
8587 return 0;
8588
8589 err_out:
8590 tg3_rx_prodring_fini(tp, tpr);
8591 return -ENOMEM;
8592 }
8593
8594 /* Free up pending packets in all rx/tx rings.
8595 *
8596 * The chip has been shut down and the driver detached from
8597 * the networking stack, so no interrupts or new tx packets will
8598 * end up in the driver. tp->{tx,}lock is not held and we are not
8599 * in an interrupt context and thus may sleep.
8600 */
8601 static void tg3_free_rings(struct tg3 *tp)
8602 {
8603 int i, j;
8604
8605 for (j = 0; j < tp->irq_cnt; j++) {
8606 struct tg3_napi *tnapi = &tp->napi[j];
8607
8608 tg3_rx_prodring_free(tp, &tnapi->prodring);
8609
8610 if (!tnapi->tx_buffers)
8611 continue;
8612
8613 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8614 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8615
8616 if (!skb)
8617 continue;
8618
8619 tg3_tx_skb_unmap(tnapi, i,
8620 skb_shinfo(skb)->nr_frags - 1);
8621
8622 dev_consume_skb_any(skb);
8623 }
8624 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8625 }
8626 }
8627
8628 /* Initialize tx/rx rings for packet processing.
8629 *
8630 * The chip has been shut down and the driver detached from
8631 * the networking stack, so no interrupts or new tx packets will
8632 * end up in the driver. tp->{tx,}lock are held and thus
8633 * we may not sleep.
8634 */
8635 static int tg3_init_rings(struct tg3 *tp)
8636 {
8637 int i;
8638
8639 /* Free up all the SKBs. */
8640 tg3_free_rings(tp);
8641
8642 for (i = 0; i < tp->irq_cnt; i++) {
8643 struct tg3_napi *tnapi = &tp->napi[i];
8644
8645 tnapi->last_tag = 0;
8646 tnapi->last_irq_tag = 0;
8647 tnapi->hw_status->status = 0;
8648 tnapi->hw_status->status_tag = 0;
8649 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8650
8651 tnapi->tx_prod = 0;
8652 tnapi->tx_cons = 0;
8653 if (tnapi->tx_ring)
8654 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8655
8656 tnapi->rx_rcb_ptr = 0;
8657 if (tnapi->rx_rcb)
8658 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8659
8660 if (tnapi->prodring.rx_std &&
8661 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8662 tg3_free_rings(tp);
8663 return -ENOMEM;
8664 }
8665 }
8666
8667 return 0;
8668 }
8669
8670 static void tg3_mem_tx_release(struct tg3 *tp)
8671 {
8672 int i;
8673
8674 for (i = 0; i < tp->irq_max; i++) {
8675 struct tg3_napi *tnapi = &tp->napi[i];
8676
8677 if (tnapi->tx_ring) {
8678 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8679 tnapi->tx_ring, tnapi->tx_desc_mapping);
8680 tnapi->tx_ring = NULL;
8681 }
8682
8683 kfree(tnapi->tx_buffers);
8684 tnapi->tx_buffers = NULL;
8685 }
8686 }
8687
8688 static int tg3_mem_tx_acquire(struct tg3 *tp)
8689 {
8690 int i;
8691 struct tg3_napi *tnapi = &tp->napi[0];
8692
8693 /* If multivector TSS is enabled, vector 0 does not handle
8694 * tx interrupts. Don't allocate any resources for it.
8695 */
8696 if (tg3_flag(tp, ENABLE_TSS))
8697 tnapi++;
8698
8699 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8700 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8701 sizeof(struct tg3_tx_ring_info),
8702 GFP_KERNEL);
8703 if (!tnapi->tx_buffers)
8704 goto err_out;
8705
8706 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8707 TG3_TX_RING_BYTES,
8708 &tnapi->tx_desc_mapping,
8709 GFP_KERNEL);
8710 if (!tnapi->tx_ring)
8711 goto err_out;
8712 }
8713
8714 return 0;
8715
8716 err_out:
8717 tg3_mem_tx_release(tp);
8718 return -ENOMEM;
8719 }
8720
8721 static void tg3_mem_rx_release(struct tg3 *tp)
8722 {
8723 int i;
8724
8725 for (i = 0; i < tp->irq_max; i++) {
8726 struct tg3_napi *tnapi = &tp->napi[i];
8727
8728 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8729
8730 if (!tnapi->rx_rcb)
8731 continue;
8732
8733 dma_free_coherent(&tp->pdev->dev,
8734 TG3_RX_RCB_RING_BYTES(tp),
8735 tnapi->rx_rcb,
8736 tnapi->rx_rcb_mapping);
8737 tnapi->rx_rcb = NULL;
8738 }
8739 }
8740
8741 static int tg3_mem_rx_acquire(struct tg3 *tp)
8742 {
8743 unsigned int i, limit;
8744
8745 limit = tp->rxq_cnt;
8746
8747 /* If RSS is enabled, we need a (dummy) producer ring
8748 * set on vector zero. This is the true hw prodring.
8749 */
8750 if (tg3_flag(tp, ENABLE_RSS))
8751 limit++;
8752
8753 for (i = 0; i < limit; i++) {
8754 struct tg3_napi *tnapi = &tp->napi[i];
8755
8756 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8757 goto err_out;
8758
8759 /* If multivector RSS is enabled, vector 0
8760 * does not handle rx or tx interrupts.
8761 * Don't allocate any resources for it.
8762 */
8763 if (!i && tg3_flag(tp, ENABLE_RSS))
8764 continue;
8765
8766 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8767 TG3_RX_RCB_RING_BYTES(tp),
8768 &tnapi->rx_rcb_mapping,
8769 GFP_KERNEL);
8770 if (!tnapi->rx_rcb)
8771 goto err_out;
8772 }
8773
8774 return 0;
8775
8776 err_out:
8777 tg3_mem_rx_release(tp);
8778 return -ENOMEM;
8779 }
8780
8781 /*
8782 * Must not be invoked with interrupt sources disabled and
8783 * the hardware shut down.
8784 */
8785 static void tg3_free_consistent(struct tg3 *tp)
8786 {
8787 int i;
8788
8789 for (i = 0; i < tp->irq_cnt; i++) {
8790 struct tg3_napi *tnapi = &tp->napi[i];
8791
8792 if (tnapi->hw_status) {
8793 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8794 tnapi->hw_status,
8795 tnapi->status_mapping);
8796 tnapi->hw_status = NULL;
8797 }
8798 }
8799
8800 tg3_mem_rx_release(tp);
8801 tg3_mem_tx_release(tp);
8802
8803 /* tp->hw_stats can be referenced safely:
8804 * 1. under rtnl_lock
8805 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8806 */
8807 if (tp->hw_stats) {
8808 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8809 tp->hw_stats, tp->stats_mapping);
8810 tp->hw_stats = NULL;
8811 }
8812 }
8813
8814 /*
8815 * Must not be invoked with interrupt sources disabled and
8816 * the hardware shut down. Can sleep.
8817 */
8818 static int tg3_alloc_consistent(struct tg3 *tp)
8819 {
8820 int i;
8821
8822 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8823 sizeof(struct tg3_hw_stats),
8824 &tp->stats_mapping, GFP_KERNEL);
8825 if (!tp->hw_stats)
8826 goto err_out;
8827
8828 for (i = 0; i < tp->irq_cnt; i++) {
8829 struct tg3_napi *tnapi = &tp->napi[i];
8830 struct tg3_hw_status *sblk;
8831
8832 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8833 TG3_HW_STATUS_SIZE,
8834 &tnapi->status_mapping,
8835 GFP_KERNEL);
8836 if (!tnapi->hw_status)
8837 goto err_out;
8838
8839 sblk = tnapi->hw_status;
8840
8841 if (tg3_flag(tp, ENABLE_RSS)) {
8842 u16 *prodptr = NULL;
8843
8844 /*
8845 * When RSS is enabled, the status block format changes
8846 * slightly. The "rx_jumbo_consumer", "reserved",
8847 * and "rx_mini_consumer" members get mapped to the
8848 * other three rx return ring producer indexes.
8849 */
8850 switch (i) {
8851 case 1:
8852 prodptr = &sblk->idx[0].rx_producer;
8853 break;
8854 case 2:
8855 prodptr = &sblk->rx_jumbo_consumer;
8856 break;
8857 case 3:
8858 prodptr = &sblk->reserved;
8859 break;
8860 case 4:
8861 prodptr = &sblk->rx_mini_consumer;
8862 break;
8863 }
8864 tnapi->rx_rcb_prod_idx = prodptr;
8865 } else {
8866 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8867 }
8868 }
8869
8870 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8871 goto err_out;
8872
8873 return 0;
8874
8875 err_out:
8876 tg3_free_consistent(tp);
8877 return -ENOMEM;
8878 }
8879
8880 #define MAX_WAIT_CNT 1000
8881
8882 /* To stop a block, clear the enable bit and poll till it
8883 * clears. tp->lock is held.
8884 */
8885 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8886 {
8887 unsigned int i;
8888 u32 val;
8889
8890 if (tg3_flag(tp, 5705_PLUS)) {
8891 switch (ofs) {
8892 case RCVLSC_MODE:
8893 case DMAC_MODE:
8894 case MBFREE_MODE:
8895 case BUFMGR_MODE:
8896 case MEMARB_MODE:
8897 /* We can't enable/disable these bits of the
8898 * 5705/5750, just say success.
8899 */
8900 return 0;
8901
8902 default:
8903 break;
8904 }
8905 }
8906
8907 val = tr32(ofs);
8908 val &= ~enable_bit;
8909 tw32_f(ofs, val);
8910
8911 for (i = 0; i < MAX_WAIT_CNT; i++) {
8912 if (pci_channel_offline(tp->pdev)) {
8913 dev_err(&tp->pdev->dev,
8914 "tg3_stop_block device offline, "
8915 "ofs=%lx enable_bit=%x\n",
8916 ofs, enable_bit);
8917 return -ENODEV;
8918 }
8919
8920 udelay(100);
8921 val = tr32(ofs);
8922 if ((val & enable_bit) == 0)
8923 break;
8924 }
8925
8926 if (i == MAX_WAIT_CNT && !silent) {
8927 dev_err(&tp->pdev->dev,
8928 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8929 ofs, enable_bit);
8930 return -ENODEV;
8931 }
8932
8933 return 0;
8934 }
8935
8936 /* tp->lock is held. */
8937 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8938 {
8939 int i, err;
8940
8941 tg3_disable_ints(tp);
8942
8943 if (pci_channel_offline(tp->pdev)) {
8944 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8945 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8946 err = -ENODEV;
8947 goto err_no_dev;
8948 }
8949
8950 tp->rx_mode &= ~RX_MODE_ENABLE;
8951 tw32_f(MAC_RX_MODE, tp->rx_mode);
8952 udelay(10);
8953
8954 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8955 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8956 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8957 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8958 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8959 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8960
8961 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8962 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8963 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8964 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8965 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8966 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8967 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8968
8969 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8970 tw32_f(MAC_MODE, tp->mac_mode);
8971 udelay(40);
8972
8973 tp->tx_mode &= ~TX_MODE_ENABLE;
8974 tw32_f(MAC_TX_MODE, tp->tx_mode);
8975
8976 for (i = 0; i < MAX_WAIT_CNT; i++) {
8977 udelay(100);
8978 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8979 break;
8980 }
8981 if (i >= MAX_WAIT_CNT) {
8982 dev_err(&tp->pdev->dev,
8983 "%s timed out, TX_MODE_ENABLE will not clear "
8984 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8985 err |= -ENODEV;
8986 }
8987
8988 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8989 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8990 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8991
8992 tw32(FTQ_RESET, 0xffffffff);
8993 tw32(FTQ_RESET, 0x00000000);
8994
8995 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8996 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8997
8998 err_no_dev:
8999 for (i = 0; i < tp->irq_cnt; i++) {
9000 struct tg3_napi *tnapi = &tp->napi[i];
9001 if (tnapi->hw_status)
9002 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9003 }
9004
9005 return err;
9006 }
9007
9008 /* Save PCI command register before chip reset */
9009 static void tg3_save_pci_state(struct tg3 *tp)
9010 {
9011 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9012 }
9013
9014 /* Restore PCI state after chip reset */
9015 static void tg3_restore_pci_state(struct tg3 *tp)
9016 {
9017 u32 val;
9018
9019 /* Re-enable indirect register accesses. */
9020 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9021 tp->misc_host_ctrl);
9022
9023 /* Set MAX PCI retry to zero. */
9024 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9025 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9026 tg3_flag(tp, PCIX_MODE))
9027 val |= PCISTATE_RETRY_SAME_DMA;
9028 /* Allow reads and writes to the APE register and memory space. */
9029 if (tg3_flag(tp, ENABLE_APE))
9030 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9031 PCISTATE_ALLOW_APE_SHMEM_WR |
9032 PCISTATE_ALLOW_APE_PSPACE_WR;
9033 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9034
9035 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9036
9037 if (!tg3_flag(tp, PCI_EXPRESS)) {
9038 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9039 tp->pci_cacheline_sz);
9040 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9041 tp->pci_lat_timer);
9042 }
9043
9044 /* Make sure PCI-X relaxed ordering bit is clear. */
9045 if (tg3_flag(tp, PCIX_MODE)) {
9046 u16 pcix_cmd;
9047
9048 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9049 &pcix_cmd);
9050 pcix_cmd &= ~PCI_X_CMD_ERO;
9051 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9052 pcix_cmd);
9053 }
9054
9055 if (tg3_flag(tp, 5780_CLASS)) {
9056
9057 /* Chip reset on 5780 will reset MSI enable bit,
9058 * so we need to restore it.
9059 */
9060 if (tg3_flag(tp, USING_MSI)) {
9061 u16 ctrl;
9062
9063 pci_read_config_word(tp->pdev,
9064 tp->msi_cap + PCI_MSI_FLAGS,
9065 &ctrl);
9066 pci_write_config_word(tp->pdev,
9067 tp->msi_cap + PCI_MSI_FLAGS,
9068 ctrl | PCI_MSI_FLAGS_ENABLE);
9069 val = tr32(MSGINT_MODE);
9070 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9071 }
9072 }
9073 }
9074
9075 static void tg3_override_clk(struct tg3 *tp)
9076 {
9077 u32 val;
9078
9079 switch (tg3_asic_rev(tp)) {
9080 case ASIC_REV_5717:
9081 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9082 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9083 TG3_CPMU_MAC_ORIDE_ENABLE);
9084 break;
9085
9086 case ASIC_REV_5719:
9087 case ASIC_REV_5720:
9088 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9089 break;
9090
9091 default:
9092 return;
9093 }
9094 }
9095
9096 static void tg3_restore_clk(struct tg3 *tp)
9097 {
9098 u32 val;
9099
9100 switch (tg3_asic_rev(tp)) {
9101 case ASIC_REV_5717:
9102 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9103 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9104 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9105 break;
9106
9107 case ASIC_REV_5719:
9108 case ASIC_REV_5720:
9109 val = tr32(TG3_CPMU_CLCK_ORIDE);
9110 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9111 break;
9112
9113 default:
9114 return;
9115 }
9116 }
9117
9118 /* tp->lock is held. */
9119 static int tg3_chip_reset(struct tg3 *tp)
9120 __releases(tp->lock)
9121 __acquires(tp->lock)
9122 {
9123 u32 val;
9124 void (*write_op)(struct tg3 *, u32, u32);
9125 int i, err;
9126
9127 if (!pci_device_is_present(tp->pdev))
9128 return -ENODEV;
9129
9130 tg3_nvram_lock(tp);
9131
9132 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9133
9134 /* No matching tg3_nvram_unlock() after this because
9135 * chip reset below will undo the nvram lock.
9136 */
9137 tp->nvram_lock_cnt = 0;
9138
9139 /* GRC_MISC_CFG core clock reset will clear the memory
9140 * enable bit in PCI register 4 and the MSI enable bit
9141 * on some chips, so we save relevant registers here.
9142 */
9143 tg3_save_pci_state(tp);
9144
9145 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9146 tg3_flag(tp, 5755_PLUS))
9147 tw32(GRC_FASTBOOT_PC, 0);
9148
9149 /*
9150 * We must avoid the readl() that normally takes place.
9151 * It locks machines, causes machine checks, and other
9152 * fun things. So, temporarily disable the 5701
9153 * hardware workaround, while we do the reset.
9154 */
9155 write_op = tp->write32;
9156 if (write_op == tg3_write_flush_reg32)
9157 tp->write32 = tg3_write32;
9158
9159 /* Prevent the irq handler from reading or writing PCI registers
9160 * during chip reset when the memory enable bit in the PCI command
9161 * register may be cleared. The chip does not generate interrupts
9162 * at this time, but the irq handler may still be called due to irq
9163 * sharing or irqpoll.
9164 */
9165 tg3_flag_set(tp, CHIP_RESETTING);
9166 for (i = 0; i < tp->irq_cnt; i++) {
9167 struct tg3_napi *tnapi = &tp->napi[i];
9168 if (tnapi->hw_status) {
9169 tnapi->hw_status->status = 0;
9170 tnapi->hw_status->status_tag = 0;
9171 }
9172 tnapi->last_tag = 0;
9173 tnapi->last_irq_tag = 0;
9174 }
9175 smp_mb();
9176
9177 tg3_full_unlock(tp);
9178
9179 for (i = 0; i < tp->irq_cnt; i++)
9180 synchronize_irq(tp->napi[i].irq_vec);
9181
9182 tg3_full_lock(tp, 0);
9183
9184 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9185 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9186 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9187 }
9188
9189 /* do the reset */
9190 val = GRC_MISC_CFG_CORECLK_RESET;
9191
9192 if (tg3_flag(tp, PCI_EXPRESS)) {
9193 /* Force PCIe 1.0a mode */
9194 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9195 !tg3_flag(tp, 57765_PLUS) &&
9196 tr32(TG3_PCIE_PHY_TSTCTL) ==
9197 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9198 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9199
9200 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9201 tw32(GRC_MISC_CFG, (1 << 29));
9202 val |= (1 << 29);
9203 }
9204 }
9205
9206 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9207 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9208 tw32(GRC_VCPU_EXT_CTRL,
9209 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9210 }
9211
9212 /* Set the clock to the highest frequency to avoid timeouts. With link
9213 * aware mode, the clock speed could be slow and bootcode does not
9214 * complete within the expected time. Override the clock to allow the
9215 * bootcode to finish sooner and then restore it.
9216 */
9217 tg3_override_clk(tp);
9218
9219 /* Manage gphy power for all CPMU absent PCIe devices. */
9220 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9221 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9222
9223 tw32(GRC_MISC_CFG, val);
9224
9225 /* restore 5701 hardware bug workaround write method */
9226 tp->write32 = write_op;
9227
9228 /* Unfortunately, we have to delay before the PCI read back.
9229 * Some 575X chips will not even respond to a PCI cfg access
9230 * when the reset command is given to the chip.
9231 *
9232 * How do these hardware designers expect things to work
9233 * properly if the PCI write is posted for a long period
9234 * of time? It is always necessary to have some method by
9235 * which a register read back can occur to push the write
9236 * out which does the reset.
9237 *
9238 * For most tg3 variants the trick below was working.
9239 * Ho hum...
9240 */
9241 udelay(120);
9242
9243 /* Flush PCI posted writes. The normal MMIO registers
9244 * are inaccessible at this time so this is the only
9245 * way to do this reliably (actually, this is no longer
9246 * the case, see above). I tried to use indirect
9247 * register read/write but this upset some 5701 variants.
9248 */
9249 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9250
9251 udelay(120);
9252
9253 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9254 u16 val16;
9255
9256 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9257 int j;
9258 u32 cfg_val;
9259
9260 /* Wait for link training to complete. */
9261 for (j = 0; j < 5000; j++)
9262 udelay(100);
9263
9264 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9265 pci_write_config_dword(tp->pdev, 0xc4,
9266 cfg_val | (1 << 15));
9267 }
9268
9269 /* Clear the "no snoop" and "relaxed ordering" bits. */
9270 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9271 /*
9272 * Older PCIe devices only support the 128 byte
9273 * MPS setting. Enforce the restriction.
9274 */
9275 if (!tg3_flag(tp, CPMU_PRESENT))
9276 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9277 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9278
9279 /* Clear error status */
9280 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9281 PCI_EXP_DEVSTA_CED |
9282 PCI_EXP_DEVSTA_NFED |
9283 PCI_EXP_DEVSTA_FED |
9284 PCI_EXP_DEVSTA_URD);
9285 }
9286
9287 tg3_restore_pci_state(tp);
9288
9289 tg3_flag_clear(tp, CHIP_RESETTING);
9290 tg3_flag_clear(tp, ERROR_PROCESSED);
9291
9292 val = 0;
9293 if (tg3_flag(tp, 5780_CLASS))
9294 val = tr32(MEMARB_MODE);
9295 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9296
9297 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9298 tg3_stop_fw(tp);
9299 tw32(0x5000, 0x400);
9300 }
9301
9302 if (tg3_flag(tp, IS_SSB_CORE)) {
9303 /*
9304 * BCM4785: In order to avoid repercussions from using
9305 * potentially defective internal ROM, stop the Rx RISC CPU,
9306 * which is not required for normal operation.
9307 */
9308 tg3_stop_fw(tp);
9309 tg3_halt_cpu(tp, RX_CPU_BASE);
9310 }
9311
9312 err = tg3_poll_fw(tp);
9313 if (err)
9314 return err;
9315
9316 tw32(GRC_MODE, tp->grc_mode);
9317
9318 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9319 val = tr32(0xc4);
9320
9321 tw32(0xc4, val | (1 << 15));
9322 }
9323
9324 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9325 tg3_asic_rev(tp) == ASIC_REV_5705) {
9326 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9327 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9328 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9329 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9330 }
9331
9332 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9333 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9334 val = tp->mac_mode;
9335 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9336 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9337 val = tp->mac_mode;
9338 } else
9339 val = 0;
9340
9341 tw32_f(MAC_MODE, val);
9342 udelay(40);
9343
9344 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9345
9346 tg3_mdio_start(tp);
9347
9348 if (tg3_flag(tp, PCI_EXPRESS) &&
9349 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9350 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9351 !tg3_flag(tp, 57765_PLUS)) {
9352 val = tr32(0x7c00);
9353
9354 tw32(0x7c00, val | (1 << 25));
9355 }
9356
9357 tg3_restore_clk(tp);
9358
9359 /* Increase the core clock speed to fix tx timeout issue for 5762
9360 * with 100Mbps link speed.
9361 */
9362 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9363 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9364 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9365 TG3_CPMU_MAC_ORIDE_ENABLE);
9366 }
9367
9368 /* Reprobe ASF enable state. */
9369 tg3_flag_clear(tp, ENABLE_ASF);
9370 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9371 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9372
9373 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9374 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9375 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9376 u32 nic_cfg;
9377
9378 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9379 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9380 tg3_flag_set(tp, ENABLE_ASF);
9381 tp->last_event_jiffies = jiffies;
9382 if (tg3_flag(tp, 5750_PLUS))
9383 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9384
9385 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9386 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9387 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9388 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9389 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9390 }
9391 }
9392
9393 return 0;
9394 }
9395
9396 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9397 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9398 static void __tg3_set_rx_mode(struct net_device *);
9399
9400 /* tp->lock is held. */
9401 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9402 {
9403 int err, i;
9404
9405 tg3_stop_fw(tp);
9406
9407 tg3_write_sig_pre_reset(tp, kind);
9408
9409 tg3_abort_hw(tp, silent);
9410 err = tg3_chip_reset(tp);
9411
9412 __tg3_set_mac_addr(tp, false);
9413
9414 tg3_write_sig_legacy(tp, kind);
9415 tg3_write_sig_post_reset(tp, kind);
9416
9417 if (tp->hw_stats) {
9418 /* Save the stats across chip resets... */
9419 tg3_get_nstats(tp, &tp->net_stats_prev);
9420 tg3_get_estats(tp, &tp->estats_prev);
9421
9422 /* And make sure the next sample is new data */
9423 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9424
9425 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9426 struct tg3_napi *tnapi = &tp->napi[i];
9427
9428 tnapi->rx_dropped = 0;
9429 tnapi->tx_dropped = 0;
9430 }
9431 }
9432
9433 return err;
9434 }
9435
9436 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9437 {
9438 struct tg3 *tp = netdev_priv(dev);
9439 struct sockaddr *addr = p;
9440 int err = 0;
9441 bool skip_mac_1 = false;
9442
9443 if (!is_valid_ether_addr(addr->sa_data))
9444 return -EADDRNOTAVAIL;
9445
9446 eth_hw_addr_set(dev, addr->sa_data);
9447
9448 if (!netif_running(dev))
9449 return 0;
9450
9451 if (tg3_flag(tp, ENABLE_ASF)) {
9452 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9453
9454 addr0_high = tr32(MAC_ADDR_0_HIGH);
9455 addr0_low = tr32(MAC_ADDR_0_LOW);
9456 addr1_high = tr32(MAC_ADDR_1_HIGH);
9457 addr1_low = tr32(MAC_ADDR_1_LOW);
9458
9459 /* Skip MAC addr 1 if ASF is using it. */
9460 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9461 !(addr1_high == 0 && addr1_low == 0))
9462 skip_mac_1 = true;
9463 }
9464 spin_lock_bh(&tp->lock);
9465 __tg3_set_mac_addr(tp, skip_mac_1);
9466 __tg3_set_rx_mode(dev);
9467 spin_unlock_bh(&tp->lock);
9468
9469 return err;
9470 }
9471
9472 /* tp->lock is held. */
9473 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9474 dma_addr_t mapping, u32 maxlen_flags,
9475 u32 nic_addr)
9476 {
9477 tg3_write_mem(tp,
9478 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9479 ((u64) mapping >> 32));
9480 tg3_write_mem(tp,
9481 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9482 ((u64) mapping & 0xffffffff));
9483 tg3_write_mem(tp,
9484 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9485 maxlen_flags);
9486
9487 if (!tg3_flag(tp, 5705_PLUS))
9488 tg3_write_mem(tp,
9489 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9490 nic_addr);
9491 }
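/* Example of the SRAM layout written above (illustrative values): for
 * a ring at DMA address 0x00000001fffff000 with maxlen_flags of
 * (512 << BDINFO_FLAGS_MAXLEN_SHIFT), the BD info block receives
 *
 *   bdinfo + TG3_BDINFO_HOST_ADDR + HIGH : 0x00000001
 *   bdinfo + TG3_BDINFO_HOST_ADDR + LOW  : 0xfffff000
 *   bdinfo + TG3_BDINFO_MAXLEN_FLAGS     : ring length / flag bits
 *
 * plus the optional NIC-local address on chips without the 5705_PLUS
 * flag, as the test above shows. tg3_tx_rcbs_init() below uses this
 * to point each send ring's RCB at its descriptor memory.
 */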
9492
9493
9494 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9495 {
9496 int i = 0;
9497
9498 if (!tg3_flag(tp, ENABLE_TSS)) {
9499 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9500 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9501 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9502 } else {
9503 tw32(HOSTCC_TXCOL_TICKS, 0);
9504 tw32(HOSTCC_TXMAX_FRAMES, 0);
9505 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9506
9507 for (; i < tp->txq_cnt; i++) {
9508 u32 reg;
9509
9510 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9511 tw32(reg, ec->tx_coalesce_usecs);
9512 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9513 tw32(reg, ec->tx_max_coalesced_frames);
9514 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9515 tw32(reg, ec->tx_max_coalesced_frames_irq);
9516 }
9517 }
9518
9519 for (; i < tp->irq_max - 1; i++) {
9520 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9521 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9522 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9523 }
9524 }
9525
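/* Note: the per-vector host-coalescing registers used above and in
 * tg3_coal_rx_init() below form 0x18-byte blocks, so BASE_VEC1 + i * 0x18
 * addresses vector i + 1. This is inferred from the register spacing, not
 * stated in any header comment.
 */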
9526 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9527 {
9528 int i = 0;
9529 u32 limit = tp->rxq_cnt;
9530
9531 if (!tg3_flag(tp, ENABLE_RSS)) {
9532 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9533 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9534 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9535 limit--;
9536 } else {
9537 tw32(HOSTCC_RXCOL_TICKS, 0);
9538 tw32(HOSTCC_RXMAX_FRAMES, 0);
9539 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9540 }
9541
9542 for (; i < limit; i++) {
9543 u32 reg;
9544
9545 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9546 tw32(reg, ec->rx_coalesce_usecs);
9547 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9548 tw32(reg, ec->rx_max_coalesced_frames);
9549 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9550 tw32(reg, ec->rx_max_coalesced_frames_irq);
9551 }
9552
9553 for (; i < tp->irq_max - 1; i++) {
9554 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9555 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9556 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9557 }
9558 }
9559
9560 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9561 {
9562 tg3_coal_tx_init(tp, ec);
9563 tg3_coal_rx_init(tp, ec);
9564
9565 if (!tg3_flag(tp, 5705_PLUS)) {
9566 u32 val = ec->stats_block_coalesce_usecs;
9567
9568 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9569 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9570
9571 if (!tp->link_up)
9572 val = 0;
9573
9574 tw32(HOSTCC_STAT_COAL_TICKS, val);
9575 }
9576 }
9577
9578 /* tp->lock is held. */
9579 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9580 {
9581 u32 txrcb, limit;
9582
9583 /* Disable all transmit rings but the first. */
9584 if (!tg3_flag(tp, 5705_PLUS))
9585 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9586 else if (tg3_flag(tp, 5717_PLUS))
9587 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9588 else if (tg3_flag(tp, 57765_CLASS) ||
9589 tg3_asic_rev(tp) == ASIC_REV_5762)
9590 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9591 else
9592 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9593
9594 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9595 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9596 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9597 BDINFO_FLAGS_DISABLED);
9598 }
9599
9600 /* tp->lock is held. */
9601 static void tg3_tx_rcbs_init(struct tg3 *tp)
9602 {
9603 int i = 0;
9604 u32 txrcb = NIC_SRAM_SEND_RCB;
9605
9606 if (tg3_flag(tp, ENABLE_TSS))
9607 i++;
9608
9609 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9610 struct tg3_napi *tnapi = &tp->napi[i];
9611
9612 if (!tnapi->tx_ring)
9613 continue;
9614
9615 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9616 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9617 NIC_SRAM_TX_BUFFER_DESC);
9618 }
9619 }
9620
9621 /* tp->lock is held. */
9622 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9623 {
9624 u32 rxrcb, limit;
9625
9626 /* Disable all receive return rings but the first. */
9627 if (tg3_flag(tp, 5717_PLUS))
9628 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9629 else if (!tg3_flag(tp, 5705_PLUS))
9630 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9631 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9632 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9633 tg3_flag(tp, 57765_CLASS))
9634 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9635 else
9636 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9637
9638 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9639 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9640 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9641 BDINFO_FLAGS_DISABLED);
9642 }
9643
9644 /* tp->lock is held. */
9645 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9646 {
9647 int i = 0;
9648 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9649
9650 if (tg3_flag(tp, ENABLE_RSS))
9651 i++;
9652
9653 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9654 struct tg3_napi *tnapi = &tp->napi[i];
9655
9656 if (!tnapi->rx_rcb)
9657 continue;
9658
9659 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9660 (tp->rx_ret_ring_mask + 1) <<
9661 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9662 }
9663 }
9664
9665 /* tp->lock is held. */
9666 static void tg3_rings_reset(struct tg3 *tp)
9667 {
9668 int i;
9669 u32 stblk;
9670 struct tg3_napi *tnapi = &tp->napi[0];
9671
9672 tg3_tx_rcbs_disable(tp);
9673
9674 tg3_rx_ret_rcbs_disable(tp);
9675
9676 /* Disable interrupts */
9677 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9678 tp->napi[0].chk_msi_cnt = 0;
9679 tp->napi[0].last_rx_cons = 0;
9680 tp->napi[0].last_tx_cons = 0;
9681
9682 /* Zero mailbox registers. */
9683 if (tg3_flag(tp, SUPPORT_MSIX)) {
9684 for (i = 1; i < tp->irq_max; i++) {
9685 tp->napi[i].tx_prod = 0;
9686 tp->napi[i].tx_cons = 0;
9687 if (tg3_flag(tp, ENABLE_TSS))
9688 tw32_mailbox(tp->napi[i].prodmbox, 0);
9689 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9690 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9691 tp->napi[i].chk_msi_cnt = 0;
9692 tp->napi[i].last_rx_cons = 0;
9693 tp->napi[i].last_tx_cons = 0;
9694 }
9695 if (!tg3_flag(tp, ENABLE_TSS))
9696 tw32_mailbox(tp->napi[0].prodmbox, 0);
9697 } else {
9698 tp->napi[0].tx_prod = 0;
9699 tp->napi[0].tx_cons = 0;
9700 tw32_mailbox(tp->napi[0].prodmbox, 0);
9701 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9702 }
9703
9704 /* Make sure the NIC-based send BD rings are disabled. */
9705 if (!tg3_flag(tp, 5705_PLUS)) {
9706 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9707 for (i = 0; i < 16; i++)
9708 tw32_tx_mbox(mbox + i * 8, 0);
9709 }
9710
9711 /* Clear status block in ram. */
9712 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9713
9714 /* Set status block DMA address */
9715 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9716 ((u64) tnapi->status_mapping >> 32));
9717 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9718 ((u64) tnapi->status_mapping & 0xffffffff));
9719
9720 stblk = HOSTCC_STATBLCK_RING1;
9721
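	/* Each extra vector's status block address is a 64-bit register pair
	 * (high word at +0, low word at +4), hence the 8-byte stride below.
	 */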
9722 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9723 u64 mapping = (u64)tnapi->status_mapping;
9724 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9725 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9726 stblk += 8;
9727
9728 /* Clear status block in ram. */
9729 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9730 }
9731
9732 tg3_tx_rcbs_init(tp);
9733 tg3_rx_ret_rcbs_init(tp);
9734 }
9735
9736 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9737 {
9738 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9739
9740 if (!tg3_flag(tp, 5750_PLUS) ||
9741 tg3_flag(tp, 5780_CLASS) ||
9742 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9743 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9744 tg3_flag(tp, 57765_PLUS))
9745 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9746 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9747 tg3_asic_rev(tp) == ASIC_REV_5787)
9748 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9749 else
9750 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9751
9752 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9753 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9754
9755 val = min(nic_rep_thresh, host_rep_thresh);
9756 tw32(RCVBDI_STD_THRESH, val);
9757
9758 if (tg3_flag(tp, 57765_PLUS))
9759 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9760
9761 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9762 return;
9763
9764 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9765
9766 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9767
9768 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9769 tw32(RCVBDI_JUMBO_THRESH, val);
9770
9771 if (tg3_flag(tp, 57765_PLUS))
9772 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9773 }
9774
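/* Worked example for tg3_setup_rxbd_thresholds() above, with hypothetical
 * numbers: given a BD cache of 8 entries and rx_pending == 200,
 * nic_rep_thresh = min(8 / 2, rx_std_max_post) and
 * host_rep_thresh = max(200 / 8, 1) = 25, so RCVBDI_STD_THRESH gets the
 * smaller of the two. These thresholds decide how early the chip asks for
 * the standard BD cache to be replenished.
 */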
9775 static inline u32 calc_crc(unsigned char *buf, int len)
9776 {
9777 u32 reg;
9778 u32 tmp;
9779 int j, k;
9780
9781 reg = 0xffffffff;
9782
9783 for (j = 0; j < len; j++) {
9784 reg ^= buf[j];
9785
9786 for (k = 0; k < 8; k++) {
9787 tmp = reg & 0x01;
9788
9789 reg >>= 1;
9790
9791 if (tmp)
9792 reg ^= CRC32_POLY_LE;
9793 }
9794 }
9795
9796 return ~reg;
9797 }
9798
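/* Illustrative sketch of the multicast hash used by __tg3_set_rx_mode()
 * below: the low 7 bits of the inverted little-endian CRC-32 of the MAC
 * address pick one of 128 filter bits spread across the four 32-bit
 * MAC_HASH_REG_* registers. The helper name is made up for the example.
 */
#if 0
static void tg3_mc_hash_example(u8 *mc_addr, u32 mc_filter[4])
{
	u32 crc = calc_crc(mc_addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;			/* hash bit index, 0..127 */
	u32 regidx = (bit & 0x60) >> 5;		/* which of the 4 registers */

	mc_filter[regidx] |= 1 << (bit & 0x1f);	/* bit within that register */
}
#endif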
9799 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9800 {
9801 /* accept or reject all multicast frames */
9802 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9803 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9804 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9805 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9806 }
9807
9808 static void __tg3_set_rx_mode(struct net_device *dev)
9809 {
9810 struct tg3 *tp = netdev_priv(dev);
9811 u32 rx_mode;
9812
9813 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9814 RX_MODE_KEEP_VLAN_TAG);
9815
9816 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9817 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9818 * flag clear.
9819 */
9820 if (!tg3_flag(tp, ENABLE_ASF))
9821 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9822 #endif
9823
9824 if (dev->flags & IFF_PROMISC) {
9825 /* Promiscuous mode. */
9826 rx_mode |= RX_MODE_PROMISC;
9827 } else if (dev->flags & IFF_ALLMULTI) {
9828 /* Accept all multicast. */
9829 tg3_set_multi(tp, 1);
9830 } else if (netdev_mc_empty(dev)) {
9831 /* Reject all multicast. */
9832 tg3_set_multi(tp, 0);
9833 } else {
9834 /* Accept one or more multicast(s). */
9835 struct netdev_hw_addr *ha;
9836 u32 mc_filter[4] = { 0, };
9837 u32 regidx;
9838 u32 bit;
9839 u32 crc;
9840
9841 netdev_for_each_mc_addr(ha, dev) {
9842 crc = calc_crc(ha->addr, ETH_ALEN);
9843 bit = ~crc & 0x7f;
9844 regidx = (bit & 0x60) >> 5;
9845 bit &= 0x1f;
9846 mc_filter[regidx] |= (1 << bit);
9847 }
9848
9849 tw32(MAC_HASH_REG_0, mc_filter[0]);
9850 tw32(MAC_HASH_REG_1, mc_filter[1]);
9851 tw32(MAC_HASH_REG_2, mc_filter[2]);
9852 tw32(MAC_HASH_REG_3, mc_filter[3]);
9853 }
9854
9855 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9856 rx_mode |= RX_MODE_PROMISC;
9857 } else if (!(dev->flags & IFF_PROMISC)) {
9858 /* Add all entries into the MAC addr filter list */
9859 int i = 0;
9860 struct netdev_hw_addr *ha;
9861
9862 netdev_for_each_uc_addr(ha, dev) {
9863 __tg3_set_one_mac_addr(tp, ha->addr,
9864 i + TG3_UCAST_ADDR_IDX(tp));
9865 i++;
9866 }
9867 }
9868
9869 if (rx_mode != tp->rx_mode) {
9870 tp->rx_mode = rx_mode;
9871 tw32_f(MAC_RX_MODE, rx_mode);
9872 udelay(10);
9873 }
9874 }
9875
9876 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9877 {
9878 int i;
9879
9880 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9881 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9882 }
9883
9884 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9885 {
9886 int i;
9887
9888 if (!tg3_flag(tp, SUPPORT_MSIX))
9889 return;
9890
9891 if (tp->rxq_cnt == 1) {
9892 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9893 return;
9894 }
9895
9896 /* Validate table against current IRQ count */
9897 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9898 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9899 break;
9900 }
9901
9902 if (i != TG3_RSS_INDIR_TBL_SIZE)
9903 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9904 }
9905
9906 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9907 {
9908 int i = 0;
9909 u32 reg = MAC_RSS_INDIR_TBL_0;
9910
9911 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9912 u32 val = tp->rss_ind_tbl[i];
9913 i++;
9914 for (; i % 8; i++) {
9915 val <<= 4;
9916 val |= tp->rss_ind_tbl[i];
9917 }
9918 tw32(reg, val);
9919 reg += 4;
9920 }
9921 }
9922
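/* tg3_rss_write_indir_tbl() above packs eight 4-bit queue indices per
 * 32-bit register, most significant nibble first, so table entry i lands
 * in register i / 8 at nibble position 7 - (i % 8). A sketch of the
 * inverse, for reference only (the helper is hypothetical and assumes the
 * same layout as the writer):
 */
#if 0
static u8 tg3_rss_indir_entry_example(u32 regval, int nibble)
{
	return (regval >> (4 * (7 - nibble))) & 0xf;
}
#endif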
9923 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9924 {
9925 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9926 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9927 else
9928 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9929 }
9930
9931 /* tp->lock is held. */
9932 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9933 {
9934 u32 val, rdmac_mode;
9935 int i, err, limit;
9936 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9937
9938 tg3_disable_ints(tp);
9939
9940 tg3_stop_fw(tp);
9941
9942 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9943
9944 if (tg3_flag(tp, INIT_COMPLETE))
9945 tg3_abort_hw(tp, 1);
9946
9947 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9948 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9949 tg3_phy_pull_config(tp);
9950 tg3_eee_pull_config(tp, NULL);
9951 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9952 }
9953
9954 /* Enable MAC control of LPI */
9955 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9956 tg3_setup_eee(tp);
9957
9958 if (reset_phy)
9959 tg3_phy_reset(tp);
9960
9961 err = tg3_chip_reset(tp);
9962 if (err)
9963 return err;
9964
9965 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9966
9967 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9968 val = tr32(TG3_CPMU_CTRL);
9969 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9970 tw32(TG3_CPMU_CTRL, val);
9971
9972 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9973 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9974 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9975 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9976
9977 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9978 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9979 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9980 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9981
9982 val = tr32(TG3_CPMU_HST_ACC);
9983 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9984 val |= CPMU_HST_ACC_MACCLK_6_25;
9985 tw32(TG3_CPMU_HST_ACC, val);
9986 }
9987
9988 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9989 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9990 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9991 PCIE_PWR_MGMT_L1_THRESH_4MS;
9992 tw32(PCIE_PWR_MGMT_THRESH, val);
9993
9994 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9995 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9996
9997 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9998
9999 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10000 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10001 }
10002
10003 if (tg3_flag(tp, L1PLLPD_EN)) {
10004 u32 grc_mode = tr32(GRC_MODE);
10005
10006 /* Access the lower 1K of PL PCIE block registers. */
10007 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10008 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10009
10010 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10011 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10012 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10013
10014 tw32(GRC_MODE, grc_mode);
10015 }
10016
10017 if (tg3_flag(tp, 57765_CLASS)) {
10018 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10019 u32 grc_mode = tr32(GRC_MODE);
10020
10021 /* Access the lower 1K of PL PCIE block registers. */
10022 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10023 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10024
10025 val = tr32(TG3_PCIE_TLDLPL_PORT +
10026 TG3_PCIE_PL_LO_PHYCTL5);
10027 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10028 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10029
10030 tw32(GRC_MODE, grc_mode);
10031 }
10032
10033 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10034 u32 grc_mode;
10035
10036 /* Fix transmit hangs */
10037 val = tr32(TG3_CPMU_PADRNG_CTL);
10038 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10039 tw32(TG3_CPMU_PADRNG_CTL, val);
10040
10041 grc_mode = tr32(GRC_MODE);
10042
10043 /* Access the lower 1K of DL PCIE block registers. */
10044 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10045 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10046
10047 val = tr32(TG3_PCIE_TLDLPL_PORT +
10048 TG3_PCIE_DL_LO_FTSMAX);
10049 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10050 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10051 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10052
10053 tw32(GRC_MODE, grc_mode);
10054 }
10055
10056 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10057 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10058 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10059 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10060 }
10061
10062 /* This works around an issue with Athlon chipsets on
10063 * B3 tigon3 silicon. This bit has no effect on any
10064 * other revision. But do not set this on PCI Express
10065 * chips and don't even touch the clocks if the CPMU is present.
10066 */
10067 if (!tg3_flag(tp, CPMU_PRESENT)) {
10068 if (!tg3_flag(tp, PCI_EXPRESS))
10069 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10070 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10071 }
10072
10073 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10074 tg3_flag(tp, PCIX_MODE)) {
10075 val = tr32(TG3PCI_PCISTATE);
10076 val |= PCISTATE_RETRY_SAME_DMA;
10077 tw32(TG3PCI_PCISTATE, val);
10078 }
10079
10080 if (tg3_flag(tp, ENABLE_APE)) {
10081 /* Allow reads and writes to the
10082 * APE register and memory space.
10083 */
10084 val = tr32(TG3PCI_PCISTATE);
10085 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10086 PCISTATE_ALLOW_APE_SHMEM_WR |
10087 PCISTATE_ALLOW_APE_PSPACE_WR;
10088 tw32(TG3PCI_PCISTATE, val);
10089 }
10090
10091 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10092 /* Enable some hw fixes. */
10093 val = tr32(TG3PCI_MSI_DATA);
10094 val |= (1 << 26) | (1 << 28) | (1 << 29);
10095 tw32(TG3PCI_MSI_DATA, val);
10096 }
10097
10098 /* Descriptor ring init may make accesses to the
10099 * NIC SRAM area to setup the TX descriptors, so we
10100 * can only do this after the hardware has been
10101 * successfully reset.
10102 */
10103 err = tg3_init_rings(tp);
10104 if (err)
10105 return err;
10106
10107 if (tg3_flag(tp, 57765_PLUS)) {
10108 val = tr32(TG3PCI_DMA_RW_CTRL) &
10109 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10110 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10111 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10112 if (!tg3_flag(tp, 57765_CLASS) &&
10113 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10114 tg3_asic_rev(tp) != ASIC_REV_5762)
10115 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10116 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10117 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10118 tg3_asic_rev(tp) != ASIC_REV_5761) {
10119 /* This value is determined during the probe-time DMA
10120 * engine test, tg3_test_dma().
10121 */
10122 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10123 }
10124
10125 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10126 GRC_MODE_4X_NIC_SEND_RINGS |
10127 GRC_MODE_NO_TX_PHDR_CSUM |
10128 GRC_MODE_NO_RX_PHDR_CSUM);
10129 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10130
10131 /* Pseudo-header checksum is done by hardware logic and not
10132 * the offload processors, so make the chip do the pseudo-
10133 * header checksums on receive. For transmit it is more
10134 * convenient to do the pseudo-header checksum in software
10135 * as Linux does that on transmit for us in all cases.
10136 */
10137 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10138
10139 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10140 if (tp->rxptpctl)
10141 tw32(TG3_RX_PTP_CTL,
10142 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10143
10144 if (tg3_flag(tp, PTP_CAPABLE))
10145 val |= GRC_MODE_TIME_SYNC_ENABLE;
10146
10147 tw32(GRC_MODE, tp->grc_mode | val);
10148
10149 /* On one AMD platform, MRRS is restricted to 4000 because of a
10150 * south bridge limitation. As a workaround, the driver sets MRRS
10151 * to 2048 instead of the default 4096.
10152 */
10153 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10154 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10155 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10156 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10157 }
10158
10159 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10160 val = tr32(GRC_MISC_CFG);
10161 val &= ~0xff;
10162 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10163 tw32(GRC_MISC_CFG, val);
10164
10165 /* Initialize MBUF/DESC pool. */
10166 if (tg3_flag(tp, 5750_PLUS)) {
10167 /* Do nothing. */
10168 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10169 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10170 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10171 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10172 else
10173 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10174 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10175 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10176 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10177 int fw_len;
10178
10179 fw_len = tp->fw_len;
10180 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10181 tw32(BUFMGR_MB_POOL_ADDR,
10182 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10183 tw32(BUFMGR_MB_POOL_SIZE,
10184 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10185 }
10186
10187 if (tp->dev->mtu <= ETH_DATA_LEN) {
10188 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10189 tp->bufmgr_config.mbuf_read_dma_low_water);
10190 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10191 tp->bufmgr_config.mbuf_mac_rx_low_water);
10192 tw32(BUFMGR_MB_HIGH_WATER,
10193 tp->bufmgr_config.mbuf_high_water);
10194 } else {
10195 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10196 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10197 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10198 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10199 tw32(BUFMGR_MB_HIGH_WATER,
10200 tp->bufmgr_config.mbuf_high_water_jumbo);
10201 }
10202 tw32(BUFMGR_DMA_LOW_WATER,
10203 tp->bufmgr_config.dma_low_water);
10204 tw32(BUFMGR_DMA_HIGH_WATER,
10205 tp->bufmgr_config.dma_high_water);
10206
10207 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10208 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10209 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10210 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10211 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10212 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10213 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10214 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10215 tw32(BUFMGR_MODE, val);
10216 for (i = 0; i < 2000; i++) {
10217 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10218 break;
10219 udelay(10);
10220 }
10221 if (i >= 2000) {
10222 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10223 return -ENODEV;
10224 }
10225
10226 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10227 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10228
10229 tg3_setup_rxbd_thresholds(tp);
10230
10231 /* Initialize TG3_BDINFO's at:
10232 * RCVDBDI_STD_BD: standard eth size rx ring
10233 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10234 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10235 *
10236 * like so:
10237 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10238 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10239 * ring attribute flags
10240 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10241 *
10242 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10243 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10244 *
10245 * The size of each ring is fixed in the firmware, but the location is
10246 * configurable.
10247 */
10248 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10249 ((u64) tpr->rx_std_mapping >> 32));
10250 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10251 ((u64) tpr->rx_std_mapping & 0xffffffff));
10252 if (!tg3_flag(tp, 5717_PLUS))
10253 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10254 NIC_SRAM_RX_BUFFER_DESC);
10255
10256 /* Disable the mini ring */
10257 if (!tg3_flag(tp, 5705_PLUS))
10258 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10259 BDINFO_FLAGS_DISABLED);
10260
10261 /* Program the jumbo buffer descriptor ring control
10262 * blocks on those devices that have them.
10263 */
10264 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10265 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10266
10267 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10268 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10269 ((u64) tpr->rx_jmb_mapping >> 32));
10270 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10271 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10272 val = TG3_RX_JMB_RING_SIZE(tp) <<
10273 BDINFO_FLAGS_MAXLEN_SHIFT;
10274 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10275 val | BDINFO_FLAGS_USE_EXT_RECV);
10276 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10277 tg3_flag(tp, 57765_CLASS) ||
10278 tg3_asic_rev(tp) == ASIC_REV_5762)
10279 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10280 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10281 } else {
10282 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10283 BDINFO_FLAGS_DISABLED);
10284 }
10285
10286 if (tg3_flag(tp, 57765_PLUS)) {
10287 val = TG3_RX_STD_RING_SIZE(tp);
10288 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10289 val |= (TG3_RX_STD_DMA_SZ << 2);
10290 } else
10291 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10292 } else
10293 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10294
10295 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10296
10297 tpr->rx_std_prod_idx = tp->rx_pending;
10298 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10299
10300 tpr->rx_jmb_prod_idx =
10301 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10302 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10303
10304 tg3_rings_reset(tp);
10305
10306 /* Initialize MAC address and backoff seed. */
10307 __tg3_set_mac_addr(tp, false);
10308
10309 /* MTU + ethernet header + FCS + optional VLAN tag */
10310 tw32(MAC_RX_MTU_SIZE,
10311 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10312
10313 /* The slot time is changed by tg3_setup_phy if we
10314 * run at gigabit with half duplex.
10315 */
10316 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10317 (6 << TX_LENGTHS_IPG_SHIFT) |
10318 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10319
10320 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10321 tg3_asic_rev(tp) == ASIC_REV_5762)
10322 val |= tr32(MAC_TX_LENGTHS) &
10323 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10324 TX_LENGTHS_CNT_DWN_VAL_MSK);
10325
10326 tw32(MAC_TX_LENGTHS, val);
10327
10328 /* Receive rules. */
10329 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10330 tw32(RCVLPC_CONFIG, 0x0181);
10331
10332 /* Calculate RDMAC_MODE setting early, we need it to determine
10333 * the RCVLPC_STATE_ENABLE mask.
10334 */
10335 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10336 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10337 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10338 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10339 RDMAC_MODE_LNGREAD_ENAB);
10340
10341 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10342 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10343
10344 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10345 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10346 tg3_asic_rev(tp) == ASIC_REV_57780)
10347 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10348 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10349 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10350
10351 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10352 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10353 if (tg3_flag(tp, TSO_CAPABLE)) {
10354 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10355 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10356 !tg3_flag(tp, IS_5788)) {
10357 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10358 }
10359 }
10360
10361 if (tg3_flag(tp, PCI_EXPRESS))
10362 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10363
10364 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10365 tp->dma_limit = 0;
10366 if (tp->dev->mtu <= ETH_DATA_LEN) {
10367 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10368 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10369 }
10370 }
10371
10372 if (tg3_flag(tp, HW_TSO_1) ||
10373 tg3_flag(tp, HW_TSO_2) ||
10374 tg3_flag(tp, HW_TSO_3))
10375 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10376
10377 if (tg3_flag(tp, 57765_PLUS) ||
10378 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10379 tg3_asic_rev(tp) == ASIC_REV_57780)
10380 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10381
10382 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10383 tg3_asic_rev(tp) == ASIC_REV_5762)
10384 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10385
10386 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10387 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10388 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10389 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10390 tg3_flag(tp, 57765_PLUS)) {
10391 u32 tgtreg;
10392
10393 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10394 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10395 else
10396 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10397
10398 val = tr32(tgtreg);
10399 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10400 tg3_asic_rev(tp) == ASIC_REV_5762) {
10401 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10402 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10403 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10404 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10405 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10406 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10407 }
10408 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10409 }
10410
10411 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10412 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10413 tg3_asic_rev(tp) == ASIC_REV_5762) {
10414 u32 tgtreg;
10415
10416 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10417 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10418 else
10419 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10420
10421 val = tr32(tgtreg);
10422 tw32(tgtreg, val |
10423 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10424 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10425 }
10426
10427 /* Receive/send statistics. */
10428 if (tg3_flag(tp, 5750_PLUS)) {
10429 val = tr32(RCVLPC_STATS_ENABLE);
10430 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10431 tw32(RCVLPC_STATS_ENABLE, val);
10432 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10433 tg3_flag(tp, TSO_CAPABLE)) {
10434 val = tr32(RCVLPC_STATS_ENABLE);
10435 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10436 tw32(RCVLPC_STATS_ENABLE, val);
10437 } else {
10438 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10439 }
10440 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10441 tw32(SNDDATAI_STATSENAB, 0xffffff);
10442 tw32(SNDDATAI_STATSCTRL,
10443 (SNDDATAI_SCTRL_ENABLE |
10444 SNDDATAI_SCTRL_FASTUPD));
10445
10446 /* Setup host coalescing engine. */
10447 tw32(HOSTCC_MODE, 0);
10448 for (i = 0; i < 2000; i++) {
10449 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10450 break;
10451 udelay(10);
10452 }
10453
10454 __tg3_set_coalesce(tp, &tp->coal);
10455
10456 if (!tg3_flag(tp, 5705_PLUS)) {
10457 /* Status/statistics block address. See tg3_timer,
10458 * the tg3_periodic_fetch_stats call there, and
10459 * tg3_get_stats to see how this works for 5705/5750 chips.
10460 */
10461 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10462 ((u64) tp->stats_mapping >> 32));
10463 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10464 ((u64) tp->stats_mapping & 0xffffffff));
10465 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10466
10467 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10468
10469 /* Clear statistics and status block memory areas */
10470 for (i = NIC_SRAM_STATS_BLK;
10471 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10472 i += sizeof(u32)) {
10473 tg3_write_mem(tp, i, 0);
10474 udelay(40);
10475 }
10476 }
10477
10478 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10479
10480 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10481 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10482 if (!tg3_flag(tp, 5705_PLUS))
10483 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10484
10485 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10486 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10487 /* Reset to prevent intermittently losing the first rx packet. */
10488 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10489 udelay(10);
10490 }
10491
10492 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10493 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10494 MAC_MODE_FHDE_ENABLE;
10495 if (tg3_flag(tp, ENABLE_APE))
10496 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10497 if (!tg3_flag(tp, 5705_PLUS) &&
10498 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10499 tg3_asic_rev(tp) != ASIC_REV_5700)
10500 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10501 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10502 udelay(40);
10503
10504 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10505 * If TG3_FLAG_IS_NIC is zero, we should read the
10506 * register to preserve the GPIO settings for LOMs. The GPIOs,
10507 * whether used as inputs or outputs, are set by boot code after
10508 * reset.
10509 */
10510 if (!tg3_flag(tp, IS_NIC)) {
10511 u32 gpio_mask;
10512
10513 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10514 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10515 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10516
10517 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10518 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10519 GRC_LCLCTRL_GPIO_OUTPUT3;
10520
10521 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10522 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10523
10524 tp->grc_local_ctrl &= ~gpio_mask;
10525 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10526
10527 /* GPIO1 must be driven high for eeprom write protect */
10528 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10529 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10530 GRC_LCLCTRL_GPIO_OUTPUT1);
10531 }
10532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10533 udelay(100);
10534
10535 if (tg3_flag(tp, USING_MSIX)) {
10536 val = tr32(MSGINT_MODE);
10537 val |= MSGINT_MODE_ENABLE;
10538 if (tp->irq_cnt > 1)
10539 val |= MSGINT_MODE_MULTIVEC_EN;
10540 if (!tg3_flag(tp, 1SHOT_MSI))
10541 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10542 tw32(MSGINT_MODE, val);
10543 }
10544
10545 if (!tg3_flag(tp, 5705_PLUS)) {
10546 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10547 udelay(40);
10548 }
10549
10550 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10551 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10552 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10553 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10554 WDMAC_MODE_LNGREAD_ENAB);
10555
10556 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10557 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10558 if (tg3_flag(tp, TSO_CAPABLE) &&
10559 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10560 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10561 /* nothing */
10562 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10563 !tg3_flag(tp, IS_5788)) {
10564 val |= WDMAC_MODE_RX_ACCEL;
10565 }
10566 }
10567
10568 /* Enable host coalescing bug fix */
10569 if (tg3_flag(tp, 5755_PLUS))
10570 val |= WDMAC_MODE_STATUS_TAG_FIX;
10571
10572 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10573 val |= WDMAC_MODE_BURST_ALL_DATA;
10574
10575 tw32_f(WDMAC_MODE, val);
10576 udelay(40);
10577
10578 if (tg3_flag(tp, PCIX_MODE)) {
10579 u16 pcix_cmd;
10580
10581 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10582 &pcix_cmd);
10583 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10584 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10585 pcix_cmd |= PCI_X_CMD_READ_2K;
10586 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10587 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10588 pcix_cmd |= PCI_X_CMD_READ_2K;
10589 }
10590 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10591 pcix_cmd);
10592 }
10593
10594 tw32_f(RDMAC_MODE, rdmac_mode);
10595 udelay(40);
10596
10597 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10598 tg3_asic_rev(tp) == ASIC_REV_5720) {
10599 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10600 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10601 break;
10602 }
10603 if (i < TG3_NUM_RDMA_CHANNELS) {
10604 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10605 val |= tg3_lso_rd_dma_workaround_bit(tp);
10606 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10607 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10608 }
10609 }
10610
10611 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10612 if (!tg3_flag(tp, 5705_PLUS))
10613 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10614
10615 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10616 tw32(SNDDATAC_MODE,
10617 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10618 else
10619 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10620
10621 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10622 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10623 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10624 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10625 val |= RCVDBDI_MODE_LRG_RING_SZ;
10626 tw32(RCVDBDI_MODE, val);
10627 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10628 if (tg3_flag(tp, HW_TSO_1) ||
10629 tg3_flag(tp, HW_TSO_2) ||
10630 tg3_flag(tp, HW_TSO_3))
10631 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10632 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10633 if (tg3_flag(tp, ENABLE_TSS))
10634 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10635 tw32(SNDBDI_MODE, val);
10636 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10637
10638 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10639 err = tg3_load_5701_a0_firmware_fix(tp);
10640 if (err)
10641 return err;
10642 }
10643
10644 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10645 /* Ignore any errors for the firmware download. If download
10646 * fails, the device will operate with EEE disabled.
10647 */
10648 tg3_load_57766_firmware(tp);
10649 }
10650
10651 if (tg3_flag(tp, TSO_CAPABLE)) {
10652 err = tg3_load_tso_firmware(tp);
10653 if (err)
10654 return err;
10655 }
10656
10657 tp->tx_mode = TX_MODE_ENABLE;
10658
10659 if (tg3_flag(tp, 5755_PLUS) ||
10660 tg3_asic_rev(tp) == ASIC_REV_5906)
10661 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10662
10663 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10664 tg3_asic_rev(tp) == ASIC_REV_5762) {
10665 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10666 tp->tx_mode &= ~val;
10667 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10668 }
10669
10670 tw32_f(MAC_TX_MODE, tp->tx_mode);
10671 udelay(100);
10672
10673 if (tg3_flag(tp, ENABLE_RSS)) {
10674 u32 rss_key[10];
10675
10676 tg3_rss_write_indir_tbl(tp);
10677
10678 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10679
10680 for (i = 0; i < 10; i++)
10681 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10682 }
10683
10684 tp->rx_mode = RX_MODE_ENABLE;
10685 if (tg3_flag(tp, 5755_PLUS))
10686 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10687
10688 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10689 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10690
10691 if (tg3_flag(tp, ENABLE_RSS))
10692 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10693 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10694 RX_MODE_RSS_IPV6_HASH_EN |
10695 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10696 RX_MODE_RSS_IPV4_HASH_EN |
10697 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10698
10699 tw32_f(MAC_RX_MODE, tp->rx_mode);
10700 udelay(10);
10701
10702 tw32(MAC_LED_CTRL, tp->led_ctrl);
10703
10704 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10705 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10706 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10707 udelay(10);
10708 }
10709 tw32_f(MAC_RX_MODE, tp->rx_mode);
10710 udelay(10);
10711
10712 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10713 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10714 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10715 /* Set drive transmission level to 1.2V */
10716 /* only if the signal pre-emphasis bit is not set */
10717 val = tr32(MAC_SERDES_CFG);
10718 val &= 0xfffff000;
10719 val |= 0x880;
10720 tw32(MAC_SERDES_CFG, val);
10721 }
10722 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10723 tw32(MAC_SERDES_CFG, 0x616000);
10724 }
10725
10726 /* Prevent chip from dropping frames when flow control
10727 * is enabled.
10728 */
10729 if (tg3_flag(tp, 57765_CLASS))
10730 val = 1;
10731 else
10732 val = 2;
10733 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10734
10735 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10736 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10737 /* Use hardware link auto-negotiation */
10738 tg3_flag_set(tp, HW_AUTONEG);
10739 }
10740
10741 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10742 tg3_asic_rev(tp) == ASIC_REV_5714) {
10743 u32 tmp;
10744
10745 tmp = tr32(SERDES_RX_CTRL);
10746 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10747 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10748 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10749 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10750 }
10751
10752 if (!tg3_flag(tp, USE_PHYLIB)) {
10753 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10754 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10755
10756 err = tg3_setup_phy(tp, false);
10757 if (err)
10758 return err;
10759
10760 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10761 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10762 u32 tmp;
10763
10764 /* Clear CRC stats. */
10765 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10766 tg3_writephy(tp, MII_TG3_TEST1,
10767 tmp | MII_TG3_TEST1_CRC_EN);
10768 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10769 }
10770 }
10771 }
10772
10773 __tg3_set_rx_mode(tp->dev);
10774
10775 /* Initialize receive rules. */
10776 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10777 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10778 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10779 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10780
10781 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10782 limit = 8;
10783 else
10784 limit = 16;
10785 if (tg3_flag(tp, ENABLE_ASF))
10786 limit -= 4;
10787 switch (limit) {
10788 case 16:
10789 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10790 fallthrough;
10791 case 15:
10792 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10793 fallthrough;
10794 case 14:
10795 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10796 fallthrough;
10797 case 13:
10798 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10799 fallthrough;
10800 case 12:
10801 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10802 fallthrough;
10803 case 11:
10804 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10805 fallthrough;
10806 case 10:
10807 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10808 fallthrough;
10809 case 9:
10810 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10811 fallthrough;
10812 case 8:
10813 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10814 fallthrough;
10815 case 7:
10816 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10817 fallthrough;
10818 case 6:
10819 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10820 fallthrough;
10821 case 5:
10822 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10823 fallthrough;
10824 case 4:
10825 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10826 case 3:
10827 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10828 case 2:
10829 case 1:
10830
10831 default:
10832 break;
10833 }
10834
10835 if (tg3_flag(tp, ENABLE_APE))
10836 /* Write our heartbeat update interval to APE. */
10837 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10838 APE_HOST_HEARTBEAT_INT_5SEC);
10839
10840 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10841
10842 return 0;
10843 }
10844
10845 /* Called at device open time to get the chip ready for
10846 * packet processing. Invoked with tp->lock held.
10847 */
10848 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10849 {
10850 /* The chip may have just been powered on. If so, the boot code may
10851 * still be running initialization. Wait for it to finish to avoid
10852 * races in accessing the hardware.
10853 */
10854 tg3_enable_register_access(tp);
10855 tg3_poll_fw(tp);
10856
10857 tg3_switch_clocks(tp);
10858
10859 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10860
10861 return tg3_reset_hw(tp, reset_phy);
10862 }
10863
10864 #ifdef CONFIG_TIGON3_HWMON
10865 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10866 {
10867 u32 off, len = TG3_OCIR_LEN;
10868 int i;
10869
10870 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10871 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10872
10873 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10874 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10875 memset(ocir, 0, len);
10876 }
10877 }
10878
10879 /* sysfs attributes for hwmon */
10880 static ssize_t tg3_show_temp(struct device *dev,
10881 struct device_attribute *devattr, char *buf)
10882 {
10883 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10884 struct tg3 *tp = dev_get_drvdata(dev);
10885 u32 temperature;
10886
10887 spin_lock_bh(&tp->lock);
10888 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10889 sizeof(temperature));
10890 spin_unlock_bh(&tp->lock);
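	/* hwmon sysfs expects millidegrees Celsius; the APE value appears to
	 * be whole degrees, hence the x1000 scaling below.
	 */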
10891 return sprintf(buf, "%u\n", temperature * 1000);
10892 }
10893
10894
10895 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10896 TG3_TEMP_SENSOR_OFFSET);
10897 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10898 TG3_TEMP_CAUTION_OFFSET);
10899 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10900 TG3_TEMP_MAX_OFFSET);
10901
10902 static struct attribute *tg3_attrs[] = {
10903 &sensor_dev_attr_temp1_input.dev_attr.attr,
10904 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10905 &sensor_dev_attr_temp1_max.dev_attr.attr,
10906 NULL
10907 };
10908 ATTRIBUTE_GROUPS(tg3);
10909
10910 static void tg3_hwmon_close(struct tg3 *tp)
10911 {
10912 if (tp->hwmon_dev) {
10913 hwmon_device_unregister(tp->hwmon_dev);
10914 tp->hwmon_dev = NULL;
10915 }
10916 }
10917
10918 static void tg3_hwmon_open(struct tg3 *tp)
10919 {
10920 int i;
10921 u32 size = 0;
10922 struct pci_dev *pdev = tp->pdev;
10923 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10924
10925 tg3_sd_scan_scratchpad(tp, ocirs);
10926
10927 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10928 if (!ocirs[i].src_data_length)
10929 continue;
10930
10931 size += ocirs[i].src_hdr_length;
10932 size += ocirs[i].src_data_length;
10933 }
10934
10935 if (!size)
10936 return;
10937
10938 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10939 tp, tg3_groups);
10940 if (IS_ERR(tp->hwmon_dev)) {
10941 tp->hwmon_dev = NULL;
10942 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10943 }
10944 }
10945 #else
10946 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10947 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10948 #endif /* CONFIG_TIGON3_HWMON */
10949
10950
10951 #define TG3_STAT_ADD32(PSTAT, REG) \
10952 do { u32 __val = tr32(REG); \
10953 (PSTAT)->low += __val; \
10954 if ((PSTAT)->low < __val) \
10955 (PSTAT)->high += 1; \
10956 } while (0)
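/* TG3_STAT_ADD32 folds a 32-bit rolling hardware counter into a 64-bit
 * (high/low) software counter: if ->low wrapped during the add, the new
 * ->low is necessarily smaller than the value just added, so the carry
 * goes into ->high. Example with hypothetical values: low = 0xfffffff0
 * plus __val = 0x20 yields low = 0x10 < 0x20, so high is incremented.
 */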
10957
10958 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10959 {
10960 struct tg3_hw_stats *sp = tp->hw_stats;
10961
10962 if (!tp->link_up)
10963 return;
10964
10965 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10966 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10967 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10968 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10969 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10970 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10971 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10972 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10973 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10974 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10975 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10976 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10977 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10978 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10979 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10980 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10981 u32 val;
10982
10983 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10984 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10985 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10986 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10987 }
10988
10989 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10990 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10991 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10992 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10993 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10994 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10995 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10996 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10997 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10998 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10999 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11000 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11001 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11002 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11003
11004 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11005 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11006 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11007 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11008 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11009 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11010 } else {
11011 u32 val = tr32(HOSTCC_FLOW_ATTN);
11012 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11013 if (val) {
11014 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11015 sp->rx_discards.low += val;
11016 if (sp->rx_discards.low < val)
11017 sp->rx_discards.high += 1;
11018 }
11019 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11020 }
11021 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11022 }
11023
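/* Some chips can lose an MSI under load. The check below is a software
 * workaround: if a NAPI context still has work pending but neither
 * consumer index has moved since the previous timer tick, assume the
 * interrupt was dropped and invoke the handler by hand. One tick of grace
 * (chk_msi_cnt) avoids false positives while a poll is still in flight.
 */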
11024 static void tg3_chk_missed_msi(struct tg3 *tp)
11025 {
11026 u32 i;
11027
11028 for (i = 0; i < tp->irq_cnt; i++) {
11029 struct tg3_napi *tnapi = &tp->napi[i];
11030
11031 if (tg3_has_work(tnapi)) {
11032 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11033 tnapi->last_tx_cons == tnapi->tx_cons) {
11034 if (tnapi->chk_msi_cnt < 1) {
11035 tnapi->chk_msi_cnt++;
11036 return;
11037 }
11038 tg3_msi(0, tnapi);
11039 }
11040 }
11041 tnapi->chk_msi_cnt = 0;
11042 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11043 tnapi->last_tx_cons = tnapi->tx_cons;
11044 }
11045 }
11046
11047 static void tg3_timer(struct timer_list *t)
11048 {
11049 struct tg3 *tp = from_timer(tp, t, timer);
11050
11051 spin_lock(&tp->lock);
11052
11053 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11054 spin_unlock(&tp->lock);
11055 goto restart_timer;
11056 }
11057
11058 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11059 tg3_flag(tp, 57765_CLASS))
11060 tg3_chk_missed_msi(tp);
11061
11062 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11063 /* BCM4785: Flush posted writes from GbE to host memory. */
11064 tr32(HOSTCC_MODE);
11065 }
11066
11067 if (!tg3_flag(tp, TAGGED_STATUS)) {
11068 /* All of this garbage is because, when using non-tagged
11069 * IRQ status, the mailbox/status_block protocol the chip
11070 * uses with the CPU is race prone.
11071 */
11072 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11073 tw32(GRC_LOCAL_CTRL,
11074 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11075 } else {
11076 tw32(HOSTCC_MODE, tp->coalesce_mode |
11077 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11078 }
11079
11080 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11081 spin_unlock(&tp->lock);
11082 tg3_reset_task_schedule(tp);
11083 goto restart_timer;
11084 }
11085 }
11086
11087 /* This part only runs once per second. */
11088 if (!--tp->timer_counter) {
11089 if (tg3_flag(tp, 5705_PLUS))
11090 tg3_periodic_fetch_stats(tp);
11091
11092 if (tp->setlpicnt && !--tp->setlpicnt)
11093 tg3_phy_eee_enable(tp);
11094
11095 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11096 u32 mac_stat;
11097 int phy_event;
11098
11099 mac_stat = tr32(MAC_STATUS);
11100
11101 phy_event = 0;
11102 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11103 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11104 phy_event = 1;
11105 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11106 phy_event = 1;
11107
11108 if (phy_event)
11109 tg3_setup_phy(tp, false);
11110 } else if (tg3_flag(tp, POLL_SERDES)) {
11111 u32 mac_stat = tr32(MAC_STATUS);
11112 int need_setup = 0;
11113
11114 if (tp->link_up &&
11115 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11116 need_setup = 1;
11117 }
11118 if (!tp->link_up &&
11119 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11120 MAC_STATUS_SIGNAL_DET))) {
11121 need_setup = 1;
11122 }
11123 if (need_setup) {
11124 if (!tp->serdes_counter) {
11125 tw32_f(MAC_MODE,
11126 (tp->mac_mode &
11127 ~MAC_MODE_PORT_MODE_MASK));
11128 udelay(40);
11129 tw32_f(MAC_MODE, tp->mac_mode);
11130 udelay(40);
11131 }
11132 tg3_setup_phy(tp, false);
11133 }
11134 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11135 tg3_flag(tp, 5780_CLASS)) {
11136 tg3_serdes_parallel_detect(tp);
11137 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11138 u32 cpmu = tr32(TG3_CPMU_STATUS);
11139 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11140 TG3_CPMU_STATUS_LINK_MASK);
11141
11142 if (link_up != tp->link_up)
11143 tg3_setup_phy(tp, false);
11144 }
11145
11146 tp->timer_counter = tp->timer_multiplier;
11147 }
11148
11149 /* Heartbeat is only sent once every 2 seconds.
11150 *
11151 * The heartbeat is to tell the ASF firmware that the host
11152 * driver is still alive. In the event that the OS crashes,
11153 * ASF needs to reset the hardware to free up the FIFO space
11154 * that may be filled with rx packets destined for the host.
11155 * If the FIFO is full, ASF will no longer function properly.
11156 *
11157 * Unintended resets have been reported on real-time kernels,
11158 * where the timer doesn't run on time. Netpoll will also have
11159 * the same problem.
11160 *
11161 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11162 * to check the ring condition when the heartbeat is expiring
11163 * before doing the reset. This will prevent most unintended
11164 * resets.
11165 */
11166 if (!--tp->asf_counter) {
11167 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11168 tg3_wait_for_event_ack(tp);
11169
11170 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11171 FWCMD_NICDRV_ALIVE3);
11172 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11173 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11174 TG3_FW_UPDATE_TIMEOUT_SEC);
11175
11176 tg3_generate_fw_event(tp);
11177 }
11178 tp->asf_counter = tp->asf_multiplier;
11179 }
11180
11181 /* Update the APE heartbeat every 5 seconds. */
11182 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11183
11184 spin_unlock(&tp->lock);
11185
11186 restart_timer:
11187 tp->timer.expires = jiffies + tp->timer_offset;
11188 add_timer(&tp->timer);
11189 }
11190
11191 static void tg3_timer_init(struct tg3 *tp)
11192 {
11193 if (tg3_flag(tp, TAGGED_STATUS) &&
11194 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11195 !tg3_flag(tp, 57765_CLASS))
11196 tp->timer_offset = HZ;
11197 else
11198 tp->timer_offset = HZ / 10;
11199
11200 BUG_ON(tp->timer_offset > HZ);
11201
11202 tp->timer_multiplier = (HZ / tp->timer_offset);
11203 tp->asf_multiplier = (HZ / tp->timer_offset) *
11204 TG3_FW_UPDATE_FREQ_SEC;
11205
11206 timer_setup(&tp->timer, tg3_timer, 0);
11207 }
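/* Worked example: with tagged status on a chip that needs no missed-MSI
 * check, timer_offset = HZ, so tg3_timer() runs once a second and
 * timer_multiplier = 1. Otherwise the timer runs every HZ / 10 jiffies,
 * timer_multiplier = 10 keeps the "once per second" block on schedule,
 * and asf_multiplier stretches that to TG3_FW_UPDATE_FREQ_SEC seconds
 * for the firmware heartbeat.
 */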
11208
11209 static void tg3_timer_start(struct tg3 *tp)
11210 {
11211 tp->asf_counter = tp->asf_multiplier;
11212 tp->timer_counter = tp->timer_multiplier;
11213
11214 tp->timer.expires = jiffies + tp->timer_offset;
11215 add_timer(&tp->timer);
11216 }
11217
11218 static void tg3_timer_stop(struct tg3 *tp)
11219 {
11220 del_timer_sync(&tp->timer);
11221 }
11222
11223 /* Restart hardware after configuration changes, self-test, etc.
11224 * Invoked with tp->lock held.
11225 */
11226 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11227 __releases(tp->lock)
11228 __acquires(tp->lock)
11229 {
11230 int err;
11231
11232 err = tg3_init_hw(tp, reset_phy);
11233 if (err) {
11234 netdev_err(tp->dev,
11235 "Failed to re-initialize device, aborting\n");
11236 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11237 tg3_full_unlock(tp);
11238 tg3_timer_stop(tp);
11239 tp->irq_sync = 0;
11240 tg3_napi_enable(tp);
11241 dev_close(tp->dev);
11242 tg3_full_lock(tp, 0);
11243 }
11244 return err;
11245 }
11246
11247 static void tg3_reset_task(struct work_struct *work)
11248 {
11249 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11250 int err;
11251
11252 rtnl_lock();
11253 tg3_full_lock(tp, 0);
11254
11255 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11256 tp->pdev->error_state != pci_channel_io_normal) {
11257 tg3_flag_clear(tp, RESET_TASK_PENDING);
11258 tg3_full_unlock(tp);
11259 rtnl_unlock();
11260 return;
11261 }
11262
11263 tg3_full_unlock(tp);
11264
11265 tg3_phy_stop(tp);
11266
11267 tg3_netif_stop(tp);
11268
11269 tg3_full_lock(tp, 1);
11270
11271 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11272 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11273 tp->write32_rx_mbox = tg3_write_flush_reg32;
11274 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11275 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11276 }
11277
11278 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11279 err = tg3_init_hw(tp, true);
11280 if (err) {
11281 tg3_full_unlock(tp);
11282 tp->irq_sync = 0;
11283 tg3_napi_enable(tp);
11284 /* Clear this flag so that tg3_reset_task_cancel() will not
11285 * call cancel_work_sync() and wait forever.
11286 */
11287 tg3_flag_clear(tp, RESET_TASK_PENDING);
11288 dev_close(tp->dev);
11289 goto out;
11290 }
11291
11292 tg3_netif_start(tp);
11293 tg3_full_unlock(tp);
11294 tg3_phy_start(tp);
11295 tg3_flag_clear(tp, RESET_TASK_PENDING);
11296 out:
11297 rtnl_unlock();
11298 }
11299
11300 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11301 {
11302 irq_handler_t fn;
11303 unsigned long flags;
11304 char *name;
11305 struct tg3_napi *tnapi = &tp->napi[irq_num];
11306
11307 if (tp->irq_cnt == 1)
11308 name = tp->dev->name;
11309 else {
11310 name = &tnapi->irq_lbl[0];
11311 if (tnapi->tx_buffers && tnapi->rx_rcb)
11312 snprintf(name, IFNAMSIZ,
11313 "%s-txrx-%d", tp->dev->name, irq_num);
11314 else if (tnapi->tx_buffers)
11315 snprintf(name, IFNAMSIZ,
11316 "%s-tx-%d", tp->dev->name, irq_num);
11317 else if (tnapi->rx_rcb)
11318 snprintf(name, IFNAMSIZ,
11319 "%s-rx-%d", tp->dev->name, irq_num);
11320 else
11321 snprintf(name, IFNAMSIZ,
11322 "%s-%d", tp->dev->name, irq_num);
11323 name[IFNAMSIZ-1] = 0;
11324 }
11325
11326 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11327 fn = tg3_msi;
11328 if (tg3_flag(tp, 1SHOT_MSI))
11329 fn = tg3_msi_1shot;
11330 flags = 0;
11331 } else {
11332 fn = tg3_interrupt;
11333 if (tg3_flag(tp, TAGGED_STATUS))
11334 fn = tg3_interrupt_tagged;
11335 flags = IRQF_SHARED;
11336 }
11337
11338 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11339 }
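/* The naming scheme above is what appears in /proc/interrupts: e.g.
 * "eth0-txrx-1" for a combined tx+rx MSI-X vector, "eth0-rx-2" for an
 * rx-only vector, and the bare device name in single-vector mode.
 */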
11340
11341 static int tg3_test_interrupt(struct tg3 *tp)
11342 {
11343 struct tg3_napi *tnapi = &tp->napi[0];
11344 struct net_device *dev = tp->dev;
11345 int err, i, intr_ok = 0;
11346 u32 val;
11347
11348 if (!netif_running(dev))
11349 return -ENODEV;
11350
11351 tg3_disable_ints(tp);
11352
11353 free_irq(tnapi->irq_vec, tnapi);
11354
11355 /*
11356 * Turn off MSI one shot mode. Otherwise this test has no
11357 * observable way to know whether the interrupt was delivered.
11358 */
11359 if (tg3_flag(tp, 57765_PLUS)) {
11360 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11361 tw32(MSGINT_MODE, val);
11362 }
11363
11364 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11365 IRQF_SHARED, dev->name, tnapi);
11366 if (err)
11367 return err;
11368
11369 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11370 tg3_enable_ints(tp);
11371
11372 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11373 tnapi->coal_now);
11374
11375 for (i = 0; i < 5; i++) {
11376 u32 int_mbox, misc_host_ctrl;
11377
11378 int_mbox = tr32_mailbox(tnapi->int_mbox);
11379 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11380
11381 if ((int_mbox != 0) ||
11382 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11383 intr_ok = 1;
11384 break;
11385 }
11386
11387 if (tg3_flag(tp, 57765_PLUS) &&
11388 tnapi->hw_status->status_tag != tnapi->last_tag)
11389 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11390
11391 msleep(10);
11392 }
11393
11394 tg3_disable_ints(tp);
11395
11396 free_irq(tnapi->irq_vec, tnapi);
11397
11398 err = tg3_request_irq(tp, 0);
11399
11400 if (err)
11401 return err;
11402
11403 if (intr_ok) {
11404 /* Reenable MSI one shot mode. */
11405 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11406 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11407 tw32(MSGINT_MODE, val);
11408 }
11409 return 0;
11410 }
11411
11412 return -EIO;
11413 }
11414
11415 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11416  * INTx mode is successfully restored.
11417  */
11418 static int tg3_test_msi(struct tg3 *tp)
11419 {
11420 int err;
11421 u16 pci_cmd;
11422
11423 if (!tg3_flag(tp, USING_MSI))
11424 return 0;
11425
11426 /* Turn off SERR reporting in case MSI terminates with Master
11427 * Abort.
11428 */
11429 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11430 pci_write_config_word(tp->pdev, PCI_COMMAND,
11431 pci_cmd & ~PCI_COMMAND_SERR);
11432
11433 err = tg3_test_interrupt(tp);
11434
11435 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11436
11437 if (!err)
11438 return 0;
11439
11440 /* other failures */
11441 if (err != -EIO)
11442 return err;
11443
11444 /* MSI test failed, go back to INTx mode */
11445 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11446 "to INTx mode. Please report this failure to the PCI "
11447 "maintainer and include system chipset information\n");
11448
11449 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11450
11451 pci_disable_msi(tp->pdev);
11452
11453 tg3_flag_clear(tp, USING_MSI);
11454 tp->napi[0].irq_vec = tp->pdev->irq;
11455
11456 err = tg3_request_irq(tp, 0);
11457 if (err)
11458 return err;
11459
11460 /* Need to reset the chip because the MSI cycle may have terminated
11461 * with Master Abort.
11462 */
11463 tg3_full_lock(tp, 1);
11464
11465 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11466 err = tg3_init_hw(tp, true);
11467
11468 tg3_full_unlock(tp);
11469
11470 if (err)
11471 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11472
11473 return err;
11474 }
11475
11476 static int tg3_request_firmware(struct tg3 *tp)
11477 {
11478 const struct tg3_firmware_hdr *fw_hdr;
11479
11480 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11481 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11482 tp->fw_needed);
11483 return -ENOENT;
11484 }
11485
11486 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11487
11488 	/* The firmware blob starts with version numbers, followed by the
11489 	 * start address and the _full_ length, including BSS sections
11490 	 * (which must be at least as long as the actual data, of course).
11491 	 */
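	/* For reference, a sketch of the header layout this implies; the
	 * authoritative definition is struct tg3_firmware_hdr in tg3.h:
	 *
	 *	__be32 version;		(major/minor version numbers)
	 *	__be32 base_addr;	(start address in NIC memory)
	 *	__be32 len;		(full image length, including BSS)
	 */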
11492
11493 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11494 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11495 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11496 tp->fw_len, tp->fw_needed);
11497 release_firmware(tp->fw);
11498 tp->fw = NULL;
11499 return -EINVAL;
11500 }
11501
11502 /* We no longer need firmware; we have it. */
11503 tp->fw_needed = NULL;
11504 return 0;
11505 }
11506
11507 static u32 tg3_irq_count(struct tg3 *tp)
11508 {
11509 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11510
11511 if (irq_cnt > 1) {
11512 /* We want as many rx rings enabled as there are cpus.
11513 * In multiqueue MSI-X mode, the first MSI-X vector
11514 * only deals with link interrupts, etc, so we add
11515 * one to the number of vectors we are requesting.
11516 */
11517 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11518 }
11519
11520 return irq_cnt;
11521 }
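/* Example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt becomes
 * min(4 + 1, tp->irq_max) -- four queue vectors plus the dedicated
 * vector 0 that handles link and error interrupts.
 */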
11522
11523 static bool tg3_enable_msix(struct tg3 *tp)
11524 {
11525 int i, rc;
11526 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11527
11528 tp->txq_cnt = tp->txq_req;
11529 tp->rxq_cnt = tp->rxq_req;
11530 if (!tp->rxq_cnt)
11531 tp->rxq_cnt = netif_get_num_default_rss_queues();
11532 if (tp->rxq_cnt > tp->rxq_max)
11533 tp->rxq_cnt = tp->rxq_max;
11534
11535 /* Disable multiple TX rings by default. Simple round-robin hardware
11536 * scheduling of the TX rings can cause starvation of rings with
11537 * small packets when other rings have TSO or jumbo packets.
11538 */
11539 if (!tp->txq_req)
11540 tp->txq_cnt = 1;
11541
11542 tp->irq_cnt = tg3_irq_count(tp);
11543
11544 for (i = 0; i < tp->irq_max; i++) {
11545 msix_ent[i].entry = i;
11546 msix_ent[i].vector = 0;
11547 }
11548
11549 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11550 if (rc < 0) {
11551 return false;
11552 } else if (rc < tp->irq_cnt) {
11553 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11554 tp->irq_cnt, rc);
11555 tp->irq_cnt = rc;
11556 tp->rxq_cnt = max(rc - 1, 1);
11557 if (tp->txq_cnt)
11558 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11559 }
11560
11561 for (i = 0; i < tp->irq_max; i++)
11562 tp->napi[i].irq_vec = msix_ent[i].vector;
11563
11564 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11565 pci_disable_msix(tp->pdev);
11566 return false;
11567 }
11568
11569 if (tp->irq_cnt == 1)
11570 return true;
11571
11572 tg3_flag_set(tp, ENABLE_RSS);
11573
11574 if (tp->txq_cnt > 1)
11575 tg3_flag_set(tp, ENABLE_TSS);
11576
11577 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11578
11579 return true;
11580 }
11581
11582 static void tg3_ints_init(struct tg3 *tp)
11583 {
11584 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11585 !tg3_flag(tp, TAGGED_STATUS)) {
11586 /* All MSI supporting chips should support tagged
11587 * status. Assert that this is the case.
11588 */
11589 netdev_warn(tp->dev,
11590 "MSI without TAGGED_STATUS? Not using MSI\n");
11591 goto defcfg;
11592 }
11593
11594 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11595 tg3_flag_set(tp, USING_MSIX);
11596 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11597 tg3_flag_set(tp, USING_MSI);
11598
11599 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11600 u32 msi_mode = tr32(MSGINT_MODE);
11601 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11602 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11603 if (!tg3_flag(tp, 1SHOT_MSI))
11604 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11605 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11606 }
11607 defcfg:
11608 if (!tg3_flag(tp, USING_MSIX)) {
11609 tp->irq_cnt = 1;
11610 tp->napi[0].irq_vec = tp->pdev->irq;
11611 }
11612
11613 if (tp->irq_cnt == 1) {
11614 tp->txq_cnt = 1;
11615 tp->rxq_cnt = 1;
11616 netif_set_real_num_tx_queues(tp->dev, 1);
11617 netif_set_real_num_rx_queues(tp->dev, 1);
11618 }
11619 }
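/* The net effect is a graceful fallback ladder: multi-vector MSI-X
 * where supported, then single-vector MSI, then legacy shared INTx;
 * the defcfg path also collapses the queue counts back to one
 * whenever only a single vector ends up available.
 */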
11620
11621 static void tg3_ints_fini(struct tg3 *tp)
11622 {
11623 if (tg3_flag(tp, USING_MSIX))
11624 pci_disable_msix(tp->pdev);
11625 else if (tg3_flag(tp, USING_MSI))
11626 pci_disable_msi(tp->pdev);
11627 tg3_flag_clear(tp, USING_MSI);
11628 tg3_flag_clear(tp, USING_MSIX);
11629 tg3_flag_clear(tp, ENABLE_RSS);
11630 tg3_flag_clear(tp, ENABLE_TSS);
11631 }
11632
11633 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11634 bool init)
11635 {
11636 struct net_device *dev = tp->dev;
11637 int i, err;
11638
11639 /*
11640 * Setup interrupts first so we know how
11641 * many NAPI resources to allocate
11642 */
11643 tg3_ints_init(tp);
11644
11645 tg3_rss_check_indir_tbl(tp);
11646
11647 /* The placement of this call is tied
11648 * to the setup and use of Host TX descriptors.
11649 */
11650 err = tg3_alloc_consistent(tp);
11651 if (err)
11652 goto out_ints_fini;
11653
11654 tg3_napi_init(tp);
11655
11656 tg3_napi_enable(tp);
11657
11658 for (i = 0; i < tp->irq_cnt; i++) {
11659 err = tg3_request_irq(tp, i);
11660 if (err) {
11661 for (i--; i >= 0; i--) {
11662 struct tg3_napi *tnapi = &tp->napi[i];
11663
11664 free_irq(tnapi->irq_vec, tnapi);
11665 }
11666 goto out_napi_fini;
11667 }
11668 }
11669
11670 tg3_full_lock(tp, 0);
11671
11672 if (init)
11673 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11674
11675 err = tg3_init_hw(tp, reset_phy);
11676 if (err) {
11677 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11678 tg3_free_rings(tp);
11679 }
11680
11681 tg3_full_unlock(tp);
11682
11683 if (err)
11684 goto out_free_irq;
11685
11686 if (test_irq && tg3_flag(tp, USING_MSI)) {
11687 err = tg3_test_msi(tp);
11688
11689 if (err) {
11690 tg3_full_lock(tp, 0);
11691 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11692 tg3_free_rings(tp);
11693 tg3_full_unlock(tp);
11694
11695 goto out_napi_fini;
11696 }
11697
11698 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11699 u32 val = tr32(PCIE_TRANSACTION_CFG);
11700
11701 tw32(PCIE_TRANSACTION_CFG,
11702 val | PCIE_TRANS_CFG_1SHOT_MSI);
11703 }
11704 }
11705
11706 tg3_phy_start(tp);
11707
11708 tg3_hwmon_open(tp);
11709
11710 tg3_full_lock(tp, 0);
11711
11712 tg3_timer_start(tp);
11713 tg3_flag_set(tp, INIT_COMPLETE);
11714 tg3_enable_ints(tp);
11715
11716 tg3_ptp_resume(tp);
11717
11718 tg3_full_unlock(tp);
11719
11720 netif_tx_start_all_queues(dev);
11721
11722 /*
11723 	 * Reset the loopback feature if it was turned on while the device was
11724 	 * down; make sure that it is installed properly now.
11725 */
11726 if (dev->features & NETIF_F_LOOPBACK)
11727 tg3_set_loopback(dev, dev->features);
11728
11729 return 0;
11730
11731 out_free_irq:
11732 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11733 struct tg3_napi *tnapi = &tp->napi[i];
11734 free_irq(tnapi->irq_vec, tnapi);
11735 }
11736
11737 out_napi_fini:
11738 tg3_napi_disable(tp);
11739 tg3_napi_fini(tp);
11740 tg3_free_consistent(tp);
11741
11742 out_ints_fini:
11743 tg3_ints_fini(tp);
11744
11745 return err;
11746 }
11747
11748 static void tg3_stop(struct tg3 *tp)
11749 {
11750 int i;
11751
11752 tg3_reset_task_cancel(tp);
11753 tg3_netif_stop(tp);
11754
11755 tg3_timer_stop(tp);
11756
11757 tg3_hwmon_close(tp);
11758
11759 tg3_phy_stop(tp);
11760
11761 tg3_full_lock(tp, 1);
11762
11763 tg3_disable_ints(tp);
11764
11765 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11766 tg3_free_rings(tp);
11767 tg3_flag_clear(tp, INIT_COMPLETE);
11768
11769 tg3_full_unlock(tp);
11770
11771 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11772 struct tg3_napi *tnapi = &tp->napi[i];
11773 free_irq(tnapi->irq_vec, tnapi);
11774 }
11775
11776 tg3_ints_fini(tp);
11777
11778 tg3_napi_fini(tp);
11779
11780 tg3_free_consistent(tp);
11781 }
11782
11783 static int tg3_open(struct net_device *dev)
11784 {
11785 struct tg3 *tp = netdev_priv(dev);
11786 int err;
11787
11788 if (tp->pcierr_recovery) {
11789 netdev_err(dev, "Failed to open device. PCI error recovery "
11790 "in progress\n");
11791 return -EAGAIN;
11792 }
11793
11794 if (tp->fw_needed) {
11795 err = tg3_request_firmware(tp);
11796 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11797 if (err) {
11798 netdev_warn(tp->dev, "EEE capability disabled\n");
11799 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11800 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11801 netdev_warn(tp->dev, "EEE capability restored\n");
11802 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11803 }
11804 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11805 if (err)
11806 return err;
11807 } else if (err) {
11808 netdev_warn(tp->dev, "TSO capability disabled\n");
11809 tg3_flag_clear(tp, TSO_CAPABLE);
11810 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11811 netdev_notice(tp->dev, "TSO capability restored\n");
11812 tg3_flag_set(tp, TSO_CAPABLE);
11813 }
11814 }
11815
11816 tg3_carrier_off(tp);
11817
11818 err = tg3_power_up(tp);
11819 if (err)
11820 return err;
11821
11822 tg3_full_lock(tp, 0);
11823
11824 tg3_disable_ints(tp);
11825 tg3_flag_clear(tp, INIT_COMPLETE);
11826
11827 tg3_full_unlock(tp);
11828
11829 err = tg3_start(tp,
11830 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11831 true, true);
11832 if (err) {
11833 tg3_frob_aux_power(tp, false);
11834 pci_set_power_state(tp->pdev, PCI_D3hot);
11835 }
11836
11837 return err;
11838 }
11839
11840 static int tg3_close(struct net_device *dev)
11841 {
11842 struct tg3 *tp = netdev_priv(dev);
11843
11844 if (tp->pcierr_recovery) {
11845 netdev_err(dev, "Failed to close device. PCI error recovery "
11846 "in progress\n");
11847 return -EAGAIN;
11848 }
11849
11850 tg3_stop(tp);
11851
11852 if (pci_device_is_present(tp->pdev)) {
11853 tg3_power_down_prepare(tp);
11854
11855 tg3_carrier_off(tp);
11856 }
11857 return 0;
11858 }
11859
11860 static inline u64 get_stat64(tg3_stat64_t *val)
11861 {
11862 return ((u64)val->high << 32) | ((u64)val->low);
11863 }
11864
11865 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11866 {
11867 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11868
11869 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11870 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11871 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11872 u32 val;
11873
11874 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11875 tg3_writephy(tp, MII_TG3_TEST1,
11876 val | MII_TG3_TEST1_CRC_EN);
11877 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11878 } else
11879 val = 0;
11880
11881 tp->phy_crc_errors += val;
11882
11883 return tp->phy_crc_errors;
11884 }
11885
11886 return get_stat64(&hw_stats->rx_fcs_errors);
11887 }
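/* On 5700/5701 with a copper PHY, the CRC error count is accumulated
 * in software from the PHY's own receive-error counter (read via
 * MII_TG3_TEST1) instead of the MAC's rx_fcs_errors statistic; all
 * other chips use the hardware counter directly.
 */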
11888
11889 #define ESTAT_ADD(member) \
11890 estats->member = old_estats->member + \
11891 get_stat64(&hw_stats->member)
11892
11893 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11894 {
11895 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11896 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11897
11898 ESTAT_ADD(rx_octets);
11899 ESTAT_ADD(rx_fragments);
11900 ESTAT_ADD(rx_ucast_packets);
11901 ESTAT_ADD(rx_mcast_packets);
11902 ESTAT_ADD(rx_bcast_packets);
11903 ESTAT_ADD(rx_fcs_errors);
11904 ESTAT_ADD(rx_align_errors);
11905 ESTAT_ADD(rx_xon_pause_rcvd);
11906 ESTAT_ADD(rx_xoff_pause_rcvd);
11907 ESTAT_ADD(rx_mac_ctrl_rcvd);
11908 ESTAT_ADD(rx_xoff_entered);
11909 ESTAT_ADD(rx_frame_too_long_errors);
11910 ESTAT_ADD(rx_jabbers);
11911 ESTAT_ADD(rx_undersize_packets);
11912 ESTAT_ADD(rx_in_length_errors);
11913 ESTAT_ADD(rx_out_length_errors);
11914 ESTAT_ADD(rx_64_or_less_octet_packets);
11915 ESTAT_ADD(rx_65_to_127_octet_packets);
11916 ESTAT_ADD(rx_128_to_255_octet_packets);
11917 ESTAT_ADD(rx_256_to_511_octet_packets);
11918 ESTAT_ADD(rx_512_to_1023_octet_packets);
11919 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11920 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11921 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11922 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11923 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11924
11925 ESTAT_ADD(tx_octets);
11926 ESTAT_ADD(tx_collisions);
11927 ESTAT_ADD(tx_xon_sent);
11928 ESTAT_ADD(tx_xoff_sent);
11929 ESTAT_ADD(tx_flow_control);
11930 ESTAT_ADD(tx_mac_errors);
11931 ESTAT_ADD(tx_single_collisions);
11932 ESTAT_ADD(tx_mult_collisions);
11933 ESTAT_ADD(tx_deferred);
11934 ESTAT_ADD(tx_excessive_collisions);
11935 ESTAT_ADD(tx_late_collisions);
11936 ESTAT_ADD(tx_collide_2times);
11937 ESTAT_ADD(tx_collide_3times);
11938 ESTAT_ADD(tx_collide_4times);
11939 ESTAT_ADD(tx_collide_5times);
11940 ESTAT_ADD(tx_collide_6times);
11941 ESTAT_ADD(tx_collide_7times);
11942 ESTAT_ADD(tx_collide_8times);
11943 ESTAT_ADD(tx_collide_9times);
11944 ESTAT_ADD(tx_collide_10times);
11945 ESTAT_ADD(tx_collide_11times);
11946 ESTAT_ADD(tx_collide_12times);
11947 ESTAT_ADD(tx_collide_13times);
11948 ESTAT_ADD(tx_collide_14times);
11949 ESTAT_ADD(tx_collide_15times);
11950 ESTAT_ADD(tx_ucast_packets);
11951 ESTAT_ADD(tx_mcast_packets);
11952 ESTAT_ADD(tx_bcast_packets);
11953 ESTAT_ADD(tx_carrier_sense_errors);
11954 ESTAT_ADD(tx_discards);
11955 ESTAT_ADD(tx_errors);
11956
11957 ESTAT_ADD(dma_writeq_full);
11958 ESTAT_ADD(dma_write_prioq_full);
11959 ESTAT_ADD(rxbds_empty);
11960 ESTAT_ADD(rx_discards);
11961 ESTAT_ADD(rx_errors);
11962 ESTAT_ADD(rx_threshold_hit);
11963
11964 ESTAT_ADD(dma_readq_full);
11965 ESTAT_ADD(dma_read_prioq_full);
11966 ESTAT_ADD(tx_comp_queue_full);
11967
11968 ESTAT_ADD(ring_set_send_prod_index);
11969 ESTAT_ADD(ring_status_update);
11970 ESTAT_ADD(nic_irqs);
11971 ESTAT_ADD(nic_avoided_irqs);
11972 ESTAT_ADD(nic_tx_threshold_hit);
11973
11974 ESTAT_ADD(mbuf_lwm_thresh_hit);
11975 }
11976
11977 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11978 {
11979 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11980 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11981 unsigned long rx_dropped;
11982 unsigned long tx_dropped;
11983 int i;
11984
11985 stats->rx_packets = old_stats->rx_packets +
11986 get_stat64(&hw_stats->rx_ucast_packets) +
11987 get_stat64(&hw_stats->rx_mcast_packets) +
11988 get_stat64(&hw_stats->rx_bcast_packets);
11989
11990 stats->tx_packets = old_stats->tx_packets +
11991 get_stat64(&hw_stats->tx_ucast_packets) +
11992 get_stat64(&hw_stats->tx_mcast_packets) +
11993 get_stat64(&hw_stats->tx_bcast_packets);
11994
11995 stats->rx_bytes = old_stats->rx_bytes +
11996 get_stat64(&hw_stats->rx_octets);
11997 stats->tx_bytes = old_stats->tx_bytes +
11998 get_stat64(&hw_stats->tx_octets);
11999
12000 stats->rx_errors = old_stats->rx_errors +
12001 get_stat64(&hw_stats->rx_errors);
12002 stats->tx_errors = old_stats->tx_errors +
12003 get_stat64(&hw_stats->tx_errors) +
12004 get_stat64(&hw_stats->tx_mac_errors) +
12005 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12006 get_stat64(&hw_stats->tx_discards);
12007
12008 stats->multicast = old_stats->multicast +
12009 get_stat64(&hw_stats->rx_mcast_packets);
12010 stats->collisions = old_stats->collisions +
12011 get_stat64(&hw_stats->tx_collisions);
12012
12013 stats->rx_length_errors = old_stats->rx_length_errors +
12014 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12015 get_stat64(&hw_stats->rx_undersize_packets);
12016
12017 stats->rx_frame_errors = old_stats->rx_frame_errors +
12018 get_stat64(&hw_stats->rx_align_errors);
12019 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12020 get_stat64(&hw_stats->tx_discards);
12021 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12022 get_stat64(&hw_stats->tx_carrier_sense_errors);
12023
12024 stats->rx_crc_errors = old_stats->rx_crc_errors +
12025 tg3_calc_crc_errors(tp);
12026
12027 stats->rx_missed_errors = old_stats->rx_missed_errors +
12028 get_stat64(&hw_stats->rx_discards);
12029
12030 /* Aggregate per-queue counters. The per-queue counters are updated
12031 * by a single writer, race-free. The result computed by this loop
12032 * might not be 100% accurate (counters can be updated in the middle of
12033 * the loop) but the next tg3_get_nstats() will recompute the current
12034 * value so it is acceptable.
12035 *
12036 * Note that these counters wrap around at 4G on 32bit machines.
12037 */
12038 rx_dropped = (unsigned long)(old_stats->rx_dropped);
12039 tx_dropped = (unsigned long)(old_stats->tx_dropped);
12040
12041 for (i = 0; i < tp->irq_cnt; i++) {
12042 struct tg3_napi *tnapi = &tp->napi[i];
12043
12044 rx_dropped += tnapi->rx_dropped;
12045 tx_dropped += tnapi->tx_dropped;
12046 }
12047
12048 stats->rx_dropped = rx_dropped;
12049 stats->tx_dropped = tx_dropped;
12050 }
12051
12052 static int tg3_get_regs_len(struct net_device *dev)
12053 {
12054 return TG3_REG_BLK_SIZE;
12055 }
12056
12057 static void tg3_get_regs(struct net_device *dev,
12058 struct ethtool_regs *regs, void *_p)
12059 {
12060 struct tg3 *tp = netdev_priv(dev);
12061
12062 regs->version = 0;
12063
12064 memset(_p, 0, TG3_REG_BLK_SIZE);
12065
12066 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12067 return;
12068
12069 tg3_full_lock(tp, 0);
12070
12071 tg3_dump_legacy_regs(tp, (u32 *)_p);
12072
12073 tg3_full_unlock(tp);
12074 }
12075
12076 static int tg3_get_eeprom_len(struct net_device *dev)
12077 {
12078 struct tg3 *tp = netdev_priv(dev);
12079
12080 return tp->nvram_size;
12081 }
12082
12083 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12084 {
12085 struct tg3 *tp = netdev_priv(dev);
12086 int ret, cpmu_restore = 0;
12087 u8 *pd;
12088 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12089 __be32 val;
12090
12091 if (tg3_flag(tp, NO_NVRAM))
12092 return -EINVAL;
12093
12094 offset = eeprom->offset;
12095 len = eeprom->len;
12096 eeprom->len = 0;
12097
12098 eeprom->magic = TG3_EEPROM_MAGIC;
12099
12100 /* Override clock, link aware and link idle modes */
12101 if (tg3_flag(tp, CPMU_PRESENT)) {
12102 cpmu_val = tr32(TG3_CPMU_CTRL);
12103 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12104 CPMU_CTRL_LINK_IDLE_MODE)) {
12105 tw32(TG3_CPMU_CTRL, cpmu_val &
12106 ~(CPMU_CTRL_LINK_AWARE_MODE |
12107 CPMU_CTRL_LINK_IDLE_MODE));
12108 cpmu_restore = 1;
12109 }
12110 }
12111 tg3_override_clk(tp);
12112
12113 if (offset & 3) {
12114 /* adjustments to start on required 4 byte boundary */
12115 b_offset = offset & 3;
12116 b_count = 4 - b_offset;
12117 if (b_count > len) {
12118 /* i.e. offset=1 len=2 */
12119 b_count = len;
12120 }
12121 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12122 if (ret)
12123 goto eeprom_done;
12124 memcpy(data, ((char *)&val) + b_offset, b_count);
12125 len -= b_count;
12126 offset += b_count;
12127 eeprom->len += b_count;
12128 }
12129
12130 /* read bytes up to the last 4 byte boundary */
12131 pd = &data[eeprom->len];
12132 for (i = 0; i < (len - (len & 3)); i += 4) {
12133 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12134 if (ret) {
12135 if (i)
12136 i -= 4;
12137 eeprom->len += i;
12138 goto eeprom_done;
12139 }
12140 memcpy(pd + i, &val, 4);
12141 if (need_resched()) {
12142 if (signal_pending(current)) {
12143 eeprom->len += i;
12144 ret = -EINTR;
12145 goto eeprom_done;
12146 }
12147 cond_resched();
12148 }
12149 }
12150 eeprom->len += i;
12151
12152 if (len & 3) {
12153 /* read last bytes not ending on 4 byte boundary */
12154 pd = &data[eeprom->len];
12155 b_count = len & 3;
12156 b_offset = offset + len - b_count;
12157 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12158 if (ret)
12159 goto eeprom_done;
12160 memcpy(pd, &val, b_count);
12161 eeprom->len += b_count;
12162 }
12163 ret = 0;
12164
12165 eeprom_done:
12166 /* Restore clock, link aware and link idle modes */
12167 tg3_restore_clk(tp);
12168 if (cpmu_restore)
12169 tw32(TG3_CPMU_CTRL, cpmu_val);
12170
12171 return ret;
12172 }
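/* A worked example of the alignment handling above: a request with
 * offset = 5 and len = 10 becomes three NVRAM word reads -- bytes
 * 5..7 of the word at 4 (head), the aligned word at 8 (middle loop),
 * and bytes 12..14 of the word at 12 (tail).
 */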
12173
12174 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12175 {
12176 struct tg3 *tp = netdev_priv(dev);
12177 int ret;
12178 u32 offset, len, b_offset, odd_len;
12179 u8 *buf;
12180 __be32 start = 0, end;
12181
12182 if (tg3_flag(tp, NO_NVRAM) ||
12183 eeprom->magic != TG3_EEPROM_MAGIC)
12184 return -EINVAL;
12185
12186 offset = eeprom->offset;
12187 len = eeprom->len;
12188
12189 if ((b_offset = (offset & 3))) {
12190 /* adjustments to start on required 4 byte boundary */
12191 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12192 if (ret)
12193 return ret;
12194 len += b_offset;
12195 offset &= ~3;
12196 if (len < 4)
12197 len = 4;
12198 }
12199
12200 odd_len = 0;
12201 if (len & 3) {
12202 /* adjustments to end on required 4 byte boundary */
12203 odd_len = 1;
12204 len = (len + 3) & ~3;
12205 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12206 if (ret)
12207 return ret;
12208 }
12209
12210 buf = data;
12211 if (b_offset || odd_len) {
12212 buf = kmalloc(len, GFP_KERNEL);
12213 if (!buf)
12214 return -ENOMEM;
12215 if (b_offset)
12216 memcpy(buf, &start, 4);
12217 if (odd_len)
12218 memcpy(buf+len-4, &end, 4);
12219 memcpy(buf + b_offset, data, eeprom->len);
12220 }
12221
12222 ret = tg3_nvram_write_block(tp, offset, len, buf);
12223
12224 if (buf != data)
12225 kfree(buf);
12226
12227 return ret;
12228 }
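/* The write path above is a read-modify-write at word granularity:
 * e.g. offset = 2, len = 3 reads the words at 0 and 4 into start/end,
 * rounds the request up to 8 bytes at offset 0, overlays the three
 * user bytes at buffer offset 2, and writes the merged block back.
 */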
12229
12230 static int tg3_get_link_ksettings(struct net_device *dev,
12231 struct ethtool_link_ksettings *cmd)
12232 {
12233 struct tg3 *tp = netdev_priv(dev);
12234 u32 supported, advertising;
12235
12236 if (tg3_flag(tp, USE_PHYLIB)) {
12237 struct phy_device *phydev;
12238 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12239 return -EAGAIN;
12240 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12241 phy_ethtool_ksettings_get(phydev, cmd);
12242
12243 return 0;
12244 }
12245
12246 supported = (SUPPORTED_Autoneg);
12247
12248 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12249 supported |= (SUPPORTED_1000baseT_Half |
12250 SUPPORTED_1000baseT_Full);
12251
12252 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12253 supported |= (SUPPORTED_100baseT_Half |
12254 SUPPORTED_100baseT_Full |
12255 SUPPORTED_10baseT_Half |
12256 SUPPORTED_10baseT_Full |
12257 SUPPORTED_TP);
12258 cmd->base.port = PORT_TP;
12259 } else {
12260 supported |= SUPPORTED_FIBRE;
12261 cmd->base.port = PORT_FIBRE;
12262 }
12263 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12264 supported);
12265
12266 advertising = tp->link_config.advertising;
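	/* The pause bits below follow the usual IEEE 802.3 Annex 28B
	 * advertisement mapping: rx+tx -> Pause, rx-only ->
	 * Pause|Asym_Pause, tx-only -> Asym_Pause.
	 */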
12267 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12268 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12269 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12270 advertising |= ADVERTISED_Pause;
12271 } else {
12272 advertising |= ADVERTISED_Pause |
12273 ADVERTISED_Asym_Pause;
12274 }
12275 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12276 advertising |= ADVERTISED_Asym_Pause;
12277 }
12278 }
12279 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12280 advertising);
12281
12282 if (netif_running(dev) && tp->link_up) {
12283 cmd->base.speed = tp->link_config.active_speed;
12284 cmd->base.duplex = tp->link_config.active_duplex;
12285 ethtool_convert_legacy_u32_to_link_mode(
12286 cmd->link_modes.lp_advertising,
12287 tp->link_config.rmt_adv);
12288
12289 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12290 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12291 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12292 else
12293 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12294 }
12295 } else {
12296 cmd->base.speed = SPEED_UNKNOWN;
12297 cmd->base.duplex = DUPLEX_UNKNOWN;
12298 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12299 }
12300 cmd->base.phy_address = tp->phy_addr;
12301 cmd->base.autoneg = tp->link_config.autoneg;
12302 return 0;
12303 }
12304
12305 static int tg3_set_link_ksettings(struct net_device *dev,
12306 const struct ethtool_link_ksettings *cmd)
12307 {
12308 struct tg3 *tp = netdev_priv(dev);
12309 u32 speed = cmd->base.speed;
12310 u32 advertising;
12311
12312 if (tg3_flag(tp, USE_PHYLIB)) {
12313 struct phy_device *phydev;
12314 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12315 return -EAGAIN;
12316 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12317 return phy_ethtool_ksettings_set(phydev, cmd);
12318 }
12319
12320 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12321 cmd->base.autoneg != AUTONEG_DISABLE)
12322 return -EINVAL;
12323
12324 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12325 cmd->base.duplex != DUPLEX_FULL &&
12326 cmd->base.duplex != DUPLEX_HALF)
12327 return -EINVAL;
12328
12329 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12330 cmd->link_modes.advertising);
12331
12332 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12333 u32 mask = ADVERTISED_Autoneg |
12334 ADVERTISED_Pause |
12335 ADVERTISED_Asym_Pause;
12336
12337 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12338 mask |= ADVERTISED_1000baseT_Half |
12339 ADVERTISED_1000baseT_Full;
12340
12341 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12342 mask |= ADVERTISED_100baseT_Half |
12343 ADVERTISED_100baseT_Full |
12344 ADVERTISED_10baseT_Half |
12345 ADVERTISED_10baseT_Full |
12346 ADVERTISED_TP;
12347 else
12348 mask |= ADVERTISED_FIBRE;
12349
12350 if (advertising & ~mask)
12351 return -EINVAL;
12352
12353 mask &= (ADVERTISED_1000baseT_Half |
12354 ADVERTISED_1000baseT_Full |
12355 ADVERTISED_100baseT_Half |
12356 ADVERTISED_100baseT_Full |
12357 ADVERTISED_10baseT_Half |
12358 ADVERTISED_10baseT_Full);
12359
12360 advertising &= mask;
12361 } else {
12362 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12363 if (speed != SPEED_1000)
12364 return -EINVAL;
12365
12366 if (cmd->base.duplex != DUPLEX_FULL)
12367 return -EINVAL;
12368 } else {
12369 if (speed != SPEED_100 &&
12370 speed != SPEED_10)
12371 return -EINVAL;
12372 }
12373 }
12374
12375 tg3_full_lock(tp, 0);
12376
12377 tp->link_config.autoneg = cmd->base.autoneg;
12378 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12379 tp->link_config.advertising = (advertising |
12380 ADVERTISED_Autoneg);
12381 tp->link_config.speed = SPEED_UNKNOWN;
12382 tp->link_config.duplex = DUPLEX_UNKNOWN;
12383 } else {
12384 tp->link_config.advertising = 0;
12385 tp->link_config.speed = speed;
12386 tp->link_config.duplex = cmd->base.duplex;
12387 }
12388
12389 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12390
12391 tg3_warn_mgmt_link_flap(tp);
12392
12393 if (netif_running(dev))
12394 tg3_setup_phy(tp, true);
12395
12396 tg3_full_unlock(tp);
12397
12398 return 0;
12399 }
12400
12401 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12402 {
12403 struct tg3 *tp = netdev_priv(dev);
12404
12405 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12406 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12407 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12408 }
12409
12410 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12411 {
12412 struct tg3 *tp = netdev_priv(dev);
12413
12414 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12415 wol->supported = WAKE_MAGIC;
12416 else
12417 wol->supported = 0;
12418 wol->wolopts = 0;
12419 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12420 wol->wolopts = WAKE_MAGIC;
12421 memset(&wol->sopass, 0, sizeof(wol->sopass));
12422 }
12423
12424 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12425 {
12426 struct tg3 *tp = netdev_priv(dev);
12427 struct device *dp = &tp->pdev->dev;
12428
12429 if (wol->wolopts & ~WAKE_MAGIC)
12430 return -EINVAL;
12431 if ((wol->wolopts & WAKE_MAGIC) &&
12432 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12433 return -EINVAL;
12434
12435 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12436
12437 if (device_may_wakeup(dp))
12438 tg3_flag_set(tp, WOL_ENABLE);
12439 else
12440 tg3_flag_clear(tp, WOL_ENABLE);
12441
12442 return 0;
12443 }
12444
12445 static u32 tg3_get_msglevel(struct net_device *dev)
12446 {
12447 struct tg3 *tp = netdev_priv(dev);
12448 return tp->msg_enable;
12449 }
12450
12451 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12452 {
12453 struct tg3 *tp = netdev_priv(dev);
12454 tp->msg_enable = value;
12455 }
12456
12457 static int tg3_nway_reset(struct net_device *dev)
12458 {
12459 struct tg3 *tp = netdev_priv(dev);
12460 int r;
12461
12462 if (!netif_running(dev))
12463 return -EAGAIN;
12464
12465 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12466 return -EINVAL;
12467
12468 tg3_warn_mgmt_link_flap(tp);
12469
12470 if (tg3_flag(tp, USE_PHYLIB)) {
12471 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12472 return -EAGAIN;
12473 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12474 } else {
12475 u32 bmcr;
12476
12477 spin_lock_bh(&tp->lock);
12478 r = -EINVAL;
12479 tg3_readphy(tp, MII_BMCR, &bmcr);
12480 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12481 ((bmcr & BMCR_ANENABLE) ||
12482 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12483 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12484 BMCR_ANENABLE);
12485 r = 0;
12486 }
12487 spin_unlock_bh(&tp->lock);
12488 }
12489
12490 return r;
12491 }
12492
12493 static void tg3_get_ringparam(struct net_device *dev,
12494 struct ethtool_ringparam *ering,
12495 struct kernel_ethtool_ringparam *kernel_ering,
12496 struct netlink_ext_ack *extack)
12497 {
12498 struct tg3 *tp = netdev_priv(dev);
12499
12500 ering->rx_max_pending = tp->rx_std_ring_mask;
12501 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12502 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12503 else
12504 ering->rx_jumbo_max_pending = 0;
12505
12506 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12507
12508 ering->rx_pending = tp->rx_pending;
12509 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12510 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12511 else
12512 ering->rx_jumbo_pending = 0;
12513
12514 ering->tx_pending = tp->napi[0].tx_pending;
12515 }
12516
12517 static int tg3_set_ringparam(struct net_device *dev,
12518 struct ethtool_ringparam *ering,
12519 struct kernel_ethtool_ringparam *kernel_ering,
12520 struct netlink_ext_ack *extack)
12521 {
12522 struct tg3 *tp = netdev_priv(dev);
12523 int i, irq_sync = 0, err = 0;
12524 bool reset_phy = false;
12525
12526 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12527 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12528 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12529 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12530 (tg3_flag(tp, TSO_BUG) &&
12531 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12532 return -EINVAL;
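	/* The tx_pending floor above exists because a single skb can occupy
	 * up to MAX_SKB_FRAGS + 1 descriptors; the stricter 3x bound on
	 * TSO_BUG chips leaves headroom for the software TSO workaround,
	 * which resubmits a GSO packet as multiple segments.
	 */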
12533
12534 if (netif_running(dev)) {
12535 tg3_phy_stop(tp);
12536 tg3_netif_stop(tp);
12537 irq_sync = 1;
12538 }
12539
12540 tg3_full_lock(tp, irq_sync);
12541
12542 tp->rx_pending = ering->rx_pending;
12543
12544 if (tg3_flag(tp, MAX_RXPEND_64) &&
12545 tp->rx_pending > 63)
12546 tp->rx_pending = 63;
12547
12548 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12549 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12550
12551 for (i = 0; i < tp->irq_max; i++)
12552 tp->napi[i].tx_pending = ering->tx_pending;
12553
12554 if (netif_running(dev)) {
12555 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12556 /* Reset PHY to avoid PHY lock up */
12557 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12558 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12559 tg3_asic_rev(tp) == ASIC_REV_5720)
12560 reset_phy = true;
12561
12562 err = tg3_restart_hw(tp, reset_phy);
12563 if (!err)
12564 tg3_netif_start(tp);
12565 }
12566
12567 tg3_full_unlock(tp);
12568
12569 if (irq_sync && !err)
12570 tg3_phy_start(tp);
12571
12572 return err;
12573 }
12574
12575 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12576 {
12577 struct tg3 *tp = netdev_priv(dev);
12578
12579 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12580
12581 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12582 epause->rx_pause = 1;
12583 else
12584 epause->rx_pause = 0;
12585
12586 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12587 epause->tx_pause = 1;
12588 else
12589 epause->tx_pause = 0;
12590 }
12591
12592 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12593 {
12594 struct tg3 *tp = netdev_priv(dev);
12595 int err = 0;
12596 bool reset_phy = false;
12597
12598 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12599 tg3_warn_mgmt_link_flap(tp);
12600
12601 if (tg3_flag(tp, USE_PHYLIB)) {
12602 struct phy_device *phydev;
12603
12604 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12605
12606 if (!phy_validate_pause(phydev, epause))
12607 return -EINVAL;
12608
12609 tp->link_config.flowctrl = 0;
12610 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12611 if (epause->rx_pause) {
12612 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12613
12614 if (epause->tx_pause) {
12615 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12616 }
12617 } else if (epause->tx_pause) {
12618 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12619 }
12620
12621 if (epause->autoneg)
12622 tg3_flag_set(tp, PAUSE_AUTONEG);
12623 else
12624 tg3_flag_clear(tp, PAUSE_AUTONEG);
12625
12626 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12627 if (phydev->autoneg) {
12628 /* phy_set_asym_pause() will
12629 * renegotiate the link to inform our
12630 * link partner of our flow control
12631 * settings, even if the flow control
12632 * is forced. Let tg3_adjust_link()
12633 * do the final flow control setup.
12634 */
12635 return 0;
12636 }
12637
12638 if (!epause->autoneg)
12639 tg3_setup_flow_control(tp, 0, 0);
12640 }
12641 } else {
12642 int irq_sync = 0;
12643
12644 if (netif_running(dev)) {
12645 tg3_netif_stop(tp);
12646 irq_sync = 1;
12647 }
12648
12649 tg3_full_lock(tp, irq_sync);
12650
12651 if (epause->autoneg)
12652 tg3_flag_set(tp, PAUSE_AUTONEG);
12653 else
12654 tg3_flag_clear(tp, PAUSE_AUTONEG);
12655 if (epause->rx_pause)
12656 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12657 else
12658 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12659 if (epause->tx_pause)
12660 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12661 else
12662 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12663
12664 if (netif_running(dev)) {
12665 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12666 /* Reset PHY to avoid PHY lock up */
12667 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12668 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12669 tg3_asic_rev(tp) == ASIC_REV_5720)
12670 reset_phy = true;
12671
12672 err = tg3_restart_hw(tp, reset_phy);
12673 if (!err)
12674 tg3_netif_start(tp);
12675 }
12676
12677 tg3_full_unlock(tp);
12678 }
12679
12680 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12681
12682 return err;
12683 }
12684
12685 static int tg3_get_sset_count(struct net_device *dev, int sset)
12686 {
12687 switch (sset) {
12688 case ETH_SS_TEST:
12689 return TG3_NUM_TEST;
12690 case ETH_SS_STATS:
12691 return TG3_NUM_STATS;
12692 default:
12693 return -EOPNOTSUPP;
12694 }
12695 }
12696
12697 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12698 u32 *rules __always_unused)
12699 {
12700 struct tg3 *tp = netdev_priv(dev);
12701
12702 if (!tg3_flag(tp, SUPPORT_MSIX))
12703 return -EOPNOTSUPP;
12704
12705 switch (info->cmd) {
12706 case ETHTOOL_GRXRINGS:
12707 if (netif_running(tp->dev))
12708 info->data = tp->rxq_cnt;
12709 else {
12710 info->data = num_online_cpus();
12711 if (info->data > TG3_RSS_MAX_NUM_QS)
12712 info->data = TG3_RSS_MAX_NUM_QS;
12713 }
12714
12715 return 0;
12716
12717 default:
12718 return -EOPNOTSUPP;
12719 }
12720 }
12721
12722 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12723 {
12724 u32 size = 0;
12725 struct tg3 *tp = netdev_priv(dev);
12726
12727 if (tg3_flag(tp, SUPPORT_MSIX))
12728 size = TG3_RSS_INDIR_TBL_SIZE;
12729
12730 return size;
12731 }
12732
12733 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12734 {
12735 struct tg3 *tp = netdev_priv(dev);
12736 int i;
12737
12738 rxfh->hfunc = ETH_RSS_HASH_TOP;
12739 if (!rxfh->indir)
12740 return 0;
12741
12742 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12743 rxfh->indir[i] = tp->rss_ind_tbl[i];
12744
12745 return 0;
12746 }
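/* ETH_RSS_HASH_TOP advertises the Toeplitz hash; tg3_set_rxfh()
 * below rejects any other hfunc (and any hash key change), so this
 * is effectively the only RSS mode the driver exposes.
 */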
12747
12748 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12749 struct netlink_ext_ack *extack)
12750 {
12751 struct tg3 *tp = netdev_priv(dev);
12752 size_t i;
12753
12754 /* We require at least one supported parameter to be changed and no
12755 * change in any of the unsupported parameters
12756 */
12757 if (rxfh->key ||
12758 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12759 rxfh->hfunc != ETH_RSS_HASH_TOP))
12760 return -EOPNOTSUPP;
12761
12762 if (!rxfh->indir)
12763 return 0;
12764
12765 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12766 tp->rss_ind_tbl[i] = rxfh->indir[i];
12767
12768 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12769 return 0;
12770
12771 /* It is legal to write the indirection
12772 * table while the device is running.
12773 */
12774 tg3_full_lock(tp, 0);
12775 tg3_rss_write_indir_tbl(tp);
12776 tg3_full_unlock(tp);
12777
12778 return 0;
12779 }
12780
12781 static void tg3_get_channels(struct net_device *dev,
12782 struct ethtool_channels *channel)
12783 {
12784 struct tg3 *tp = netdev_priv(dev);
12785 u32 deflt_qs = netif_get_num_default_rss_queues();
12786
12787 channel->max_rx = tp->rxq_max;
12788 channel->max_tx = tp->txq_max;
12789
12790 if (netif_running(dev)) {
12791 channel->rx_count = tp->rxq_cnt;
12792 channel->tx_count = tp->txq_cnt;
12793 } else {
12794 if (tp->rxq_req)
12795 channel->rx_count = tp->rxq_req;
12796 else
12797 channel->rx_count = min(deflt_qs, tp->rxq_max);
12798
12799 if (tp->txq_req)
12800 channel->tx_count = tp->txq_req;
12801 else
12802 channel->tx_count = min(deflt_qs, tp->txq_max);
12803 }
12804 }
12805
12806 static int tg3_set_channels(struct net_device *dev,
12807 struct ethtool_channels *channel)
12808 {
12809 struct tg3 *tp = netdev_priv(dev);
12810
12811 if (!tg3_flag(tp, SUPPORT_MSIX))
12812 return -EOPNOTSUPP;
12813
12814 if (channel->rx_count > tp->rxq_max ||
12815 channel->tx_count > tp->txq_max)
12816 return -EINVAL;
12817
12818 tp->rxq_req = channel->rx_count;
12819 tp->txq_req = channel->tx_count;
12820
12821 if (!netif_running(dev))
12822 return 0;
12823
12824 tg3_stop(tp);
12825
12826 tg3_carrier_off(tp);
12827
12828 tg3_start(tp, true, false, false);
12829
12830 return 0;
12831 }
12832
12833 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12834 {
12835 switch (stringset) {
12836 case ETH_SS_STATS:
12837 		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12838 break;
12839 case ETH_SS_TEST:
12840 		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12841 break;
12842 default:
12843 WARN_ON(1); /* we need a WARN() */
12844 break;
12845 }
12846 }
12847
12848 static int tg3_set_phys_id(struct net_device *dev,
12849 enum ethtool_phys_id_state state)
12850 {
12851 struct tg3 *tp = netdev_priv(dev);
12852
12853 switch (state) {
12854 case ETHTOOL_ID_ACTIVE:
12855 return 1; /* cycle on/off once per second */
12856
12857 case ETHTOOL_ID_ON:
12858 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12859 LED_CTRL_1000MBPS_ON |
12860 LED_CTRL_100MBPS_ON |
12861 LED_CTRL_10MBPS_ON |
12862 LED_CTRL_TRAFFIC_OVERRIDE |
12863 LED_CTRL_TRAFFIC_BLINK |
12864 LED_CTRL_TRAFFIC_LED);
12865 break;
12866
12867 case ETHTOOL_ID_OFF:
12868 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12869 LED_CTRL_TRAFFIC_OVERRIDE);
12870 break;
12871
12872 case ETHTOOL_ID_INACTIVE:
12873 tw32(MAC_LED_CTRL, tp->led_ctrl);
12874 break;
12875 }
12876
12877 return 0;
12878 }
12879
12880 static void tg3_get_ethtool_stats(struct net_device *dev,
12881 struct ethtool_stats *estats, u64 *tmp_stats)
12882 {
12883 struct tg3 *tp = netdev_priv(dev);
12884
12885 if (tp->hw_stats)
12886 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12887 else
12888 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12889 }
12890
12891 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12892 {
12893 int i;
12894 __be32 *buf;
12895 u32 offset = 0, len = 0;
12896 u32 magic, val;
12897
12898 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12899 return NULL;
12900
12901 if (magic == TG3_EEPROM_MAGIC) {
12902 for (offset = TG3_NVM_DIR_START;
12903 offset < TG3_NVM_DIR_END;
12904 offset += TG3_NVM_DIRENT_SIZE) {
12905 if (tg3_nvram_read(tp, offset, &val))
12906 return NULL;
12907
12908 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12909 TG3_NVM_DIRTYPE_EXTVPD)
12910 break;
12911 }
12912
12913 if (offset != TG3_NVM_DIR_END) {
12914 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12915 if (tg3_nvram_read(tp, offset + 4, &offset))
12916 return NULL;
12917
12918 offset = tg3_nvram_logical_addr(tp, offset);
12919 }
12920
12921 if (!offset || !len) {
12922 offset = TG3_NVM_VPD_OFF;
12923 len = TG3_NVM_VPD_LEN;
12924 }
12925
12926 buf = kmalloc(len, GFP_KERNEL);
12927 if (!buf)
12928 return NULL;
12929
12930 for (i = 0; i < len; i += 4) {
12931 /* The data is in little-endian format in NVRAM.
12932 * Use the big-endian read routines to preserve
12933 * the byte order as it exists in NVRAM.
12934 */
12935 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12936 goto error;
12937 }
12938 *vpdlen = len;
12939 } else {
12940 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12941 if (IS_ERR(buf))
12942 return NULL;
12943 }
12944
12945 return buf;
12946
12947 error:
12948 kfree(buf);
12949 return NULL;
12950 }
12951
12952 #define NVRAM_TEST_SIZE 0x100
12953 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12954 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12955 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12956 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12957 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12958 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12959 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12960 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12961
12962 static int tg3_test_nvram(struct tg3 *tp)
12963 {
12964 u32 csum, magic;
12965 __be32 *buf;
12966 int i, j, k, err = 0, size;
12967 unsigned int len;
12968
12969 if (tg3_flag(tp, NO_NVRAM))
12970 return 0;
12971
12972 if (tg3_nvram_read(tp, 0, &magic) != 0)
12973 return -EIO;
12974
12975 if (magic == TG3_EEPROM_MAGIC)
12976 size = NVRAM_TEST_SIZE;
12977 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12978 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12979 TG3_EEPROM_SB_FORMAT_1) {
12980 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12981 case TG3_EEPROM_SB_REVISION_0:
12982 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12983 break;
12984 case TG3_EEPROM_SB_REVISION_2:
12985 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12986 break;
12987 case TG3_EEPROM_SB_REVISION_3:
12988 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12989 break;
12990 case TG3_EEPROM_SB_REVISION_4:
12991 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12992 break;
12993 case TG3_EEPROM_SB_REVISION_5:
12994 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12995 break;
12996 case TG3_EEPROM_SB_REVISION_6:
12997 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12998 break;
12999 default:
13000 return -EIO;
13001 }
13002 } else
13003 return 0;
13004 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13005 size = NVRAM_SELFBOOT_HW_SIZE;
13006 else
13007 return -EIO;
13008
13009 buf = kmalloc(size, GFP_KERNEL);
13010 if (buf == NULL)
13011 return -ENOMEM;
13012
13013 err = -EIO;
13014 for (i = 0, j = 0; i < size; i += 4, j++) {
13015 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13016 if (err)
13017 break;
13018 }
13019 if (i < size)
13020 goto out;
13021
13022 /* Selfboot format */
13023 magic = be32_to_cpu(buf[0]);
13024 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13025 TG3_EEPROM_MAGIC_FW) {
13026 u8 *buf8 = (u8 *) buf, csum8 = 0;
13027
13028 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13029 TG3_EEPROM_SB_REVISION_2) {
13030 /* For rev 2, the csum doesn't include the MBA. */
13031 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13032 csum8 += buf8[i];
13033 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13034 csum8 += buf8[i];
13035 } else {
13036 for (i = 0; i < size; i++)
13037 csum8 += buf8[i];
13038 }
13039
13040 if (csum8 == 0) {
13041 err = 0;
13042 goto out;
13043 }
13044
13045 err = -EIO;
13046 goto out;
13047 }
13048
13049 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13050 TG3_EEPROM_MAGIC_HW) {
13051 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13052 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13053 u8 *buf8 = (u8 *) buf;
13054
13055 /* Separate the parity bits and the data bytes. */
13056 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13057 if ((i == 0) || (i == 8)) {
13058 int l;
13059 u8 msk;
13060
13061 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13062 parity[k++] = buf8[i] & msk;
13063 i++;
13064 } else if (i == 16) {
13065 int l;
13066 u8 msk;
13067
13068 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13069 parity[k++] = buf8[i] & msk;
13070 i++;
13071
13072 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13073 parity[k++] = buf8[i] & msk;
13074 i++;
13075 }
13076 data[j++] = buf8[i];
13077 }
13078
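		/* Each data byte together with its parity bit must have odd
		 * overall parity: reject a byte whose popcount is odd while
		 * its parity bit is set, or even while the bit is clear.
		 */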
13079 err = -EIO;
13080 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13081 u8 hw8 = hweight8(data[i]);
13082
13083 if ((hw8 & 0x1) && parity[i])
13084 goto out;
13085 else if (!(hw8 & 0x1) && !parity[i])
13086 goto out;
13087 }
13088 err = 0;
13089 goto out;
13090 }
13091
13092 err = -EIO;
13093
13094 /* Bootstrap checksum at offset 0x10 */
13095 csum = calc_crc((unsigned char *) buf, 0x10);
13096 if (csum != le32_to_cpu(buf[0x10/4]))
13097 goto out;
13098
13099 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13100 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13101 if (csum != le32_to_cpu(buf[0xfc/4]))
13102 goto out;
13103
13104 kfree(buf);
13105
13106 buf = tg3_vpd_readblock(tp, &len);
13107 if (!buf)
13108 return -ENOMEM;
13109
13110 err = pci_vpd_check_csum(buf, len);
13111 /* go on if no checksum found */
13112 if (err == 1)
13113 err = 0;
13114 out:
13115 kfree(buf);
13116 return err;
13117 }
13118
13119 #define TG3_SERDES_TIMEOUT_SEC 2
13120 #define TG3_COPPER_TIMEOUT_SEC 6
13121
13122 static int tg3_test_link(struct tg3 *tp)
13123 {
13124 int i, max;
13125
13126 if (!netif_running(tp->dev))
13127 return -ENODEV;
13128
13129 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13130 max = TG3_SERDES_TIMEOUT_SEC;
13131 else
13132 max = TG3_COPPER_TIMEOUT_SEC;
13133
13134 for (i = 0; i < max; i++) {
13135 if (tp->link_up)
13136 return 0;
13137
13138 if (msleep_interruptible(1000))
13139 break;
13140 }
13141
13142 return -EIO;
13143 }
13144
13145 /* Only test the commonly used registers */
13146 static int tg3_test_registers(struct tg3 *tp)
13147 {
13148 int i, is_5705, is_5750;
13149 u32 offset, read_mask, write_mask, val, save_val, read_val;
13150 static struct {
13151 u16 offset;
13152 u16 flags;
13153 #define TG3_FL_5705 0x1
13154 #define TG3_FL_NOT_5705 0x2
13155 #define TG3_FL_NOT_5788 0x4
13156 #define TG3_FL_NOT_5750 0x8
13157 u32 read_mask;
13158 u32 write_mask;
13159 } reg_tbl[] = {
13160 /* MAC Control Registers */
13161 { MAC_MODE, TG3_FL_NOT_5705,
13162 0x00000000, 0x00ef6f8c },
13163 { MAC_MODE, TG3_FL_5705,
13164 0x00000000, 0x01ef6b8c },
13165 { MAC_STATUS, TG3_FL_NOT_5705,
13166 0x03800107, 0x00000000 },
13167 { MAC_STATUS, TG3_FL_5705,
13168 0x03800100, 0x00000000 },
13169 { MAC_ADDR_0_HIGH, 0x0000,
13170 0x00000000, 0x0000ffff },
13171 { MAC_ADDR_0_LOW, 0x0000,
13172 0x00000000, 0xffffffff },
13173 { MAC_RX_MTU_SIZE, 0x0000,
13174 0x00000000, 0x0000ffff },
13175 { MAC_TX_MODE, 0x0000,
13176 0x00000000, 0x00000070 },
13177 { MAC_TX_LENGTHS, 0x0000,
13178 0x00000000, 0x00003fff },
13179 { MAC_RX_MODE, TG3_FL_NOT_5705,
13180 0x00000000, 0x000007fc },
13181 { MAC_RX_MODE, TG3_FL_5705,
13182 0x00000000, 0x000007dc },
13183 { MAC_HASH_REG_0, 0x0000,
13184 0x00000000, 0xffffffff },
13185 { MAC_HASH_REG_1, 0x0000,
13186 0x00000000, 0xffffffff },
13187 { MAC_HASH_REG_2, 0x0000,
13188 0x00000000, 0xffffffff },
13189 { MAC_HASH_REG_3, 0x0000,
13190 0x00000000, 0xffffffff },
13191
13192 /* Receive Data and Receive BD Initiator Control Registers. */
13193 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13194 0x00000000, 0xffffffff },
13195 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13196 0x00000000, 0xffffffff },
13197 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13198 0x00000000, 0x00000003 },
13199 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13200 0x00000000, 0xffffffff },
13201 { RCVDBDI_STD_BD+0, 0x0000,
13202 0x00000000, 0xffffffff },
13203 { RCVDBDI_STD_BD+4, 0x0000,
13204 0x00000000, 0xffffffff },
13205 { RCVDBDI_STD_BD+8, 0x0000,
13206 0x00000000, 0xffff0002 },
13207 { RCVDBDI_STD_BD+0xc, 0x0000,
13208 0x00000000, 0xffffffff },
13209
13210 /* Receive BD Initiator Control Registers. */
13211 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13212 0x00000000, 0xffffffff },
13213 { RCVBDI_STD_THRESH, TG3_FL_5705,
13214 0x00000000, 0x000003ff },
13215 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13216 0x00000000, 0xffffffff },
13217
13218 /* Host Coalescing Control Registers. */
13219 { HOSTCC_MODE, TG3_FL_NOT_5705,
13220 0x00000000, 0x00000004 },
13221 { HOSTCC_MODE, TG3_FL_5705,
13222 0x00000000, 0x000000f6 },
13223 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13224 0x00000000, 0xffffffff },
13225 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13226 0x00000000, 0x000003ff },
13227 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13228 0x00000000, 0xffffffff },
13229 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13230 0x00000000, 0x000003ff },
13231 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13232 0x00000000, 0xffffffff },
13233 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13234 0x00000000, 0x000000ff },
13235 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13236 0x00000000, 0xffffffff },
13237 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13238 0x00000000, 0x000000ff },
13239 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13240 0x00000000, 0xffffffff },
13241 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13242 0x00000000, 0xffffffff },
13243 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13244 0x00000000, 0xffffffff },
13245 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13246 0x00000000, 0x000000ff },
13247 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13248 0x00000000, 0xffffffff },
13249 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13250 0x00000000, 0x000000ff },
13251 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13252 0x00000000, 0xffffffff },
13253 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13254 0x00000000, 0xffffffff },
13255 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13256 0x00000000, 0xffffffff },
13257 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13258 0x00000000, 0xffffffff },
13259 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13260 0x00000000, 0xffffffff },
13261 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13262 0xffffffff, 0x00000000 },
13263 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13264 0xffffffff, 0x00000000 },
13265
13266 /* Buffer Manager Control Registers. */
13267 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13268 0x00000000, 0x007fff80 },
13269 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13270 0x00000000, 0x007fffff },
13271 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13272 0x00000000, 0x0000003f },
13273 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13274 0x00000000, 0x000001ff },
13275 { BUFMGR_MB_HIGH_WATER, 0x0000,
13276 0x00000000, 0x000001ff },
13277 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13278 0xffffffff, 0x00000000 },
13279 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13280 0xffffffff, 0x00000000 },
13281
13282 /* Mailbox Registers */
13283 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13284 0x00000000, 0x000001ff },
13285 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13286 0x00000000, 0x000001ff },
13287 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13288 0x00000000, 0x000007ff },
13289 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13290 0x00000000, 0x000001ff },
13291
13292 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13293 };
13294
13295 is_5705 = is_5750 = 0;
13296 if (tg3_flag(tp, 5705_PLUS)) {
13297 is_5705 = 1;
13298 if (tg3_flag(tp, 5750_PLUS))
13299 is_5750 = 1;
13300 }
13301
13302 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13303 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13304 continue;
13305
13306 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13307 continue;
13308
13309 if (tg3_flag(tp, IS_5788) &&
13310 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13311 continue;
13312
13313 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13314 continue;
13315
13316 offset = (u32) reg_tbl[i].offset;
13317 read_mask = reg_tbl[i].read_mask;
13318 write_mask = reg_tbl[i].write_mask;
13319
13320 /* Save the original register content */
13321 save_val = tr32(offset);
13322
13323 /* Determine the read-only value. */
13324 read_val = save_val & read_mask;
13325
13326 /* Write zero to the register, then make sure the read-only bits
13327 * are not changed and the read/write bits are all zeros.
13328 */
13329 tw32(offset, 0);
13330
13331 val = tr32(offset);
13332
13333 /* Test the read-only and read/write bits. */
13334 if (((val & read_mask) != read_val) || (val & write_mask))
13335 goto out;
13336
13337 /* Write ones to all the bits defined by RdMask and WrMask, then
13338 * make sure the read-only bits are not changed and the
13339 * read/write bits are all ones.
13340 */
13341 tw32(offset, read_mask | write_mask);
13342
13343 val = tr32(offset);
13344
13345 /* Test the read-only bits. */
13346 if ((val & read_mask) != read_val)
13347 goto out;
13348
13349 /* Test the read/write bits. */
13350 if ((val & write_mask) != write_mask)
13351 goto out;
13352
13353 tw32(offset, save_val);
13354 }
13355
13356 return 0;
13357
13358 out:
13359 if (netif_msg_hw(tp))
13360 netdev_err(tp->dev,
13361 "Register test failed at offset %x\n", offset);
13362 tw32(offset, save_val);
13363 return -EIO;
13364 }
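
/*
 * Illustrative sketch (not driver code): one table entry of the test
 * above boils down to two probes around the saved register value, e.g.
 * MAC_MODE on a 5705 with read_mask 0x00000000 and write_mask
 * 0x01ef6b8c.  Compiled out; tr32()/tw32() are the register accessors
 * this file already defines.
 */
#if 0
static int tg3_example_probe_reg(struct tg3 *tp, u32 off,
				 u32 read_mask, u32 write_mask)
{
	u32 save = tr32(off);
	u32 ro = save & read_mask;	/* expected read-only bits */
	int err = 0;

	/* all-zeros probe: r/o bits keep their value, r/w bits read 0 */
	tw32(off, 0);
	if ((tr32(off) & read_mask) != ro || (tr32(off) & write_mask))
		err = -EIO;

	/* all-ones probe: r/o bits keep their value, r/w bits read 1 */
	tw32(off, read_mask | write_mask);
	if ((tr32(off) & read_mask) != ro ||
	    (tr32(off) & write_mask) != write_mask)
		err = -EIO;

	tw32(off, save);		/* always restore the register */
	return err;
}
#endif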
13365
13366 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13367 {
13368 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13369 int i;
13370 u32 j;
13371
13372 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13373 for (j = 0; j < len; j += 4) {
13374 u32 val;
13375
13376 tg3_write_mem(tp, offset + j, test_pattern[i]);
13377 tg3_read_mem(tp, offset + j, &val);
13378 if (val != test_pattern[i])
13379 return -EIO;
13380 }
13381 }
13382 return 0;
13383 }
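
/*
 * The three patterns above walk each word with all-zeros, all-ones and
 * the alternating value 0xaa55a55a, a conventional choice for exposing
 * stuck-at bits and simple address/data-line faults.
 */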
13384
13385 static int tg3_test_memory(struct tg3 *tp)
13386 {
13387 static struct mem_entry {
13388 u32 offset;
13389 u32 len;
13390 } mem_tbl_570x[] = {
13391 { 0x00000000, 0x00b50},
13392 { 0x00002000, 0x1c000},
13393 { 0xffffffff, 0x00000}
13394 }, mem_tbl_5705[] = {
13395 { 0x00000100, 0x0000c},
13396 { 0x00000200, 0x00008},
13397 { 0x00004000, 0x00800},
13398 { 0x00006000, 0x01000},
13399 { 0x00008000, 0x02000},
13400 { 0x00010000, 0x0e000},
13401 { 0xffffffff, 0x00000}
13402 }, mem_tbl_5755[] = {
13403 { 0x00000200, 0x00008},
13404 { 0x00004000, 0x00800},
13405 { 0x00006000, 0x00800},
13406 { 0x00008000, 0x02000},
13407 { 0x00010000, 0x0c000},
13408 { 0xffffffff, 0x00000}
13409 }, mem_tbl_5906[] = {
13410 { 0x00000200, 0x00008},
13411 { 0x00004000, 0x00400},
13412 { 0x00006000, 0x00400},
13413 { 0x00008000, 0x01000},
13414 { 0x00010000, 0x01000},
13415 { 0xffffffff, 0x00000}
13416 }, mem_tbl_5717[] = {
13417 { 0x00000200, 0x00008},
13418 { 0x00010000, 0x0a000},
13419 { 0x00020000, 0x13c00},
13420 { 0xffffffff, 0x00000}
13421 }, mem_tbl_57765[] = {
13422 { 0x00000200, 0x00008},
13423 { 0x00004000, 0x00800},
13424 { 0x00006000, 0x09800},
13425 { 0x00010000, 0x0a000},
13426 { 0xffffffff, 0x00000}
13427 };
13428 struct mem_entry *mem_tbl;
13429 int err = 0;
13430 int i;
13431
13432 if (tg3_flag(tp, 5717_PLUS))
13433 mem_tbl = mem_tbl_5717;
13434 else if (tg3_flag(tp, 57765_CLASS) ||
13435 tg3_asic_rev(tp) == ASIC_REV_5762)
13436 mem_tbl = mem_tbl_57765;
13437 else if (tg3_flag(tp, 5755_PLUS))
13438 mem_tbl = mem_tbl_5755;
13439 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13440 mem_tbl = mem_tbl_5906;
13441 else if (tg3_flag(tp, 5705_PLUS))
13442 mem_tbl = mem_tbl_5705;
13443 else
13444 mem_tbl = mem_tbl_570x;
13445
13446 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13447 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13448 if (err)
13449 break;
13450 }
13451
13452 return err;
13453 }
13454
13455 #define TG3_TSO_MSS 500
13456
13457 #define TG3_TSO_IP_HDR_LEN 20
13458 #define TG3_TSO_TCP_HDR_LEN 20
13459 #define TG3_TSO_TCP_OPT_LEN 12
13460
13461 static const u8 tg3_tso_header[] = {
13462 0x08, 0x00,
13463 0x45, 0x00, 0x00, 0x00,
13464 0x00, 0x00, 0x40, 0x00,
13465 0x40, 0x06, 0x00, 0x00,
13466 0x0a, 0x00, 0x00, 0x01,
13467 0x0a, 0x00, 0x00, 0x02,
13468 0x0d, 0x00, 0xe0, 0x00,
13469 0x00, 0x00, 0x01, 0x00,
13470 0x00, 0x00, 0x02, 0x00,
13471 0x80, 0x10, 0x10, 0x00,
13472 0x14, 0x09, 0x00, 0x00,
13473 0x01, 0x01, 0x08, 0x0a,
13474 0x11, 0x11, 0x11, 0x11,
13475 0x11, 0x11, 0x11, 0x11,
13476 };
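
/*
 * For reference, the canned frame above decodes as: EtherType 0x0800
 * (the MAC addresses are filled in by the caller), then an IPv4 header
 * (IHL 5, DF set, TTL 64, protocol 6/TCP, 10.0.0.1 -> 10.0.0.2, total
 * length patched in later), then a 32-byte TCP header (ACK set, data
 * offset 8) whose 12 option bytes are two NOPs plus a timestamp option
 * padded with 0x11 filler.
 */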
13477
13478 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13479 {
13480 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13481 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13482 u32 budget;
13483 struct sk_buff *skb;
13484 u8 *tx_data, *rx_data;
13485 dma_addr_t map;
13486 int num_pkts, tx_len, rx_len, i, err;
13487 struct tg3_rx_buffer_desc *desc;
13488 struct tg3_napi *tnapi, *rnapi;
13489 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13490
13491 tnapi = &tp->napi[0];
13492 rnapi = &tp->napi[0];
13493 if (tp->irq_cnt > 1) {
13494 if (tg3_flag(tp, ENABLE_RSS))
13495 rnapi = &tp->napi[1];
13496 if (tg3_flag(tp, ENABLE_TSS))
13497 tnapi = &tp->napi[1];
13498 }
13499 coal_now = tnapi->coal_now | rnapi->coal_now;
13500
13501 err = -EIO;
13502
13503 tx_len = pktsz;
13504 skb = netdev_alloc_skb(tp->dev, tx_len);
13505 if (!skb)
13506 return -ENOMEM;
13507
13508 tx_data = skb_put(skb, tx_len);
13509 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13510 memset(tx_data + ETH_ALEN, 0x0, 8);
13511
13512 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13513
13514 if (tso_loopback) {
13515 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13516
13517 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13518 TG3_TSO_TCP_OPT_LEN;
13519
13520 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13521 sizeof(tg3_tso_header));
13522 mss = TG3_TSO_MSS;
13523
13524 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13525 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13526
13527 /* Set the total length field in the IP header */
13528 iph->tot_len = htons((u16)(mss + hdr_len));
13529
13530 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13531 TXD_FLAG_CPU_POST_DMA);
13532
13533 if (tg3_flag(tp, HW_TSO_1) ||
13534 tg3_flag(tp, HW_TSO_2) ||
13535 tg3_flag(tp, HW_TSO_3)) {
13536 struct tcphdr *th;
13537 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13538 th = (struct tcphdr *)&tx_data[val];
13539 th->check = 0;
13540 } else
13541 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13542
13543 if (tg3_flag(tp, HW_TSO_3)) {
13544 mss |= (hdr_len & 0xc) << 12;
13545 if (hdr_len & 0x10)
13546 base_flags |= 0x00000010;
13547 base_flags |= (hdr_len & 0x3e0) << 5;
13548 } else if (tg3_flag(tp, HW_TSO_2))
13549 mss |= hdr_len << 9;
13550 else if (tg3_flag(tp, HW_TSO_1) ||
13551 tg3_asic_rev(tp) == ASIC_REV_5705) {
13552 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13553 } else {
13554 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13555 }
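
/*
 * Worked example of the encodings above: hdr_len is
 * 20 + 20 + 12 = 52 = 0x34.  For HW_TSO_3 that sets
 * mss |= 0x4000 (bits 2-3 of hdr_len shifted to bits 14-15),
 * base_flags |= 0x10 (bit 4 passed through) and
 * base_flags |= 0x400 (bits 5-9 shifted to bits 10-14).
 * For HW_TSO_2 the whole length lands in one field:
 * mss |= 0x34 << 9 = 0x6800.
 */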
13556
13557 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13558 } else {
13559 num_pkts = 1;
13560 data_off = ETH_HLEN;
13561
13562 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13563 tx_len > VLAN_ETH_FRAME_LEN)
13564 base_flags |= TXD_FLAG_JMB_PKT;
13565 }
13566
13567 for (i = data_off; i < tx_len; i++)
13568 tx_data[i] = (u8) (i & 0xff);
13569
13570 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13571 if (dma_mapping_error(&tp->pdev->dev, map)) {
13572 dev_kfree_skb(skb);
13573 return -EIO;
13574 }
13575
13576 val = tnapi->tx_prod;
13577 tnapi->tx_buffers[val].skb = skb;
13578 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13579
13580 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13581 rnapi->coal_now);
13582
13583 udelay(10);
13584
13585 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13586
13587 budget = tg3_tx_avail(tnapi);
13588 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13589 base_flags | TXD_FLAG_END, mss, 0)) {
13590 tnapi->tx_buffers[val].skb = NULL;
13591 dev_kfree_skb(skb);
13592 return -EIO;
13593 }
13594
13595 tnapi->tx_prod++;
13596
13597 /* Sync BD data before updating mailbox */
13598 wmb();
13599
13600 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13601 tr32_mailbox(tnapi->prodmbox);
13602
13603 udelay(10);
13604
13605 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13606 for (i = 0; i < 35; i++) {
13607 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13608 coal_now);
13609
13610 udelay(10);
13611
13612 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13613 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13614 if ((tx_idx == tnapi->tx_prod) &&
13615 (rx_idx == (rx_start_idx + num_pkts)))
13616 break;
13617 }
13618
13619 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13620 dev_kfree_skb(skb);
13621
13622 if (tx_idx != tnapi->tx_prod)
13623 goto out;
13624
13625 if (rx_idx != rx_start_idx + num_pkts)
13626 goto out;
13627
13628 val = data_off;
13629 while (rx_idx != rx_start_idx) {
13630 desc = &rnapi->rx_rcb[rx_start_idx++];
13631 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13632 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13633
13634 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13635 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13636 goto out;
13637
13638 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13639 - ETH_FCS_LEN;
13640
13641 if (!tso_loopback) {
13642 if (rx_len != tx_len)
13643 goto out;
13644
13645 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13646 if (opaque_key != RXD_OPAQUE_RING_STD)
13647 goto out;
13648 } else {
13649 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13650 goto out;
13651 }
13652 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13653 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13654 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13655 goto out;
13656 }
13657
13658 if (opaque_key == RXD_OPAQUE_RING_STD) {
13659 rx_data = tpr->rx_std_buffers[desc_idx].data;
13660 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13661 mapping);
13662 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13663 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13664 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13665 mapping);
13666 } else
13667 goto out;
13668
13669 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13670 DMA_FROM_DEVICE);
13671
13672 rx_data += TG3_RX_OFFSET(tp);
13673 for (i = data_off; i < rx_len; i++, val++) {
13674 if (*(rx_data + i) != (u8) (val & 0xff))
13675 goto out;
13676 }
13677 }
13678
13679 err = 0;
13680
13681 /* tg3_free_rings will unmap and free the rx_data */
13682 out:
13683 return err;
13684 }
13685
13686 #define TG3_STD_LOOPBACK_FAILED 1
13687 #define TG3_JMB_LOOPBACK_FAILED 2
13688 #define TG3_TSO_LOOPBACK_FAILED 4
13689 #define TG3_LOOPBACK_FAILED \
13690 (TG3_STD_LOOPBACK_FAILED | \
13691 TG3_JMB_LOOPBACK_FAILED | \
13692 TG3_TSO_LOOPBACK_FAILED)
13693
13694 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13695 {
13696 int err = -EIO;
13697 u32 eee_cap;
13698 u32 jmb_pkt_sz = 9000;
13699
13700 if (tp->dma_limit)
13701 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13702
13703 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13704 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13705
13706 if (!netif_running(tp->dev)) {
13707 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13708 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13709 if (do_extlpbk)
13710 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13711 goto done;
13712 }
13713
13714 err = tg3_reset_hw(tp, true);
13715 if (err) {
13716 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13717 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13718 if (do_extlpbk)
13719 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13720 goto done;
13721 }
13722
13723 if (tg3_flag(tp, ENABLE_RSS)) {
13724 int i;
13725
13726 /* Reroute all rx packets to the 1st queue */
13727 for (i = MAC_RSS_INDIR_TBL_0;
13728 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13729 tw32(i, 0x0);
13730 }
13731
13732 /* HW errata - mac loopback fails in some cases on 5780.
13733 * Normal traffic and PHY loopback are not affected by
13734 * errata. Also, the MAC loopback test is deprecated for
13735 * all newer ASIC revisions.
13736 */
13737 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13738 !tg3_flag(tp, CPMU_PRESENT)) {
13739 tg3_mac_loopback(tp, true);
13740
13741 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13742 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13743
13744 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13745 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13746 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13747
13748 tg3_mac_loopback(tp, false);
13749 }
13750
13751 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13752 !tg3_flag(tp, USE_PHYLIB)) {
13753 int i;
13754
13755 tg3_phy_lpbk_set(tp, 0, false);
13756
13757 /* Wait for link */
13758 for (i = 0; i < 100; i++) {
13759 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13760 break;
13761 mdelay(1);
13762 }
13763
13764 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13765 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13766 if (tg3_flag(tp, TSO_CAPABLE) &&
13767 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13768 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13769 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13770 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13771 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13772
13773 if (do_extlpbk) {
13774 tg3_phy_lpbk_set(tp, 0, true);
13775
13776 /* All link indications report up, but the hardware
13777 * isn't really ready for about 20 msec. Double it
13778 * to be sure.
13779 */
13780 mdelay(40);
13781
13782 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13783 data[TG3_EXT_LOOPB_TEST] |=
13784 TG3_STD_LOOPBACK_FAILED;
13785 if (tg3_flag(tp, TSO_CAPABLE) &&
13786 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13787 data[TG3_EXT_LOOPB_TEST] |=
13788 TG3_TSO_LOOPBACK_FAILED;
13789 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13790 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13791 data[TG3_EXT_LOOPB_TEST] |=
13792 TG3_JMB_LOOPBACK_FAILED;
13793 }
13794
13795 /* Re-enable gphy autopowerdown. */
13796 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13797 tg3_phy_toggle_apd(tp, true);
13798 }
13799
13800 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13801 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13802
13803 done:
13804 tp->phy_flags |= eee_cap;
13805
13806 return err;
13807 }
13808
13809 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13810 u64 *data)
13811 {
13812 struct tg3 *tp = netdev_priv(dev);
13813 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13814
13815 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13816 if (tg3_power_up(tp)) {
13817 etest->flags |= ETH_TEST_FL_FAILED;
13818 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13819 return;
13820 }
13821 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13822 }
13823
13824 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13825
13826 if (tg3_test_nvram(tp) != 0) {
13827 etest->flags |= ETH_TEST_FL_FAILED;
13828 data[TG3_NVRAM_TEST] = 1;
13829 }
13830 if (!doextlpbk && tg3_test_link(tp)) {
13831 etest->flags |= ETH_TEST_FL_FAILED;
13832 data[TG3_LINK_TEST] = 1;
13833 }
13834 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13835 int err, err2 = 0, irq_sync = 0;
13836
13837 if (netif_running(dev)) {
13838 tg3_phy_stop(tp);
13839 tg3_netif_stop(tp);
13840 irq_sync = 1;
13841 }
13842
13843 tg3_full_lock(tp, irq_sync);
13844 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13845 err = tg3_nvram_lock(tp);
13846 tg3_halt_cpu(tp, RX_CPU_BASE);
13847 if (!tg3_flag(tp, 5705_PLUS))
13848 tg3_halt_cpu(tp, TX_CPU_BASE);
13849 if (!err)
13850 tg3_nvram_unlock(tp);
13851
13852 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13853 tg3_phy_reset(tp);
13854
13855 if (tg3_test_registers(tp) != 0) {
13856 etest->flags |= ETH_TEST_FL_FAILED;
13857 data[TG3_REGISTER_TEST] = 1;
13858 }
13859
13860 if (tg3_test_memory(tp) != 0) {
13861 etest->flags |= ETH_TEST_FL_FAILED;
13862 data[TG3_MEMORY_TEST] = 1;
13863 }
13864
13865 if (doextlpbk)
13866 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13867
13868 if (tg3_test_loopback(tp, data, doextlpbk))
13869 etest->flags |= ETH_TEST_FL_FAILED;
13870
13871 tg3_full_unlock(tp);
13872
13873 if (tg3_test_interrupt(tp) != 0) {
13874 etest->flags |= ETH_TEST_FL_FAILED;
13875 data[TG3_INTERRUPT_TEST] = 1;
13876 }
13877
13878 tg3_full_lock(tp, 0);
13879
13880 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13881 if (netif_running(dev)) {
13882 tg3_flag_set(tp, INIT_COMPLETE);
13883 err2 = tg3_restart_hw(tp, true);
13884 if (!err2)
13885 tg3_netif_start(tp);
13886 }
13887
13888 tg3_full_unlock(tp);
13889
13890 if (irq_sync && !err2)
13891 tg3_phy_start(tp);
13892 }
13893 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13894 tg3_power_down_prepare(tp);
13895
13896 }
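
/*
 * For reference (not driver code): this handler backs the standard
 * ethtool self-test interface, so the checks above are driven from
 * user space with e.g. "ethtool -t eth0 online" or a full
 * "ethtool -t eth0 offline" pass; each slot of data[] reports the
 * result of one test.
 */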
13897
13898 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13899 {
13900 struct tg3 *tp = netdev_priv(dev);
13901 struct hwtstamp_config stmpconf;
13902
13903 if (!tg3_flag(tp, PTP_CAPABLE))
13904 return -EOPNOTSUPP;
13905
13906 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13907 return -EFAULT;
13908
13909 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13910 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13911 return -ERANGE;
13912
13913 switch (stmpconf.rx_filter) {
13914 case HWTSTAMP_FILTER_NONE:
13915 tp->rxptpctl = 0;
13916 break;
13917 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13918 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13919 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13920 break;
13921 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13922 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13923 TG3_RX_PTP_CTL_SYNC_EVNT;
13924 break;
13925 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13926 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13927 TG3_RX_PTP_CTL_DELAY_REQ;
13928 break;
13929 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13930 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13931 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13932 break;
13933 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13934 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13935 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13936 break;
13937 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13938 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13939 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13940 break;
13941 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13942 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13943 TG3_RX_PTP_CTL_SYNC_EVNT;
13944 break;
13945 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13946 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13947 TG3_RX_PTP_CTL_SYNC_EVNT;
13948 break;
13949 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13950 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13951 TG3_RX_PTP_CTL_SYNC_EVNT;
13952 break;
13953 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13954 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13955 TG3_RX_PTP_CTL_DELAY_REQ;
13956 break;
13957 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13958 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13959 TG3_RX_PTP_CTL_DELAY_REQ;
13960 break;
13961 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13962 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13963 TG3_RX_PTP_CTL_DELAY_REQ;
13964 break;
13965 default:
13966 return -ERANGE;
13967 }
13968
13969 if (netif_running(dev) && tp->rxptpctl)
13970 tw32(TG3_RX_PTP_CTL,
13971 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13972
13973 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13974 tg3_flag_set(tp, TX_TSTAMP_EN);
13975 else
13976 tg3_flag_clear(tp, TX_TSTAMP_EN);
13977
13978 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13979 -EFAULT : 0;
13980 }
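
/*
 * Minimal user-space sketch (assumptions: an AF_INET socket fd, device
 * name "eth0", and sufficient privileges) of the SIOCSHWTSTAMP request
 * the handler above services.  Compiled out here; it belongs in a
 * user-space tool, not in this driver.
 */
#if 0
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_enable_hwtstamp(int fd)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;	/* kernel copies the config back */

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif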
13981
13982 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13983 {
13984 struct tg3 *tp = netdev_priv(dev);
13985 struct hwtstamp_config stmpconf;
13986
13987 if (!tg3_flag(tp, PTP_CAPABLE))
13988 return -EOPNOTSUPP;
13989
13990 stmpconf.flags = 0;
13991 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13992 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13993
13994 switch (tp->rxptpctl) {
13995 case 0:
13996 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13997 break;
13998 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13999 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14000 break;
14001 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14002 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14003 break;
14004 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14005 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14006 break;
14007 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14008 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14009 break;
14010 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14011 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14012 break;
14013 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14014 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14015 break;
14016 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14017 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14018 break;
14019 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14020 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14021 break;
14022 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14023 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14024 break;
14025 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14026 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14027 break;
14028 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14029 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14030 break;
14031 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14032 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14033 break;
14034 default:
14035 WARN_ON_ONCE(1);
14036 return -ERANGE;
14037 }
14038
14039 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
14040 -EFAULT : 0;
14041 }
14042
14043 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14044 {
14045 struct mii_ioctl_data *data = if_mii(ifr);
14046 struct tg3 *tp = netdev_priv(dev);
14047 int err;
14048
14049 if (tg3_flag(tp, USE_PHYLIB)) {
14050 struct phy_device *phydev;
14051 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14052 return -EAGAIN;
14053 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14054 return phy_mii_ioctl(phydev, ifr, cmd);
14055 }
14056
14057 switch (cmd) {
14058 case SIOCGMIIPHY:
14059 data->phy_id = tp->phy_addr;
14060
14061 fallthrough;
14062 case SIOCGMIIREG: {
14063 u32 mii_regval;
14064
14065 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14066 break; /* We have no PHY */
14067
14068 if (!netif_running(dev))
14069 return -EAGAIN;
14070
14071 spin_lock_bh(&tp->lock);
14072 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14073 data->reg_num & 0x1f, &mii_regval);
14074 spin_unlock_bh(&tp->lock);
14075
14076 data->val_out = mii_regval;
14077
14078 return err;
14079 }
14080
14081 case SIOCSMIIREG:
14082 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14083 break; /* We have no PHY */
14084
14085 if (!netif_running(dev))
14086 return -EAGAIN;
14087
14088 spin_lock_bh(&tp->lock);
14089 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14090 data->reg_num & 0x1f, data->val_in);
14091 spin_unlock_bh(&tp->lock);
14092
14093 return err;
14094
14095 case SIOCSHWTSTAMP:
14096 return tg3_hwtstamp_set(dev, ifr);
14097
14098 case SIOCGHWTSTAMP:
14099 return tg3_hwtstamp_get(dev, ifr);
14100
14101 default:
14102 /* do nothing */
14103 break;
14104 }
14105 return -EOPNOTSUPP;
14106 }
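
/*
 * Minimal user-space sketch (assumptions: an AF_INET socket fd and a
 * device name) of the SIOCGMIIPHY/SIOCGMIIREG pair handled above,
 * reading the PHY's BMSR (register 1).  Compiled out; user-space code,
 * not driver code.
 */
#if 0
#include <linux/mii.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

static int example_read_bmsr(int fd, const char *ifname, unsigned int *out)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	if (ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return -1;

	mii->reg_num = MII_BMSR;
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)
		return -1;

	*out = mii->val_out;
	return 0;
}
#endif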
14107
14108 static int tg3_get_coalesce(struct net_device *dev,
14109 struct ethtool_coalesce *ec,
14110 struct kernel_ethtool_coalesce *kernel_coal,
14111 struct netlink_ext_ack *extack)
14112 {
14113 struct tg3 *tp = netdev_priv(dev);
14114
14115 memcpy(ec, &tp->coal, sizeof(*ec));
14116 return 0;
14117 }
14118
14119 static int tg3_set_coalesce(struct net_device *dev,
14120 struct ethtool_coalesce *ec,
14121 struct kernel_ethtool_coalesce *kernel_coal,
14122 struct netlink_ext_ack *extack)
14123 {
14124 struct tg3 *tp = netdev_priv(dev);
14125 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14126 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14127
14128 if (!tg3_flag(tp, 5705_PLUS)) {
14129 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14130 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14131 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14132 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14133 }
14134
14135 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14136 (!ec->rx_coalesce_usecs) ||
14137 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14138 (!ec->tx_coalesce_usecs) ||
14139 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14140 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14141 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14142 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14143 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14144 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14145 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14146 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14147 return -EINVAL;
14148
14149 /* Only copy relevant parameters, ignore all others. */
14150 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14151 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14152 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14153 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14154 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14155 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14156 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14157 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14158 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14159
14160 if (netif_running(dev)) {
14161 tg3_full_lock(tp, 0);
14162 __tg3_set_coalesce(tp, &tp->coal);
14163 tg3_full_unlock(tp);
14164 }
14165 return 0;
14166 }
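
/*
 * For reference (not driver code): these bounds back the standard
 * ethtool coalescing knobs, e.g. "ethtool -C eth0 rx-usecs 20
 * rx-frames 5" arrives here as ec->rx_coalesce_usecs and
 * ec->rx_max_coalesced_frames.
 */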
14167
14168 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14169 {
14170 struct tg3 *tp = netdev_priv(dev);
14171
14172 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14173 netdev_warn(tp->dev, "Board does not support EEE!\n");
14174 return -EOPNOTSUPP;
14175 }
14176
14177 if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14178 netdev_warn(tp->dev,
14179 "Direct manipulation of EEE advertisement is not supported\n");
14180 return -EINVAL;
14181 }
14182
14183 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14184 netdev_warn(tp->dev,
14185 "Maximal Tx Lpi timer supported is %#x(u)\n",
14186 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14187 return -EINVAL;
14188 }
14189
14190 tp->eee.eee_enabled = edata->eee_enabled;
14191 tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14192 tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14193
14194 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14195 tg3_warn_mgmt_link_flap(tp);
14196
14197 if (netif_running(tp->dev)) {
14198 tg3_full_lock(tp, 0);
14199 tg3_setup_eee(tp);
14200 tg3_phy_reset(tp);
14201 tg3_full_unlock(tp);
14202 }
14203
14204 return 0;
14205 }
14206
14207 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14208 {
14209 struct tg3 *tp = netdev_priv(dev);
14210
14211 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14212 netdev_warn(tp->dev,
14213 "Board does not support EEE!\n");
14214 return -EOPNOTSUPP;
14215 }
14216
14217 *edata = tp->eee;
14218 return 0;
14219 }
14220
14221 static const struct ethtool_ops tg3_ethtool_ops = {
14222 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14223 ETHTOOL_COALESCE_MAX_FRAMES |
14224 ETHTOOL_COALESCE_USECS_IRQ |
14225 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14226 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14227 .get_drvinfo = tg3_get_drvinfo,
14228 .get_regs_len = tg3_get_regs_len,
14229 .get_regs = tg3_get_regs,
14230 .get_wol = tg3_get_wol,
14231 .set_wol = tg3_set_wol,
14232 .get_msglevel = tg3_get_msglevel,
14233 .set_msglevel = tg3_set_msglevel,
14234 .nway_reset = tg3_nway_reset,
14235 .get_link = ethtool_op_get_link,
14236 .get_eeprom_len = tg3_get_eeprom_len,
14237 .get_eeprom = tg3_get_eeprom,
14238 .set_eeprom = tg3_set_eeprom,
14239 .get_ringparam = tg3_get_ringparam,
14240 .set_ringparam = tg3_set_ringparam,
14241 .get_pauseparam = tg3_get_pauseparam,
14242 .set_pauseparam = tg3_set_pauseparam,
14243 .self_test = tg3_self_test,
14244 .get_strings = tg3_get_strings,
14245 .set_phys_id = tg3_set_phys_id,
14246 .get_ethtool_stats = tg3_get_ethtool_stats,
14247 .get_coalesce = tg3_get_coalesce,
14248 .set_coalesce = tg3_set_coalesce,
14249 .get_sset_count = tg3_get_sset_count,
14250 .get_rxnfc = tg3_get_rxnfc,
14251 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14252 .get_rxfh = tg3_get_rxfh,
14253 .set_rxfh = tg3_set_rxfh,
14254 .get_channels = tg3_get_channels,
14255 .set_channels = tg3_set_channels,
14256 .get_ts_info = tg3_get_ts_info,
14257 .get_eee = tg3_get_eee,
14258 .set_eee = tg3_set_eee,
14259 .get_link_ksettings = tg3_get_link_ksettings,
14260 .set_link_ksettings = tg3_set_link_ksettings,
14261 };
14262
14263 static void tg3_get_stats64(struct net_device *dev,
14264 struct rtnl_link_stats64 *stats)
14265 {
14266 struct tg3 *tp = netdev_priv(dev);
14267
14268 spin_lock_bh(&tp->lock);
14269 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14270 *stats = tp->net_stats_prev;
14271 spin_unlock_bh(&tp->lock);
14272 return;
14273 }
14274
14275 tg3_get_nstats(tp, stats);
14276 spin_unlock_bh(&tp->lock);
14277 }
14278
14279 static void tg3_set_rx_mode(struct net_device *dev)
14280 {
14281 struct tg3 *tp = netdev_priv(dev);
14282
14283 if (!netif_running(dev))
14284 return;
14285
14286 tg3_full_lock(tp, 0);
14287 __tg3_set_rx_mode(dev);
14288 tg3_full_unlock(tp);
14289 }
14290
14291 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14292 int new_mtu)
14293 {
14294 WRITE_ONCE(dev->mtu, new_mtu);
14295
14296 if (new_mtu > ETH_DATA_LEN) {
14297 if (tg3_flag(tp, 5780_CLASS)) {
14298 netdev_update_features(dev);
14299 tg3_flag_clear(tp, TSO_CAPABLE);
14300 } else {
14301 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14302 }
14303 } else {
14304 if (tg3_flag(tp, 5780_CLASS)) {
14305 tg3_flag_set(tp, TSO_CAPABLE);
14306 netdev_update_features(dev);
14307 }
14308 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14309 }
14310 }
14311
14312 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14313 {
14314 struct tg3 *tp = netdev_priv(dev);
14315 int err;
14316 bool reset_phy = false;
14317
14318 if (!netif_running(dev)) {
14319 /* We'll just catch it later when the
14320 * device is brought up.
14321 */
14322 tg3_set_mtu(dev, tp, new_mtu);
14323 return 0;
14324 }
14325
14326 tg3_phy_stop(tp);
14327
14328 tg3_netif_stop(tp);
14329
14330 tg3_set_mtu(dev, tp, new_mtu);
14331
14332 tg3_full_lock(tp, 1);
14333
14334 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14335
14336 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14337 * breaks all requests to 256 bytes.
14338 */
14339 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14340 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14341 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14342 tg3_asic_rev(tp) == ASIC_REV_5720)
14343 reset_phy = true;
14344
14345 err = tg3_restart_hw(tp, reset_phy);
14346
14347 if (!err)
14348 tg3_netif_start(tp);
14349
14350 tg3_full_unlock(tp);
14351
14352 if (!err)
14353 tg3_phy_start(tp);
14354
14355 return err;
14356 }
14357
14358 static const struct net_device_ops tg3_netdev_ops = {
14359 .ndo_open = tg3_open,
14360 .ndo_stop = tg3_close,
14361 .ndo_start_xmit = tg3_start_xmit,
14362 .ndo_get_stats64 = tg3_get_stats64,
14363 .ndo_validate_addr = eth_validate_addr,
14364 .ndo_set_rx_mode = tg3_set_rx_mode,
14365 .ndo_set_mac_address = tg3_set_mac_addr,
14366 .ndo_eth_ioctl = tg3_ioctl,
14367 .ndo_tx_timeout = tg3_tx_timeout,
14368 .ndo_change_mtu = tg3_change_mtu,
14369 .ndo_fix_features = tg3_fix_features,
14370 .ndo_set_features = tg3_set_features,
14371 #ifdef CONFIG_NET_POLL_CONTROLLER
14372 .ndo_poll_controller = tg3_poll_controller,
14373 #endif
14374 };
14375
14376 static void tg3_get_eeprom_size(struct tg3 *tp)
14377 {
14378 u32 cursize, val, magic;
14379
14380 tp->nvram_size = EEPROM_CHIP_SIZE;
14381
14382 if (tg3_nvram_read(tp, 0, &magic) != 0)
14383 return;
14384
14385 if ((magic != TG3_EEPROM_MAGIC) &&
14386 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14387 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14388 return;
14389
14390 /*
14391 * Size the chip by reading offsets at increasing powers of two.
14392 * When we encounter our validation signature, we know the addressing
14393 * has wrapped around, and thus have our chip size.
14394 */
14395 cursize = 0x10;
14396
14397 while (cursize < tp->nvram_size) {
14398 if (tg3_nvram_read(tp, cursize, &val) != 0)
14399 return;
14400
14401 if (val == magic)
14402 break;
14403
14404 cursize <<= 1;
14405 }
14406
14407 tp->nvram_size = cursize;
14408 }
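
/*
 * Worked example of the sizing loop above: on a hypothetical 0x200-byte
 * part the reads at 0x10, 0x20, ... return ordinary data, but the read
 * at offset 0x200 wraps back to offset 0 and returns the magic value,
 * so the loop exits with nvram_size = 0x200.
 */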
14409
14410 static void tg3_get_nvram_size(struct tg3 *tp)
14411 {
14412 u32 val;
14413
14414 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14415 return;
14416
14417 /* Selfboot format */
14418 if (val != TG3_EEPROM_MAGIC) {
14419 tg3_get_eeprom_size(tp);
14420 return;
14421 }
14422
14423 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14424 if (val != 0) {
14425 /* This is confusing. We want to operate on the
14426 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14427 * call will read from NVRAM and byteswap the data
14428 * according to the byteswapping settings for all
14429 * other register accesses. This ensures the data we
14430 * want will always reside in the lower 16-bits.
14431 * However, the data in NVRAM is in LE format, which
14432 * means the data from the NVRAM read will always be
14433 * opposite the endianness of the CPU. The 16-bit
14434 * byteswap then brings the data to CPU endianness.
14435 */
14436 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14437 return;
14438 }
14439 }
14440 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14441 }
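
/*
 * Worked example of the conversion above: if the lower 16 bits of the
 * read come back as 0x0004, swab16() yields 0x0400 = 1024, so the size
 * recorded is 1024 * 1024 bytes (1 MB).
 */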
14442
14443 static void tg3_get_nvram_info(struct tg3 *tp)
14444 {
14445 u32 nvcfg1;
14446
14447 nvcfg1 = tr32(NVRAM_CFG1);
14448 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14449 tg3_flag_set(tp, FLASH);
14450 } else {
14451 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14452 tw32(NVRAM_CFG1, nvcfg1);
14453 }
14454
14455 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14456 tg3_flag(tp, 5780_CLASS)) {
14457 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14458 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14459 tp->nvram_jedecnum = JEDEC_ATMEL;
14460 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14461 tg3_flag_set(tp, NVRAM_BUFFERED);
14462 break;
14463 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14464 tp->nvram_jedecnum = JEDEC_ATMEL;
14465 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14466 break;
14467 case FLASH_VENDOR_ATMEL_EEPROM:
14468 tp->nvram_jedecnum = JEDEC_ATMEL;
14469 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14470 tg3_flag_set(tp, NVRAM_BUFFERED);
14471 break;
14472 case FLASH_VENDOR_ST:
14473 tp->nvram_jedecnum = JEDEC_ST;
14474 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14475 tg3_flag_set(tp, NVRAM_BUFFERED);
14476 break;
14477 case FLASH_VENDOR_SAIFUN:
14478 tp->nvram_jedecnum = JEDEC_SAIFUN;
14479 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14480 break;
14481 case FLASH_VENDOR_SST_SMALL:
14482 case FLASH_VENDOR_SST_LARGE:
14483 tp->nvram_jedecnum = JEDEC_SST;
14484 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14485 break;
14486 }
14487 } else {
14488 tp->nvram_jedecnum = JEDEC_ATMEL;
14489 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14490 tg3_flag_set(tp, NVRAM_BUFFERED);
14491 }
14492 }
14493
14494 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14495 {
14496 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14497 case FLASH_5752PAGE_SIZE_256:
14498 tp->nvram_pagesize = 256;
14499 break;
14500 case FLASH_5752PAGE_SIZE_512:
14501 tp->nvram_pagesize = 512;
14502 break;
14503 case FLASH_5752PAGE_SIZE_1K:
14504 tp->nvram_pagesize = 1024;
14505 break;
14506 case FLASH_5752PAGE_SIZE_2K:
14507 tp->nvram_pagesize = 2048;
14508 break;
14509 case FLASH_5752PAGE_SIZE_4K:
14510 tp->nvram_pagesize = 4096;
14511 break;
14512 case FLASH_5752PAGE_SIZE_264:
14513 tp->nvram_pagesize = 264;
14514 break;
14515 case FLASH_5752PAGE_SIZE_528:
14516 tp->nvram_pagesize = 528;
14517 break;
14518 }
14519 }
14520
14521 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14522 {
14523 u32 nvcfg1;
14524
14525 nvcfg1 = tr32(NVRAM_CFG1);
14526
14527 /* NVRAM protection for TPM */
14528 if (nvcfg1 & (1 << 27))
14529 tg3_flag_set(tp, PROTECTED_NVRAM);
14530
14531 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14532 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14533 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14534 tp->nvram_jedecnum = JEDEC_ATMEL;
14535 tg3_flag_set(tp, NVRAM_BUFFERED);
14536 break;
14537 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538 tp->nvram_jedecnum = JEDEC_ATMEL;
14539 tg3_flag_set(tp, NVRAM_BUFFERED);
14540 tg3_flag_set(tp, FLASH);
14541 break;
14542 case FLASH_5752VENDOR_ST_M45PE10:
14543 case FLASH_5752VENDOR_ST_M45PE20:
14544 case FLASH_5752VENDOR_ST_M45PE40:
14545 tp->nvram_jedecnum = JEDEC_ST;
14546 tg3_flag_set(tp, NVRAM_BUFFERED);
14547 tg3_flag_set(tp, FLASH);
14548 break;
14549 }
14550
14551 if (tg3_flag(tp, FLASH)) {
14552 tg3_nvram_get_pagesize(tp, nvcfg1);
14553 } else {
14554 /* For eeprom, set pagesize to maximum eeprom size */
14555 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14556
14557 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14558 tw32(NVRAM_CFG1, nvcfg1);
14559 }
14560 }
14561
14562 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14563 {
14564 u32 nvcfg1, protect = 0;
14565
14566 nvcfg1 = tr32(NVRAM_CFG1);
14567
14568 /* NVRAM protection for TPM */
14569 if (nvcfg1 & (1 << 27)) {
14570 tg3_flag_set(tp, PROTECTED_NVRAM);
14571 protect = 1;
14572 }
14573
14574 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14575 switch (nvcfg1) {
14576 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14577 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14578 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14579 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14580 tp->nvram_jedecnum = JEDEC_ATMEL;
14581 tg3_flag_set(tp, NVRAM_BUFFERED);
14582 tg3_flag_set(tp, FLASH);
14583 tp->nvram_pagesize = 264;
14584 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14585 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14586 tp->nvram_size = (protect ? 0x3e200 :
14587 TG3_NVRAM_SIZE_512KB);
14588 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14589 tp->nvram_size = (protect ? 0x1f200 :
14590 TG3_NVRAM_SIZE_256KB);
14591 else
14592 tp->nvram_size = (protect ? 0x1f200 :
14593 TG3_NVRAM_SIZE_128KB);
14594 break;
14595 case FLASH_5752VENDOR_ST_M45PE10:
14596 case FLASH_5752VENDOR_ST_M45PE20:
14597 case FLASH_5752VENDOR_ST_M45PE40:
14598 tp->nvram_jedecnum = JEDEC_ST;
14599 tg3_flag_set(tp, NVRAM_BUFFERED);
14600 tg3_flag_set(tp, FLASH);
14601 tp->nvram_pagesize = 256;
14602 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14603 tp->nvram_size = (protect ?
14604 TG3_NVRAM_SIZE_64KB :
14605 TG3_NVRAM_SIZE_128KB);
14606 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14607 tp->nvram_size = (protect ?
14608 TG3_NVRAM_SIZE_64KB :
14609 TG3_NVRAM_SIZE_256KB);
14610 else
14611 tp->nvram_size = (protect ?
14612 TG3_NVRAM_SIZE_128KB :
14613 TG3_NVRAM_SIZE_512KB);
14614 break;
14615 }
14616 }
14617
14618 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14619 {
14620 u32 nvcfg1;
14621
14622 nvcfg1 = tr32(NVRAM_CFG1);
14623
14624 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14625 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14626 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14627 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14628 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14629 tp->nvram_jedecnum = JEDEC_ATMEL;
14630 tg3_flag_set(tp, NVRAM_BUFFERED);
14631 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14632
14633 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14634 tw32(NVRAM_CFG1, nvcfg1);
14635 break;
14636 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14637 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14638 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14639 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14640 tp->nvram_jedecnum = JEDEC_ATMEL;
14641 tg3_flag_set(tp, NVRAM_BUFFERED);
14642 tg3_flag_set(tp, FLASH);
14643 tp->nvram_pagesize = 264;
14644 break;
14645 case FLASH_5752VENDOR_ST_M45PE10:
14646 case FLASH_5752VENDOR_ST_M45PE20:
14647 case FLASH_5752VENDOR_ST_M45PE40:
14648 tp->nvram_jedecnum = JEDEC_ST;
14649 tg3_flag_set(tp, NVRAM_BUFFERED);
14650 tg3_flag_set(tp, FLASH);
14651 tp->nvram_pagesize = 256;
14652 break;
14653 }
14654 }
14655
14656 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14657 {
14658 u32 nvcfg1, protect = 0;
14659
14660 nvcfg1 = tr32(NVRAM_CFG1);
14661
14662 /* NVRAM protection for TPM */
14663 if (nvcfg1 & (1 << 27)) {
14664 tg3_flag_set(tp, PROTECTED_NVRAM);
14665 protect = 1;
14666 }
14667
14668 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14669 switch (nvcfg1) {
14670 case FLASH_5761VENDOR_ATMEL_ADB021D:
14671 case FLASH_5761VENDOR_ATMEL_ADB041D:
14672 case FLASH_5761VENDOR_ATMEL_ADB081D:
14673 case FLASH_5761VENDOR_ATMEL_ADB161D:
14674 case FLASH_5761VENDOR_ATMEL_MDB021D:
14675 case FLASH_5761VENDOR_ATMEL_MDB041D:
14676 case FLASH_5761VENDOR_ATMEL_MDB081D:
14677 case FLASH_5761VENDOR_ATMEL_MDB161D:
14678 tp->nvram_jedecnum = JEDEC_ATMEL;
14679 tg3_flag_set(tp, NVRAM_BUFFERED);
14680 tg3_flag_set(tp, FLASH);
14681 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14682 tp->nvram_pagesize = 256;
14683 break;
14684 case FLASH_5761VENDOR_ST_A_M45PE20:
14685 case FLASH_5761VENDOR_ST_A_M45PE40:
14686 case FLASH_5761VENDOR_ST_A_M45PE80:
14687 case FLASH_5761VENDOR_ST_A_M45PE16:
14688 case FLASH_5761VENDOR_ST_M_M45PE20:
14689 case FLASH_5761VENDOR_ST_M_M45PE40:
14690 case FLASH_5761VENDOR_ST_M_M45PE80:
14691 case FLASH_5761VENDOR_ST_M_M45PE16:
14692 tp->nvram_jedecnum = JEDEC_ST;
14693 tg3_flag_set(tp, NVRAM_BUFFERED);
14694 tg3_flag_set(tp, FLASH);
14695 tp->nvram_pagesize = 256;
14696 break;
14697 }
14698
14699 if (protect) {
14700 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14701 } else {
14702 switch (nvcfg1) {
14703 case FLASH_5761VENDOR_ATMEL_ADB161D:
14704 case FLASH_5761VENDOR_ATMEL_MDB161D:
14705 case FLASH_5761VENDOR_ST_A_M45PE16:
14706 case FLASH_5761VENDOR_ST_M_M45PE16:
14707 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14708 break;
14709 case FLASH_5761VENDOR_ATMEL_ADB081D:
14710 case FLASH_5761VENDOR_ATMEL_MDB081D:
14711 case FLASH_5761VENDOR_ST_A_M45PE80:
14712 case FLASH_5761VENDOR_ST_M_M45PE80:
14713 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14714 break;
14715 case FLASH_5761VENDOR_ATMEL_ADB041D:
14716 case FLASH_5761VENDOR_ATMEL_MDB041D:
14717 case FLASH_5761VENDOR_ST_A_M45PE40:
14718 case FLASH_5761VENDOR_ST_M_M45PE40:
14719 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14720 break;
14721 case FLASH_5761VENDOR_ATMEL_ADB021D:
14722 case FLASH_5761VENDOR_ATMEL_MDB021D:
14723 case FLASH_5761VENDOR_ST_A_M45PE20:
14724 case FLASH_5761VENDOR_ST_M_M45PE20:
14725 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14726 break;
14727 }
14728 }
14729 }
14730
14731 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14732 {
14733 tp->nvram_jedecnum = JEDEC_ATMEL;
14734 tg3_flag_set(tp, NVRAM_BUFFERED);
14735 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14736 }
14737
14738 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14739 {
14740 u32 nvcfg1;
14741
14742 nvcfg1 = tr32(NVRAM_CFG1);
14743
14744 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14745 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14746 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14747 tp->nvram_jedecnum = JEDEC_ATMEL;
14748 tg3_flag_set(tp, NVRAM_BUFFERED);
14749 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14750
14751 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14752 tw32(NVRAM_CFG1, nvcfg1);
14753 return;
14754 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14755 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14756 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14757 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14758 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14759 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14760 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14761 tp->nvram_jedecnum = JEDEC_ATMEL;
14762 tg3_flag_set(tp, NVRAM_BUFFERED);
14763 tg3_flag_set(tp, FLASH);
14764
14765 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14766 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14767 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14768 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14769 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14770 break;
14771 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14772 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14773 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14774 break;
14775 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14776 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14777 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14778 break;
14779 }
14780 break;
14781 case FLASH_5752VENDOR_ST_M45PE10:
14782 case FLASH_5752VENDOR_ST_M45PE20:
14783 case FLASH_5752VENDOR_ST_M45PE40:
14784 tp->nvram_jedecnum = JEDEC_ST;
14785 tg3_flag_set(tp, NVRAM_BUFFERED);
14786 tg3_flag_set(tp, FLASH);
14787
14788 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14789 case FLASH_5752VENDOR_ST_M45PE10:
14790 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14791 break;
14792 case FLASH_5752VENDOR_ST_M45PE20:
14793 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14794 break;
14795 case FLASH_5752VENDOR_ST_M45PE40:
14796 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14797 break;
14798 }
14799 break;
14800 default:
14801 tg3_flag_set(tp, NO_NVRAM);
14802 return;
14803 }
14804
14805 tg3_nvram_get_pagesize(tp, nvcfg1);
14806 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14807 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14808 }
14809
14810
14811 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14812 {
14813 u32 nvcfg1;
14814
14815 nvcfg1 = tr32(NVRAM_CFG1);
14816
14817 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14818 case FLASH_5717VENDOR_ATMEL_EEPROM:
14819 case FLASH_5717VENDOR_MICRO_EEPROM:
14820 tp->nvram_jedecnum = JEDEC_ATMEL;
14821 tg3_flag_set(tp, NVRAM_BUFFERED);
14822 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14823
14824 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14825 tw32(NVRAM_CFG1, nvcfg1);
14826 return;
14827 case FLASH_5717VENDOR_ATMEL_MDB011D:
14828 case FLASH_5717VENDOR_ATMEL_ADB011B:
14829 case FLASH_5717VENDOR_ATMEL_ADB011D:
14830 case FLASH_5717VENDOR_ATMEL_MDB021D:
14831 case FLASH_5717VENDOR_ATMEL_ADB021B:
14832 case FLASH_5717VENDOR_ATMEL_ADB021D:
14833 case FLASH_5717VENDOR_ATMEL_45USPT:
14834 tp->nvram_jedecnum = JEDEC_ATMEL;
14835 tg3_flag_set(tp, NVRAM_BUFFERED);
14836 tg3_flag_set(tp, FLASH);
14837
14838 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14839 case FLASH_5717VENDOR_ATMEL_MDB021D:
14840 /* Detect size with tg3_nvram_get_size() */
14841 break;
14842 case FLASH_5717VENDOR_ATMEL_ADB021B:
14843 case FLASH_5717VENDOR_ATMEL_ADB021D:
14844 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14845 break;
14846 default:
14847 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14848 break;
14849 }
14850 break;
14851 case FLASH_5717VENDOR_ST_M_M25PE10:
14852 case FLASH_5717VENDOR_ST_A_M25PE10:
14853 case FLASH_5717VENDOR_ST_M_M45PE10:
14854 case FLASH_5717VENDOR_ST_A_M45PE10:
14855 case FLASH_5717VENDOR_ST_M_M25PE20:
14856 case FLASH_5717VENDOR_ST_A_M25PE20:
14857 case FLASH_5717VENDOR_ST_M_M45PE20:
14858 case FLASH_5717VENDOR_ST_A_M45PE20:
14859 case FLASH_5717VENDOR_ST_25USPT:
14860 case FLASH_5717VENDOR_ST_45USPT:
14861 tp->nvram_jedecnum = JEDEC_ST;
14862 tg3_flag_set(tp, NVRAM_BUFFERED);
14863 tg3_flag_set(tp, FLASH);
14864
14865 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14866 case FLASH_5717VENDOR_ST_M_M25PE20:
14867 case FLASH_5717VENDOR_ST_M_M45PE20:
14868 /* Detect size with tg3_nvram_get_size() */
14869 break;
14870 case FLASH_5717VENDOR_ST_A_M25PE20:
14871 case FLASH_5717VENDOR_ST_A_M45PE20:
14872 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14873 break;
14874 default:
14875 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14876 break;
14877 }
14878 break;
14879 default:
14880 tg3_flag_set(tp, NO_NVRAM);
14881 return;
14882 }
14883
14884 tg3_nvram_get_pagesize(tp, nvcfg1);
14885 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14886 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14887 }
14888
14889 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14890 {
14891 u32 nvcfg1, nvmpinstrp, nv_status;
14892
14893 nvcfg1 = tr32(NVRAM_CFG1);
14894 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14895
14896 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14897 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14898 tg3_flag_set(tp, NO_NVRAM);
14899 return;
14900 }
14901
14902 switch (nvmpinstrp) {
14903 case FLASH_5762_MX25L_100:
14904 case FLASH_5762_MX25L_200:
14905 case FLASH_5762_MX25L_400:
14906 case FLASH_5762_MX25L_800:
14907 case FLASH_5762_MX25L_160_320:
14908 tp->nvram_pagesize = 4096;
14909 tp->nvram_jedecnum = JEDEC_MACRONIX;
14910 tg3_flag_set(tp, NVRAM_BUFFERED);
14911 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14912 tg3_flag_set(tp, FLASH);
14913 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14914 tp->nvram_size =
14915 (1 << (nv_status >> AUTOSENSE_DEVID &
14916 AUTOSENSE_DEVID_MASK)
14917 << AUTOSENSE_SIZE_IN_MB);
14918 return;
14919
14920 case FLASH_5762_EEPROM_HD:
14921 nvmpinstrp = FLASH_5720_EEPROM_HD;
14922 break;
14923 case FLASH_5762_EEPROM_LD:
14924 nvmpinstrp = FLASH_5720_EEPROM_LD;
14925 break;
14926 case FLASH_5720VENDOR_M_ST_M45PE20:
14927 /* This pinstrap supports multiple sizes, so force it
14928 * to read the actual size from location 0xf0.
14929 */
14930 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14931 break;
14932 }
14933 }
14934
14935 switch (nvmpinstrp) {
14936 case FLASH_5720_EEPROM_HD:
14937 case FLASH_5720_EEPROM_LD:
14938 tp->nvram_jedecnum = JEDEC_ATMEL;
14939 tg3_flag_set(tp, NVRAM_BUFFERED);
14940
14941 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14942 tw32(NVRAM_CFG1, nvcfg1);
14943 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14944 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14945 else
14946 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14947 return;
14948 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14949 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14950 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14951 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14952 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14953 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14954 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14955 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14956 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14957 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14958 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14959 case FLASH_5720VENDOR_ATMEL_45USPT:
14960 tp->nvram_jedecnum = JEDEC_ATMEL;
14961 tg3_flag_set(tp, NVRAM_BUFFERED);
14962 tg3_flag_set(tp, FLASH);
14963
14964 switch (nvmpinstrp) {
14965 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14966 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14967 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14968 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14969 break;
14970 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14971 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14972 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14973 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14974 break;
14975 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14976 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14977 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14978 break;
14979 default:
14980 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14981 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14982 break;
14983 }
14984 break;
14985 case FLASH_5720VENDOR_M_ST_M25PE10:
14986 case FLASH_5720VENDOR_M_ST_M45PE10:
14987 case FLASH_5720VENDOR_A_ST_M25PE10:
14988 case FLASH_5720VENDOR_A_ST_M45PE10:
14989 case FLASH_5720VENDOR_M_ST_M25PE20:
14990 case FLASH_5720VENDOR_M_ST_M45PE20:
14991 case FLASH_5720VENDOR_A_ST_M25PE20:
14992 case FLASH_5720VENDOR_A_ST_M45PE20:
14993 case FLASH_5720VENDOR_M_ST_M25PE40:
14994 case FLASH_5720VENDOR_M_ST_M45PE40:
14995 case FLASH_5720VENDOR_A_ST_M25PE40:
14996 case FLASH_5720VENDOR_A_ST_M45PE40:
14997 case FLASH_5720VENDOR_M_ST_M25PE80:
14998 case FLASH_5720VENDOR_M_ST_M45PE80:
14999 case FLASH_5720VENDOR_A_ST_M25PE80:
15000 case FLASH_5720VENDOR_A_ST_M45PE80:
15001 case FLASH_5720VENDOR_ST_25USPT:
15002 case FLASH_5720VENDOR_ST_45USPT:
15003 tp->nvram_jedecnum = JEDEC_ST;
15004 tg3_flag_set(tp, NVRAM_BUFFERED);
15005 tg3_flag_set(tp, FLASH);
15006
15007 switch (nvmpinstrp) {
15008 case FLASH_5720VENDOR_M_ST_M25PE20:
15009 case FLASH_5720VENDOR_M_ST_M45PE20:
15010 case FLASH_5720VENDOR_A_ST_M25PE20:
15011 case FLASH_5720VENDOR_A_ST_M45PE20:
15012 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15013 break;
15014 case FLASH_5720VENDOR_M_ST_M25PE40:
15015 case FLASH_5720VENDOR_M_ST_M45PE40:
15016 case FLASH_5720VENDOR_A_ST_M25PE40:
15017 case FLASH_5720VENDOR_A_ST_M45PE40:
15018 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15019 break;
15020 case FLASH_5720VENDOR_M_ST_M25PE80:
15021 case FLASH_5720VENDOR_M_ST_M45PE80:
15022 case FLASH_5720VENDOR_A_ST_M25PE80:
15023 case FLASH_5720VENDOR_A_ST_M45PE80:
15024 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15025 break;
15026 default:
15027 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15028 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15029 break;
15030 }
15031 break;
15032 default:
15033 tg3_flag_set(tp, NO_NVRAM);
15034 return;
15035 }
15036
15037 tg3_nvram_get_pagesize(tp, nvcfg1);
15038 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15039 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15040
15041 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15042 u32 val;
15043
15044 if (tg3_nvram_read(tp, 0, &val))
15045 return;
15046
15047 if (val != TG3_EEPROM_MAGIC &&
15048 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15049 tg3_flag_set(tp, NO_NVRAM);
15050 }
15051 }
15052
15053 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15054 static void tg3_nvram_init(struct tg3 *tp)
15055 {
15056 if (tg3_flag(tp, IS_SSB_CORE)) {
15057 /* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
15058 tg3_flag_clear(tp, NVRAM);
15059 tg3_flag_clear(tp, NVRAM_BUFFERED);
15060 tg3_flag_set(tp, NO_NVRAM);
15061 return;
15062 }
15063
15064 tw32_f(GRC_EEPROM_ADDR,
15065 (EEPROM_ADDR_FSM_RESET |
15066 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15067 EEPROM_ADDR_CLKPERD_SHIFT)));
15068
15069 msleep(1);
15070
15071 /* Enable seeprom accesses. */
15072 tw32_f(GRC_LOCAL_CTRL,
15073 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15074 udelay(100);
15075
15076 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15077 tg3_asic_rev(tp) != ASIC_REV_5701) {
15078 tg3_flag_set(tp, NVRAM);
15079
15080 if (tg3_nvram_lock(tp)) {
15081 netdev_warn(tp->dev,
15082 "Cannot get nvram lock, %s failed\n",
15083 __func__);
15084 return;
15085 }
15086 tg3_enable_nvram_access(tp);
15087
15088 tp->nvram_size = 0;
15089
15090 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15091 tg3_get_5752_nvram_info(tp);
15092 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15093 tg3_get_5755_nvram_info(tp);
15094 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15095 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15096 tg3_asic_rev(tp) == ASIC_REV_5785)
15097 tg3_get_5787_nvram_info(tp);
15098 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15099 tg3_get_5761_nvram_info(tp);
15100 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15101 tg3_get_5906_nvram_info(tp);
15102 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15103 tg3_flag(tp, 57765_CLASS))
15104 tg3_get_57780_nvram_info(tp);
15105 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15106 tg3_asic_rev(tp) == ASIC_REV_5719)
15107 tg3_get_5717_nvram_info(tp);
15108 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15109 tg3_asic_rev(tp) == ASIC_REV_5762)
15110 tg3_get_5720_nvram_info(tp);
15111 else
15112 tg3_get_nvram_info(tp);
15113
15114 if (tp->nvram_size == 0)
15115 tg3_get_nvram_size(tp);
15116
15117 tg3_disable_nvram_access(tp);
15118 tg3_nvram_unlock(tp);
15119
15120 } else {
15121 tg3_flag_clear(tp, NVRAM);
15122 tg3_flag_clear(tp, NVRAM_BUFFERED);
15123
15124 tg3_get_eeprom_size(tp);
15125 }
15126 }
15127
15128 struct subsys_tbl_ent {
15129 u16 subsys_vendor, subsys_devid;
15130 u32 phy_id;
15131 };
15132
15133 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15134 /* Broadcom boards. */
15135 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15136 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15137 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15138 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15139 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15140 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15141 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15142 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15143 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15144 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15145 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15146 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15147 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15148 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15149 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15150 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15151 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15152 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15153 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15154 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15155 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15156 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15157
15158 /* 3com boards. */
15159 { TG3PCI_SUBVENDOR_ID_3COM,
15160 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15161 { TG3PCI_SUBVENDOR_ID_3COM,
15162 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15163 { TG3PCI_SUBVENDOR_ID_3COM,
15164 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15165 { TG3PCI_SUBVENDOR_ID_3COM,
15166 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15167 { TG3PCI_SUBVENDOR_ID_3COM,
15168 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15169
15170 /* DELL boards. */
15171 { TG3PCI_SUBVENDOR_ID_DELL,
15172 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15173 { TG3PCI_SUBVENDOR_ID_DELL,
15174 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15175 { TG3PCI_SUBVENDOR_ID_DELL,
15176 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15177 { TG3PCI_SUBVENDOR_ID_DELL,
15178 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15179
15180 /* Compaq boards. */
15181 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15182 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15183 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15184 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15185 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15186 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15187 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15188 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15189 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15190 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15191
15192 /* IBM boards. */
15193 { TG3PCI_SUBVENDOR_ID_IBM,
15194 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15195 };
15196
15197 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15198 {
15199 int i;
15200
15201 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15202 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15203 tp->pdev->subsystem_vendor) &&
15204 (subsys_id_to_phy_id[i].subsys_devid ==
15205 tp->pdev->subsystem_device))
15206 return &subsys_id_to_phy_id[i];
15207 }
15208 return NULL;
15209 }
15210
15211 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15212 {
15213 u32 val;
15214
15215 tp->phy_id = TG3_PHY_ID_INVALID;
15216 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15217
15218 /* Assume an onboard, WOL-capable device by default. */
15219 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15220 tg3_flag_set(tp, WOL_CAP);
15221
15222 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15223 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15224 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15225 tg3_flag_set(tp, IS_NIC);
15226 }
15227 val = tr32(VCPU_CFGSHDW);
15228 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15229 tg3_flag_set(tp, ASPM_WORKAROUND);
15230 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15231 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15232 tg3_flag_set(tp, WOL_ENABLE);
15233 device_set_wakeup_enable(&tp->pdev->dev, true);
15234 }
15235 goto done;
15236 }
15237
15238 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15239 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15240 u32 nic_cfg, led_cfg;
15241 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15242 u32 nic_phy_id, ver, eeprom_phy_id;
15243 int eeprom_phy_serdes = 0;
15244
15245 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15246 tp->nic_sram_data_cfg = nic_cfg;
15247
15248 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15249 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15250 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15251 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15252 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15253 (ver > 0) && (ver < 0x100))
15254 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15255
15256 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15257 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15258
15259 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15260 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15261 tg3_asic_rev(tp) == ASIC_REV_5720)
15262 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15263
15264 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15265 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15266 eeprom_phy_serdes = 1;
15267
15268 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15269 if (nic_phy_id != 0) {
15270 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15271 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15272
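			/* Assemble the SRAM ID words into tg3's internal
			 * PHY ID layout; the MII ID registers are packed
			 * the same way in tg3_phy_probe().
			 */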
15273 eeprom_phy_id = (id1 >> 16) << 10;
15274 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15275 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15276 } else
15277 eeprom_phy_id = 0;
15278
15279 tp->phy_id = eeprom_phy_id;
15280 if (eeprom_phy_serdes) {
15281 if (!tg3_flag(tp, 5705_PLUS))
15282 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15283 else
15284 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15285 }
15286
15287 if (tg3_flag(tp, 5750_PLUS))
15288 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15289 SHASTA_EXT_LED_MODE_MASK);
15290 else
15291 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15292
15293 switch (led_cfg) {
15294 default:
15295 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15296 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15297 break;
15298
15299 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15300 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15301 break;
15302
15303 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15304 tp->led_ctrl = LED_CTRL_MODE_MAC;
15305
15306 /* Default to PHY_1_MODE if 0 (MAC_MODE) is read;
15307 * some older 5700/5701 bootcode leaves it that way.
15308 */
15309 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15310 tg3_asic_rev(tp) == ASIC_REV_5701)
15311 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15312
15313 break;
15314
15315 case SHASTA_EXT_LED_SHARED:
15316 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15317 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15318 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15319 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15320 LED_CTRL_MODE_PHY_2);
15321
15322 if (tg3_flag(tp, 5717_PLUS) ||
15323 tg3_asic_rev(tp) == ASIC_REV_5762)
15324 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15325 LED_CTRL_BLINK_RATE_MASK;
15326
15327 break;
15328
15329 case SHASTA_EXT_LED_MAC:
15330 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15331 break;
15332
15333 case SHASTA_EXT_LED_COMBO:
15334 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15335 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15336 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15337 LED_CTRL_MODE_PHY_2);
15338 break;
15339
15340 }
15341
15342 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15343 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15344 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15345 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15346
15347 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15348 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15349
15350 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15351 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15352 if ((tp->pdev->subsystem_vendor ==
15353 PCI_VENDOR_ID_ARIMA) &&
15354 (tp->pdev->subsystem_device == 0x205a ||
15355 tp->pdev->subsystem_device == 0x2063))
15356 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15357 } else {
15358 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15359 tg3_flag_set(tp, IS_NIC);
15360 }
15361
15362 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15363 tg3_flag_set(tp, ENABLE_ASF);
15364 if (tg3_flag(tp, 5750_PLUS))
15365 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15366 }
15367
15368 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15369 tg3_flag(tp, 5750_PLUS))
15370 tg3_flag_set(tp, ENABLE_APE);
15371
15372 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15373 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15374 tg3_flag_clear(tp, WOL_CAP);
15375
15376 if (tg3_flag(tp, WOL_CAP) &&
15377 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15378 tg3_flag_set(tp, WOL_ENABLE);
15379 device_set_wakeup_enable(&tp->pdev->dev, true);
15380 }
15381
15382 if (cfg2 & (1 << 17))
15383 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15384
15385 /* SerDes signal pre-emphasis in register 0x590 is set
15386 * by the bootcode if bit 18 is set. */
15387 if (cfg2 & (1 << 18))
15388 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15389
15390 if ((tg3_flag(tp, 57765_PLUS) ||
15391 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15392 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15393 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15394 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15395
15396 if (tg3_flag(tp, PCI_EXPRESS)) {
15397 u32 cfg3;
15398
15399 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15400 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15401 !tg3_flag(tp, 57765_PLUS) &&
15402 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15403 tg3_flag_set(tp, ASPM_WORKAROUND);
15404 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15405 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15406 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15407 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15408 }
15409
15410 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15411 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15412 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15413 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15414 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15415 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15416
15417 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15418 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15419 }
15420 done:
15421 if (tg3_flag(tp, WOL_CAP))
15422 device_set_wakeup_enable(&tp->pdev->dev,
15423 tg3_flag(tp, WOL_ENABLE));
15424 else
15425 device_set_wakeup_capable(&tp->pdev->dev, false);
15426 }
15427
15428 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15429 {
15430 int i, err;
15431 u32 val2, off = offset * 8;
15432
15433 err = tg3_nvram_lock(tp);
15434 if (err)
15435 return err;
15436
15437 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15438 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15439 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15440 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15441 udelay(10);
15442
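	/* Poll for completion: up to 100 iterations of 10 us each,
	 * i.e. a worst-case wait of roughly 1 ms.
	 */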
15443 for (i = 0; i < 100; i++) {
15444 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15445 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15446 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15447 break;
15448 }
15449 udelay(10);
15450 }
15451
15452 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15453
15454 tg3_nvram_unlock(tp);
15455 if (val2 & APE_OTP_STATUS_CMD_DONE)
15456 return 0;
15457
15458 return -EBUSY;
15459 }
15460
15461 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15462 {
15463 int i;
15464 u32 val;
15465
15466 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15467 tw32(OTP_CTRL, cmd);
15468
15469 /* Wait for up to 1 ms for command to execute. */
15470 for (i = 0; i < 100; i++) {
15471 val = tr32(OTP_STATUS);
15472 if (val & OTP_STATUS_CMD_DONE)
15473 break;
15474 udelay(10);
15475 }
15476
15477 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15478 }
15479
15480 /* Read the gphy configuration from the OTP region of the chip. The gphy
15481 * configuration is a 32-bit value that straddles the alignment boundary.
15482 * We do two 32-bit reads and then shift and merge the results.
15483 */
15484 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15485 {
15486 u32 bhalf_otp, thalf_otp;
15487
15488 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15489
15490 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15491 return 0;
15492
15493 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15494
15495 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15496 return 0;
15497
15498 thalf_otp = tr32(OTP_READ_DATA);
15499
15500 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15501
15502 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15503 return 0;
15504
15505 bhalf_otp = tr32(OTP_READ_DATA);
15506
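	/* Merge the two halves; e.g. thalf_otp = 0x1234abcd and
	 * bhalf_otp = 0x5678ef01 would yield 0xabcd5678.
	 */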
15507 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15508 }
15509
15510 static void tg3_phy_init_link_config(struct tg3 *tp)
15511 {
15512 u32 adv = ADVERTISED_Autoneg;
15513
15514 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15515 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15516 adv |= ADVERTISED_1000baseT_Half;
15517 adv |= ADVERTISED_1000baseT_Full;
15518 }
15519
15520 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15521 adv |= ADVERTISED_100baseT_Half |
15522 ADVERTISED_100baseT_Full |
15523 ADVERTISED_10baseT_Half |
15524 ADVERTISED_10baseT_Full |
15525 ADVERTISED_TP;
15526 else
15527 adv |= ADVERTISED_FIBRE;
15528
15529 tp->link_config.advertising = adv;
15530 tp->link_config.speed = SPEED_UNKNOWN;
15531 tp->link_config.duplex = DUPLEX_UNKNOWN;
15532 tp->link_config.autoneg = AUTONEG_ENABLE;
15533 tp->link_config.active_speed = SPEED_UNKNOWN;
15534 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15535
15536 tp->old_link = -1;
15537 }
15538
15539 static int tg3_phy_probe(struct tg3 *tp)
15540 {
15541 u32 hw_phy_id_1, hw_phy_id_2;
15542 u32 hw_phy_id, hw_phy_id_masked;
15543 int err;
15544
15545 /* flow control autonegotiation is default behavior */
15546 tg3_flag_set(tp, PAUSE_AUTONEG);
15547 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15548
15549 if (tg3_flag(tp, ENABLE_APE)) {
15550 switch (tp->pci_fn) {
15551 case 0:
15552 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15553 break;
15554 case 1:
15555 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15556 break;
15557 case 2:
15558 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15559 break;
15560 case 3:
15561 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15562 break;
15563 }
15564 }
15565
15566 if (!tg3_flag(tp, ENABLE_ASF) &&
15567 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15568 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15569 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15570 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15571
15572 if (tg3_flag(tp, USE_PHYLIB))
15573 return tg3_phy_init(tp);
15574
15575 /* Reading the PHY ID register can conflict with ASF
15576 * firmware access to the PHY hardware.
15577 */
15578 err = 0;
15579 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15580 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15581 } else {
15582 /* Now read the physical PHY_ID from the chip and verify
15583 * that it is sane. If it doesn't look good, we fall back
15584 * first to the PHY_ID found in the eeprom area and, failing
15585 * that, to the hard-coded subsystem-ID table.
15586 */
15587 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15588 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15589
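		/* Same internal PHY ID packing as for the eeprom-provided
		 * ID in tg3_get_eeprom_hw_cfg().
		 */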
15590 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15591 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15592 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15593
15594 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15595 }
15596
15597 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15598 tp->phy_id = hw_phy_id;
15599 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15600 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15601 else
15602 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15603 } else {
15604 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15605 /* Do nothing, phy ID already set up in
15606 * tg3_get_eeprom_hw_cfg().
15607 */
15608 } else {
15609 struct subsys_tbl_ent *p;
15610
15611 /* No eeprom signature? Try the hardcoded
15612 * subsys device table.
15613 */
15614 p = tg3_lookup_by_subsys(tp);
15615 if (p) {
15616 tp->phy_id = p->phy_id;
15617 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15618 /* So far we have seen the IDs 0xbc050cd0,
15619 * 0xbc050f80 and 0xbc050c30 on devices
15620 * connected to a BCM4785, and there are
15621 * probably more. For now, just assume
15622 * that the PHY is supported when it is
15623 * connected to an SSB core.
15624 */
15625 return -ENODEV;
15626 }
15627
15628 if (!tp->phy_id ||
15629 tp->phy_id == TG3_PHY_ID_BCM8002)
15630 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15631 }
15632 }
15633
15634 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15635 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15636 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15637 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15638 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15639 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15640 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15641 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15642 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15643 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15644
15645 linkmode_zero(tp->eee.supported);
15646 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15647 tp->eee.supported);
15648 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15649 tp->eee.supported);
15650 linkmode_copy(tp->eee.advertised, tp->eee.supported);
15651
15652 tp->eee.eee_enabled = 1;
15653 tp->eee.tx_lpi_enabled = 1;
15654 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15655 }
15656
15657 tg3_phy_init_link_config(tp);
15658
15659 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15660 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15661 !tg3_flag(tp, ENABLE_APE) &&
15662 !tg3_flag(tp, ENABLE_ASF)) {
15663 u32 bmsr, dummy;
15664
15665 tg3_readphy(tp, MII_BMSR, &bmsr);
15666 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15667 (bmsr & BMSR_LSTATUS))
15668 goto skip_phy_reset;
15669
15670 err = tg3_phy_reset(tp);
15671 if (err)
15672 return err;
15673
15674 tg3_phy_set_wirespeed(tp);
15675
15676 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15677 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15678 tp->link_config.flowctrl);
15679
15680 tg3_writephy(tp, MII_BMCR,
15681 BMCR_ANENABLE | BMCR_ANRESTART);
15682 }
15683 }
15684
15685 skip_phy_reset:
15686 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15687 err = tg3_init_5401phy_dsp(tp);
15688 if (err)
15689 return err;
15690
15691 err = tg3_init_5401phy_dsp(tp);
15692 }
15693
15694 return err;
15695 }
15696
15697 static void tg3_read_vpd(struct tg3 *tp)
15698 {
15699 u8 *vpd_data;
15700 unsigned int len, vpdlen;
15701 int i;
15702
15703 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15704 if (!vpd_data)
15705 goto out_no_vpd;
15706
15707 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15708 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15709 if (i < 0)
15710 goto partno;
15711
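	/* "1028" is Dell's PCI vendor ID (0x1028) rendered in ASCII. */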
15712 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15713 goto partno;
15714
15715 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15716 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15717 if (i < 0)
15718 goto partno;
15719
15720 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15721 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15722
15723 partno:
15724 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15725 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15726 if (i < 0)
15727 goto out_not_found;
15728
15729 if (len > TG3_BPN_SIZE)
15730 goto out_not_found;
15731
15732 memcpy(tp->board_part_number, &vpd_data[i], len);
15733
15734 out_not_found:
15735 kfree(vpd_data);
15736 if (tp->board_part_number[0])
15737 return;
15738
15739 out_no_vpd:
15740 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15741 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15742 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15743 strcpy(tp->board_part_number, "BCM5717");
15744 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15745 strcpy(tp->board_part_number, "BCM5718");
15746 else
15747 goto nomatch;
15748 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15749 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15750 strcpy(tp->board_part_number, "BCM57780");
15751 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15752 strcpy(tp->board_part_number, "BCM57760");
15753 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15754 strcpy(tp->board_part_number, "BCM57790");
15755 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15756 strcpy(tp->board_part_number, "BCM57788");
15757 else
15758 goto nomatch;
15759 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15760 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15761 strcpy(tp->board_part_number, "BCM57761");
15762 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15763 strcpy(tp->board_part_number, "BCM57765");
15764 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15765 strcpy(tp->board_part_number, "BCM57781");
15766 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15767 strcpy(tp->board_part_number, "BCM57785");
15768 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15769 strcpy(tp->board_part_number, "BCM57791");
15770 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15771 strcpy(tp->board_part_number, "BCM57795");
15772 else
15773 goto nomatch;
15774 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15775 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15776 strcpy(tp->board_part_number, "BCM57762");
15777 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15778 strcpy(tp->board_part_number, "BCM57766");
15779 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15780 strcpy(tp->board_part_number, "BCM57782");
15781 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15782 strcpy(tp->board_part_number, "BCM57786");
15783 else
15784 goto nomatch;
15785 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15786 strcpy(tp->board_part_number, "BCM95906");
15787 } else {
15788 nomatch:
15789 strcpy(tp->board_part_number, "none");
15790 }
15791 }
15792
15793 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15794 {
15795 u32 val;
15796
15797 if (tg3_nvram_read(tp, offset, &val) ||
15798 (val & 0xfc000000) != 0x0c000000 ||
15799 tg3_nvram_read(tp, offset + 4, &val) ||
15800 val != 0)
15801 return 0;
15802
15803 return 1;
15804 }
15805
15806 static void tg3_read_bc_ver(struct tg3 *tp)
15807 {
15808 u32 val, offset, start, ver_offset;
15809 int i, dst_off;
15810 bool newver = false;
15811
15812 if (tg3_nvram_read(tp, 0xc, &offset) ||
15813 tg3_nvram_read(tp, 0x4, &start))
15814 return;
15815
15816 offset = tg3_nvram_logical_addr(tp, offset);
15817
15818 if (tg3_nvram_read(tp, offset, &val))
15819 return;
15820
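	/* Same image signature test as tg3_fw_img_is_valid(). */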
15821 if ((val & 0xfc000000) == 0x0c000000) {
15822 if (tg3_nvram_read(tp, offset + 4, &val))
15823 return;
15824
15825 if (val == 0)
15826 newver = true;
15827 }
15828
15829 dst_off = strlen(tp->fw_ver);
15830
15831 if (newver) {
15832 if (TG3_VER_SIZE - dst_off < 16 ||
15833 tg3_nvram_read(tp, offset + 8, &ver_offset))
15834 return;
15835
15836 offset = offset + ver_offset - start;
15837 for (i = 0; i < 16; i += 4) {
15838 __be32 v;
15839 if (tg3_nvram_read_be32(tp, offset + i, &v))
15840 return;
15841
15842 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15843 }
15844 } else {
15845 u32 major, minor;
15846
15847 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15848 return;
15849
15850 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15851 TG3_NVM_BCVER_MAJSFT;
15852 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15853 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15854 "v%d.%02d", major, minor);
15855 }
15856 }
15857
15858 static void tg3_read_hwsb_ver(struct tg3 *tp)
15859 {
15860 u32 val, major, minor;
15861
15862 /* Use native endian representation */
15863 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15864 return;
15865
15866 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15867 TG3_NVM_HWSB_CFG1_MAJSFT;
15868 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15869 TG3_NVM_HWSB_CFG1_MINSFT;
15870
15871 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15872 }
15873
15874 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15875 {
15876 u32 offset, major, minor, build;
15877
15878 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15879
15880 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15881 return;
15882
15883 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15884 case TG3_EEPROM_SB_REVISION_0:
15885 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15886 break;
15887 case TG3_EEPROM_SB_REVISION_2:
15888 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15889 break;
15890 case TG3_EEPROM_SB_REVISION_3:
15891 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15892 break;
15893 case TG3_EEPROM_SB_REVISION_4:
15894 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15895 break;
15896 case TG3_EEPROM_SB_REVISION_5:
15897 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15898 break;
15899 case TG3_EEPROM_SB_REVISION_6:
15900 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15901 break;
15902 default:
15903 return;
15904 }
15905
15906 if (tg3_nvram_read(tp, offset, &val))
15907 return;
15908
15909 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15910 TG3_EEPROM_SB_EDH_BLD_SHFT;
15911 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15912 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15913 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15914
15915 if (minor > 99 || build > 26)
15916 return;
15917
15918 offset = strlen(tp->fw_ver);
15919 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15920 " v%d.%02d", major, minor);
15921
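	/* Builds 1 through 26 map to an 'a'..'z' suffix; build 0
	 * gets none.
	 */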
15922 if (build > 0) {
15923 offset = strlen(tp->fw_ver);
15924 if (offset < TG3_VER_SIZE - 1)
15925 tp->fw_ver[offset] = 'a' + build - 1;
15926 }
15927 }
15928
15929 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15930 {
15931 u32 val, offset, start;
15932 int i, vlen;
15933
15934 for (offset = TG3_NVM_DIR_START;
15935 offset < TG3_NVM_DIR_END;
15936 offset += TG3_NVM_DIRENT_SIZE) {
15937 if (tg3_nvram_read(tp, offset, &val))
15938 return;
15939
15940 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15941 break;
15942 }
15943
15944 if (offset == TG3_NVM_DIR_END)
15945 return;
15946
15947 if (!tg3_flag(tp, 5705_PLUS))
15948 start = 0x08000000;
15949 else if (tg3_nvram_read(tp, offset - 4, &start))
15950 return;
15951
15952 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15953 !tg3_fw_img_is_valid(tp, offset) ||
15954 tg3_nvram_read(tp, offset + 8, &val))
15955 return;
15956
15957 offset += val - start;
15958
15959 vlen = strlen(tp->fw_ver);
15960
15961 tp->fw_ver[vlen++] = ',';
15962 tp->fw_ver[vlen++] = ' ';
15963
15964 for (i = 0; i < 4; i++) {
15965 __be32 v;
15966 if (tg3_nvram_read_be32(tp, offset, &v))
15967 return;
15968
15969 offset += sizeof(v);
15970
15971 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15972 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15973 break;
15974 }
15975
15976 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15977 vlen += sizeof(v);
15978 }
15979 }
15980
15981 static void tg3_probe_ncsi(struct tg3 *tp)
15982 {
15983 u32 apedata;
15984
15985 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15986 if (apedata != APE_SEG_SIG_MAGIC)
15987 return;
15988
15989 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15990 if (!(apedata & APE_FW_STATUS_READY))
15991 return;
15992
15993 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15994 tg3_flag_set(tp, APE_HAS_NCSI);
15995 }
15996
15997 static void tg3_read_dash_ver(struct tg3 *tp)
15998 {
15999 int vlen;
16000 u32 apedata;
16001 char *fwtype;
16002
16003 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16004
16005 if (tg3_flag(tp, APE_HAS_NCSI))
16006 fwtype = "NCSI";
16007 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16008 fwtype = "SMASH";
16009 else
16010 fwtype = "DASH";
16011
16012 vlen = strlen(tp->fw_ver);
16013
16014 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16015 fwtype,
16016 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16017 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16018 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16019 (apedata & APE_FW_VERSION_BLDMSK));
16020 }
16021
16022 static void tg3_read_otp_ver(struct tg3 *tp)
16023 {
16024 u32 val, val2;
16025
16026 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16027 return;
16028
16029 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16030 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16031 TG3_OTP_MAGIC0_VALID(val)) {
16032 u64 val64 = (u64) val << 32 | val2;
16033 u32 ver = 0;
16034 int i, vlen;
16035
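		/* Walk the value a byte at a time from the least
		 * significant end; ver is left holding the last non-zero
		 * byte before the terminator.
		 */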
16036 for (i = 0; i < 7; i++) {
16037 if ((val64 & 0xff) == 0)
16038 break;
16039 ver = val64 & 0xff;
16040 val64 >>= 8;
16041 }
16042 vlen = strlen(tp->fw_ver);
16043 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16044 }
16045 }
16046
16047 static void tg3_read_fw_ver(struct tg3 *tp)
16048 {
16049 u32 val;
16050 bool vpd_vers = false;
16051
16052 if (tp->fw_ver[0] != 0)
16053 vpd_vers = true;
16054
16055 if (tg3_flag(tp, NO_NVRAM)) {
16056 strcat(tp->fw_ver, "sb");
16057 tg3_read_otp_ver(tp);
16058 return;
16059 }
16060
16061 if (tg3_nvram_read(tp, 0, &val))
16062 return;
16063
16064 if (val == TG3_EEPROM_MAGIC)
16065 tg3_read_bc_ver(tp);
16066 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16067 tg3_read_sb_ver(tp, val);
16068 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16069 tg3_read_hwsb_ver(tp);
16070
16071 if (tg3_flag(tp, ENABLE_ASF)) {
16072 if (tg3_flag(tp, ENABLE_APE)) {
16073 tg3_probe_ncsi(tp);
16074 if (!vpd_vers)
16075 tg3_read_dash_ver(tp);
16076 } else if (!vpd_vers) {
16077 tg3_read_mgmtfw_ver(tp);
16078 }
16079 }
16080
16081 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16082 }
16083
16084 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16085 {
16086 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16087 return TG3_RX_RET_MAX_SIZE_5717;
16088 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16089 return TG3_RX_RET_MAX_SIZE_5700;
16090 else
16091 return TG3_RX_RET_MAX_SIZE_5705;
16092 }
16093
16094 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16095 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16096 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16097 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16098 { },
16099 };
16100
16101 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16102 {
16103 struct pci_dev *peer;
16104 unsigned int func, devnr = tp->pdev->devfn & ~7;
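	/* devfn packs slot and function (slot << 3 | func); masking off
	 * the low three bits yields the devfn of function 0 in the same
	 * slot, from which all eight functions are probed.
	 */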
16105
16106 for (func = 0; func < 8; func++) {
16107 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16108 if (peer && peer != tp->pdev)
16109 break;
16110 pci_dev_put(peer);
16111 }
16112 /* 5704 can be configured in single-port mode; set peer to
16113 * tp->pdev in that case.
16114 */
16115 if (!peer) {
16116 peer = tp->pdev;
16117 return peer;
16118 }
16119
16120 /*
16121 * We don't need to keep the refcount elevated; there's no way
16122 * to remove one half of this device without removing the other.
16123 */
16124 pci_dev_put(peer);
16125
16126 return peer;
16127 }
16128
16129 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16130 {
16131 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
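	/* tg3_asic_rev() and tg3_chip_rev() are both derived from this
	 * chip revision ID.
	 */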
16132 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16133 u32 reg;
16134
16135 /* All devices that use the alternate
16136 * ASIC REV location have a CPMU.
16137 */
16138 tg3_flag_set(tp, CPMU_PRESENT);
16139
16140 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16141 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16142 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16143 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16144 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16145 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16146 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16147 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16148 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16149 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16150 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16151 reg = TG3PCI_GEN2_PRODID_ASICREV;
16152 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16153 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16154 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16155 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16156 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16157 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16159 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16160 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16161 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16162 reg = TG3PCI_GEN15_PRODID_ASICREV;
16163 else
16164 reg = TG3PCI_PRODID_ASICREV;
16165
16166 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16167 }
16168
16169 /* Wrong chip ID in 5752 A0. This code can be removed later
16170 * as A0 is not in production.
16171 */
16172 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16173 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16174
16175 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16176 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16177
16178 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16179 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16180 tg3_asic_rev(tp) == ASIC_REV_5720)
16181 tg3_flag_set(tp, 5717_PLUS);
16182
16183 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16184 tg3_asic_rev(tp) == ASIC_REV_57766)
16185 tg3_flag_set(tp, 57765_CLASS);
16186
16187 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16188 tg3_asic_rev(tp) == ASIC_REV_5762)
16189 tg3_flag_set(tp, 57765_PLUS);
16190
16191 /* Intentionally exclude ASIC_REV_5906 */
16192 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16193 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16194 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16195 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16196 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16197 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16198 tg3_flag(tp, 57765_PLUS))
16199 tg3_flag_set(tp, 5755_PLUS);
16200
16201 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16202 tg3_asic_rev(tp) == ASIC_REV_5714)
16203 tg3_flag_set(tp, 5780_CLASS);
16204
16205 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16206 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16207 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16208 tg3_flag(tp, 5755_PLUS) ||
16209 tg3_flag(tp, 5780_CLASS))
16210 tg3_flag_set(tp, 5750_PLUS);
16211
16212 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16213 tg3_flag(tp, 5750_PLUS))
16214 tg3_flag_set(tp, 5705_PLUS);
16215 }
16216
16217 static bool tg3_10_100_only_device(struct tg3 *tp,
16218 const struct pci_device_id *ent)
16219 {
16220 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16221
16222 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16223 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16224 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16225 return true;
16226
16227 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16228 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16229 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16230 return true;
16231 } else {
16232 return true;
16233 }
16234 }
16235
16236 return false;
16237 }
16238
16239 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16240 {
16241 u32 misc_ctrl_reg;
16242 u32 pci_state_reg, grc_misc_cfg;
16243 u32 val;
16244 u16 pci_cmd;
16245 int err;
16246
16247 /* Force memory write invalidate off. If we leave it on,
16248 * then on 5700_BX chips we have to enable a workaround.
16249 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16250 * to match the cacheline size. The Broadcom driver has this
16251 * workaround but turns MWI off all the time, so it never uses
16252 * it. This seems to suggest that the workaround is insufficient.
16253 */
16254 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16255 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16256 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16257
16258 /* Important! -- Make sure register accesses are byteswapped
16259 * correctly. Also, for those chips that require it, make
16260 * sure that indirect register accesses are enabled before
16261 * the first operation.
16262 */
16263 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16264 &misc_ctrl_reg);
16265 tp->misc_host_ctrl |= (misc_ctrl_reg &
16266 MISC_HOST_CTRL_CHIPREV);
16267 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16268 tp->misc_host_ctrl);
16269
16270 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16271
16272 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16273 * we need to disable memory and use config. cycles
16274 * only to access all registers. The 5702/03 chips
16275 * can mistakenly decode the special cycles from the
16276 * ICH chipsets as memory write cycles, causing corruption
16277 * of register and memory space. Only certain ICH bridges
16278 * will drive special cycles with non-zero data during the
16279 * address phase which can fall within the 5703's address
16280 * range. This is not an ICH bug as the PCI spec allows
16281 * non-zero address during special cycles. However, only
16282 * these ICH bridges are known to drive non-zero addresses
16283 * during special cycles.
16284 *
16285 * Since special cycles do not cross PCI bridges, we only
16286 * enable this workaround if the 5703 is on the secondary
16287 * bus of these ICH bridges.
16288 */
16289 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16290 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16291 static struct tg3_dev_id {
16292 u32 vendor;
16293 u32 device;
16294 u32 rev;
16295 } ich_chipsets[] = {
16296 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16297 PCI_ANY_ID },
16298 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16299 PCI_ANY_ID },
16300 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16301 0xa },
16302 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16303 PCI_ANY_ID },
16304 { },
16305 };
16306 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16307 struct pci_dev *bridge = NULL;
16308
16309 while (pci_id->vendor != 0) {
16310 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16311 bridge);
16312 if (!bridge) {
16313 pci_id++;
16314 continue;
16315 }
16316 if (pci_id->rev != PCI_ANY_ID) {
16317 if (bridge->revision > pci_id->rev)
16318 continue;
16319 }
16320 if (bridge->subordinate &&
16321 (bridge->subordinate->number ==
16322 tp->pdev->bus->number)) {
16323 tg3_flag_set(tp, ICH_WORKAROUND);
16324 pci_dev_put(bridge);
16325 break;
16326 }
16327 }
16328 }
16329
16330 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16331 static struct tg3_dev_id {
16332 u32 vendor;
16333 u32 device;
16334 } bridge_chipsets[] = {
16335 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16336 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16337 { },
16338 };
16339 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16340 struct pci_dev *bridge = NULL;
16341
16342 while (pci_id->vendor != 0) {
16343 bridge = pci_get_device(pci_id->vendor,
16344 pci_id->device,
16345 bridge);
16346 if (!bridge) {
16347 pci_id++;
16348 continue;
16349 }
16350 if (bridge->subordinate &&
16351 (bridge->subordinate->number <=
16352 tp->pdev->bus->number) &&
16353 (bridge->subordinate->busn_res.end >=
16354 tp->pdev->bus->number)) {
16355 tg3_flag_set(tp, 5701_DMA_BUG);
16356 pci_dev_put(bridge);
16357 break;
16358 }
16359 }
16360 }
16361
16362 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16363 * DMA addresses > 40-bit. This bridge may have additional
16364 * 57xx devices behind it, in some 4-port NIC designs for example.
16365 * Any tg3 device found behind the bridge will also need the 40-bit
16366 * DMA workaround.
16367 */
16368 if (tg3_flag(tp, 5780_CLASS)) {
16369 tg3_flag_set(tp, 40BIT_DMA_BUG);
16370 tp->msi_cap = tp->pdev->msi_cap;
16371 } else {
16372 struct pci_dev *bridge = NULL;
16373
16374 do {
16375 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16376 PCI_DEVICE_ID_SERVERWORKS_EPB,
16377 bridge);
16378 if (bridge && bridge->subordinate &&
16379 (bridge->subordinate->number <=
16380 tp->pdev->bus->number) &&
16381 (bridge->subordinate->busn_res.end >=
16382 tp->pdev->bus->number)) {
16383 tg3_flag_set(tp, 40BIT_DMA_BUG);
16384 pci_dev_put(bridge);
16385 break;
16386 }
16387 } while (bridge);
16388 }
16389
16390 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16391 tg3_asic_rev(tp) == ASIC_REV_5714)
16392 tp->pdev_peer = tg3_find_peer(tp);
16393
16394 /* Determine TSO capabilities */
16395 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16396 ; /* Do nothing. HW bug. */
16397 else if (tg3_flag(tp, 57765_PLUS))
16398 tg3_flag_set(tp, HW_TSO_3);
16399 else if (tg3_flag(tp, 5755_PLUS) ||
16400 tg3_asic_rev(tp) == ASIC_REV_5906)
16401 tg3_flag_set(tp, HW_TSO_2);
16402 else if (tg3_flag(tp, 5750_PLUS)) {
16403 tg3_flag_set(tp, HW_TSO_1);
16404 tg3_flag_set(tp, TSO_BUG);
16405 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16406 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16407 tg3_flag_clear(tp, TSO_BUG);
16408 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16409 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16410 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16411 tg3_flag_set(tp, FW_TSO);
16412 tg3_flag_set(tp, TSO_BUG);
16413 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16414 tp->fw_needed = FIRMWARE_TG3TSO5;
16415 else
16416 tp->fw_needed = FIRMWARE_TG3TSO;
16417 }
16418
16419 /* Selectively allow TSO based on operating conditions */
16420 if (tg3_flag(tp, HW_TSO_1) ||
16421 tg3_flag(tp, HW_TSO_2) ||
16422 tg3_flag(tp, HW_TSO_3) ||
16423 tg3_flag(tp, FW_TSO)) {
16424 /* For firmware TSO, assume ASF is disabled.
16425 * We'll disable TSO later if we discover ASF
16426 * is enabled in tg3_get_eeprom_hw_cfg().
16427 */
16428 tg3_flag_set(tp, TSO_CAPABLE);
16429 } else {
16430 tg3_flag_clear(tp, TSO_CAPABLE);
16431 tg3_flag_clear(tp, TSO_BUG);
16432 tp->fw_needed = NULL;
16433 }
16434
16435 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16436 tp->fw_needed = FIRMWARE_TG3;
16437
16438 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16439 tp->fw_needed = FIRMWARE_TG357766;
16440
16441 tp->irq_max = 1;
16442
16443 if (tg3_flag(tp, 5750_PLUS)) {
16444 tg3_flag_set(tp, SUPPORT_MSI);
16445 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16446 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16447 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16448 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16449 tp->pdev_peer == tp->pdev))
16450 tg3_flag_clear(tp, SUPPORT_MSI);
16451
16452 if (tg3_flag(tp, 5755_PLUS) ||
16453 tg3_asic_rev(tp) == ASIC_REV_5906) {
16454 tg3_flag_set(tp, 1SHOT_MSI);
16455 }
16456
16457 if (tg3_flag(tp, 57765_PLUS)) {
16458 tg3_flag_set(tp, SUPPORT_MSIX);
16459 tp->irq_max = TG3_IRQ_MAX_VECS;
16460 }
16461 }
16462
16463 tp->txq_max = 1;
16464 tp->rxq_max = 1;
16465 if (tp->irq_max > 1) {
16466 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16467 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16468
16469 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16470 tg3_asic_rev(tp) == ASIC_REV_5720)
16471 tp->txq_max = tp->irq_max - 1;
16472 }
16473
16474 if (tg3_flag(tp, 5755_PLUS) ||
16475 tg3_asic_rev(tp) == ASIC_REV_5906)
16476 tg3_flag_set(tp, SHORT_DMA_BUG);
16477
16478 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16479 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16480
16481 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16482 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16483 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16484 tg3_asic_rev(tp) == ASIC_REV_5762)
16485 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16486
16487 if (tg3_flag(tp, 57765_PLUS) &&
16488 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16489 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16490
16491 if (!tg3_flag(tp, 5705_PLUS) ||
16492 tg3_flag(tp, 5780_CLASS) ||
16493 tg3_flag(tp, USE_JUMBO_BDFLAG))
16494 tg3_flag_set(tp, JUMBO_CAPABLE);
16495
16496 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16497 &pci_state_reg);
16498
16499 if (pci_is_pcie(tp->pdev)) {
16500 u16 lnkctl;
16501
16502 tg3_flag_set(tp, PCI_EXPRESS);
16503
16504 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16505 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16506 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16507 tg3_flag_clear(tp, HW_TSO_2);
16508 tg3_flag_clear(tp, TSO_CAPABLE);
16509 }
16510 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16511 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16512 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16513 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16514 tg3_flag_set(tp, CLKREQ_BUG);
16515 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16516 tg3_flag_set(tp, L1PLLPD_EN);
16517 }
16518 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16519 /* BCM5785 devices are effectively PCIe devices, and should
16520 * follow PCIe codepaths, but do not have a PCIe capabilities
16521 * section.
16522 */
16523 tg3_flag_set(tp, PCI_EXPRESS);
16524 } else if (!tg3_flag(tp, 5705_PLUS) ||
16525 tg3_flag(tp, 5780_CLASS)) {
16526 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16527 if (!tp->pcix_cap) {
16528 dev_err(&tp->pdev->dev,
16529 "Cannot find PCI-X capability, aborting\n");
16530 return -EIO;
16531 }
16532
16533 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16534 tg3_flag_set(tp, PCIX_MODE);
16535 }
16536
16537 /* If we have an AMD 762 or VIA K8T800 chipset, write
16538 * reordering to the mailbox registers done by the host
16539 * controller can cause major troubles. We read back from
16540 * every mailbox register write to force the writes to be
16541 * posted to the chip in order.
16542 */
16543 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16544 !tg3_flag(tp, PCI_EXPRESS))
16545 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16546
16547 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16548 &tp->pci_cacheline_sz);
16549 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16550 &tp->pci_lat_timer);
16551 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16552 tp->pci_lat_timer < 64) {
16553 tp->pci_lat_timer = 64;
16554 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16555 tp->pci_lat_timer);
16556 }
16557
16558 /* Important! -- It is critical that the PCI-X hw workaround
16559 * situation is decided before the first MMIO register access.
16560 */
16561 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16562 /* 5700 BX chips need to have their TX producer index
16563 * mailboxes written twice to work around a bug.
16564 */
16565 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16566
16567 /* If we are in PCI-X mode, enable register write workaround.
16568 *
16569 * The workaround is to use indirect register accesses
16570 * for all chip writes not to mailbox registers.
16571 */
16572 if (tg3_flag(tp, PCIX_MODE)) {
16573 u32 pm_reg;
16574
16575 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16576
16577 /* The chip can have its power management PCI config
16578 * space registers clobbered due to this bug.
16579 * So explicitly force the chip into D0 here.
16580 */
16581 pci_read_config_dword(tp->pdev,
16582 tp->pdev->pm_cap + PCI_PM_CTRL,
16583 &pm_reg);
16584 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16585 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16586 pci_write_config_dword(tp->pdev,
16587 tp->pdev->pm_cap + PCI_PM_CTRL,
16588 pm_reg);
16589
16590 /* Also, force SERR#/PERR# in PCI command. */
16591 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16592 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16593 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16594 }
16595 }
16596
16597 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16598 tg3_flag_set(tp, PCI_HIGH_SPEED);
16599 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16600 tg3_flag_set(tp, PCI_32BIT);
16601
16602 /* Chip-specific fixup from Broadcom driver */
16603 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16604 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16605 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16606 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16607 }
16608
16609 /* Default fast path register access methods */
16610 tp->read32 = tg3_read32;
16611 tp->write32 = tg3_write32;
16612 tp->read32_mbox = tg3_read32;
16613 tp->write32_mbox = tg3_write32;
16614 tp->write32_tx_mbox = tg3_write32;
16615 tp->write32_rx_mbox = tg3_write32;
16616
16617 /* Various workaround register access methods */
16618 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16619 tp->write32 = tg3_write_indirect_reg32;
16620 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16621 (tg3_flag(tp, PCI_EXPRESS) &&
16622 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16623 /*
16624 * Back to back register writes can cause problems on these
16625 * chips, the workaround is to read back all reg writes
16626 * except those to mailbox regs.
16627 *
16628 * See tg3_write_indirect_reg32().
16629 */
16630 tp->write32 = tg3_write_flush_reg32;
16631 }
16632
16633 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16634 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16635 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16636 tp->write32_rx_mbox = tg3_write_flush_reg32;
16637 }
16638
16639 if (tg3_flag(tp, ICH_WORKAROUND)) {
16640 tp->read32 = tg3_read_indirect_reg32;
16641 tp->write32 = tg3_write_indirect_reg32;
16642 tp->read32_mbox = tg3_read_indirect_mbox;
16643 tp->write32_mbox = tg3_write_indirect_mbox;
16644 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16645 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16646
16647 iounmap(tp->regs);
16648 tp->regs = NULL;
16649
16650 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16651 pci_cmd &= ~PCI_COMMAND_MEMORY;
16652 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16653 }
16654 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16655 tp->read32_mbox = tg3_read32_mbox_5906;
16656 tp->write32_mbox = tg3_write32_mbox_5906;
16657 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16658 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16659 }
16660
16661 if (tp->write32 == tg3_write_indirect_reg32 ||
16662 (tg3_flag(tp, PCIX_MODE) &&
16663 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16664 tg3_asic_rev(tp) == ASIC_REV_5701)))
16665 tg3_flag_set(tp, SRAM_USE_CONFIG);
16666
16667 /* The memory arbiter has to be enabled in order for SRAM accesses
16668 * to succeed. Normally on powerup the tg3 chip firmware will make
16669 * sure it is enabled, but other entities such as system netboot
16670 * code might disable it.
16671 */
16672 val = tr32(MEMARB_MODE);
16673 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16674
16675 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16676 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16677 tg3_flag(tp, 5780_CLASS)) {
16678 if (tg3_flag(tp, PCIX_MODE)) {
16679 pci_read_config_dword(tp->pdev,
16680 tp->pcix_cap + PCI_X_STATUS,
16681 &val);
16682 tp->pci_fn = val & 0x7;
16683 }
16684 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16685 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16686 tg3_asic_rev(tp) == ASIC_REV_5720) {
16687 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16688 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16689 val = tr32(TG3_CPMU_STATUS);
16690
16691 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16692 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16693 else
16694 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16695 TG3_CPMU_STATUS_FSHFT_5719;
16696 }
16697
16698 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16699 tp->write32_tx_mbox = tg3_write_flush_reg32;
16700 tp->write32_rx_mbox = tg3_write_flush_reg32;
16701 }
16702
16703 /* Get eeprom hw config before calling tg3_set_power_state().
16704 * In particular, the TG3_FLAG_IS_NIC flag must be
16705 * determined before calling tg3_set_power_state() so that
16706 * we know whether or not to switch out of Vaux power.
16707 * When the flag is set, it means that GPIO1 is used for eeprom
16708 * write protect and also implies that it is a LOM where GPIOs
16709 * are not used to switch power.
16710 */
16711 tg3_get_eeprom_hw_cfg(tp);
16712
16713 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16714 tg3_flag_clear(tp, TSO_CAPABLE);
16715 tg3_flag_clear(tp, TSO_BUG);
16716 tp->fw_needed = NULL;
16717 }
16718
16719 if (tg3_flag(tp, ENABLE_APE)) {
16720 /* Allow reads and writes to the
16721 * APE register and memory space.
16722 */
16723 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16724 PCISTATE_ALLOW_APE_SHMEM_WR |
16725 PCISTATE_ALLOW_APE_PSPACE_WR;
16726 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16727 pci_state_reg);
16728
16729 tg3_ape_lock_init(tp);
16730 tp->ape_hb_interval =
16731 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16732 }
16733
16734 /* Set up tp->grc_local_ctrl before calling
16735 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16736 * will bring 5700's external PHY out of reset.
16737 * It is also used as eeprom write protect on LOMs.
16738 */
16739 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16740 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16741 tg3_flag(tp, EEPROM_WRITE_PROT))
16742 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16743 GRC_LCLCTRL_GPIO_OUTPUT1);
16744 /* Unused GPIO3 must be driven as output on 5752 because there
16745 * are no pull-up resistors on unused GPIO pins.
16746 */
16747 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16748 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16749
16750 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16751 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16752 tg3_flag(tp, 57765_CLASS))
16753 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16754
16755 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16756 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16757 /* Turn off the debug UART. */
16758 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16759 if (tg3_flag(tp, IS_NIC))
16760 /* Keep VMain power. */
16761 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16762 GRC_LCLCTRL_GPIO_OUTPUT0;
16763 }
16764
16765 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16766 tp->grc_local_ctrl |=
16767 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16768
16769 /* Switch out of Vaux if it is a NIC */
16770 tg3_pwrsrc_switch_to_vmain(tp);
16771
16772 /* Derive initial jumbo mode from MTU assigned in
16773 * ether_setup() via the alloc_etherdev() call
16774 */
16775 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16776 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16777
16778 /* Determine WakeOnLan speed to use. */
16779 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16780 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16781 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16782 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16783 tg3_flag_clear(tp, WOL_SPEED_100MB);
16784 } else {
16785 tg3_flag_set(tp, WOL_SPEED_100MB);
16786 }
16787
16788 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16789 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16790
16791 /* A few boards don't want Ethernet@WireSpeed phy feature */
16792 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16793 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16794 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16795 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16796 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16797 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16798 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16799
16800 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16801 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16802 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16803 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16804 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16805
16806 if (tg3_flag(tp, 5705_PLUS) &&
16807 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16808 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16809 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16810 !tg3_flag(tp, 57765_PLUS)) {
16811 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16812 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16813 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16814 tg3_asic_rev(tp) == ASIC_REV_5761) {
16815 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16816 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16817 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16818 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16819 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16820 } else
16821 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16822 }
16823
16824 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16825 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16826 tp->phy_otp = tg3_read_otp_phycfg(tp);
16827 if (tp->phy_otp == 0)
16828 tp->phy_otp = TG3_OTP_DEFAULT;
16829 }
16830
16831 if (tg3_flag(tp, CPMU_PRESENT))
16832 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16833 else
16834 tp->mi_mode = MAC_MI_MODE_BASE;
16835
16836 tp->coalesce_mode = 0;
16837 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16838 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16839 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16840
16841 /* Set these bits to enable statistics workaround. */
16842 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16843 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16844 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16845 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16846 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16847 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16848 }
16849
16850 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16851 tg3_asic_rev(tp) == ASIC_REV_57780)
16852 tg3_flag_set(tp, USE_PHYLIB);
16853
16854 err = tg3_mdio_init(tp);
16855 if (err)
16856 return err;
16857
16858 /* Initialize data/descriptor byte/word swapping. */
16859 val = tr32(GRC_MODE);
16860 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16861 tg3_asic_rev(tp) == ASIC_REV_5762)
16862 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16863 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16864 GRC_MODE_B2HRX_ENABLE |
16865 GRC_MODE_HTX2B_ENABLE |
16866 GRC_MODE_HOST_STACKUP);
16867 else
16868 val &= GRC_MODE_HOST_STACKUP;
16869
16870 tw32(GRC_MODE, val | tp->grc_mode);
16871
16872 tg3_switch_clocks(tp);
16873
16874 /* Clear this out for sanity. */
16875 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16876
16877 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16878 tw32(TG3PCI_REG_BASE_ADDR, 0);
16879
16880 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16881 &pci_state_reg);
16882 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16883 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16884 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16885 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16886 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16887 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16888 void __iomem *sram_base;
16889
16890			/* Write some dummy words into the SRAM status block
16891			 * area and see if they read back correctly. If the
16892			 * readback is bad, force-enable the PCIX workaround.
16893			 */
16894 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16895
16896 writel(0x00000000, sram_base);
16897 writel(0x00000000, sram_base + 4);
16898 writel(0xffffffff, sram_base + 4);
16899 if (readl(sram_base) != 0x00000000)
16900 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16901 }
16902 }
16903
16904 udelay(50);
16905 tg3_nvram_init(tp);
16906
16907 /* If the device has an NVRAM, no need to load patch firmware */
16908 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16909 !tg3_flag(tp, NO_NVRAM))
16910 tp->fw_needed = NULL;
16911
16912 grc_misc_cfg = tr32(GRC_MISC_CFG);
16913 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16914
16915 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16916 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16917 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16918 tg3_flag_set(tp, IS_5788);
16919
16920 if (!tg3_flag(tp, IS_5788) &&
16921 tg3_asic_rev(tp) != ASIC_REV_5700)
16922 tg3_flag_set(tp, TAGGED_STATUS);
16923 if (tg3_flag(tp, TAGGED_STATUS)) {
16924 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16925 HOSTCC_MODE_CLRTICK_TXBD);
16926
16927 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16928 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16929 tp->misc_host_ctrl);
16930 }
16931
16932 /* Preserve the APE MAC_MODE bits */
16933 if (tg3_flag(tp, ENABLE_APE))
16934 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16935 else
16936 tp->mac_mode = 0;
16937
16938 if (tg3_10_100_only_device(tp, ent))
16939 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16940
16941 err = tg3_phy_probe(tp);
16942 if (err) {
16943 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16944 /* ... but do not return immediately ... */
16945 tg3_mdio_fini(tp);
16946 }
16947
16948 tg3_read_vpd(tp);
16949 tg3_read_fw_ver(tp);
16950
16951 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16952 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16953 } else {
16954 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16955 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16956 else
16957 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16958 }
16959
16960 /* 5700 {AX,BX} chips have a broken status block link
16961 * change bit implementation, so we must use the
16962 * status register in those cases.
16963 */
16964 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16965 tg3_flag_set(tp, USE_LINKCHG_REG);
16966 else
16967 tg3_flag_clear(tp, USE_LINKCHG_REG);
16968
16969	/* The led_ctrl is set during tg3_phy_probe; here we might
16970 * have to force the link status polling mechanism based
16971 * upon subsystem IDs.
16972 */
16973 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16974 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16975 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16976 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16977 tg3_flag_set(tp, USE_LINKCHG_REG);
16978 }
16979
16980 /* For all SERDES we poll the MAC status register. */
16981 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16982 tg3_flag_set(tp, POLL_SERDES);
16983 else
16984 tg3_flag_clear(tp, POLL_SERDES);
16985
16986 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16987 tg3_flag_set(tp, POLL_CPMU_LINK);
16988
16989 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16990 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16991 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16992 tg3_flag(tp, PCIX_MODE)) {
16993 tp->rx_offset = NET_SKB_PAD;
16994 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16995 tp->rx_copy_thresh = ~(u16)0;
16996 #endif
16997 }
16998
16999 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17000 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17001 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17002
17003 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17004
17005 /* Increment the rx prod index on the rx std ring by at most
17006	 * 8 for these chips to work around hw errata.
17007 */
17008 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17009 tg3_asic_rev(tp) == ASIC_REV_5752 ||
17010 tg3_asic_rev(tp) == ASIC_REV_5755)
17011 tp->rx_std_max_post = 8;
17012
17013 if (tg3_flag(tp, ASPM_WORKAROUND))
17014 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17015 PCIE_PWR_MGMT_L1_THRESH_MSK;
17016
17017 return err;
17018 }
17019
17020 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17021 {
17022 u32 hi, lo, mac_offset;
17023 int addr_ok = 0;
17024 int err;
17025
17026 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17027 return 0;
17028
17029 if (tg3_flag(tp, IS_SSB_CORE)) {
17030 err = ssb_gige_get_macaddr(tp->pdev, addr);
17031 if (!err && is_valid_ether_addr(addr))
17032 return 0;
17033 }
17034
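	/* 0x7c is the default NVRAM offset of the MAC address. Dual-MAC
	 * and multi-function parts keep additional copies at 0xcc and
	 * beyond, and the 5906 stores it at 0x10.
	 */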
17035 mac_offset = 0x7c;
17036 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17037 tg3_flag(tp, 5780_CLASS)) {
17038 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17039 mac_offset = 0xcc;
17040 if (tg3_nvram_lock(tp))
17041 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17042 else
17043 tg3_nvram_unlock(tp);
17044 } else if (tg3_flag(tp, 5717_PLUS)) {
17045 if (tp->pci_fn & 1)
17046 mac_offset = 0xcc;
17047 if (tp->pci_fn > 1)
17048 mac_offset += 0x18c;
17049 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17050 mac_offset = 0x10;
17051
17052 /* First try to get it from MAC address mailbox. */
17053 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
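	/* The upper 16 bits carry a 0x484b signature ("HK" in ASCII),
	 * which the bootcode deposits when a valid address is present.
	 */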
17054 if ((hi >> 16) == 0x484b) {
17055 addr[0] = (hi >> 8) & 0xff;
17056 addr[1] = (hi >> 0) & 0xff;
17057
17058 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17059 addr[2] = (lo >> 24) & 0xff;
17060 addr[3] = (lo >> 16) & 0xff;
17061 addr[4] = (lo >> 8) & 0xff;
17062 addr[5] = (lo >> 0) & 0xff;
17063
17064 /* Some old bootcode may report a 0 MAC address in SRAM */
17065 addr_ok = is_valid_ether_addr(addr);
17066 }
17067 if (!addr_ok) {
17068 /* Next, try NVRAM. */
17069 if (!tg3_flag(tp, NO_NVRAM) &&
17070 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17071 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17072 memcpy(&addr[0], ((char *)&hi) + 2, 2);
17073 memcpy(&addr[2], (char *)&lo, sizeof(lo));
17074 }
17075 /* Finally just fetch it out of the MAC control regs. */
17076 else {
17077 hi = tr32(MAC_ADDR_0_HIGH);
17078 lo = tr32(MAC_ADDR_0_LOW);
17079
17080 addr[5] = lo & 0xff;
17081 addr[4] = (lo >> 8) & 0xff;
17082 addr[3] = (lo >> 16) & 0xff;
17083 addr[2] = (lo >> 24) & 0xff;
17084 addr[1] = hi & 0xff;
17085 addr[0] = (hi >> 8) & 0xff;
17086 }
17087 }
17088
17089 if (!is_valid_ether_addr(addr))
17090 return -EINVAL;
17091 return 0;
17092 }
17093
17094 #define BOUNDARY_SINGLE_CACHELINE 1
17095 #define BOUNDARY_MULTI_CACHELINE 2
17096
17097 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17098 {
17099 int cacheline_size;
17100 u8 byte;
17101 int goal;
17102
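	/* PCI_CACHE_LINE_SIZE is encoded in 32-bit words, hence the
	 * multiply by 4. A value of zero means it was never programmed,
	 * so fall back to 1024 bytes.
	 */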
17103 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17104 if (byte == 0)
17105 cacheline_size = 1024;
17106 else
17107 cacheline_size = (int) byte * 4;
17108
17109 /* On 5703 and later chips, the boundary bits have no
17110 * effect.
17111 */
17112 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17113 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17114 !tg3_flag(tp, PCI_EXPRESS))
17115 goto out;
17116
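	/* Choose a boundary goal per host architecture: multi-cacheline
	 * bursts on PPC64/PARISC, single-cacheline on SPARC64/Alpha, and
	 * no constraint elsewhere.
	 */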
17117 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17118 goal = BOUNDARY_MULTI_CACHELINE;
17119 #else
17120 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17121 goal = BOUNDARY_SINGLE_CACHELINE;
17122 #else
17123 goal = 0;
17124 #endif
17125 #endif
17126
17127 if (tg3_flag(tp, 57765_PLUS)) {
17128 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17129 goto out;
17130 }
17131
17132 if (!goal)
17133 goto out;
17134
17135 /* PCI controllers on most RISC systems tend to disconnect
17136 * when a device tries to burst across a cache-line boundary.
17137 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17138 *
17139 * Unfortunately, for PCI-E there are only limited
17140 * write-side controls for this, and thus for reads
17141 * we will still get the disconnects. We'll also waste
17142 * these PCI cycles for both read and write for chips
17143 * other than 5700 and 5701 which do not implement the
17144 * boundary bits.
17145 */
17146 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17147 switch (cacheline_size) {
17148 case 16:
17149 case 32:
17150 case 64:
17151 case 128:
17152 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17153 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17154 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17155 } else {
17156 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17157 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17158 }
17159 break;
17160
17161 case 256:
17162 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17163 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17164 break;
17165
17166 default:
17167 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17168 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17169 break;
17170 }
17171 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17172 switch (cacheline_size) {
17173 case 16:
17174 case 32:
17175 case 64:
17176 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17177 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17178 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17179 break;
17180 }
17181 fallthrough;
17182 case 128:
17183 default:
17184 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17185 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17186 break;
17187 }
17188 } else {
17189 switch (cacheline_size) {
17190 case 16:
17191 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17192 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17193 DMA_RWCTRL_WRITE_BNDRY_16);
17194 break;
17195 }
17196 fallthrough;
17197 case 32:
17198 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17199 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17200 DMA_RWCTRL_WRITE_BNDRY_32);
17201 break;
17202 }
17203 fallthrough;
17204 case 64:
17205 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17206 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17207 DMA_RWCTRL_WRITE_BNDRY_64);
17208 break;
17209 }
17210 fallthrough;
17211 case 128:
17212 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17213 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17214 DMA_RWCTRL_WRITE_BNDRY_128);
17215 break;
17216 }
17217 fallthrough;
17218 case 256:
17219 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17220 DMA_RWCTRL_WRITE_BNDRY_256);
17221 break;
17222 case 512:
17223 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17224 DMA_RWCTRL_WRITE_BNDRY_512);
17225 break;
17226 case 1024:
17227 default:
17228 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17229 DMA_RWCTRL_WRITE_BNDRY_1024);
17230 break;
17231 }
17232 }
17233
17234 out:
17235 return val;
17236 }
17237
17238 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17239 int size, bool to_device)
17240 {
17241 struct tg3_internal_buffer_desc test_desc;
17242 u32 sram_dma_descs;
17243 int i, ret;
17244
17245 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17246
17247 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17248 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17249 tw32(RDMAC_STATUS, 0);
17250 tw32(WDMAC_STATUS, 0);
17251
17252 tw32(BUFMGR_MODE, 0);
17253 tw32(FTQ_RESET, 0);
17254
17255 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17256 test_desc.addr_lo = buf_dma & 0xffffffff;
17257 test_desc.nic_mbuf = 0x00002100;
17258 test_desc.len = size;
17259
17260 /*
17261	 * HP ZX1 systems were seeing test failures for 5701 cards running
17262	 * at 33MHz the *second* time the tg3 driver was loaded after an
17263	 * initial scan.
17264 *
17265 * Broadcom tells me:
17266 * ...the DMA engine is connected to the GRC block and a DMA
17267 * reset may affect the GRC block in some unpredictable way...
17268 * The behavior of resets to individual blocks has not been tested.
17269 *
17270 * Broadcom noted the GRC reset will also reset all sub-components.
17271 */
17272 if (to_device) {
17273 test_desc.cqid_sqid = (13 << 8) | 2;
17274
17275 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17276 udelay(40);
17277 } else {
17278 test_desc.cqid_sqid = (16 << 8) | 7;
17279
17280 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17281 udelay(40);
17282 }
17283 test_desc.flags = 0x00000005;
17284
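	/* Copy the test descriptor word-by-word into NIC SRAM through the
	 * PCI config-space memory window.
	 */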
17285 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17286 u32 val;
17287
17288 val = *(((u32 *)&test_desc) + i);
17289 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17290 sram_dma_descs + (i * sizeof(u32)));
17291 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17292 }
17293 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17294
17295 if (to_device)
17296 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17297 else
17298 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17299
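	/* Poll the completion FIFO (40 tries, 100 usec apart) until the
	 * descriptor address we queued shows up.
	 */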
17300 ret = -ENODEV;
17301 for (i = 0; i < 40; i++) {
17302 u32 val;
17303
17304 if (to_device)
17305 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17306 else
17307 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17308 if ((val & 0xffff) == sram_dma_descs) {
17309 ret = 0;
17310 break;
17311 }
17312
17313 udelay(100);
17314 }
17315
17316 return ret;
17317 }
17318
17319 #define TEST_BUFFER_SIZE 0x2000
17320
17321 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17322 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17323 { },
17324 };
17325
17326 static int tg3_test_dma(struct tg3 *tp)
17327 {
17328 dma_addr_t buf_dma;
17329 u32 *buf, saved_dma_rwctrl;
17330 int ret = 0;
17331
17332 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17333 &buf_dma, GFP_KERNEL);
17334 if (!buf) {
17335 ret = -ENOMEM;
17336 goto out_nofree;
17337 }
17338
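	/* Seed the DMA control register with the PCI write (0x7) and
	 * read (0x6) command codes; chip-specific watermark and boundary
	 * bits are layered on below.
	 */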
17339 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17340 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17341
17342 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17343
17344 if (tg3_flag(tp, 57765_PLUS))
17345 goto out;
17346
17347 if (tg3_flag(tp, PCI_EXPRESS)) {
17348 /* DMA read watermark not used on PCIE */
17349 tp->dma_rwctrl |= 0x00180000;
17350 } else if (!tg3_flag(tp, PCIX_MODE)) {
17351 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17352 tg3_asic_rev(tp) == ASIC_REV_5750)
17353 tp->dma_rwctrl |= 0x003f0000;
17354 else
17355 tp->dma_rwctrl |= 0x003f000f;
17356 } else {
17357 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17358 tg3_asic_rev(tp) == ASIC_REV_5704) {
17359 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17360 u32 read_water = 0x7;
17361
17362 /* If the 5704 is behind the EPB bridge, we can
17363 * do the less restrictive ONE_DMA workaround for
17364 * better performance.
17365 */
17366 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17367 tg3_asic_rev(tp) == ASIC_REV_5704)
17368 tp->dma_rwctrl |= 0x8000;
17369 else if (ccval == 0x6 || ccval == 0x7)
17370 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17371
17372 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17373 read_water = 4;
17374 /* Set bit 23 to enable PCIX hw bug fix */
17375 tp->dma_rwctrl |=
17376 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17377 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17378 (1 << 23);
17379 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17380 /* 5780 always in PCIX mode */
17381 tp->dma_rwctrl |= 0x00144000;
17382 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17383 /* 5714 always in PCIX mode */
17384 tp->dma_rwctrl |= 0x00148000;
17385 } else {
17386 tp->dma_rwctrl |= 0x001b000f;
17387 }
17388 }
17389 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17390 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17391
17392 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17393 tg3_asic_rev(tp) == ASIC_REV_5704)
17394 tp->dma_rwctrl &= 0xfffffff0;
17395
17396 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17397 tg3_asic_rev(tp) == ASIC_REV_5701) {
17398 /* Remove this if it causes problems for some boards. */
17399 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17400
17401 /* On 5700/5701 chips, we need to set this bit.
17402 * Otherwise the chip will issue cacheline transactions
17403 * to streamable DMA memory with not all the byte
17404 * enables turned on. This is an error on several
17405 * RISC PCI controllers, in particular sparc64.
17406 *
17407 * On 5703/5704 chips, this bit has been reassigned
17408 * a different meaning. In particular, it is used
17409 * on those chips to enable a PCI-X workaround.
17410 */
17411 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17412 }
17413
17414 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17415
17416
17417 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17418 tg3_asic_rev(tp) != ASIC_REV_5701)
17419 goto out;
17420
17421	/* It is best to perform the DMA test with the maximum write burst
17422	 * size to expose the 5700/5701 write DMA bug.
17423 */
17424 saved_dma_rwctrl = tp->dma_rwctrl;
17425 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17426 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17427
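	/* Write a pattern to the chip, read it back, and verify. On a
	 * mismatch, retry once with a 16-byte write boundary before
	 * declaring the DMA engine broken.
	 */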
17428 while (1) {
17429 u32 *p = buf, i;
17430
17431 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17432 p[i] = i;
17433
17434 /* Send the buffer to the chip. */
17435 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17436 if (ret) {
17437 dev_err(&tp->pdev->dev,
17438 "%s: Buffer write failed. err = %d\n",
17439 __func__, ret);
17440 break;
17441 }
17442
17443 /* Now read it back. */
17444 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17445 if (ret) {
17446 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17447 "err = %d\n", __func__, ret);
17448 break;
17449 }
17450
17451 /* Verify it. */
17452 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17453 if (p[i] == i)
17454 continue;
17455
17456 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17457 DMA_RWCTRL_WRITE_BNDRY_16) {
17458 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17459 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17460 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17461 break;
17462 } else {
17463 dev_err(&tp->pdev->dev,
17464 "%s: Buffer corrupted on read back! "
17465 "(%d != %d)\n", __func__, p[i], i);
17466 ret = -ENODEV;
17467 goto out;
17468 }
17469 }
17470
17471 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17472 /* Success. */
17473 ret = 0;
17474 break;
17475 }
17476 }
17477 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17478 DMA_RWCTRL_WRITE_BNDRY_16) {
17479 /* DMA test passed without adjusting DMA boundary,
17480 * now look for chipsets that are known to expose the
17481 * DMA bug without failing the test.
17482 */
17483 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17484 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17485 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17486 } else {
17487 /* Safe to use the calculated DMA boundary. */
17488 tp->dma_rwctrl = saved_dma_rwctrl;
17489 }
17490
17491 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17492 }
17493
17494 out:
17495 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17496 out_nofree:
17497 return ret;
17498 }
17499
17500 static void tg3_init_bufmgr_config(struct tg3 *tp)
17501 {
17502 if (tg3_flag(tp, 57765_PLUS)) {
17503 tp->bufmgr_config.mbuf_read_dma_low_water =
17504 DEFAULT_MB_RDMA_LOW_WATER_5705;
17505 tp->bufmgr_config.mbuf_mac_rx_low_water =
17506 DEFAULT_MB_MACRX_LOW_WATER_57765;
17507 tp->bufmgr_config.mbuf_high_water =
17508 DEFAULT_MB_HIGH_WATER_57765;
17509
17510 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17511 DEFAULT_MB_RDMA_LOW_WATER_5705;
17512 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17513 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17514 tp->bufmgr_config.mbuf_high_water_jumbo =
17515 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17516 } else if (tg3_flag(tp, 5705_PLUS)) {
17517 tp->bufmgr_config.mbuf_read_dma_low_water =
17518 DEFAULT_MB_RDMA_LOW_WATER_5705;
17519 tp->bufmgr_config.mbuf_mac_rx_low_water =
17520 DEFAULT_MB_MACRX_LOW_WATER_5705;
17521 tp->bufmgr_config.mbuf_high_water =
17522 DEFAULT_MB_HIGH_WATER_5705;
17523 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17524 tp->bufmgr_config.mbuf_mac_rx_low_water =
17525 DEFAULT_MB_MACRX_LOW_WATER_5906;
17526 tp->bufmgr_config.mbuf_high_water =
17527 DEFAULT_MB_HIGH_WATER_5906;
17528 }
17529
17530 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17531 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17532 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17533 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17534 tp->bufmgr_config.mbuf_high_water_jumbo =
17535 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17536 } else {
17537 tp->bufmgr_config.mbuf_read_dma_low_water =
17538 DEFAULT_MB_RDMA_LOW_WATER;
17539 tp->bufmgr_config.mbuf_mac_rx_low_water =
17540 DEFAULT_MB_MACRX_LOW_WATER;
17541 tp->bufmgr_config.mbuf_high_water =
17542 DEFAULT_MB_HIGH_WATER;
17543
17544 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17545 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17546 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17547 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17548 tp->bufmgr_config.mbuf_high_water_jumbo =
17549 DEFAULT_MB_HIGH_WATER_JUMBO;
17550 }
17551
17552 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17553 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17554 }
17555
17556 static char *tg3_phy_string(struct tg3 *tp)
17557 {
17558 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17559 case TG3_PHY_ID_BCM5400: return "5400";
17560 case TG3_PHY_ID_BCM5401: return "5401";
17561 case TG3_PHY_ID_BCM5411: return "5411";
17562 case TG3_PHY_ID_BCM5701: return "5701";
17563 case TG3_PHY_ID_BCM5703: return "5703";
17564 case TG3_PHY_ID_BCM5704: return "5704";
17565 case TG3_PHY_ID_BCM5705: return "5705";
17566 case TG3_PHY_ID_BCM5750: return "5750";
17567 case TG3_PHY_ID_BCM5752: return "5752";
17568 case TG3_PHY_ID_BCM5714: return "5714";
17569 case TG3_PHY_ID_BCM5780: return "5780";
17570 case TG3_PHY_ID_BCM5755: return "5755";
17571 case TG3_PHY_ID_BCM5787: return "5787";
17572 case TG3_PHY_ID_BCM5784: return "5784";
17573 case TG3_PHY_ID_BCM5756: return "5722/5756";
17574 case TG3_PHY_ID_BCM5906: return "5906";
17575 case TG3_PHY_ID_BCM5761: return "5761";
17576 case TG3_PHY_ID_BCM5718C: return "5718C";
17577 case TG3_PHY_ID_BCM5718S: return "5718S";
17578 case TG3_PHY_ID_BCM57765: return "57765";
17579 case TG3_PHY_ID_BCM5719C: return "5719C";
17580 case TG3_PHY_ID_BCM5720C: return "5720C";
17581 case TG3_PHY_ID_BCM5762: return "5762C";
17582 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17583 case 0: return "serdes";
17584 default: return "unknown";
17585 }
17586 }
17587
17588 static char *tg3_bus_string(struct tg3 *tp, char *str)
17589 {
17590 if (tg3_flag(tp, PCI_EXPRESS)) {
17591 strcpy(str, "PCI Express");
17592 return str;
17593 } else if (tg3_flag(tp, PCIX_MODE)) {
17594 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17595
17596 strcpy(str, "PCIX:");
17597
17598 if ((clock_ctrl == 7) ||
17599 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17600 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17601 strcat(str, "133MHz");
17602 else if (clock_ctrl == 0)
17603 strcat(str, "33MHz");
17604 else if (clock_ctrl == 2)
17605 strcat(str, "50MHz");
17606 else if (clock_ctrl == 4)
17607 strcat(str, "66MHz");
17608 else if (clock_ctrl == 6)
17609 strcat(str, "100MHz");
17610 } else {
17611 strcpy(str, "PCI:");
17612 if (tg3_flag(tp, PCI_HIGH_SPEED))
17613 strcat(str, "66MHz");
17614 else
17615 strcat(str, "33MHz");
17616 }
17617 if (tg3_flag(tp, PCI_32BIT))
17618 strcat(str, ":32-bit");
17619 else
17620 strcat(str, ":64-bit");
17621 return str;
17622 }
17623
17624 static void tg3_init_coal(struct tg3 *tp)
17625 {
17626 struct ethtool_coalesce *ec = &tp->coal;
17627
17628 memset(ec, 0, sizeof(*ec));
17629 ec->cmd = ETHTOOL_GCOALESCE;
17630 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17631 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17632 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17633 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17634 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17635 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17636 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17637 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17638 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17639
17640 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17641 HOSTCC_MODE_CLRTICK_TXBD)) {
17642 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17643 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17644 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17645 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17646 }
17647
17648 if (tg3_flag(tp, 5705_PLUS)) {
17649 ec->rx_coalesce_usecs_irq = 0;
17650 ec->tx_coalesce_usecs_irq = 0;
17651 ec->stats_block_coalesce_usecs = 0;
17652 }
17653 }
17654
17655 static int tg3_init_one(struct pci_dev *pdev,
17656 const struct pci_device_id *ent)
17657 {
17658 struct net_device *dev;
17659 struct tg3 *tp;
17660 int i, err;
17661 u32 sndmbx, rcvmbx, intmbx;
17662 char str[40];
17663 u64 dma_mask, persist_dma_mask;
17664 netdev_features_t features = 0;
17665 u8 addr[ETH_ALEN] __aligned(2);
17666
17667 err = pci_enable_device(pdev);
17668 if (err) {
17669 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17670 return err;
17671 }
17672
17673 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17674 if (err) {
17675 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17676 goto err_out_disable_pdev;
17677 }
17678
17679 pci_set_master(pdev);
17680
17681 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17682 if (!dev) {
17683 err = -ENOMEM;
17684 goto err_out_free_res;
17685 }
17686
17687 SET_NETDEV_DEV(dev, &pdev->dev);
17688
17689 tp = netdev_priv(dev);
17690 tp->pdev = pdev;
17691 tp->dev = dev;
17692 tp->rx_mode = TG3_DEF_RX_MODE;
17693 tp->tx_mode = TG3_DEF_TX_MODE;
17694 tp->irq_sync = 1;
17695 tp->pcierr_recovery = false;
17696
17697 if (tg3_debug > 0)
17698 tp->msg_enable = tg3_debug;
17699 else
17700 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17701
17702 if (pdev_is_ssb_gige_core(pdev)) {
17703 tg3_flag_set(tp, IS_SSB_CORE);
17704 if (ssb_gige_must_flush_posted_writes(pdev))
17705 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17706 if (ssb_gige_one_dma_at_once(pdev))
17707 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17708 if (ssb_gige_have_roboswitch(pdev)) {
17709 tg3_flag_set(tp, USE_PHYLIB);
17710 tg3_flag_set(tp, ROBOSWITCH);
17711 }
17712 if (ssb_gige_is_rgmii(pdev))
17713 tg3_flag_set(tp, RGMII_MODE);
17714 }
17715
17716	/* The word/byte swap controls here govern register access byte
17717 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17718 * setting below.
17719 */
17720 tp->misc_host_ctrl =
17721 MISC_HOST_CTRL_MASK_PCI_INT |
17722 MISC_HOST_CTRL_WORD_SWAP |
17723 MISC_HOST_CTRL_INDIR_ACCESS |
17724 MISC_HOST_CTRL_PCISTATE_RW;
17725
17726 /* The NONFRM (non-frame) byte/word swap controls take effect
17727 * on descriptor entries, anything which isn't packet data.
17728 *
17729 * The StrongARM chips on the board (one for tx, one for rx)
17730 * are running in big-endian mode.
17731 */
17732 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17733 GRC_MODE_WSWAP_NONFRM_DATA);
17734 #ifdef __BIG_ENDIAN
17735 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17736 #endif
17737 spin_lock_init(&tp->lock);
17738 spin_lock_init(&tp->indirect_lock);
17739 INIT_WORK(&tp->reset_task, tg3_reset_task);
17740
17741 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17742 if (!tp->regs) {
17743 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17744 err = -ENOMEM;
17745 goto err_out_free_dev;
17746 }
17747
17748 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17749 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17750 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17751 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17752 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17753 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17754 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17755 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17756 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17757 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17759 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17760 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17761 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17762 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17763 tg3_flag_set(tp, ENABLE_APE);
17764 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17765 if (!tp->aperegs) {
17766 dev_err(&pdev->dev,
17767 "Cannot map APE registers, aborting\n");
17768 err = -ENOMEM;
17769 goto err_out_iounmap;
17770 }
17771 }
17772
17773 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17774 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17775
17776 dev->ethtool_ops = &tg3_ethtool_ops;
17777 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17778 dev->netdev_ops = &tg3_netdev_ops;
17779 dev->irq = pdev->irq;
17780
17781 err = tg3_get_invariants(tp, ent);
17782 if (err) {
17783 dev_err(&pdev->dev,
17784 "Problem fetching invariants of chip, aborting\n");
17785 goto err_out_apeunmap;
17786 }
17787
17788 /* The EPB bridge inside 5714, 5715, and 5780 and any
17789 * device behind the EPB cannot support DMA addresses > 40-bit.
17790 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17791 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17792 * do DMA address check in __tg3_start_xmit().
17793 */
17794 if (tg3_flag(tp, IS_5788))
17795 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17796 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17797 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17798 #ifdef CONFIG_HIGHMEM
17799 dma_mask = DMA_BIT_MASK(64);
17800 #endif
17801 } else
17802 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17803
17804 /* Configure DMA attributes. */
17805 if (dma_mask > DMA_BIT_MASK(32)) {
17806 err = dma_set_mask(&pdev->dev, dma_mask);
17807 if (!err) {
17808 features |= NETIF_F_HIGHDMA;
17809 err = dma_set_coherent_mask(&pdev->dev,
17810 persist_dma_mask);
17811 if (err < 0) {
17812 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17813 "DMA for consistent allocations\n");
17814 goto err_out_apeunmap;
17815 }
17816 }
17817 }
17818 if (err || dma_mask == DMA_BIT_MASK(32)) {
17819 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17820 if (err) {
17821 dev_err(&pdev->dev,
17822 "No usable DMA configuration, aborting\n");
17823 goto err_out_apeunmap;
17824 }
17825 }
17826
17827 tg3_init_bufmgr_config(tp);
17828
17829 /* 5700 B0 chips do not support checksumming correctly due
17830 * to hardware bugs.
17831 */
17832 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17833 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17834
17835 if (tg3_flag(tp, 5755_PLUS))
17836 features |= NETIF_F_IPV6_CSUM;
17837 }
17838
17839 /* TSO is on by default on chips that support hardware TSO.
17840 * Firmware TSO on older chips gives lower performance, so it
17841 * is off by default, but can be enabled using ethtool.
17842 */
17843 if ((tg3_flag(tp, HW_TSO_1) ||
17844 tg3_flag(tp, HW_TSO_2) ||
17845 tg3_flag(tp, HW_TSO_3)) &&
17846 (features & NETIF_F_IP_CSUM))
17847 features |= NETIF_F_TSO;
17848 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17849 if (features & NETIF_F_IPV6_CSUM)
17850 features |= NETIF_F_TSO6;
17851 if (tg3_flag(tp, HW_TSO_3) ||
17852 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17853 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17854 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17855 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17856 tg3_asic_rev(tp) == ASIC_REV_57780)
17857 features |= NETIF_F_TSO_ECN;
17858 }
17859
17860 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17861 NETIF_F_HW_VLAN_CTAG_RX;
17862 dev->vlan_features |= features;
17863
17864 /*
17865 * Add loopback capability only for a subset of devices that support
17866	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17867 * loopback for the remaining devices.
17868 */
17869 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17870 !tg3_flag(tp, CPMU_PRESENT))
17871 /* Add the loopback capability */
17872 features |= NETIF_F_LOOPBACK;
17873
17874 dev->hw_features |= features;
17875 dev->priv_flags |= IFF_UNICAST_FLT;
17876
17877 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17878 dev->min_mtu = TG3_MIN_MTU;
17879 dev->max_mtu = TG3_MAX_MTU(tp);
17880
17881 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17882 !tg3_flag(tp, TSO_CAPABLE) &&
17883 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17884 tg3_flag_set(tp, MAX_RXPEND_64);
17885 tp->rx_pending = 63;
17886 }
17887
17888 err = tg3_get_device_address(tp, addr);
17889 if (err) {
17890 dev_err(&pdev->dev,
17891 "Could not obtain valid ethernet address, aborting\n");
17892 goto err_out_apeunmap;
17893 }
17894 eth_hw_addr_set(dev, addr);
17895
17896 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17897 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17898 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17899 for (i = 0; i < tp->irq_max; i++) {
17900 struct tg3_napi *tnapi = &tp->napi[i];
17901
17902 tnapi->tp = tp;
17903 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17904
17905 tnapi->int_mbox = intmbx;
17906 intmbx += 0x8;
17907
17908 tnapi->consmbox = rcvmbx;
17909 tnapi->prodmbox = sndmbx;
17910
17911 if (i)
17912 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17913 else
17914 tnapi->coal_now = HOSTCC_MODE_NOW;
17915
17916 if (!tg3_flag(tp, SUPPORT_MSIX))
17917 break;
17918
17919 /*
17920 * If we support MSIX, we'll be using RSS. If we're using
17921 * RSS, the first vector only handles link interrupts and the
17922 * remaining vectors handle rx and tx interrupts. Reuse the
17923	 * mailbox values for the next iteration. The values we set up
17924	 * above are still useful for the single-vectored mode.
17925 */
17926 if (!i)
17927 continue;
17928
17929 rcvmbx += 0x8;
17930
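		/* Tx producer mailboxes are not laid out contiguously;
		 * zig-zag between the two 32-bit halves of each 64-bit
		 * mailbox register.
		 */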
17931 if (sndmbx & 0x4)
17932 sndmbx -= 0x4;
17933 else
17934 sndmbx += 0xc;
17935 }
17936
17937 /*
17938	 * Reset the chip in case the UNDI or EFI driver did not shut down
17939	 * DMA; the DMA self test will enable WDMAC and we'll see (spurious)
17940	 * pending DMA on the PCI bus at that point.
17941 */
17942 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17943 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17944 tg3_full_lock(tp, 0);
17945 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17946 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17947 tg3_full_unlock(tp);
17948 }
17949
17950 err = tg3_test_dma(tp);
17951 if (err) {
17952 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17953 goto err_out_apeunmap;
17954 }
17955
17956 tg3_init_coal(tp);
17957
17958 pci_set_drvdata(pdev, dev);
17959
17960 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17961 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17962 tg3_asic_rev(tp) == ASIC_REV_5762)
17963 tg3_flag_set(tp, PTP_CAPABLE);
17964
17965 tg3_timer_init(tp);
17966
17967 tg3_carrier_off(tp);
17968
17969 err = register_netdev(dev);
17970 if (err) {
17971 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17972 goto err_out_apeunmap;
17973 }
17974
17975 if (tg3_flag(tp, PTP_CAPABLE)) {
17976 tg3_ptp_init(tp);
17977 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17978 &tp->pdev->dev);
17979 if (IS_ERR(tp->ptp_clock))
17980 tp->ptp_clock = NULL;
17981 }
17982
17983 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17984 tp->board_part_number,
17985 tg3_chip_rev_id(tp),
17986 tg3_bus_string(tp, str),
17987 dev->dev_addr);
17988
17989 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17990 char *ethtype;
17991
17992 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17993 ethtype = "10/100Base-TX";
17994 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17995 ethtype = "1000Base-SX";
17996 else
17997 ethtype = "10/100/1000Base-T";
17998
17999 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18000 "(WireSpeed[%d], EEE[%d])\n",
18001 tg3_phy_string(tp), ethtype,
18002 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18003 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18004 }
18005
18006 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18007 (dev->features & NETIF_F_RXCSUM) != 0,
18008 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18009 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18010 tg3_flag(tp, ENABLE_ASF) != 0,
18011 tg3_flag(tp, TSO_CAPABLE) != 0);
18012 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18013 tp->dma_rwctrl,
18014 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18015 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18016
18017 pci_save_state(pdev);
18018
18019 return 0;
18020
18021 err_out_apeunmap:
18022 if (tp->aperegs) {
18023 iounmap(tp->aperegs);
18024 tp->aperegs = NULL;
18025 }
18026
18027 err_out_iounmap:
18028 if (tp->regs) {
18029 iounmap(tp->regs);
18030 tp->regs = NULL;
18031 }
18032
18033 err_out_free_dev:
18034 free_netdev(dev);
18035
18036 err_out_free_res:
18037 pci_release_regions(pdev);
18038
18039 err_out_disable_pdev:
18040 if (pci_is_enabled(pdev))
18041 pci_disable_device(pdev);
18042 return err;
18043 }
18044
18045 static void tg3_remove_one(struct pci_dev *pdev)
18046 {
18047 struct net_device *dev = pci_get_drvdata(pdev);
18048
18049 if (dev) {
18050 struct tg3 *tp = netdev_priv(dev);
18051
18052 tg3_ptp_fini(tp);
18053
18054 release_firmware(tp->fw);
18055
18056 tg3_reset_task_cancel(tp);
18057
18058 if (tg3_flag(tp, USE_PHYLIB)) {
18059 tg3_phy_fini(tp);
18060 tg3_mdio_fini(tp);
18061 }
18062
18063 unregister_netdev(dev);
18064 if (tp->aperegs) {
18065 iounmap(tp->aperegs);
18066 tp->aperegs = NULL;
18067 }
18068 if (tp->regs) {
18069 iounmap(tp->regs);
18070 tp->regs = NULL;
18071 }
18072 free_netdev(dev);
18073 pci_release_regions(pdev);
18074 pci_disable_device(pdev);
18075 }
18076 }
18077
18078 #ifdef CONFIG_PM_SLEEP
18079 static int tg3_suspend(struct device *device)
18080 {
18081 struct net_device *dev = dev_get_drvdata(device);
18082 struct tg3 *tp = netdev_priv(dev);
18083
18084 rtnl_lock();
18085
18086 if (!netif_running(dev))
18087 goto unlock;
18088
18089 tg3_reset_task_cancel(tp);
18090 tg3_phy_stop(tp);
18091 tg3_netif_stop(tp);
18092
18093 tg3_timer_stop(tp);
18094
18095 tg3_full_lock(tp, 1);
18096 tg3_disable_ints(tp);
18097 tg3_full_unlock(tp);
18098
18099 netif_device_detach(dev);
18100
18101 tg3_full_lock(tp, 0);
18102 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18103 tg3_flag_clear(tp, INIT_COMPLETE);
18104 tg3_full_unlock(tp);
18105
18106 tg3_power_down_prepare(tp);
18107
18108 unlock:
18109 rtnl_unlock();
18110 return 0;
18111 }
18112
18113 static int tg3_resume(struct device *device)
18114 {
18115 struct net_device *dev = dev_get_drvdata(device);
18116 struct tg3 *tp = netdev_priv(dev);
18117 int err = 0;
18118
18119 rtnl_lock();
18120
18121 if (!netif_running(dev))
18122 goto unlock;
18123
18124 netif_device_attach(dev);
18125
18126 tg3_full_lock(tp, 0);
18127
18128 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18129
18130 tg3_flag_set(tp, INIT_COMPLETE);
18131 err = tg3_restart_hw(tp,
18132 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18133 if (err)
18134 goto out;
18135
18136 tg3_timer_start(tp);
18137
18138 tg3_netif_start(tp);
18139
18140 out:
18141 tg3_full_unlock(tp);
18142
18143 if (!err)
18144 tg3_phy_start(tp);
18145
18146 unlock:
18147 rtnl_unlock();
18148 return err;
18149 }
18150 #endif /* CONFIG_PM_SLEEP */
18151
18152 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18153
18154 static void tg3_shutdown(struct pci_dev *pdev)
18155 {
18156 struct net_device *dev = pci_get_drvdata(pdev);
18157 struct tg3 *tp = netdev_priv(dev);
18158
18159 tg3_reset_task_cancel(tp);
18160
18161 rtnl_lock();
18162
18163 netif_device_detach(dev);
18164
18165 if (netif_running(dev))
18166 dev_close(dev);
18167
18168 if (system_state == SYSTEM_POWER_OFF)
18169 tg3_power_down(tp);
18170
18171 rtnl_unlock();
18172
18173 pci_disable_device(pdev);
18174 }
18175
18176 /**
18177 * tg3_io_error_detected - called when PCI error is detected
18178 * @pdev: Pointer to PCI device
18179 * @state: The current pci connection state
18180 *
18181 * This function is called after a PCI bus error affecting
18182 * this device has been detected.
18183 */
18184 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18185 pci_channel_state_t state)
18186 {
18187 struct net_device *netdev = pci_get_drvdata(pdev);
18188 struct tg3 *tp = netdev_priv(netdev);
18189 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18190
18191 netdev_info(netdev, "PCI I/O error detected\n");
18192
18193	/* Make sure that the reset task doesn't run */
18194 tg3_reset_task_cancel(tp);
18195
18196 rtnl_lock();
18197
18198	/* Could be a second call, or maybe we don't have a netdev yet */
18199 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18200 goto done;
18201
18202	/* We needn't recover from a permanent error */
18203 if (state == pci_channel_io_frozen)
18204 tp->pcierr_recovery = true;
18205
18206 tg3_phy_stop(tp);
18207
18208 tg3_netif_stop(tp);
18209
18210 tg3_timer_stop(tp);
18211
18212 netif_device_detach(netdev);
18213
18214 /* Clean up software state, even if MMIO is blocked */
18215 tg3_full_lock(tp, 0);
18216 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18217 tg3_full_unlock(tp);
18218
18219 done:
18220 if (state == pci_channel_io_perm_failure) {
18221 if (netdev) {
18222 tg3_napi_enable(tp);
18223 dev_close(netdev);
18224 }
18225 err = PCI_ERS_RESULT_DISCONNECT;
18226 } else {
18227 pci_disable_device(pdev);
18228 }
18229
18230 rtnl_unlock();
18231
18232 return err;
18233 }
18234
18235 /**
18236 * tg3_io_slot_reset - called after the pci bus has been reset.
18237 * @pdev: Pointer to PCI device
18238 *
18239 * Restart the card from scratch, as if from a cold-boot.
18240 * At this point, the card has experienced a hard reset,
18241 * followed by fixups by BIOS, and has its config space
18242 * set up identically to what it was at cold boot.
18243 */
18244 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18245 {
18246 struct net_device *netdev = pci_get_drvdata(pdev);
18247 struct tg3 *tp = netdev_priv(netdev);
18248 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18249 int err;
18250
18251 rtnl_lock();
18252
18253 if (pci_enable_device(pdev)) {
18254 dev_err(&pdev->dev,
18255 "Cannot re-enable PCI device after reset.\n");
18256 goto done;
18257 }
18258
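	/* Re-enable bus mastering and restore the config space saved at
	 * probe time, then save it again for any later restore.
	 */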
18259 pci_set_master(pdev);
18260 pci_restore_state(pdev);
18261 pci_save_state(pdev);
18262
18263 if (!netdev || !netif_running(netdev)) {
18264 rc = PCI_ERS_RESULT_RECOVERED;
18265 goto done;
18266 }
18267
18268 err = tg3_power_up(tp);
18269 if (err)
18270 goto done;
18271
18272 rc = PCI_ERS_RESULT_RECOVERED;
18273
18274 done:
18275 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18276 tg3_napi_enable(tp);
18277 dev_close(netdev);
18278 }
18279 rtnl_unlock();
18280
18281 return rc;
18282 }
18283
18284 /**
18285 * tg3_io_resume - called when traffic can start flowing again.
18286 * @pdev: Pointer to PCI device
18287 *
18288 * This callback is called when the error recovery driver tells
18289 * us that it's OK to resume normal operation.
18290 */
18291 static void tg3_io_resume(struct pci_dev *pdev)
18292 {
18293 struct net_device *netdev = pci_get_drvdata(pdev);
18294 struct tg3 *tp = netdev_priv(netdev);
18295 int err;
18296
18297 rtnl_lock();
18298
18299 if (!netdev || !netif_running(netdev))
18300 goto done;
18301
18302 tg3_full_lock(tp, 0);
18303 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18304 tg3_flag_set(tp, INIT_COMPLETE);
18305 err = tg3_restart_hw(tp, true);
18306 if (err) {
18307 tg3_full_unlock(tp);
18308 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18309 goto done;
18310 }
18311
18312 netif_device_attach(netdev);
18313
18314 tg3_timer_start(tp);
18315
18316 tg3_netif_start(tp);
18317
18318 tg3_full_unlock(tp);
18319
18320 tg3_phy_start(tp);
18321
18322 done:
18323 tp->pcierr_recovery = false;
18324 rtnl_unlock();
18325 }
18326
18327 static const struct pci_error_handlers tg3_err_handler = {
18328 .error_detected = tg3_io_error_detected,
18329 .slot_reset = tg3_io_slot_reset,
18330 .resume = tg3_io_resume
18331 };
18332
18333 static struct pci_driver tg3_driver = {
18334 .name = DRV_MODULE_NAME,
18335 .id_table = tg3_pci_tbl,
18336 .probe = tg3_init_one,
18337 .remove = tg3_remove_one,
18338 .err_handler = &tg3_err_handler,
18339 .driver.pm = &tg3_pm_ops,
18340 .shutdown = tg3_shutdown,
18341 };
18342
18343 module_pci_driver(tg3_driver);
18344