/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32.h>
#include <linux/dmi.h>

#include <net/checksum.h>
#include <net/gso.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)	\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)	\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)	\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
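
/* Illustrative expansion (not part of the driver): a call such as
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *
 * token-pastes the flag name onto the TG3_FLAG_ prefix and becomes
 *
 *	if (_tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags))
 *
 * so every flag access goes through the enum-typed helpers above and
 * ends up as an atomic bitop on the tp->tg3_flags bitmap.
 */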

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM		3
#define TG3_MIN_NUM		137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
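
/* Worked example of the shift-and-mask arithmetic described above:
 * TG3_TX_RING_SIZE is 512, a compile-time power of two, so
 * NEXT_TX(511) evaluates to (512 & 511) == 0, wrapping the index
 * back to the start of the ring without a hardware modulo
 * instruction. The '& (foo - 1)' trick only works for power-of-two
 * ring sizes.
 */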

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
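
/* Simplified sketch (not the actual receive path) of how the copy
 * threshold above is typically consulted; the real logic in tg3_rx()
 * also deals with DMA unmapping, the jumbo ring and error frames:
 *
 *	if (len < TG3_RX_COPY_THRESH(tp)) {
 *		// small frame: memcpy into a freshly allocated skb and
 *		// recycle the original buffer, which also realigns the
 *		// payload for the 5701 PCIX DMA erratum described above
 *	} else {
 *		// large frame: hand the mapped buffer up as-is
 *	}
 */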

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp)	(tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp)	(tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller <davem@redhat.com> and Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
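
/* Example usage of the accessors above: a plain register write is
 * tw32(reg, val); tw32_f() additionally flushes the posted write by
 * reading the register back; and, e.g.,
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * flushes and then guarantees at least a 40 usec delay, as required
 * for clock and GPIO registers (see the comment before _tw32_flush()).
 */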

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
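
/* Callers bracket accesses to APE-shared resources with these
 * lock/unlock helpers, e.g. (as tg3_ape_event_lock() does below):
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */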

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait up to 20 milliseconds for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Only send a heartbeat if the interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x", pci_dev_id(tp->pdev));
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		fallthrough;
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}
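
/* Layout of the four words gathered above, each packing two 16-bit
 * MII registers (high half first):
 *
 *	data[0] = BMCR      << 16 | BMSR
 *	data[1] = ADVERTISE << 16 | LPA
 *	data[2] = CTRL1000  << 16 | STAT1000   (zero for MII serdes)
 *	data[3] = PHYADDR   << 16
 *
 * tg3_ump_link_report() below hands these to the firmware mailbox.
 */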
1692
1693 /* tp->lock is held. */
tg3_ump_link_report(struct tg3 * tp)1694 static void tg3_ump_link_report(struct tg3 *tp)
1695 {
1696 u32 data[4];
1697
1698 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1699 return;
1700
1701 tg3_phy_gather_ump_data(tp, data);
1702
1703 tg3_wait_for_event_ack(tp);
1704
1705 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1706 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1707 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1708 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1709 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1710 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1711
1712 tg3_generate_fw_event(tp);
1713 }
1714
1715 /* tp->lock is held. */
tg3_stop_fw(struct tg3 * tp)1716 static void tg3_stop_fw(struct tg3 *tp)
1717 {
1718 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1719 /* Wait for RX cpu to ACK the previous event. */
1720 tg3_wait_for_event_ack(tp);
1721
1722 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1723
1724 tg3_generate_fw_event(tp);
1725
1726 /* Wait for RX cpu to ACK this event. */
1727 tg3_wait_for_event_ack(tp);
1728 }
1729 }
1730
1731 /* tp->lock is held. */
tg3_write_sig_pre_reset(struct tg3 * tp,int kind)1732 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1733 {
1734 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1735 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1736
1737 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1738 switch (kind) {
1739 case RESET_KIND_INIT:
1740 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 DRV_STATE_START);
1742 break;
1743
1744 case RESET_KIND_SHUTDOWN:
1745 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1746 DRV_STATE_UNLOAD);
1747 break;
1748
1749 case RESET_KIND_SUSPEND:
1750 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1751 DRV_STATE_SUSPEND);
1752 break;
1753
1754 default:
1755 break;
1756 }
1757 }
1758 }
1759
1760 /* tp->lock is held. */
tg3_write_sig_post_reset(struct tg3 * tp,int kind)1761 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1762 {
1763 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1764 switch (kind) {
1765 case RESET_KIND_INIT:
1766 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1767 DRV_STATE_START_DONE);
1768 break;
1769
1770 case RESET_KIND_SHUTDOWN:
1771 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 DRV_STATE_UNLOAD_DONE);
1773 break;
1774
1775 default:
1776 break;
1777 }
1778 }
1779 }
1780
1781 /* tp->lock is held. */
1782 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1783 {
1784 if (tg3_flag(tp, ENABLE_ASF)) {
1785 switch (kind) {
1786 case RESET_KIND_INIT:
1787 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 DRV_STATE_START);
1789 break;
1790
1791 case RESET_KIND_SHUTDOWN:
1792 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 DRV_STATE_UNLOAD);
1794 break;
1795
1796 case RESET_KIND_SUSPEND:
1797 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1798 DRV_STATE_SUSPEND);
1799 break;
1800
1801 default:
1802 break;
1803 }
1804 }
1805 }
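
/* The legacy signature path above differs from the new-handshake one
 * in two ways: it is gated on ENABLE_ASF rather than
 * ASF_NEW_HANDSHAKE, and it has no *_DONE states, so nothing is
 * signalled after the reset completes.
 */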
1806
1807 static int tg3_poll_fw(struct tg3 *tp)
1808 {
1809 int i;
1810 u32 val;
1811
1812 if (tg3_flag(tp, NO_FWARE_REPORTED))
1813 return 0;
1814
1815 if (tg3_flag(tp, IS_SSB_CORE)) {
1816 /* We don't use firmware. */
1817 return 0;
1818 }
1819
1820 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1821 /* Wait up to 20ms for init done. */
1822 for (i = 0; i < 200; i++) {
1823 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1824 return 0;
1825 if (pci_channel_offline(tp->pdev))
1826 return -ENODEV;
1827
1828 udelay(100);
1829 }
1830 return -ENODEV;
1831 }
1832
1833 /* Wait for firmware initialization to complete. */
1834 for (i = 0; i < 100000; i++) {
1835 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1836 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1837 break;
1838 if (pci_channel_offline(tp->pdev)) {
1839 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1840 tg3_flag_set(tp, NO_FWARE_REPORTED);
1841 netdev_info(tp->dev, "No firmware running\n");
1842 }
1843
1844 break;
1845 }
1846
1847 udelay(10);
1848 }
1849
1850 /* Chip might not be fitted with firmware. Some Sun onboard
1851 * parts are configured like that. So don't signal the timeout
1852 * of the above loop as an error, but do report the lack of
1853 * running firmware once.
1854 */
1855 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1856 tg3_flag_set(tp, NO_FWARE_REPORTED);
1857
1858 netdev_info(tp->dev, "No firmware running\n");
1859 }
1860
1861 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1862 /* The 57765 A0 needs a little more
1863 * time to do some important work.
1864 */
1865 mdelay(10);
1866 }
1867
1868 return 0;
1869 }
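
/* The poll above succeeds once the mailbox holds the one's complement
 * of NIC_SRAM_FIRMWARE_MBOX_MAGIC1, which the bootcode writes back
 * when it finishes; the loop budgets roughly one second
 * (100000 * 10 usec) before giving up quietly.
 */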
1870
1871 static void tg3_link_report(struct tg3 *tp)
1872 {
1873 if (!netif_carrier_ok(tp->dev)) {
1874 netif_info(tp, link, tp->dev, "Link is down\n");
1875 tg3_ump_link_report(tp);
1876 } else if (netif_msg_link(tp)) {
1877 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1878 (tp->link_config.active_speed == SPEED_1000 ?
1879 1000 :
1880 (tp->link_config.active_speed == SPEED_100 ?
1881 100 : 10)),
1882 (tp->link_config.active_duplex == DUPLEX_FULL ?
1883 "full" : "half"));
1884
1885 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1886 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1887 "on" : "off",
1888 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1889 "on" : "off");
1890
1891 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1892 netdev_info(tp->dev, "EEE is %s\n",
1893 tp->setlpicnt ? "enabled" : "disabled");
1894
1895 tg3_ump_link_report(tp);
1896 }
1897
1898 tp->link_up = netif_carrier_ok(tp->dev);
1899 }
1900
1901 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 {
1903 u32 flowctrl = 0;
1904
1905 if (adv & ADVERTISE_PAUSE_CAP) {
1906 flowctrl |= FLOW_CTRL_RX;
1907 if (!(adv & ADVERTISE_PAUSE_ASYM))
1908 flowctrl |= FLOW_CTRL_TX;
1909 } else if (adv & ADVERTISE_PAUSE_ASYM)
1910 flowctrl |= FLOW_CTRL_TX;
1911
1912 return flowctrl;
1913 }
1914
1915 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 {
1917 u16 miireg;
1918
1919 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1920 miireg = ADVERTISE_1000XPAUSE;
1921 else if (flow_ctrl & FLOW_CTRL_TX)
1922 miireg = ADVERTISE_1000XPSE_ASYM;
1923 else if (flow_ctrl & FLOW_CTRL_RX)
1924 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1925 else
1926 miireg = 0;
1927
1928 return miireg;
1929 }
1930
1931 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 {
1933 u32 flowctrl = 0;
1934
1935 if (adv & ADVERTISE_1000XPAUSE) {
1936 flowctrl |= FLOW_CTRL_RX;
1937 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1938 flowctrl |= FLOW_CTRL_TX;
1939 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1940 flowctrl |= FLOW_CTRL_TX;
1941
1942 return flowctrl;
1943 }
1944
1945 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 {
1947 u8 cap = 0;
1948
1949 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1950 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1951 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1952 if (lcladv & ADVERTISE_1000XPAUSE)
1953 cap = FLOW_CTRL_RX;
1954 if (rmtadv & ADVERTISE_1000XPAUSE)
1955 cap = FLOW_CTRL_TX;
1956 }
1957
1958 return cap;
1959 }
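
/* Resolution summary for the 1000BASE-X pause bits handled above:
 *
 *	local PAUSE/ASYM	remote PAUSE/ASYM	result
 *	  1	x		  1	x		TX + RX
 *	  1	1		  0	1		RX only
 *	  0	1		  1	1		TX only
 *	(anything else)					none
 */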
1960
1961 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 {
1963 u8 autoneg;
1964 u8 flowctrl = 0;
1965 u32 old_rx_mode = tp->rx_mode;
1966 u32 old_tx_mode = tp->tx_mode;
1967
1968 if (tg3_flag(tp, USE_PHYLIB))
1969 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1970 else
1971 autoneg = tp->link_config.autoneg;
1972
1973 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1974 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1975 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1976 else
1977 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1978 } else
1979 flowctrl = tp->link_config.flowctrl;
1980
1981 tp->link_config.active_flowctrl = flowctrl;
1982
1983 if (flowctrl & FLOW_CTRL_RX)
1984 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1985 else
1986 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1987
1988 if (old_rx_mode != tp->rx_mode)
1989 tw32_f(MAC_RX_MODE, tp->rx_mode);
1990
1991 if (flowctrl & FLOW_CTRL_TX)
1992 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1993 else
1994 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1995
1996 if (old_tx_mode != tp->tx_mode)
1997 tw32_f(MAC_TX_MODE, tp->tx_mode);
1998 }
1999
2000 static void tg3_adjust_link(struct net_device *dev)
2001 {
2002 u8 oldflowctrl, linkmesg = 0;
2003 u32 mac_mode, lcl_adv, rmt_adv;
2004 struct tg3 *tp = netdev_priv(dev);
2005 struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2006
2007 spin_lock_bh(&tp->lock);
2008
2009 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2010 MAC_MODE_HALF_DUPLEX);
2011
2012 oldflowctrl = tp->link_config.active_flowctrl;
2013
2014 if (phydev->link) {
2015 lcl_adv = 0;
2016 rmt_adv = 0;
2017
2018 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2019 mac_mode |= MAC_MODE_PORT_MODE_MII;
2020 else if (phydev->speed == SPEED_1000 ||
2021 tg3_asic_rev(tp) != ASIC_REV_5785)
2022 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2023 else
2024 mac_mode |= MAC_MODE_PORT_MODE_MII;
2025
2026 if (phydev->duplex == DUPLEX_HALF)
2027 mac_mode |= MAC_MODE_HALF_DUPLEX;
2028 else {
2029 lcl_adv = mii_advertise_flowctrl(
2030 tp->link_config.flowctrl);
2031
2032 if (phydev->pause)
2033 rmt_adv = LPA_PAUSE_CAP;
2034 if (phydev->asym_pause)
2035 rmt_adv |= LPA_PAUSE_ASYM;
2036 }
2037
2038 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2039 } else
2040 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2041
2042 if (mac_mode != tp->mac_mode) {
2043 tp->mac_mode = mac_mode;
2044 tw32_f(MAC_MODE, tp->mac_mode);
2045 udelay(40);
2046 }
2047
2048 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2049 if (phydev->speed == SPEED_10)
2050 tw32(MAC_MI_STAT,
2051 MAC_MI_STAT_10MBPS_MODE |
2052 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053 else
2054 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055 }
2056
2057 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2058 tw32(MAC_TX_LENGTHS,
2059 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2060 (6 << TX_LENGTHS_IPG_SHIFT) |
2061 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2062 else
2063 tw32(MAC_TX_LENGTHS,
2064 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2065 (6 << TX_LENGTHS_IPG_SHIFT) |
2066 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2067
2068 if (phydev->link != tp->old_link ||
2069 phydev->speed != tp->link_config.active_speed ||
2070 phydev->duplex != tp->link_config.active_duplex ||
2071 oldflowctrl != tp->link_config.active_flowctrl)
2072 linkmesg = 1;
2073
2074 tp->old_link = phydev->link;
2075 tp->link_config.active_speed = phydev->speed;
2076 tp->link_config.active_duplex = phydev->duplex;
2077
2078 spin_unlock_bh(&tp->lock);
2079
2080 if (linkmesg)
2081 tg3_link_report(tp);
2082 }
2083
2084 static int tg3_phy_init(struct tg3 *tp)
2085 {
2086 struct phy_device *phydev;
2087
2088 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2089 return 0;
2090
2091 /* Bring the PHY back to a known state. */
2092 tg3_bmcr_reset(tp);
2093
2094 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2095
2096 /* Attach the MAC to the PHY. */
2097 phydev = phy_connect(tp->dev, phydev_name(phydev),
2098 tg3_adjust_link, phydev->interface);
2099 if (IS_ERR(phydev)) {
2100 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2101 return PTR_ERR(phydev);
2102 }
2103
2104 /* Mask with MAC supported features. */
2105 switch (phydev->interface) {
2106 case PHY_INTERFACE_MODE_GMII:
2107 case PHY_INTERFACE_MODE_RGMII:
2108 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2109 phy_set_max_speed(phydev, SPEED_1000);
2110 phy_support_asym_pause(phydev);
2111 break;
2112 }
2113 fallthrough;
2114 case PHY_INTERFACE_MODE_MII:
2115 phy_set_max_speed(phydev, SPEED_100);
2116 phy_support_asym_pause(phydev);
2117 break;
2118 default:
2119 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2120 return -EINVAL;
2121 }
2122
2123 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124
2125 phy_attached_info(phydev);
2126
2127 return 0;
2128 }
2129
2130 static void tg3_phy_start(struct tg3 *tp)
2131 {
2132 struct phy_device *phydev;
2133
2134 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2135 return;
2136
2137 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2138
2139 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2140 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2141 phydev->speed = tp->link_config.speed;
2142 phydev->duplex = tp->link_config.duplex;
2143 phydev->autoneg = tp->link_config.autoneg;
2144 ethtool_convert_legacy_u32_to_link_mode(
2145 phydev->advertising, tp->link_config.advertising);
2146 }
2147
2148 phy_start(phydev);
2149
2150 phy_start_aneg(phydev);
2151 }
2152
2153 static void tg3_phy_stop(struct tg3 *tp)
2154 {
2155 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 return;
2157
2158 phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2159 }
2160
2161 static void tg3_phy_fini(struct tg3 *tp)
2162 {
2163 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2164 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2165 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2166 }
2167 }
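
/* Illustrative phylib lifecycle as used by this driver: tg3_phy_init()
 * connects the MAC to the PHY once, tg3_phy_start()/tg3_phy_stop()
 * bracket the running state (restoring forced settings when leaving
 * low power), and tg3_phy_fini() disconnects again.
 */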
2168
2169 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2170 {
2171 int err;
2172 u32 val;
2173
2174 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 return 0;
2176
2177 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2178 /* Cannot do read-modify-write on 5401 */
2179 err = tg3_phy_auxctl_write(tp,
2180 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2181 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2182 0x4c20);
2183 goto done;
2184 }
2185
2186 err = tg3_phy_auxctl_read(tp,
2187 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2188 if (err)
2189 return err;
2190
2191 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2192 err = tg3_phy_auxctl_write(tp,
2193 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2194
2195 done:
2196 return err;
2197 }
2198
2199 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2200 {
2201 u32 phytest;
2202
2203 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 u32 phy;
2205
2206 tg3_writephy(tp, MII_TG3_FET_TEST,
2207 phytest | MII_TG3_FET_SHADOW_EN);
2208 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2209 if (enable)
2210 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 else
2212 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2213 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2214 }
2215 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2216 }
2217 }
2218
2219 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2220 {
2221 u32 reg;
2222
2223 if (!tg3_flag(tp, 5705_PLUS) ||
2224 (tg3_flag(tp, 5717_PLUS) &&
2225 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 return;
2227
2228 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2229 tg3_phy_fet_toggle_apd(tp, enable);
2230 return;
2231 }
2232
2233 reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2241
2242
2243 reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2244 if (enable)
2245 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2246
2247 tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2248 }
2249
2250 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2251 {
2252 u32 phy;
2253
2254 if (!tg3_flag(tp, 5705_PLUS) ||
2255 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2256 return;
2257
2258 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2259 u32 ephy;
2260
2261 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2262 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2263
2264 tg3_writephy(tp, MII_TG3_FET_TEST,
2265 ephy | MII_TG3_FET_SHADOW_EN);
2266 if (!tg3_readphy(tp, reg, &phy)) {
2267 if (enable)
2268 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269 else
2270 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271 tg3_writephy(tp, reg, phy);
2272 }
2273 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2274 }
2275 } else {
2276 int ret;
2277
2278 ret = tg3_phy_auxctl_read(tp,
2279 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2280 if (!ret) {
2281 if (enable)
2282 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283 else
2284 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285 tg3_phy_auxctl_write(tp,
2286 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2287 }
2288 }
2289 }
2290
2291 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2292 {
2293 int ret;
2294 u32 val;
2295
2296 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2297 return;
2298
2299 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2300 if (!ret)
2301 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2302 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2303 }
2304
2305 static void tg3_phy_apply_otp(struct tg3 *tp)
2306 {
2307 u32 otp, phy;
2308
2309 if (!tp->phy_otp)
2310 return;
2311
2312 otp = tp->phy_otp;
2313
2314 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2315 return;
2316
2317 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2318 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2319 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2320
2321 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2322 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2323 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2324
2325 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2326 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2327 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2328
2329 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2330 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2331
2332 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2333 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2334
2335 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2336 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2337 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2338
2339 tg3_phy_toggle_auxctl_smdsp(tp, false);
2340 }
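
/* The single OTP word consumed above packs six analog trim fields
 * (AGC target, HPF, LPF, VDAC, 10BT amplitude and resistor offsets);
 * each is masked and shifted out, then written to its DSP tap while
 * SMDSP auxctl access is open.
 */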
2341
2342 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_keee *eee)
2343 {
2344 u32 val;
2345 struct ethtool_keee *dest = &tp->eee;
2346
2347 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2348 return;
2349
2350 if (eee)
2351 dest = eee;
2352
2353 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2354 return;
2355
2356 /* Pull eee_active */
2357 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2358 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2359 dest->eee_active = 1;
2360 } else
2361 dest->eee_active = 0;
2362
2363 /* Pull lp advertised settings */
2364 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2365 return;
2366 mii_eee_cap1_mod_linkmode_t(dest->lp_advertised, val);
2367
2368 /* Pull advertised and eee_enabled settings */
2369 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2370 return;
2371 dest->eee_enabled = !!val;
2372 mii_eee_cap1_mod_linkmode_t(dest->advertised, val);
2373
2374 /* Pull tx_lpi_enabled */
2375 val = tr32(TG3_CPMU_EEE_MODE);
2376 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2377
2378 /* Pull lpi timer value */
2379 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2380 }
2381
2382 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2383 {
2384 u32 val;
2385
2386 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2387 return;
2388
2389 tp->setlpicnt = 0;
2390
2391 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2392 current_link_up &&
2393 tp->link_config.active_duplex == DUPLEX_FULL &&
2394 (tp->link_config.active_speed == SPEED_100 ||
2395 tp->link_config.active_speed == SPEED_1000)) {
2396 u32 eeectl;
2397
2398 if (tp->link_config.active_speed == SPEED_1000)
2399 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2400 else
2401 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2402
2403 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2404
2405 tg3_eee_pull_config(tp, NULL);
2406 if (tp->eee.eee_active)
2407 tp->setlpicnt = 2;
2408 }
2409
2410 if (!tp->setlpicnt) {
2411 if (current_link_up &&
2412 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2413 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2414 tg3_phy_toggle_auxctl_smdsp(tp, false);
2415 }
2416
2417 val = tr32(TG3_CPMU_EEE_MODE);
2418 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2419 }
2420 }
2421
2422 static void tg3_phy_eee_enable(struct tg3 *tp)
2423 {
2424 u32 val;
2425
2426 if (tp->link_config.active_speed == SPEED_1000 &&
2427 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2428 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2429 tg3_flag(tp, 57765_CLASS)) &&
2430 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2431 val = MII_TG3_DSP_TAP26_ALNOKO |
2432 MII_TG3_DSP_TAP26_RMRXSTO;
2433 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2434 tg3_phy_toggle_auxctl_smdsp(tp, false);
2435 }
2436
2437 val = tr32(TG3_CPMU_EEE_MODE);
2438 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2439 }
2440
2441 static int tg3_wait_macro_done(struct tg3 *tp)
2442 {
2443 int limit = 100;
2444
2445 while (limit--) {
2446 u32 tmp32;
2447
2448 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2449 if ((tmp32 & 0x1000) == 0)
2450 break;
2451 }
2452 }
2453 if (limit < 0)
2454 return -EBUSY;
2455
2456 return 0;
2457 }
2458
2459 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2460 {
2461 static const u32 test_pat[4][6] = {
2462 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2463 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2464 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2465 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2466 };
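/* Each channel gets three low/high word pairs; on readback only
 * bits 14:0 of the low word and bits 3:0 of the high word are
 * compared, matching the 0x7fff and 0x000f masks below.
 */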
2467 int chan;
2468
2469 for (chan = 0; chan < 4; chan++) {
2470 int i;
2471
2472 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2473 (chan * 0x2000) | 0x0200);
2474 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2475
2476 for (i = 0; i < 6; i++)
2477 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2478 test_pat[chan][i]);
2479
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2481 if (tg3_wait_macro_done(tp)) {
2482 *resetp = 1;
2483 return -EBUSY;
2484 }
2485
2486 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2487 (chan * 0x2000) | 0x0200);
2488 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2489 if (tg3_wait_macro_done(tp)) {
2490 *resetp = 1;
2491 return -EBUSY;
2492 }
2493
2494 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2495 if (tg3_wait_macro_done(tp)) {
2496 *resetp = 1;
2497 return -EBUSY;
2498 }
2499
2500 for (i = 0; i < 6; i += 2) {
2501 u32 low, high;
2502
2503 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2504 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2505 tg3_wait_macro_done(tp)) {
2506 *resetp = 1;
2507 return -EBUSY;
2508 }
2509 low &= 0x7fff;
2510 high &= 0x000f;
2511 if (low != test_pat[chan][i] ||
2512 high != test_pat[chan][i+1]) {
2513 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2514 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2515 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2516
2517 return -EBUSY;
2518 }
2519 }
2520 }
2521
2522 return 0;
2523 }
2524
2525 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2526 {
2527 int chan;
2528
2529 for (chan = 0; chan < 4; chan++) {
2530 int i;
2531
2532 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2533 (chan * 0x2000) | 0x0200);
2534 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2535 for (i = 0; i < 6; i++)
2536 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2537 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2538 if (tg3_wait_macro_done(tp))
2539 return -EBUSY;
2540 }
2541
2542 return 0;
2543 }
2544
2545 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2546 {
2547 u32 reg32, phy9_orig;
2548 int retries, do_phy_reset, err;
2549
2550 retries = 10;
2551 do_phy_reset = 1;
2552 do {
2553 if (do_phy_reset) {
2554 err = tg3_bmcr_reset(tp);
2555 if (err)
2556 return err;
2557 do_phy_reset = 0;
2558 }
2559
2560 /* Disable transmitter and interrupt. */
2561 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2562 continue;
2563
2564 reg32 |= 0x3000;
2565 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2566
2567 /* Set full-duplex, 1000 mbps. */
2568 tg3_writephy(tp, MII_BMCR,
2569 BMCR_FULLDPLX | BMCR_SPEED1000);
2570
2571 /* Set to master mode. */
2572 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2573 continue;
2574
2575 tg3_writephy(tp, MII_CTRL1000,
2576 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2577
2578 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2579 if (err)
2580 return err;
2581
2582 /* Block the PHY control access. */
2583 tg3_phydsp_write(tp, 0x8005, 0x0800);
2584
2585 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2586 if (!err)
2587 break;
2588 } while (--retries);
2589
2590 err = tg3_phy_reset_chanpat(tp);
2591 if (err)
2592 return err;
2593
2594 tg3_phydsp_write(tp, 0x8005, 0x0000);
2595
2596 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2597 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2598
2599 tg3_phy_toggle_auxctl_smdsp(tp, false);
2600
2601 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2602
2603 err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2604 if (err)
2605 return err;
2606
2607 reg32 &= ~0x3000;
2608 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2609
2610 return 0;
2611 }
2612
2613 static void tg3_carrier_off(struct tg3 *tp)
2614 {
2615 netif_carrier_off(tp->dev);
2616 tp->link_up = false;
2617 }
2618
2619 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2620 {
2621 if (tg3_flag(tp, ENABLE_ASF))
2622 netdev_warn(tp->dev,
2623 "Management side-band traffic will be interrupted during phy settings change\n");
2624 }
2625
2626 /* This will reset the tigon3 PHY and reapply the chip-specific
2627  * PHY workarounds.
2628  */
2629 static int tg3_phy_reset(struct tg3 *tp)
2630 {
2631 u32 val, cpmuctrl;
2632 int err;
2633
2634 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2635 val = tr32(GRC_MISC_CFG);
2636 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2637 udelay(40);
2638 }
2639 err = tg3_readphy(tp, MII_BMSR, &val);
2640 err |= tg3_readphy(tp, MII_BMSR, &val);
2641 if (err != 0)
2642 return -EBUSY;
2643
2644 if (netif_running(tp->dev) && tp->link_up) {
2645 netif_carrier_off(tp->dev);
2646 tg3_link_report(tp);
2647 }
2648
2649 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2650 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2651 tg3_asic_rev(tp) == ASIC_REV_5705) {
2652 err = tg3_phy_reset_5703_4_5(tp);
2653 if (err)
2654 return err;
2655 goto out;
2656 }
2657
2658 cpmuctrl = 0;
2659 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2660 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2661 cpmuctrl = tr32(TG3_CPMU_CTRL);
2662 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2663 tw32(TG3_CPMU_CTRL,
2664 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2665 }
2666
2667 err = tg3_bmcr_reset(tp);
2668 if (err)
2669 return err;
2670
2671 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2672 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2673 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2674
2675 tw32(TG3_CPMU_CTRL, cpmuctrl);
2676 }
2677
2678 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2679 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2680 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2681 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2682 CPMU_LSPD_1000MB_MACCLK_12_5) {
2683 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2684 udelay(40);
2685 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2686 }
2687 }
2688
2689 if (tg3_flag(tp, 5717_PLUS) &&
2690 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2691 return 0;
2692
2693 tg3_phy_apply_otp(tp);
2694
2695 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2696 tg3_phy_toggle_apd(tp, true);
2697 else
2698 tg3_phy_toggle_apd(tp, false);
2699
2700 out:
2701 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2702 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2703 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2704 tg3_phydsp_write(tp, 0x000a, 0x0323);
2705 tg3_phy_toggle_auxctl_smdsp(tp, false);
2706 }
2707
2708 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2709 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2710 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2711 }
2712
2713 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2714 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2715 tg3_phydsp_write(tp, 0x000a, 0x310b);
2716 tg3_phydsp_write(tp, 0x201f, 0x9506);
2717 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2718 tg3_phy_toggle_auxctl_smdsp(tp, false);
2719 }
2720 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2721 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2722 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2723 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2724 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2725 tg3_writephy(tp, MII_TG3_TEST1,
2726 MII_TG3_TEST1_TRIM_EN | 0x4);
2727 } else
2728 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2729
2730 tg3_phy_toggle_auxctl_smdsp(tp, false);
2731 }
2732 }
2733
2734 /* Set the extended packet length bit (bit 14) on all chips
2735  * that support jumbo frames. */
2736 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2737 /* Cannot do read-modify-write on 5401 */
2738 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2739 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2740 /* Set bit 14 with read-modify-write to preserve other bits */
2741 err = tg3_phy_auxctl_read(tp,
2742 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2743 if (!err)
2744 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2745 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2746 }
2747
2748 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2749 * jumbo frames transmission.
2750 */
2751 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2752 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2753 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2754 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2755 }
2756
2757 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2758 /* adjust output voltage */
2759 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2760 }
2761
2762 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2763 tg3_phydsp_write(tp, 0xffb, 0x4000);
2764
2765 tg3_phy_toggle_automdix(tp, true);
2766 tg3_phy_set_wirespeed(tp);
2767 return 0;
2768 }
2769
2770 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2771 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2772 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2773 TG3_GPIO_MSG_NEED_VAUX)
2774 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2775 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2776 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2777 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2778 (TG3_GPIO_MSG_DRVR_PRES << 12))
2779
2780 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2781 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2782 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2783 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2784 (TG3_GPIO_MSG_NEED_VAUX << 12))
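
/* Each PCI function owns one 4-bit message field: function N's flags
 * sit at shift 4 * N, which is why the ALL_* masks above repeat the
 * per-function bit at shifts 0, 4, 8 and 12.
 */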
2785
2786 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2787 {
2788 u32 status, shift;
2789
2790 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2791 tg3_asic_rev(tp) == ASIC_REV_5719)
2792 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2793 else
2794 status = tr32(TG3_CPMU_DRV_STATUS);
2795
2796 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2797 status &= ~(TG3_GPIO_MSG_MASK << shift);
2798 status |= (newstat << shift);
2799
2800 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2801 tg3_asic_rev(tp) == ASIC_REV_5719)
2802 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2803 else
2804 tw32(TG3_CPMU_DRV_STATUS, status);
2805
2806 return status >> TG3_APE_GPIO_MSG_SHIFT;
2807 }
2808
2809 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2810 {
2811 if (!tg3_flag(tp, IS_NIC))
2812 return 0;
2813
2814 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2815 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2816 tg3_asic_rev(tp) == ASIC_REV_5720) {
2817 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2818 return -EIO;
2819
2820 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2821
2822 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2823 TG3_GRC_LCLCTL_PWRSW_DELAY);
2824
2825 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2826 } else {
2827 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 TG3_GRC_LCLCTL_PWRSW_DELAY);
2829 }
2830
2831 return 0;
2832 }
2833
2834 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2835 {
2836 u32 grc_local_ctrl;
2837
2838 if (!tg3_flag(tp, IS_NIC) ||
2839 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2840 tg3_asic_rev(tp) == ASIC_REV_5701)
2841 return;
2842
2843 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2844
2845 tw32_wait_f(GRC_LOCAL_CTRL,
2846 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2847 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848
2849 tw32_wait_f(GRC_LOCAL_CTRL,
2850 grc_local_ctrl,
2851 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853 tw32_wait_f(GRC_LOCAL_CTRL,
2854 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2855 TG3_GRC_LCLCTL_PWRSW_DELAY);
2856 }
2857
2858 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2859 {
2860 if (!tg3_flag(tp, IS_NIC))
2861 return;
2862
2863 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2864 tg3_asic_rev(tp) == ASIC_REV_5701) {
2865 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2866 (GRC_LCLCTRL_GPIO_OE0 |
2867 GRC_LCLCTRL_GPIO_OE1 |
2868 GRC_LCLCTRL_GPIO_OE2 |
2869 GRC_LCLCTRL_GPIO_OUTPUT0 |
2870 GRC_LCLCTRL_GPIO_OUTPUT1),
2871 TG3_GRC_LCLCTL_PWRSW_DELAY);
2872 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2873 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2874 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2875 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2876 GRC_LCLCTRL_GPIO_OE1 |
2877 GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT0 |
2879 GRC_LCLCTRL_GPIO_OUTPUT1 |
2880 tp->grc_local_ctrl;
2881 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2885 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886 TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2889 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 } else {
2892 u32 no_gpio2;
2893 u32 grc_local_ctrl = 0;
2894
2895 /* Workaround to prevent overdrawing current. */
2896 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2897 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2898 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2899 grc_local_ctrl,
2900 TG3_GRC_LCLCTL_PWRSW_DELAY);
2901 }
2902
2903 /* On 5753 and variants, GPIO2 cannot be used. */
2904 no_gpio2 = tp->nic_sram_data_cfg &
2905 NIC_SRAM_DATA_CFG_NO_GPIO2;
2906
2907 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2908 GRC_LCLCTRL_GPIO_OE1 |
2909 GRC_LCLCTRL_GPIO_OE2 |
2910 GRC_LCLCTRL_GPIO_OUTPUT1 |
2911 GRC_LCLCTRL_GPIO_OUTPUT2;
2912 if (no_gpio2) {
2913 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2914 GRC_LCLCTRL_GPIO_OUTPUT2);
2915 }
2916 tw32_wait_f(GRC_LOCAL_CTRL,
2917 tp->grc_local_ctrl | grc_local_ctrl,
2918 TG3_GRC_LCLCTL_PWRSW_DELAY);
2919
2920 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2921
2922 tw32_wait_f(GRC_LOCAL_CTRL,
2923 tp->grc_local_ctrl | grc_local_ctrl,
2924 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925
2926 if (!no_gpio2) {
2927 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2928 tw32_wait_f(GRC_LOCAL_CTRL,
2929 tp->grc_local_ctrl | grc_local_ctrl,
2930 TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 }
2932 }
2933 }
2934
2935 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2936 {
2937 u32 msg = 0;
2938
2939 /* Serialize power state transitions */
2940 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2941 return;
2942
2943 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2944 msg = TG3_GPIO_MSG_NEED_VAUX;
2945
2946 msg = tg3_set_function_status(tp, msg);
2947
2948 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2949 goto done;
2950
2951 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2952 tg3_pwrsrc_switch_to_vaux(tp);
2953 else
2954 tg3_pwrsrc_die_with_vmain(tp);
2955
2956 done:
2957 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2958 }
2959
2960 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2961 {
2962 bool need_vaux = false;
2963
2964 /* The GPIOs do something completely different on 57765. */
2965 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2966 return;
2967
2968 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2969 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2970 tg3_asic_rev(tp) == ASIC_REV_5720) {
2971 tg3_frob_aux_power_5717(tp, include_wol ?
2972 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2973 return;
2974 }
2975
2976 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2977 struct net_device *dev_peer;
2978
2979 dev_peer = pci_get_drvdata(tp->pdev_peer);
2980
2981 /* remove_one() may have been run on the peer. */
2982 if (dev_peer) {
2983 struct tg3 *tp_peer = netdev_priv(dev_peer);
2984
2985 if (tg3_flag(tp_peer, INIT_COMPLETE))
2986 return;
2987
2988 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2989 tg3_flag(tp_peer, ENABLE_ASF))
2990 need_vaux = true;
2991 }
2992 }
2993
2994 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2995 tg3_flag(tp, ENABLE_ASF))
2996 need_vaux = true;
2997
2998 if (need_vaux)
2999 tg3_pwrsrc_switch_to_vaux(tp);
3000 else
3001 tg3_pwrsrc_die_with_vmain(tp);
3002 }
3003
3004 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3005 {
3006 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3007 return 1;
3008 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3009 if (speed != SPEED_10)
3010 return 1;
3011 } else if (speed == SPEED_10)
3012 return 1;
3013
3014 return 0;
3015 }
3016
3017 static bool tg3_phy_power_bug(struct tg3 *tp)
3018 {
3019 switch (tg3_asic_rev(tp)) {
3020 case ASIC_REV_5700:
3021 case ASIC_REV_5704:
3022 return true;
3023 case ASIC_REV_5780:
3024 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3025 return true;
3026 return false;
3027 case ASIC_REV_5717:
3028 if (!tp->pci_fn)
3029 return true;
3030 return false;
3031 case ASIC_REV_5719:
3032 case ASIC_REV_5720:
3033 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3034 !tp->pci_fn)
3035 return true;
3036 return false;
3037 }
3038
3039 return false;
3040 }
3041
3042 static bool tg3_phy_led_bug(struct tg3 *tp)
3043 {
3044 switch (tg3_asic_rev(tp)) {
3045 case ASIC_REV_5719:
3046 case ASIC_REV_5720:
3047 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3048 !tp->pci_fn)
3049 return true;
3050 return false;
3051 }
3052
3053 return false;
3054 }
3055
3056 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3057 {
3058 u32 val;
3059
3060 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3061 return;
3062
3063 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3064 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3065 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3066 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3067
3068 sg_dig_ctrl |=
3069 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3070 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3071 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3072 }
3073 return;
3074 }
3075
3076 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3077 tg3_bmcr_reset(tp);
3078 val = tr32(GRC_MISC_CFG);
3079 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3080 udelay(40);
3081 return;
3082 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3083 u32 phytest;
3084 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3085 u32 phy;
3086
3087 tg3_writephy(tp, MII_ADVERTISE, 0);
3088 tg3_writephy(tp, MII_BMCR,
3089 BMCR_ANENABLE | BMCR_ANRESTART);
3090
3091 tg3_writephy(tp, MII_TG3_FET_TEST,
3092 phytest | MII_TG3_FET_SHADOW_EN);
3093 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3094 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3095 tg3_writephy(tp,
3096 MII_TG3_FET_SHDW_AUXMODE4,
3097 phy);
3098 }
3099 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3100 }
3101 return;
3102 } else if (do_low_power) {
3103 if (!tg3_phy_led_bug(tp))
3104 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3105 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3106
3107 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3108 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3109 MII_TG3_AUXCTL_PCTL_VREG_11V;
3110 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3111 }
3112
3113 /* The PHY should not be powered down on some chips because
3114 * of bugs.
3115 */
3116 if (tg3_phy_power_bug(tp))
3117 return;
3118
3119 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3120 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3121 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3122 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3123 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3124 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3125 }
3126
3127 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3128 }
3129
3130 /* tp->lock is held. */
3131 static int tg3_nvram_lock(struct tg3 *tp)
3132 {
3133 if (tg3_flag(tp, NVRAM)) {
3134 int i;
3135
3136 if (tp->nvram_lock_cnt == 0) {
3137 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3138 for (i = 0; i < 8000; i++) {
3139 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3140 break;
3141 udelay(20);
3142 }
3143 if (i == 8000) {
3144 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3145 return -ENODEV;
3146 }
3147 }
3148 tp->nvram_lock_cnt++;
3149 }
3150 return 0;
3151 }
3152
3153 /* tp->lock is held. */
3154 static void tg3_nvram_unlock(struct tg3 *tp)
3155 {
3156 if (tg3_flag(tp, NVRAM)) {
3157 if (tp->nvram_lock_cnt > 0)
3158 tp->nvram_lock_cnt--;
3159 if (tp->nvram_lock_cnt == 0)
3160 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3161 }
3162 }
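
/* Typical paired usage of the lock/access helpers (illustrative; it
 * mirrors tg3_nvram_read() below), with tp->lock held:
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	tg3_enable_nvram_access(tp);
 *	... NVRAM_ADDR / NVRAM_CMD register accesses ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */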
3163
3164 /* tp->lock is held. */
3165 static void tg3_enable_nvram_access(struct tg3 *tp)
3166 {
3167 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3168 u32 nvaccess = tr32(NVRAM_ACCESS);
3169
3170 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3171 }
3172 }
3173
3174 /* tp->lock is held. */
3175 static void tg3_disable_nvram_access(struct tg3 *tp)
3176 {
3177 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3178 u32 nvaccess = tr32(NVRAM_ACCESS);
3179
3180 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3181 }
3182 }
3183
3184 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3185 u32 offset, u32 *val)
3186 {
3187 u32 tmp;
3188 int i;
3189
3190 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3191 return -EINVAL;
3192
3193 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3194 EEPROM_ADDR_DEVID_MASK |
3195 EEPROM_ADDR_READ);
3196 tw32(GRC_EEPROM_ADDR,
3197 tmp |
3198 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3199 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3200 EEPROM_ADDR_ADDR_MASK) |
3201 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3202
3203 for (i = 0; i < 1000; i++) {
3204 tmp = tr32(GRC_EEPROM_ADDR);
3205
3206 if (tmp & EEPROM_ADDR_COMPLETE)
3207 break;
3208 msleep(1);
3209 }
3210 if (!(tmp & EEPROM_ADDR_COMPLETE))
3211 return -EBUSY;
3212
3213 tmp = tr32(GRC_EEPROM_DATA);
3214
3215 /*
3216 * The data will always be opposite the native endian
3217 * format. Perform a blind byteswap to compensate.
3218 */
3219 *val = swab32(tmp);
3220
3221 return 0;
3222 }
3223
3224 #define NVRAM_CMD_TIMEOUT 10000
3225
3226 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3227 {
3228 int i;
3229
3230 tw32(NVRAM_CMD, nvram_cmd);
3231 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3232 usleep_range(10, 40);
3233 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3234 udelay(10);
3235 break;
3236 }
3237 }
3238
3239 if (i == NVRAM_CMD_TIMEOUT)
3240 return -EBUSY;
3241
3242 return 0;
3243 }
3244
3245 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3246 {
3247 if (tg3_flag(tp, NVRAM) &&
3248 tg3_flag(tp, NVRAM_BUFFERED) &&
3249 tg3_flag(tp, FLASH) &&
3250 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3251 (tp->nvram_jedecnum == JEDEC_ATMEL))
3252
3253 addr = ((addr / tp->nvram_pagesize) <<
3254 ATMEL_AT45DB0X1B_PAGE_POS) +
3255 (addr % tp->nvram_pagesize);
3256
3257 return addr;
3258 }
3259
3260 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3261 {
3262 if (tg3_flag(tp, NVRAM) &&
3263 tg3_flag(tp, NVRAM_BUFFERED) &&
3264 tg3_flag(tp, FLASH) &&
3265 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3266 (tp->nvram_jedecnum == JEDEC_ATMEL))
3267
3268 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3269 tp->nvram_pagesize) +
3270 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3271
3272 return addr;
3273 }
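
/* Worked example, assuming the 264-byte page size and page position 9
 * (ATMEL_AT45DB0X1B_PAGE_POS) used for these Atmel parts: linear
 * offset 532 is page 2, byte 4, so the physical address becomes
 * (2 << 9) + 4 = 0x404. tg3_nvram_logical_addr() inverts the mapping.
 */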
3274
3275 /* NOTE: Data read in from NVRAM is byteswapped according to
3276 * the byteswapping settings for all other register accesses.
3277 * tg3 devices are BE devices, so on a BE machine, the data
3278 * returned will be exactly as it is seen in NVRAM. On a LE
3279 * machine, the 32-bit value will be byteswapped.
3280 */
3281 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3282 {
3283 int ret;
3284
3285 if (!tg3_flag(tp, NVRAM))
3286 return tg3_nvram_read_using_eeprom(tp, offset, val);
3287
3288 offset = tg3_nvram_phys_addr(tp, offset);
3289
3290 if (offset > NVRAM_ADDR_MSK)
3291 return -EINVAL;
3292
3293 ret = tg3_nvram_lock(tp);
3294 if (ret)
3295 return ret;
3296
3297 tg3_enable_nvram_access(tp);
3298
3299 tw32(NVRAM_ADDR, offset);
3300 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3301 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3302
3303 if (ret == 0)
3304 *val = tr32(NVRAM_RDDATA);
3305
3306 tg3_disable_nvram_access(tp);
3307
3308 tg3_nvram_unlock(tp);
3309
3310 return ret;
3311 }
3312
3313 /* Ensures NVRAM data is in bytestream format. */
3314 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3315 {
3316 u32 v;
3317 int res = tg3_nvram_read(tp, offset, &v);
3318 if (!res)
3319 *val = cpu_to_be32(v);
3320 return res;
3321 }
3322
3323 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3324 u32 offset, u32 len, u8 *buf)
3325 {
3326 int i, j, rc = 0;
3327 u32 val;
3328
3329 for (i = 0; i < len; i += 4) {
3330 u32 addr;
3331 __be32 data;
3332
3333 addr = offset + i;
3334
3335 memcpy(&data, buf + i, 4);
3336
3337 /*
3338 * The SEEPROM interface expects the data to always be opposite
3339 * the native endian format. We accomplish this by reversing
3340 * all the operations that would have been performed on the
3341 * data from a call to tg3_nvram_read_be32().
3342 */
3343 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3344
3345 val = tr32(GRC_EEPROM_ADDR);
3346 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3347
3348 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3349 EEPROM_ADDR_READ);
3350 tw32(GRC_EEPROM_ADDR, val |
3351 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3352 (addr & EEPROM_ADDR_ADDR_MASK) |
3353 EEPROM_ADDR_START |
3354 EEPROM_ADDR_WRITE);
3355
3356 for (j = 0; j < 1000; j++) {
3357 val = tr32(GRC_EEPROM_ADDR);
3358
3359 if (val & EEPROM_ADDR_COMPLETE)
3360 break;
3361 msleep(1);
3362 }
3363 if (!(val & EEPROM_ADDR_COMPLETE)) {
3364 rc = -EBUSY;
3365 break;
3366 }
3367 }
3368
3369 return rc;
3370 }
3371
3372 /* offset and length are dword aligned */
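/* Unbuffered flash needs a full read-modify-write of each page: read
 * the page into a bounce buffer, merge in the new bytes, issue a
 * write-enable, erase the page, write-enable again, and program the
 * page back one dword at a time with FIRST/LAST framing.
 */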
3373 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3374 u8 *buf)
3375 {
3376 int ret = 0;
3377 u32 pagesize = tp->nvram_pagesize;
3378 u32 pagemask = pagesize - 1;
3379 u32 nvram_cmd;
3380 u8 *tmp;
3381
3382 tmp = kmalloc(pagesize, GFP_KERNEL);
3383 if (tmp == NULL)
3384 return -ENOMEM;
3385
3386 while (len) {
3387 int j;
3388 u32 phy_addr, page_off, size;
3389
3390 phy_addr = offset & ~pagemask;
3391
3392 for (j = 0; j < pagesize; j += 4) {
3393 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3394 (__be32 *) (tmp + j));
3395 if (ret)
3396 break;
3397 }
3398 if (ret)
3399 break;
3400
3401 page_off = offset & pagemask;
3402 size = pagesize;
3403 if (len < size)
3404 size = len;
3405
3406 len -= size;
3407
3408 memcpy(tmp + page_off, buf, size);
3409
3410 offset = offset + (pagesize - page_off);
3411
3412 tg3_enable_nvram_access(tp);
3413
3414 /*
3415 * Before we can erase the flash page, we need
3416 * to issue a special "write enable" command.
3417 */
3418 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3419
3420 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3421 break;
3422
3423 /* Erase the target page */
3424 tw32(NVRAM_ADDR, phy_addr);
3425
3426 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3427 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3428
3429 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3430 break;
3431
3432 /* Issue another write enable to start the write. */
3433 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3434
3435 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3436 break;
3437
3438 for (j = 0; j < pagesize; j += 4) {
3439 __be32 data;
3440
3441 data = *((__be32 *) (tmp + j));
3442
3443 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3444
3445 tw32(NVRAM_ADDR, phy_addr + j);
3446
3447 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3448 NVRAM_CMD_WR;
3449
3450 if (j == 0)
3451 nvram_cmd |= NVRAM_CMD_FIRST;
3452 else if (j == (pagesize - 4))
3453 nvram_cmd |= NVRAM_CMD_LAST;
3454
3455 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3456 if (ret)
3457 break;
3458 }
3459 if (ret)
3460 break;
3461 }
3462
3463 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3464 tg3_nvram_exec_cmd(tp, nvram_cmd);
3465
3466 kfree(tmp);
3467
3468 return ret;
3469 }
3470
3471 /* offset and length are dword aligned */
3472 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3473 u8 *buf)
3474 {
3475 int i, ret = 0;
3476
3477 for (i = 0; i < len; i += 4, offset += 4) {
3478 u32 page_off, phy_addr, nvram_cmd;
3479 __be32 data;
3480
3481 memcpy(&data, buf + i, 4);
3482 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3483
3484 page_off = offset % tp->nvram_pagesize;
3485
3486 phy_addr = tg3_nvram_phys_addr(tp, offset);
3487
3488 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3489
3490 if (page_off == 0 || i == 0)
3491 nvram_cmd |= NVRAM_CMD_FIRST;
3492 if (page_off == (tp->nvram_pagesize - 4))
3493 nvram_cmd |= NVRAM_CMD_LAST;
3494
3495 if (i == (len - 4))
3496 nvram_cmd |= NVRAM_CMD_LAST;
3497
3498 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3499 !tg3_flag(tp, FLASH) ||
3500 !tg3_flag(tp, 57765_PLUS))
3501 tw32(NVRAM_ADDR, phy_addr);
3502
3503 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3504 !tg3_flag(tp, 5755_PLUS) &&
3505 (tp->nvram_jedecnum == JEDEC_ST) &&
3506 (nvram_cmd & NVRAM_CMD_FIRST)) {
3507 u32 cmd;
3508
3509 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3510 ret = tg3_nvram_exec_cmd(tp, cmd);
3511 if (ret)
3512 break;
3513 }
3514 if (!tg3_flag(tp, FLASH)) {
3515 /* We always do complete word writes to eeprom. */
3516 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3517 }
3518
3519 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3520 if (ret)
3521 break;
3522 }
3523 return ret;
3524 }
3525
3526 /* offset and length are dword aligned */
3527 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3528 {
3529 int ret;
3530
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3533 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3534 udelay(40);
3535 }
3536
3537 if (!tg3_flag(tp, NVRAM)) {
3538 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3539 } else {
3540 u32 grc_mode;
3541
3542 ret = tg3_nvram_lock(tp);
3543 if (ret)
3544 return ret;
3545
3546 tg3_enable_nvram_access(tp);
3547 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3548 tw32(NVRAM_WRITE1, 0x406);
3549
3550 grc_mode = tr32(GRC_MODE);
3551 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3552
3553 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3554 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3555 buf);
3556 } else {
3557 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3558 buf);
3559 }
3560
3561 grc_mode = tr32(GRC_MODE);
3562 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3563
3564 tg3_disable_nvram_access(tp);
3565 tg3_nvram_unlock(tp);
3566 }
3567
3568 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3569 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3570 udelay(40);
3571 }
3572
3573 return ret;
3574 }
3575
3576 #define RX_CPU_SCRATCH_BASE 0x30000
3577 #define RX_CPU_SCRATCH_SIZE 0x04000
3578 #define TX_CPU_SCRATCH_BASE 0x34000
3579 #define TX_CPU_SCRATCH_SIZE 0x04000
3580
3581 /* tp->lock is held. */
3582 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3583 {
3584 int i;
3585 const int iters = 10000;
3586
3587 for (i = 0; i < iters; i++) {
3588 tw32(cpu_base + CPU_STATE, 0xffffffff);
3589 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3590 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3591 break;
3592 if (pci_channel_offline(tp->pdev))
3593 return -EBUSY;
3594 }
3595
3596 return (i == iters) ? -EBUSY : 0;
3597 }
3598
3599 /* tp->lock is held. */
3600 static int tg3_rxcpu_pause(struct tg3 *tp)
3601 {
3602 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3603
3604 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3605 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3606 udelay(10);
3607
3608 return rc;
3609 }
3610
3611 /* tp->lock is held. */
3612 static int tg3_txcpu_pause(struct tg3 *tp)
3613 {
3614 return tg3_pause_cpu(tp, TX_CPU_BASE);
3615 }
3616
3617 /* tp->lock is held. */
3618 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3619 {
3620 tw32(cpu_base + CPU_STATE, 0xffffffff);
3621 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3622 }
3623
3624 /* tp->lock is held. */
3625 static void tg3_rxcpu_resume(struct tg3 *tp)
3626 {
3627 tg3_resume_cpu(tp, RX_CPU_BASE);
3628 }
3629
3630 /* tp->lock is held. */
3631 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3632 {
3633 int rc;
3634
3635 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3636
3637 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3638 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3639
3640 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3641 return 0;
3642 }
3643 if (cpu_base == RX_CPU_BASE) {
3644 rc = tg3_rxcpu_pause(tp);
3645 } else {
3646 /*
3647 * There is only an Rx CPU for the 5750 derivative in the
3648 * BCM4785.
3649 */
3650 if (tg3_flag(tp, IS_SSB_CORE))
3651 return 0;
3652
3653 rc = tg3_txcpu_pause(tp);
3654 }
3655
3656 if (rc) {
3657 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3658 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3659 return -ENODEV;
3660 }
3661
3662 /* Clear firmware's nvram arbitration. */
3663 if (tg3_flag(tp, NVRAM))
3664 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3665 return 0;
3666 }
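
/* CPU_STATE is written with all-ones before every halt attempt above,
 * presumably to clear latched state bits before CPU_MODE_HALT is set;
 * the 5906's VCPU is halted through GRC_VCPU_EXT_CTRL instead.
 */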
3667
3668 static int tg3_fw_data_len(struct tg3 *tp,
3669 const struct tg3_firmware_hdr *fw_hdr)
3670 {
3671 int fw_len;
3672
3673 /* Non-fragmented firmware images have one firmware header followed
3674  * by a contiguous chunk of data to be written. The length field in
3675  * that header is not the length of the data to be written but the
3676  * complete length of the bss. The data length is determined from
3677  * tp->fw->size minus the headers.
3678  *
3679  * Fragmented firmware images have a main header followed by multiple
3680  * fragments. Each fragment is identical to a non-fragmented image:
3681  * a firmware header followed by a contiguous chunk of data. In the
3682  * main header, the length field is unused and set to 0xffffffff.
3683  * In each fragment header the length is the entire size of that
3684  * fragment, i.e. fragment data + header length. The data length is
3685  * therefore the length field in the header minus TG3_FW_HDR_LEN.
3686  */
3687 if (tp->fw_len == 0xffffffff)
3688 fw_len = be32_to_cpu(fw_hdr->len);
3689 else
3690 fw_len = tp->fw->size;
3691
3692 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3693 }
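
/* Image layout sketch for the two cases described above:
 *
 *	non-fragmented:	[hdr: base_addr, len = bss size][data...]
 *	fragmented:	[main hdr: len = 0xffffffff]
 *			[frag hdr: len = hdr + data size][data...]
 *			[frag hdr ...][data...] ...
 */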
3694
3695 /* tp->lock is held. */
3696 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3697 u32 cpu_scratch_base, int cpu_scratch_size,
3698 const struct tg3_firmware_hdr *fw_hdr)
3699 {
3700 int err, i;
3701 void (*write_op)(struct tg3 *, u32, u32);
3702 int total_len = tp->fw->size;
3703
3704 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3705 netdev_err(tp->dev,
3706 "%s: Trying to load TX cpu firmware which is 5705\n",
3707 __func__);
3708 return -EINVAL;
3709 }
3710
3711 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3712 write_op = tg3_write_mem;
3713 else
3714 write_op = tg3_write_indirect_reg32;
3715
3716 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3717 /* It is possible that bootcode is still loading at this point.
3718 * Get the nvram lock first before halting the cpu.
3719 */
3720 int lock_err = tg3_nvram_lock(tp);
3721 err = tg3_halt_cpu(tp, cpu_base);
3722 if (!lock_err)
3723 tg3_nvram_unlock(tp);
3724 if (err)
3725 goto out;
3726
3727 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3728 write_op(tp, cpu_scratch_base + i, 0);
3729 tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 tw32(cpu_base + CPU_MODE,
3731 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3732 } else {
3733 /* Subtract additional main header for fragmented firmware and
3734 * advance to the first fragment
3735 */
3736 total_len -= TG3_FW_HDR_LEN;
3737 fw_hdr++;
3738 }
3739
3740 do {
3741 __be32 *fw_data = (__be32 *)(fw_hdr + 1);
3742 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3743 write_op(tp, cpu_scratch_base +
3744 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3745 (i * sizeof(u32)),
3746 be32_to_cpu(fw_data[i]));
3747
3748 total_len -= be32_to_cpu(fw_hdr->len);
3749
3750 /* Advance to next fragment */
3751 fw_hdr = (struct tg3_firmware_hdr *)
3752 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3753 } while (total_len > 0);
3754
3755 err = 0;
3756
3757 out:
3758 return err;
3759 }
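/* Sketch of the fragment walk in the loop above, assuming a
 * hypothetical fragmented image with payload lengths L1 and L2.
 * Byte offsets from tp->fw->data:
 *
 *   0                        main header (len == 0xffffffff, skipped)
 *   TG3_FW_HDR_LEN           fragment 1 header (len == TG3_FW_HDR_LEN + L1)
 *   2 * TG3_FW_HDR_LEN       fragment 1 data
 *   2 * TG3_FW_HDR_LEN + L1  fragment 2 header, and so on
 *
 * total_len starts at tp->fw->size minus the main header and shrinks by
 * each fragment's full length until it reaches zero.
 */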
3760
3761 /* tp->lock is held. */
3762 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3763 {
3764 int i;
3765 const int iters = 5;
3766
3767 tw32(cpu_base + CPU_STATE, 0xffffffff);
3768 tw32_f(cpu_base + CPU_PC, pc);
3769
3770 for (i = 0; i < iters; i++) {
3771 if (tr32(cpu_base + CPU_PC) == pc)
3772 break;
3773 tw32(cpu_base + CPU_STATE, 0xffffffff);
3774 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3775 tw32_f(cpu_base + CPU_PC, pc);
3776 udelay(1000);
3777 }
3778
3779 return (i == iters) ? -EBUSY : 0;
3780 }
3781
3782 /* tp->lock is held. */
3783 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3784 {
3785 const struct tg3_firmware_hdr *fw_hdr;
3786 int err;
3787
3788 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3789
3790 /* Firmware blob starts with version numbers, followed by
3791 * start address and length. We are setting complete length.
3792 * length = end_address_of_bss - start_address_of_text.
3793 * Remainder is the blob to be loaded contiguously
3794 * from start address. */
3795
3796 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3797 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3798 fw_hdr);
3799 if (err)
3800 return err;
3801
3802 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3803 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3804 fw_hdr);
3805 if (err)
3806 return err;
3807
3808 /* Now start up only the RX cpu. */
3809 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3810 be32_to_cpu(fw_hdr->base_addr));
3811 if (err) {
3812 netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x "
3813 "should be %08x\n", __func__,
3814 tr32(RX_CPU_BASE + CPU_PC),
3815 be32_to_cpu(fw_hdr->base_addr));
3816 return -ENODEV;
3817 }
3818
3819 tg3_rxcpu_resume(tp);
3820
3821 return 0;
3822 }
3823
3824 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3825 {
3826 const int iters = 1000;
3827 int i;
3828 u32 val;
3829
3830 /* Wait for boot code to complete initialization and enter the
3831 * service loop. It is then safe to download service patches.
3832 */
3833 for (i = 0; i < iters; i++) {
3834 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3835 break;
3836
3837 udelay(10);
3838 }
3839
3840 if (i == iters) {
3841 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3842 return -EBUSY;
3843 }
3844
3845 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3846 if (val & 0xff) {
3847 netdev_warn(tp->dev,
3848 "Other patches exist. Not downloading EEE patch\n");
3849 return -EEXIST;
3850 }
3851
3852 return 0;
3853 }
3854
3855 /* tp->lock is held. */
3856 static void tg3_load_57766_firmware(struct tg3 *tp)
3857 {
3858 struct tg3_firmware_hdr *fw_hdr;
3859
3860 if (!tg3_flag(tp, NO_NVRAM))
3861 return;
3862
3863 if (tg3_validate_rxcpu_state(tp))
3864 return;
3865
3866 if (!tp->fw)
3867 return;
3868
3869 /* This firmware blob has a different format than older firmware
3870 * releases, as described below. The main difference is that we have
3871 * fragmented data to be written to non-contiguous locations.
3872 *
3873 * In the beginning we have a firmware header identical to other
3874 * firmware, which consists of version, base addr and length. The length
3875 * here is unused and set to 0xffffffff.
3876 *
3877 * This is followed by a series of firmware fragments which are
3878 * individually identical to previous firmware, i.e. they have the
3879 * firmware header followed by the data for that fragment. The version
3880 * field of the individual fragment header is unused.
3881 */
3882
3883 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3884 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3885 return;
3886
3887 if (tg3_rxcpu_pause(tp))
3888 return;
3889
3890 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3891 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3892
3893 tg3_rxcpu_resume(tp);
3894 }
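/* Illustration of the guards above (readback values hypothetical):
 * tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP means the boot code
 * is parked in its service loop; a TG3_57766_FW_HANDSHAKE readback of,
 * say, 0x01 (nonzero low byte) means another patch already owns the
 * CPU, so the EEE patch is skipped with -EEXIST. Only an image whose
 * header base_addr equals TG3_57766_FW_BASE_ADDR is accepted.
 */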
3895
3896 /* tp->lock is held. */
3897 static int tg3_load_tso_firmware(struct tg3 *tp)
3898 {
3899 const struct tg3_firmware_hdr *fw_hdr;
3900 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3901 int err;
3902
3903 if (!tg3_flag(tp, FW_TSO))
3904 return 0;
3905
3906 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3907
3908 /* Firmware blob starts with version numbers, followed by
3909 * start address and length. We are setting complete length.
3910 * length = end_address_of_bss - start_address_of_text.
3911 * Remainder is the blob to be loaded contiguously
3912 * from start address. */
3913
3914 cpu_scratch_size = tp->fw_len;
3915
3916 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3917 cpu_base = RX_CPU_BASE;
3918 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3919 } else {
3920 cpu_base = TX_CPU_BASE;
3921 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3922 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3923 }
3924
3925 err = tg3_load_firmware_cpu(tp, cpu_base,
3926 cpu_scratch_base, cpu_scratch_size,
3927 fw_hdr);
3928 if (err)
3929 return err;
3930
3931 /* Now start up the cpu. */
3932 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3933 be32_to_cpu(fw_hdr->base_addr));
3934 if (err) {
3935 netdev_err(tp->dev,
3936 "%s fails to set CPU PC, is %08x should be %08x\n",
3937 __func__, tr32(cpu_base + CPU_PC),
3938 be32_to_cpu(fw_hdr->base_addr));
3939 return -ENODEV;
3940 }
3941
3942 tg3_resume_cpu(tp, cpu_base);
3943 return 0;
3944 }
3945
3946 /* tp->lock is held. */
3947 static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
3948 int index)
3949 {
3950 u32 addr_high, addr_low;
3951
3952 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3953 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3954 (mac_addr[4] << 8) | mac_addr[5]);
3955
3956 if (index < 4) {
3957 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3958 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3959 } else {
3960 index -= 4;
3961 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3962 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3963 }
3964 }
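/* Illustration with the example address 00:11:22:33:44:55: the packing
 * above yields
 *
 *   addr_high = (0x00 << 8) | 0x11 = 0x0011
 *   addr_low  = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55
 *             = 0x22334455
 *
 * and index 5 lands at MAC_EXTADDR_0_HIGH/LOW + 8, since the extended
 * slots restart their register offset at index 4.
 */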
3965
3966 /* tp->lock is held. */
3967 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3968 {
3969 u32 addr_high;
3970 int i;
3971
3972 for (i = 0; i < 4; i++) {
3973 if (i == 1 && skip_mac_1)
3974 continue;
3975 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3976 }
3977
3978 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3979 tg3_asic_rev(tp) == ASIC_REV_5704) {
3980 for (i = 4; i < 16; i++)
3981 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 }
3983
3984 addr_high = (tp->dev->dev_addr[0] +
3985 tp->dev->dev_addr[1] +
3986 tp->dev->dev_addr[2] +
3987 tp->dev->dev_addr[3] +
3988 tp->dev->dev_addr[4] +
3989 tp->dev->dev_addr[5]) &
3990 TX_BACKOFF_SEED_MASK;
3991 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3992 }
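/* Illustration: for 00:11:22:33:44:55 the seed above is
 * (0x00 + 0x11 + 0x22 + 0x33 + 0x44 + 0x55) & TX_BACKOFF_SEED_MASK =
 * 0xff & TX_BACKOFF_SEED_MASK, so different MAC addresses give the
 * transmitter different backoff seeds.
 */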
3993
3994 static void tg3_enable_register_access(struct tg3 *tp)
3995 {
3996 /*
3997 * Make sure register accesses (indirect or otherwise) will function
3998 * correctly.
3999 */
4000 pci_write_config_dword(tp->pdev,
4001 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4002 }
4003
4004 static int tg3_power_up(struct tg3 *tp)
4005 {
4006 int err;
4007
4008 tg3_enable_register_access(tp);
4009
4010 err = pci_set_power_state(tp->pdev, PCI_D0);
4011 if (!err) {
4012 /* Switch out of Vaux if it is a NIC */
4013 tg3_pwrsrc_switch_to_vmain(tp);
4014 } else {
4015 netdev_err(tp->dev, "Transition to D0 failed\n");
4016 }
4017
4018 return err;
4019 }
4020
4021 static int tg3_setup_phy(struct tg3 *, bool);
4022
4023 static void tg3_power_down_prepare(struct tg3 *tp)
4024 {
4025 u32 misc_host_ctrl;
4026 bool device_should_wake, do_low_power;
4027
4028 tg3_enable_register_access(tp);
4029
4030 /* Restore the CLKREQ setting. */
4031 if (tg3_flag(tp, CLKREQ_BUG))
4032 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4033 PCI_EXP_LNKCTL_CLKREQ_EN);
4034
4035 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4036 tw32(TG3PCI_MISC_HOST_CTRL,
4037 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4038
4039 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4040 tg3_flag(tp, WOL_ENABLE);
4041
4042 if (tg3_flag(tp, USE_PHYLIB)) {
4043 do_low_power = false;
4044 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4045 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4046 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4047 struct phy_device *phydev;
4048 u32 phyid;
4049
4050 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4051
4052 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4053
4054 tp->link_config.speed = phydev->speed;
4055 tp->link_config.duplex = phydev->duplex;
4056 tp->link_config.autoneg = phydev->autoneg;
4057 ethtool_convert_link_mode_to_legacy_u32(
4058 &tp->link_config.advertising,
4059 phydev->advertising);
4060
4061 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4062 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4063 advertising);
4064 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4065 advertising);
4066 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4067 advertising);
4068
4069 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4070 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4071 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4072 advertising);
4073 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4074 advertising);
4075 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076 advertising);
4077 } else {
4078 linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079 advertising);
4080 }
4081 }
4082
4083 linkmode_copy(phydev->advertising, advertising);
4084 phy_start_aneg(phydev);
4085
4086 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4087 if (phyid != PHY_ID_BCMAC131) {
4088 phyid &= PHY_BCM_OUI_MASK;
4089 if (phyid == PHY_BCM_OUI_1 ||
4090 phyid == PHY_BCM_OUI_2 ||
4091 phyid == PHY_BCM_OUI_3)
4092 do_low_power = true;
4093 }
4094 }
4095 } else {
4096 do_low_power = true;
4097
4098 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4099 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4100
4101 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4102 tg3_setup_phy(tp, false);
4103 }
4104
4105 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4106 u32 val;
4107
4108 val = tr32(GRC_VCPU_EXT_CTRL);
4109 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4110 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4111 int i;
4112 u32 val;
4113
4114 for (i = 0; i < 200; i++) {
4115 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4116 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4117 break;
4118 msleep(1);
4119 }
4120 }
4121 if (tg3_flag(tp, WOL_CAP))
4122 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4123 WOL_DRV_STATE_SHUTDOWN |
4124 WOL_DRV_WOL |
4125 WOL_SET_MAGIC_PKT);
4126
4127 if (device_should_wake) {
4128 u32 mac_mode;
4129
4130 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4131 if (do_low_power &&
4132 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4133 tg3_phy_auxctl_write(tp,
4134 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4135 MII_TG3_AUXCTL_PCTL_WOL_EN |
4136 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4137 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4138 udelay(40);
4139 }
4140
4141 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4142 mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 else if (tp->phy_flags &
4144 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4145 if (tp->link_config.active_speed == SPEED_1000)
4146 mac_mode = MAC_MODE_PORT_MODE_GMII;
4147 else
4148 mac_mode = MAC_MODE_PORT_MODE_MII;
4149 } else
4150 mac_mode = MAC_MODE_PORT_MODE_MII;
4151
4152 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4153 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4154 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4155 SPEED_100 : SPEED_10;
4156 if (tg3_5700_link_polarity(tp, speed))
4157 mac_mode |= MAC_MODE_LINK_POLARITY;
4158 else
4159 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4160 }
4161 } else {
4162 mac_mode = MAC_MODE_PORT_MODE_TBI;
4163 }
4164
4165 if (!tg3_flag(tp, 5750_PLUS))
4166 tw32(MAC_LED_CTRL, tp->led_ctrl);
4167
4168 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4169 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4170 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4171 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4172
4173 if (tg3_flag(tp, ENABLE_APE))
4174 mac_mode |= MAC_MODE_APE_TX_EN |
4175 MAC_MODE_APE_RX_EN |
4176 MAC_MODE_TDE_ENABLE;
4177
4178 tw32_f(MAC_MODE, mac_mode);
4179 udelay(100);
4180
4181 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4182 udelay(10);
4183 }
4184
4185 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4186 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4187 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4188 u32 base_val;
4189
4190 base_val = tp->pci_clock_ctrl;
4191 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4192 CLOCK_CTRL_TXCLK_DISABLE);
4193
4194 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4195 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4196 } else if (tg3_flag(tp, 5780_CLASS) ||
4197 tg3_flag(tp, CPMU_PRESENT) ||
4198 tg3_asic_rev(tp) == ASIC_REV_5906) {
4199 /* do nothing */
4200 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4201 u32 newbits1, newbits2;
4202
4203 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4204 tg3_asic_rev(tp) == ASIC_REV_5701) {
4205 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4206 CLOCK_CTRL_TXCLK_DISABLE |
4207 CLOCK_CTRL_ALTCLK);
4208 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209 } else if (tg3_flag(tp, 5705_PLUS)) {
4210 newbits1 = CLOCK_CTRL_625_CORE;
4211 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4212 } else {
4213 newbits1 = CLOCK_CTRL_ALTCLK;
4214 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4215 }
4216
4217 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4218 40);
4219
4220 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4221 40);
4222
4223 if (!tg3_flag(tp, 5705_PLUS)) {
4224 u32 newbits3;
4225
4226 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4227 tg3_asic_rev(tp) == ASIC_REV_5701) {
4228 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4229 CLOCK_CTRL_TXCLK_DISABLE |
4230 CLOCK_CTRL_44MHZ_CORE);
4231 } else {
4232 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4233 }
4234
4235 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4236 tp->pci_clock_ctrl | newbits3, 40);
4237 }
4238 }
4239
4240 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4241 tg3_power_down_phy(tp, do_low_power);
4242
4243 tg3_frob_aux_power(tp, true);
4244
4245 /* Workaround for unstable PLL clock */
4246 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4247 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4248 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4249 u32 val = tr32(0x7d00);
4250
4251 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4252 tw32(0x7d00, val);
4253 if (!tg3_flag(tp, ENABLE_ASF)) {
4254 int err;
4255
4256 err = tg3_nvram_lock(tp);
4257 tg3_halt_cpu(tp, RX_CPU_BASE);
4258 if (!err)
4259 tg3_nvram_unlock(tp);
4260 }
4261 }
4262
4263 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4264
4265 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4266
4267 return;
4268 }
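/* Recap of the shutdown path above (a sketch, not new behavior): mask
 * PCI interrupts, re-advertise a WOL-friendly speed (10/100 where
 * allowed), arm the WOL mailbox and magic-packet MAC mode when wakeup
 * is requested, gate or slow core clocks on chips that permit it,
 * power down the PHY when nothing needs to wake us, and finally post
 * RESET_KIND_SHUTDOWN to the firmware and the APE.
 */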
4269
4270 static void tg3_power_down(struct tg3 *tp)
4271 {
4272 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4273 pci_set_power_state(tp->pdev, PCI_D3hot);
4274 }
4275
4276 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4277 {
4278 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4279 case MII_TG3_AUX_STAT_10HALF:
4280 *speed = SPEED_10;
4281 *duplex = DUPLEX_HALF;
4282 break;
4283
4284 case MII_TG3_AUX_STAT_10FULL:
4285 *speed = SPEED_10;
4286 *duplex = DUPLEX_FULL;
4287 break;
4288
4289 case MII_TG3_AUX_STAT_100HALF:
4290 *speed = SPEED_100;
4291 *duplex = DUPLEX_HALF;
4292 break;
4293
4294 case MII_TG3_AUX_STAT_100FULL:
4295 *speed = SPEED_100;
4296 *duplex = DUPLEX_FULL;
4297 break;
4298
4299 case MII_TG3_AUX_STAT_1000HALF:
4300 *speed = SPEED_1000;
4301 *duplex = DUPLEX_HALF;
4302 break;
4303
4304 case MII_TG3_AUX_STAT_1000FULL:
4305 *speed = SPEED_1000;
4306 *duplex = DUPLEX_FULL;
4307 break;
4308
4309 default:
4310 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4311 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4312 SPEED_10;
4313 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4314 DUPLEX_HALF;
4315 break;
4316 }
4317 *speed = SPEED_UNKNOWN;
4318 *duplex = DUPLEX_UNKNOWN;
4319 break;
4320 }
4321 }
4322
4323 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4324 {
4325 int err = 0;
4326 u32 val, new_adv;
4327
4328 new_adv = ADVERTISE_CSMA;
4329 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4330 new_adv |= mii_advertise_flowctrl(flowctrl);
4331
4332 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4333 if (err)
4334 goto done;
4335
4336 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4337 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4338
4339 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4340 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4341 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4342
4343 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4344 if (err)
4345 goto done;
4346 }
4347
4348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4349 goto done;
4350
4351 tw32(TG3_CPMU_EEE_MODE,
4352 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4353
4354 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4355 if (!err) {
4356 u32 err2;
4357
4358 if (!tp->eee.eee_enabled)
4359 val = 0;
4360 else
4361 val = ethtool_adv_to_mmd_eee_adv_t(advertise);
4362
4363 mii_eee_cap1_mod_linkmode_t(tp->eee.advertised, val);
4364 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4365 if (err)
4366 val = 0;
4367
4368 switch (tg3_asic_rev(tp)) {
4369 case ASIC_REV_5717:
4370 case ASIC_REV_57765:
4371 case ASIC_REV_57766:
4372 case ASIC_REV_5719:
4373 /* If we advertised any EEE settings above... */
4374 if (val)
4375 val = MII_TG3_DSP_TAP26_ALNOKO |
4376 MII_TG3_DSP_TAP26_RMRXSTO |
4377 MII_TG3_DSP_TAP26_OPCSINPT;
4378 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4379 fallthrough;
4380 case ASIC_REV_5720:
4381 case ASIC_REV_5762:
4382 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4383 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4384 MII_TG3_DSP_CH34TP2_HIBW01);
4385 }
4386
4387 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4388 if (!err)
4389 err = err2;
4390 }
4391
4392 done:
4393 return err;
4394 }
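/* Illustration (assumed configuration): advertising 100baseT-Full with
 * symmetric pause, the MII_ADVERTISE word built above is
 * ADVERTISE_CSMA | ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP; on a
 * gigabit-capable PHY, 1000baseT-Full additionally sets
 * ADVERTISE_1000FULL in MII_CTRL1000 (plus the master bits on
 * 5701 A0/B0).
 */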
4395
4396 static void tg3_phy_copper_begin(struct tg3 *tp)
4397 {
4398 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4399 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4400 u32 adv, fc;
4401
4402 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4403 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4404 adv = ADVERTISED_10baseT_Half |
4405 ADVERTISED_10baseT_Full;
4406 if (tg3_flag(tp, WOL_SPEED_100MB))
4407 adv |= ADVERTISED_100baseT_Half |
4408 ADVERTISED_100baseT_Full;
4409 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4410 if (!(tp->phy_flags &
4411 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4412 adv |= ADVERTISED_1000baseT_Half;
4413 adv |= ADVERTISED_1000baseT_Full;
4414 }
4415
4416 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4417 } else {
4418 adv = tp->link_config.advertising;
4419 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4420 adv &= ~(ADVERTISED_1000baseT_Half |
4421 ADVERTISED_1000baseT_Full);
4422
4423 fc = tp->link_config.flowctrl;
4424 }
4425
4426 tg3_phy_autoneg_cfg(tp, adv, fc);
4427
4428 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4429 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4430 /* Normally during power down we want to autonegotiate
4431 * the lowest possible speed for WOL. However, to avoid
4432 * link flap, we leave it untouched.
4433 */
4434 return;
4435 }
4436
4437 tg3_writephy(tp, MII_BMCR,
4438 BMCR_ANENABLE | BMCR_ANRESTART);
4439 } else {
4440 int i;
4441 u32 bmcr, orig_bmcr;
4442
4443 tp->link_config.active_speed = tp->link_config.speed;
4444 tp->link_config.active_duplex = tp->link_config.duplex;
4445
4446 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4447 /* With autoneg disabled, 5715 only links up when the
4448 * advertisement register has the configured speed
4449 * enabled.
4450 */
4451 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4452 }
4453
4454 bmcr = 0;
4455 switch (tp->link_config.speed) {
4456 default:
4457 case SPEED_10:
4458 break;
4459
4460 case SPEED_100:
4461 bmcr |= BMCR_SPEED100;
4462 break;
4463
4464 case SPEED_1000:
4465 bmcr |= BMCR_SPEED1000;
4466 break;
4467 }
4468
4469 if (tp->link_config.duplex == DUPLEX_FULL)
4470 bmcr |= BMCR_FULLDPLX;
4471
4472 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4473 (bmcr != orig_bmcr)) {
4474 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4475 for (i = 0; i < 1500; i++) {
4476 u32 tmp;
4477
4478 udelay(10);
4479 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4480 tg3_readphy(tp, MII_BMSR, &tmp))
4481 continue;
4482 if (!(tmp & BMSR_LSTATUS)) {
4483 udelay(40);
4484 break;
4485 }
4486 }
4487 tg3_writephy(tp, MII_BMCR, bmcr);
4488 udelay(40);
4489 }
4490 }
4491 }
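/* Note on the forced-mode branch above: 100 Mb/s full duplex yields
 * bmcr == BMCR_SPEED100 | BMCR_FULLDPLX. The transient BMCR_LOOPBACK
 * write drops the link first, and the loop waits up to ~15 ms
 * (1500 x 10 us) for BMSR to report link down before the real mode is
 * written.
 */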
4492
4493 static int tg3_phy_pull_config(struct tg3 *tp)
4494 {
4495 int err;
4496 u32 val;
4497
4498 err = tg3_readphy(tp, MII_BMCR, &val);
4499 if (err)
4500 goto done;
4501
4502 if (!(val & BMCR_ANENABLE)) {
4503 tp->link_config.autoneg = AUTONEG_DISABLE;
4504 tp->link_config.advertising = 0;
4505 tg3_flag_clear(tp, PAUSE_AUTONEG);
4506
4507 err = -EIO;
4508
4509 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4510 case 0:
4511 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4512 goto done;
4513
4514 tp->link_config.speed = SPEED_10;
4515 break;
4516 case BMCR_SPEED100:
4517 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4518 goto done;
4519
4520 tp->link_config.speed = SPEED_100;
4521 break;
4522 case BMCR_SPEED1000:
4523 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4524 tp->link_config.speed = SPEED_1000;
4525 break;
4526 }
4527 fallthrough;
4528 default:
4529 goto done;
4530 }
4531
4532 if (val & BMCR_FULLDPLX)
4533 tp->link_config.duplex = DUPLEX_FULL;
4534 else
4535 tp->link_config.duplex = DUPLEX_HALF;
4536
4537 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4538
4539 err = 0;
4540 goto done;
4541 }
4542
4543 tp->link_config.autoneg = AUTONEG_ENABLE;
4544 tp->link_config.advertising = ADVERTISED_Autoneg;
4545 tg3_flag_set(tp, PAUSE_AUTONEG);
4546
4547 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4548 u32 adv;
4549
4550 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4551 if (err)
4552 goto done;
4553
4554 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4555 tp->link_config.advertising |= adv | ADVERTISED_TP;
4556
4557 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4558 } else {
4559 tp->link_config.advertising |= ADVERTISED_FIBRE;
4560 }
4561
4562 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4563 u32 adv;
4564
4565 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4566 err = tg3_readphy(tp, MII_CTRL1000, &val);
4567 if (err)
4568 goto done;
4569
4570 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4571 } else {
4572 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4573 if (err)
4574 goto done;
4575
4576 adv = tg3_decode_flowctrl_1000X(val);
4577 tp->link_config.flowctrl = adv;
4578
4579 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4580 adv = mii_adv_to_ethtool_adv_x(val);
4581 }
4582
4583 tp->link_config.advertising |= adv;
4584 }
4585
4586 done:
4587 return err;
4588 }
4589
4590 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4591 {
4592 int err;
4593
4594 /* Turn off tap power management and set the extended packet
4595 * length bit. */
4596 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4597
4598 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4599 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4600 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4601 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4602 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4603
4604 udelay(40);
4605
4606 return err;
4607 }
4608
4609 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4610 {
4611 struct ethtool_keee eee = {};
4612
4613 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4614 return true;
4615
4616 tg3_eee_pull_config(tp, &eee);
4617
4618 if (tp->eee.eee_enabled) {
4619 if (!linkmode_equal(tp->eee.advertised, eee.advertised) ||
4620 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4621 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4622 return false;
4623 } else {
4624 /* EEE is disabled but we're advertising */
4625 if (!linkmode_empty(eee.advertised))
4626 return false;
4627 }
4628
4629 return true;
4630 }
4631
4632 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4633 {
4634 u32 advmsk, tgtadv, advertising;
4635
4636 advertising = tp->link_config.advertising;
4637 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4638
4639 advmsk = ADVERTISE_ALL;
4640 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4641 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4642 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4643 }
4644
4645 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4646 return false;
4647
4648 if ((*lcladv & advmsk) != tgtadv)
4649 return false;
4650
4651 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4652 u32 tg3_ctrl;
4653
4654 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4655
4656 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4657 return false;
4658
4659 if (tgtadv &&
4660 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4661 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4662 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4663 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4664 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4665 } else {
4666 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4667 }
4668
4669 if (tg3_ctrl != tgtadv)
4670 return false;
4671 }
4672
4673 return true;
4674 }
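/* Illustration (assumed values): with advertising ==
 * ADVERTISED_100baseT_Full and full-duplex flow control, tgtadv above
 * is ADVERTISE_100FULL | mii_advertise_flowctrl(flowctrl). If the
 * PHY's MII_ADVERTISE, masked by advmsk, differs from that, the
 * negotiated configuration is stale and the caller will not treat the
 * link as up.
 */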
4675
4676 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4677 {
4678 u32 lpeth = 0;
4679
4680 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4681 u32 val;
4682
4683 if (tg3_readphy(tp, MII_STAT1000, &val))
4684 return false;
4685
4686 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4687 }
4688
4689 if (tg3_readphy(tp, MII_LPA, rmtadv))
4690 return false;
4691
4692 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4693 tp->link_config.rmt_adv = lpeth;
4694
4695 return true;
4696 }
4697
4698 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4699 {
4700 if (curr_link_up != tp->link_up) {
4701 if (curr_link_up) {
4702 netif_carrier_on(tp->dev);
4703 } else {
4704 netif_carrier_off(tp->dev);
4705 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4706 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4707 }
4708
4709 tg3_link_report(tp);
4710 return true;
4711 }
4712
4713 return false;
4714 }
4715
4716 static void tg3_clear_mac_status(struct tg3 *tp)
4717 {
4718 tw32(MAC_EVENT, 0);
4719
4720 tw32_f(MAC_STATUS,
4721 MAC_STATUS_SYNC_CHANGED |
4722 MAC_STATUS_CFG_CHANGED |
4723 MAC_STATUS_MI_COMPLETION |
4724 MAC_STATUS_LNKSTATE_CHANGED);
4725 udelay(40);
4726 }
4727
4728 static void tg3_setup_eee(struct tg3 *tp)
4729 {
4730 u32 val;
4731
4732 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4733 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4734 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4735 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4736
4737 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4738
4739 tw32_f(TG3_CPMU_EEE_CTRL,
4740 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4741
4742 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4743 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4744 TG3_CPMU_EEEMD_LPI_IN_RX |
4745 TG3_CPMU_EEEMD_EEE_ENABLE;
4746
4747 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4748 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4749
4750 if (tg3_flag(tp, ENABLE_APE))
4751 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4752
4753 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4754
4755 tw32_f(TG3_CPMU_EEE_DBTMR1,
4756 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4757 (tp->eee.tx_lpi_timer & 0xffff));
4758
4759 tw32_f(TG3_CPMU_EEE_DBTMR2,
4760 TG3_CPMU_DBTMR2_APE_TX_2047US |
4761 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4762 }
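/* Illustration (value hypothetical): with tp->eee.tx_lpi_timer ==
 * 0x1234, the DBTMR1 write above programs
 * TG3_CPMU_DBTMR1_PCIEXIT_2047US | 0x1234; only the low 16 bits of the
 * timer value are used.
 */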
4763
4764 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4765 {
4766 bool current_link_up;
4767 u32 bmsr, val;
4768 u32 lcl_adv, rmt_adv;
4769 u32 current_speed;
4770 u8 current_duplex;
4771 int i, err;
4772
4773 tg3_clear_mac_status(tp);
4774
4775 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4776 tw32_f(MAC_MI_MODE,
4777 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4778 udelay(80);
4779 }
4780
4781 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4782
4783 /* Some third-party PHYs need to be reset on link going
4784 * down.
4785 */
4786 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4787 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4788 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4789 tp->link_up) {
4790 tg3_readphy(tp, MII_BMSR, &bmsr);
4791 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4792 !(bmsr & BMSR_LSTATUS))
4793 force_reset = true;
4794 }
4795 if (force_reset)
4796 tg3_phy_reset(tp);
4797
4798 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4799 tg3_readphy(tp, MII_BMSR, &bmsr);
4800 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4801 !tg3_flag(tp, INIT_COMPLETE))
4802 bmsr = 0;
4803
4804 if (!(bmsr & BMSR_LSTATUS)) {
4805 err = tg3_init_5401phy_dsp(tp);
4806 if (err)
4807 return err;
4808
4809 tg3_readphy(tp, MII_BMSR, &bmsr);
4810 for (i = 0; i < 1000; i++) {
4811 udelay(10);
4812 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4813 (bmsr & BMSR_LSTATUS)) {
4814 udelay(40);
4815 break;
4816 }
4817 }
4818
4819 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4820 TG3_PHY_REV_BCM5401_B0 &&
4821 !(bmsr & BMSR_LSTATUS) &&
4822 tp->link_config.active_speed == SPEED_1000) {
4823 err = tg3_phy_reset(tp);
4824 if (!err)
4825 err = tg3_init_5401phy_dsp(tp);
4826 if (err)
4827 return err;
4828 }
4829 }
4830 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4831 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4832 /* 5701 {A0,B0} CRC bug workaround */
4833 tg3_writephy(tp, 0x15, 0x0a75);
4834 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4835 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4836 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4837 }
4838
4839 /* Clear pending interrupts... */
4840 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4841 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4842
4843 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4844 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4845 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4846 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4847
4848 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4849 tg3_asic_rev(tp) == ASIC_REV_5701) {
4850 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4851 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4852 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4853 else
4854 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4855 }
4856
4857 current_link_up = false;
4858 current_speed = SPEED_UNKNOWN;
4859 current_duplex = DUPLEX_UNKNOWN;
4860 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4861 tp->link_config.rmt_adv = 0;
4862
4863 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4864 err = tg3_phy_auxctl_read(tp,
4865 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4866 &val);
4867 if (!err && !(val & (1 << 10))) {
4868 tg3_phy_auxctl_write(tp,
4869 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870 val | (1 << 10));
4871 goto relink;
4872 }
4873 }
4874
4875 bmsr = 0;
4876 for (i = 0; i < 100; i++) {
4877 tg3_readphy(tp, MII_BMSR, &bmsr);
4878 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4879 (bmsr & BMSR_LSTATUS))
4880 break;
4881 udelay(40);
4882 }
4883
4884 if (bmsr & BMSR_LSTATUS) {
4885 u32 aux_stat, bmcr;
4886
4887 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4888 for (i = 0; i < 2000; i++) {
4889 udelay(10);
4890 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4891 aux_stat)
4892 break;
4893 }
4894
4895 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4896 &current_speed,
4897 &current_duplex);
4898
4899 bmcr = 0;
4900 for (i = 0; i < 200; i++) {
4901 tg3_readphy(tp, MII_BMCR, &bmcr);
4902 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4903 continue;
4904 if (bmcr && bmcr != 0x7fff)
4905 break;
4906 udelay(10);
4907 }
4908
4909 lcl_adv = 0;
4910 rmt_adv = 0;
4911
4912 tp->link_config.active_speed = current_speed;
4913 tp->link_config.active_duplex = current_duplex;
4914
4915 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4917
4918 if ((bmcr & BMCR_ANENABLE) &&
4919 eee_config_ok &&
4920 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4921 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4922 current_link_up = true;
4923
4924 /* Changes to the EEE settings take effect only after a phy
4925 * reset. If we have skipped a reset due to Link Flap
4926 * Avoidance being enabled, do it now.
4927 */
4928 if (!eee_config_ok &&
4929 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4930 !force_reset) {
4931 tg3_setup_eee(tp);
4932 tg3_phy_reset(tp);
4933 }
4934 } else {
4935 if (!(bmcr & BMCR_ANENABLE) &&
4936 tp->link_config.speed == current_speed &&
4937 tp->link_config.duplex == current_duplex) {
4938 current_link_up = true;
4939 }
4940 }
4941
4942 if (current_link_up &&
4943 tp->link_config.active_duplex == DUPLEX_FULL) {
4944 u32 reg, bit;
4945
4946 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4947 reg = MII_TG3_FET_GEN_STAT;
4948 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4949 } else {
4950 reg = MII_TG3_EXT_STAT;
4951 bit = MII_TG3_EXT_STAT_MDIX;
4952 }
4953
4954 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4955 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4956
4957 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4958 }
4959 }
4960
4961 relink:
4962 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4963 tg3_phy_copper_begin(tp);
4964
4965 if (tg3_flag(tp, ROBOSWITCH)) {
4966 current_link_up = true;
4967 /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4968 current_speed = SPEED_1000;
4969 current_duplex = DUPLEX_FULL;
4970 tp->link_config.active_speed = current_speed;
4971 tp->link_config.active_duplex = current_duplex;
4972 }
4973
4974 tg3_readphy(tp, MII_BMSR, &bmsr);
4975 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4976 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4977 current_link_up = true;
4978 }
4979
4980 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4981 if (current_link_up) {
4982 if (tp->link_config.active_speed == SPEED_100 ||
4983 tp->link_config.active_speed == SPEED_10)
4984 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4985 else
4986 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4987 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4988 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989 else
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991
4992 /* In order for the 5750 core in the BCM4785 chip to work properly
4993 * in RGMII mode, the LED Control Register must be set up.
4994 */
4995 if (tg3_flag(tp, RGMII_MODE)) {
4996 u32 led_ctrl = tr32(MAC_LED_CTRL);
4997 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4998
4999 if (tp->link_config.active_speed == SPEED_10)
5000 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5001 else if (tp->link_config.active_speed == SPEED_100)
5002 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5003 LED_CTRL_100MBPS_ON);
5004 else if (tp->link_config.active_speed == SPEED_1000)
5005 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5006 LED_CTRL_1000MBPS_ON);
5007
5008 tw32(MAC_LED_CTRL, led_ctrl);
5009 udelay(40);
5010 }
5011
5012 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5013 if (tp->link_config.active_duplex == DUPLEX_HALF)
5014 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5015
5016 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5017 if (current_link_up &&
5018 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5019 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5020 else
5021 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5022 }
5023
5024 /* ??? Without this setting Netgear GA302T PHY does not
5025 * ??? send/receive packets...
5026 */
5027 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5028 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5029 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5030 tw32_f(MAC_MI_MODE, tp->mi_mode);
5031 udelay(80);
5032 }
5033
5034 tw32_f(MAC_MODE, tp->mac_mode);
5035 udelay(40);
5036
5037 tg3_phy_eee_adjust(tp, current_link_up);
5038
5039 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5040 /* Polled via timer. */
5041 tw32_f(MAC_EVENT, 0);
5042 } else {
5043 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5044 }
5045 udelay(40);
5046
5047 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5048 current_link_up &&
5049 tp->link_config.active_speed == SPEED_1000 &&
5050 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5051 udelay(120);
5052 tw32_f(MAC_STATUS,
5053 (MAC_STATUS_SYNC_CHANGED |
5054 MAC_STATUS_CFG_CHANGED));
5055 udelay(40);
5056 tg3_write_mem(tp,
5057 NIC_SRAM_FIRMWARE_MBOX,
5058 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5059 }
5060
5061 /* Prevent send BD corruption. */
5062 if (tg3_flag(tp, CLKREQ_BUG)) {
5063 if (tp->link_config.active_speed == SPEED_100 ||
5064 tp->link_config.active_speed == SPEED_10)
5065 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5066 PCI_EXP_LNKCTL_CLKREQ_EN);
5067 else
5068 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5069 PCI_EXP_LNKCTL_CLKREQ_EN);
5070 }
5071
5072 tg3_test_and_report_link_chg(tp, current_link_up);
5073
5074 return 0;
5075 }
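/* Recap of the copper link path above (a sketch, not new behavior):
 * clear MAC status, reset the PHY if forced or if the link dropped on
 * affected chips, poll BMSR for link, decode MII_TG3_AUX_STAT into
 * speed/duplex, check the negotiated advertisement (and EEE config)
 * against what was requested, then program MAC_MODE, the LEDs and
 * CLKREQ to match, and report any link change.
 */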
5076
5077 struct tg3_fiber_aneginfo {
5078 int state;
5079 #define ANEG_STATE_UNKNOWN 0
5080 #define ANEG_STATE_AN_ENABLE 1
5081 #define ANEG_STATE_RESTART_INIT 2
5082 #define ANEG_STATE_RESTART 3
5083 #define ANEG_STATE_DISABLE_LINK_OK 4
5084 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5085 #define ANEG_STATE_ABILITY_DETECT 6
5086 #define ANEG_STATE_ACK_DETECT_INIT 7
5087 #define ANEG_STATE_ACK_DETECT 8
5088 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5089 #define ANEG_STATE_COMPLETE_ACK 10
5090 #define ANEG_STATE_IDLE_DETECT_INIT 11
5091 #define ANEG_STATE_IDLE_DETECT 12
5092 #define ANEG_STATE_LINK_OK 13
5093 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5094 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5095
5096 u32 flags;
5097 #define MR_AN_ENABLE 0x00000001
5098 #define MR_RESTART_AN 0x00000002
5099 #define MR_AN_COMPLETE 0x00000004
5100 #define MR_PAGE_RX 0x00000008
5101 #define MR_NP_LOADED 0x00000010
5102 #define MR_TOGGLE_TX 0x00000020
5103 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5104 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5105 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5106 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5107 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5108 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5109 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5110 #define MR_TOGGLE_RX 0x00002000
5111 #define MR_NP_RX 0x00004000
5112
5113 #define MR_LINK_OK 0x80000000
5114
5115 unsigned long link_time, cur_time;
5116
5117 u32 ability_match_cfg;
5118 int ability_match_count;
5119
5120 char ability_match, idle_match, ack_match;
5121
5122 u32 txconfig, rxconfig;
5123 #define ANEG_CFG_NP 0x00000080
5124 #define ANEG_CFG_ACK 0x00000040
5125 #define ANEG_CFG_RF2 0x00000020
5126 #define ANEG_CFG_RF1 0x00000010
5127 #define ANEG_CFG_PS2 0x00000001
5128 #define ANEG_CFG_PS1 0x00008000
5129 #define ANEG_CFG_HD 0x00004000
5130 #define ANEG_CFG_FD 0x00002000
5131 #define ANEG_CFG_INVAL 0x00001f06
5132
5133 };
5134 #define ANEG_OK 0
5135 #define ANEG_DONE 1
5136 #define ANEG_TIMER_ENAB 2
5137 #define ANEG_FAILED -1
5138
5139 #define ANEG_STATE_SETTLE_TIME 10000
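/* Expected happy path through the state machine below, assuming a
 * well-behaved link partner (a sketch):
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK (returns ANEG_DONE)
 *
 * States that need to settle return ANEG_TIMER_ENAB so the caller keeps
 * ticking cur_time; ANEG_STATE_SETTLE_TIME is measured in those ticks.
 */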
5140
5141 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5142 struct tg3_fiber_aneginfo *ap)
5143 {
5144 u16 flowctrl;
5145 unsigned long delta;
5146 u32 rx_cfg_reg;
5147 int ret;
5148
5149 if (ap->state == ANEG_STATE_UNKNOWN) {
5150 ap->rxconfig = 0;
5151 ap->link_time = 0;
5152 ap->cur_time = 0;
5153 ap->ability_match_cfg = 0;
5154 ap->ability_match_count = 0;
5155 ap->ability_match = 0;
5156 ap->idle_match = 0;
5157 ap->ack_match = 0;
5158 }
5159 ap->cur_time++;
5160
5161 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5162 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5163
5164 if (rx_cfg_reg != ap->ability_match_cfg) {
5165 ap->ability_match_cfg = rx_cfg_reg;
5166 ap->ability_match = 0;
5167 ap->ability_match_count = 0;
5168 } else {
5169 if (++ap->ability_match_count > 1) {
5170 ap->ability_match = 1;
5171 ap->ability_match_cfg = rx_cfg_reg;
5172 }
5173 }
5174 if (rx_cfg_reg & ANEG_CFG_ACK)
5175 ap->ack_match = 1;
5176 else
5177 ap->ack_match = 0;
5178
5179 ap->idle_match = 0;
5180 } else {
5181 ap->idle_match = 1;
5182 ap->ability_match_cfg = 0;
5183 ap->ability_match_count = 0;
5184 ap->ability_match = 0;
5185 ap->ack_match = 0;
5186
5187 rx_cfg_reg = 0;
5188 }
5189
5190 ap->rxconfig = rx_cfg_reg;
5191 ret = ANEG_OK;
5192
5193 switch (ap->state) {
5194 case ANEG_STATE_UNKNOWN:
5195 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5196 ap->state = ANEG_STATE_AN_ENABLE;
5197
5198 fallthrough;
5199 case ANEG_STATE_AN_ENABLE:
5200 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5201 if (ap->flags & MR_AN_ENABLE) {
5202 ap->link_time = 0;
5203 ap->cur_time = 0;
5204 ap->ability_match_cfg = 0;
5205 ap->ability_match_count = 0;
5206 ap->ability_match = 0;
5207 ap->idle_match = 0;
5208 ap->ack_match = 0;
5209
5210 ap->state = ANEG_STATE_RESTART_INIT;
5211 } else {
5212 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5213 }
5214 break;
5215
5216 case ANEG_STATE_RESTART_INIT:
5217 ap->link_time = ap->cur_time;
5218 ap->flags &= ~(MR_NP_LOADED);
5219 ap->txconfig = 0;
5220 tw32(MAC_TX_AUTO_NEG, 0);
5221 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5222 tw32_f(MAC_MODE, tp->mac_mode);
5223 udelay(40);
5224
5225 ret = ANEG_TIMER_ENAB;
5226 ap->state = ANEG_STATE_RESTART;
5227
5228 fallthrough;
5229 case ANEG_STATE_RESTART:
5230 delta = ap->cur_time - ap->link_time;
5231 if (delta > ANEG_STATE_SETTLE_TIME)
5232 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5233 else
5234 ret = ANEG_TIMER_ENAB;
5235 break;
5236
5237 case ANEG_STATE_DISABLE_LINK_OK:
5238 ret = ANEG_DONE;
5239 break;
5240
5241 case ANEG_STATE_ABILITY_DETECT_INIT:
5242 ap->flags &= ~(MR_TOGGLE_TX);
5243 ap->txconfig = ANEG_CFG_FD;
5244 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5245 if (flowctrl & ADVERTISE_1000XPAUSE)
5246 ap->txconfig |= ANEG_CFG_PS1;
5247 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5248 ap->txconfig |= ANEG_CFG_PS2;
5249 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5250 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5251 tw32_f(MAC_MODE, tp->mac_mode);
5252 udelay(40);
5253
5254 ap->state = ANEG_STATE_ABILITY_DETECT;
5255 break;
5256
5257 case ANEG_STATE_ABILITY_DETECT:
5258 if (ap->ability_match != 0 && ap->rxconfig != 0)
5259 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5260 break;
5261
5262 case ANEG_STATE_ACK_DETECT_INIT:
5263 ap->txconfig |= ANEG_CFG_ACK;
5264 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5265 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5266 tw32_f(MAC_MODE, tp->mac_mode);
5267 udelay(40);
5268
5269 ap->state = ANEG_STATE_ACK_DETECT;
5270
5271 fallthrough;
5272 case ANEG_STATE_ACK_DETECT:
5273 if (ap->ack_match != 0) {
5274 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5275 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5276 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5277 } else {
5278 ap->state = ANEG_STATE_AN_ENABLE;
5279 }
5280 } else if (ap->ability_match != 0 &&
5281 ap->rxconfig == 0) {
5282 ap->state = ANEG_STATE_AN_ENABLE;
5283 }
5284 break;
5285
5286 case ANEG_STATE_COMPLETE_ACK_INIT:
5287 if (ap->rxconfig & ANEG_CFG_INVAL) {
5288 ret = ANEG_FAILED;
5289 break;
5290 }
5291 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5292 MR_LP_ADV_HALF_DUPLEX |
5293 MR_LP_ADV_SYM_PAUSE |
5294 MR_LP_ADV_ASYM_PAUSE |
5295 MR_LP_ADV_REMOTE_FAULT1 |
5296 MR_LP_ADV_REMOTE_FAULT2 |
5297 MR_LP_ADV_NEXT_PAGE |
5298 MR_TOGGLE_RX |
5299 MR_NP_RX);
5300 if (ap->rxconfig & ANEG_CFG_FD)
5301 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5302 if (ap->rxconfig & ANEG_CFG_HD)
5303 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5304 if (ap->rxconfig & ANEG_CFG_PS1)
5305 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5306 if (ap->rxconfig & ANEG_CFG_PS2)
5307 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5308 if (ap->rxconfig & ANEG_CFG_RF1)
5309 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5310 if (ap->rxconfig & ANEG_CFG_RF2)
5311 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5312 if (ap->rxconfig & ANEG_CFG_NP)
5313 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5314
5315 ap->link_time = ap->cur_time;
5316
5317 ap->flags ^= (MR_TOGGLE_TX);
5318 if (ap->rxconfig & 0x0008)
5319 ap->flags |= MR_TOGGLE_RX;
5320 if (ap->rxconfig & ANEG_CFG_NP)
5321 ap->flags |= MR_NP_RX;
5322 ap->flags |= MR_PAGE_RX;
5323
5324 ap->state = ANEG_STATE_COMPLETE_ACK;
5325 ret = ANEG_TIMER_ENAB;
5326 break;
5327
5328 case ANEG_STATE_COMPLETE_ACK:
5329 if (ap->ability_match != 0 &&
5330 ap->rxconfig == 0) {
5331 ap->state = ANEG_STATE_AN_ENABLE;
5332 break;
5333 }
5334 delta = ap->cur_time - ap->link_time;
5335 if (delta > ANEG_STATE_SETTLE_TIME) {
5336 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5337 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5338 } else {
5339 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5340 !(ap->flags & MR_NP_RX)) {
5341 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342 } else {
5343 ret = ANEG_FAILED;
5344 }
5345 }
5346 }
5347 break;
5348
5349 case ANEG_STATE_IDLE_DETECT_INIT:
5350 ap->link_time = ap->cur_time;
5351 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5352 tw32_f(MAC_MODE, tp->mac_mode);
5353 udelay(40);
5354
5355 ap->state = ANEG_STATE_IDLE_DETECT;
5356 ret = ANEG_TIMER_ENAB;
5357 break;
5358
5359 case ANEG_STATE_IDLE_DETECT:
5360 if (ap->ability_match != 0 &&
5361 ap->rxconfig == 0) {
5362 ap->state = ANEG_STATE_AN_ENABLE;
5363 break;
5364 }
5365 delta = ap->cur_time - ap->link_time;
5366 if (delta > ANEG_STATE_SETTLE_TIME) {
5367 /* XXX another gem from the Broadcom driver :( */
5368 ap->state = ANEG_STATE_LINK_OK;
5369 }
5370 break;
5371
5372 case ANEG_STATE_LINK_OK:
5373 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5374 ret = ANEG_DONE;
5375 break;
5376
5377 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5378 /* ??? unimplemented */
5379 break;
5380
5381 case ANEG_STATE_NEXT_PAGE_WAIT:
5382 /* ??? unimplemented */
5383 break;
5384
5385 default:
5386 ret = ANEG_FAILED;
5387 break;
5388 }
5389
5390 return ret;
5391 }
5392
5393 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5394 {
5395 int res = 0;
5396 struct tg3_fiber_aneginfo aninfo;
5397 int status = ANEG_FAILED;
5398 unsigned int tick;
5399 u32 tmp;
5400
5401 tw32_f(MAC_TX_AUTO_NEG, 0);
5402
5403 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5404 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5405 udelay(40);
5406
5407 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5408 udelay(40);
5409
5410 memset(&aninfo, 0, sizeof(aninfo));
5411 aninfo.flags |= MR_AN_ENABLE;
5412 aninfo.state = ANEG_STATE_UNKNOWN;
5413 aninfo.cur_time = 0;
5414 tick = 0;
5415 while (++tick < 195000) {
5416 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5417 if (status == ANEG_DONE || status == ANEG_FAILED)
5418 break;
5419
5420 udelay(1);
5421 }
5422
5423 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5424 tw32_f(MAC_MODE, tp->mac_mode);
5425 udelay(40);
5426
5427 *txflags = aninfo.txconfig;
5428 *rxflags = aninfo.flags;
5429
5430 if (status == ANEG_DONE &&
5431 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5432 MR_LP_ADV_FULL_DUPLEX)))
5433 res = 1;
5434
5435 return res;
5436 }
5437
5438 static void tg3_init_bcm8002(struct tg3 *tp)
5439 {
5440 u32 mac_status = tr32(MAC_STATUS);
5441 int i;
5442
5443 /* Reset when initializing for the first time or when we have a link. */
5444 if (tg3_flag(tp, INIT_COMPLETE) &&
5445 !(mac_status & MAC_STATUS_PCS_SYNCED))
5446 return;
5447
5448 /* Set PLL lock range. */
5449 tg3_writephy(tp, 0x16, 0x8007);
5450
5451 /* SW reset */
5452 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5453
5454 /* Wait for reset to complete. */
5455 /* XXX schedule_timeout() ... */
5456 for (i = 0; i < 500; i++)
5457 udelay(10);
5458
5459 /* Config mode; select PMA/Ch 1 regs. */
5460 tg3_writephy(tp, 0x10, 0x8411);
5461
5462 /* Enable auto-lock and comdet, select txclk for tx. */
5463 tg3_writephy(tp, 0x11, 0x0a10);
5464
5465 tg3_writephy(tp, 0x18, 0x00a0);
5466 tg3_writephy(tp, 0x16, 0x41ff);
5467
5468 /* Assert and deassert POR. */
5469 tg3_writephy(tp, 0x13, 0x0400);
5470 udelay(40);
5471 tg3_writephy(tp, 0x13, 0x0000);
5472
5473 tg3_writephy(tp, 0x11, 0x0a50);
5474 udelay(40);
5475 tg3_writephy(tp, 0x11, 0x0a10);
5476
5477 /* Wait for signal to stabilize */
5478 /* XXX schedule_timeout() ... */
5479 for (i = 0; i < 15000; i++)
5480 udelay(10);
5481
5482 /* Deselect the channel register so we can read the PHYID
5483 * later.
5484 */
5485 tg3_writephy(tp, 0x10, 0x8011);
5486 }
5487
5488 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5489 {
5490 u16 flowctrl;
5491 bool current_link_up;
5492 u32 sg_dig_ctrl, sg_dig_status;
5493 u32 serdes_cfg, expected_sg_dig_ctrl;
5494 int workaround, port_a;
5495
5496 serdes_cfg = 0;
5497 workaround = 0;
5498 port_a = 1;
5499 current_link_up = false;
5500
5501 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5502 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5503 workaround = 1;
5504 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5505 port_a = 0;
5506
5507 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5508 /* preserve bits 20-23 for voltage regulator */
5509 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5510 }
5511
5512 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5513
5514 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5515 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5516 if (workaround) {
5517 u32 val = serdes_cfg;
5518
5519 if (port_a)
5520 val |= 0xc010000;
5521 else
5522 val |= 0x4010000;
5523 tw32_f(MAC_SERDES_CFG, val);
5524 }
5525
5526 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5527 }
5528 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5529 tg3_setup_flow_control(tp, 0, 0);
5530 current_link_up = true;
5531 }
5532 goto out;
5533 }
5534
5535 /* Want auto-negotiation. */
5536 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5537
5538 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5539 if (flowctrl & ADVERTISE_1000XPAUSE)
5540 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5541 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5542 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5543
5544 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5545 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5546 tp->serdes_counter &&
5547 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5548 MAC_STATUS_RCVD_CFG)) ==
5549 MAC_STATUS_PCS_SYNCED)) {
5550 tp->serdes_counter--;
5551 current_link_up = true;
5552 goto out;
5553 }
5554 restart_autoneg:
5555 if (workaround)
5556 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5557 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5558 udelay(5);
5559 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5560
5561 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5562 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5563 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5564 MAC_STATUS_SIGNAL_DET)) {
5565 sg_dig_status = tr32(SG_DIG_STATUS);
5566 mac_status = tr32(MAC_STATUS);
5567
5568 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5569 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5570 u32 local_adv = 0, remote_adv = 0;
5571
5572 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5573 local_adv |= ADVERTISE_1000XPAUSE;
5574 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5575 local_adv |= ADVERTISE_1000XPSE_ASYM;
5576
5577 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5578 remote_adv |= LPA_1000XPAUSE;
5579 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5580 remote_adv |= LPA_1000XPAUSE_ASYM;
5581
5582 tp->link_config.rmt_adv =
5583 mii_adv_to_ethtool_adv_x(remote_adv);
5584
5585 tg3_setup_flow_control(tp, local_adv, remote_adv);
5586 current_link_up = true;
5587 tp->serdes_counter = 0;
5588 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5589 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5590 if (tp->serdes_counter)
5591 tp->serdes_counter--;
5592 else {
5593 if (workaround) {
5594 u32 val = serdes_cfg;
5595
5596 if (port_a)
5597 val |= 0xc010000;
5598 else
5599 val |= 0x4010000;
5600
5601 tw32_f(MAC_SERDES_CFG, val);
5602 }
5603
5604 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5605 udelay(40);
5606
5607 /* Link parallel detection - the link is up only if we
5608 * have PCS_SYNC and are not receiving config code
5609 * words. */
5610 mac_status = tr32(MAC_STATUS);
5611 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5612 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5613 tg3_setup_flow_control(tp, 0, 0);
5614 current_link_up = true;
5615 tp->phy_flags |=
5616 TG3_PHYFLG_PARALLEL_DETECT;
5617 tp->serdes_counter =
5618 SERDES_PARALLEL_DET_TIMEOUT;
5619 } else
5620 goto restart_autoneg;
5621 }
5622 }
5623 } else {
5624 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5625 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5626 }
5627
5628 out:
5629 return current_link_up;
5630 }
5631
5632 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5633 {
5634 bool current_link_up = false;
5635
5636 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5637 goto out;
5638
5639 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5640 u32 txflags, rxflags;
5641 int i;
5642
5643 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5644 u32 local_adv = 0, remote_adv = 0;
5645
5646 if (txflags & ANEG_CFG_PS1)
5647 local_adv |= ADVERTISE_1000XPAUSE;
5648 if (txflags & ANEG_CFG_PS2)
5649 local_adv |= ADVERTISE_1000XPSE_ASYM;
5650
5651 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5652 remote_adv |= LPA_1000XPAUSE;
5653 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5654 remote_adv |= LPA_1000XPAUSE_ASYM;
5655
5656 tp->link_config.rmt_adv =
5657 mii_adv_to_ethtool_adv_x(remote_adv);
5658
5659 tg3_setup_flow_control(tp, local_adv, remote_adv);
5660
5661 current_link_up = true;
5662 }
5663 for (i = 0; i < 30; i++) {
5664 udelay(20);
5665 tw32_f(MAC_STATUS,
5666 (MAC_STATUS_SYNC_CHANGED |
5667 MAC_STATUS_CFG_CHANGED));
5668 udelay(40);
5669 if ((tr32(MAC_STATUS) &
5670 (MAC_STATUS_SYNC_CHANGED |
5671 MAC_STATUS_CFG_CHANGED)) == 0)
5672 break;
5673 }
5674
5675 mac_status = tr32(MAC_STATUS);
5676 if (!current_link_up &&
5677 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5678 !(mac_status & MAC_STATUS_RCVD_CFG))
5679 current_link_up = true;
5680 } else {
5681 tg3_setup_flow_control(tp, 0, 0);
5682
5683 /* Forcing 1000FD link up. */
5684 current_link_up = true;
5685
5686 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5687 udelay(40);
5688
5689 tw32_f(MAC_MODE, tp->mac_mode);
5690 udelay(40);
5691 }
5692
5693 out:
5694 return current_link_up;
5695 }
5696
5697 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5698 {
5699 u32 orig_pause_cfg;
5700 u32 orig_active_speed;
5701 u8 orig_active_duplex;
5702 u32 mac_status;
5703 bool current_link_up;
5704 int i;
5705
5706 orig_pause_cfg = tp->link_config.active_flowctrl;
5707 orig_active_speed = tp->link_config.active_speed;
5708 orig_active_duplex = tp->link_config.active_duplex;
5709
5710 if (!tg3_flag(tp, HW_AUTONEG) &&
5711 tp->link_up &&
5712 tg3_flag(tp, INIT_COMPLETE)) {
5713 mac_status = tr32(MAC_STATUS);
5714 mac_status &= (MAC_STATUS_PCS_SYNCED |
5715 MAC_STATUS_SIGNAL_DET |
5716 MAC_STATUS_CFG_CHANGED |
5717 MAC_STATUS_RCVD_CFG);
5718 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5719 MAC_STATUS_SIGNAL_DET)) {
5720 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5721 MAC_STATUS_CFG_CHANGED));
5722 return 0;
5723 }
5724 }
5725
5726 tw32_f(MAC_TX_AUTO_NEG, 0);
5727
5728 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5729 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5730 tw32_f(MAC_MODE, tp->mac_mode);
5731 udelay(40);
5732
5733 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5734 tg3_init_bcm8002(tp);
5735
5736 /* Enable link change events even when polling the serdes. */
5737 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5738 udelay(40);
5739
5740 tp->link_config.rmt_adv = 0;
5741 mac_status = tr32(MAC_STATUS);
5742
5743 if (tg3_flag(tp, HW_AUTONEG))
5744 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5745 else
5746 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5747
5748 tp->napi[0].hw_status->status =
5749 (SD_STATUS_UPDATED |
5750 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5751
5752 for (i = 0; i < 100; i++) {
5753 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5754 MAC_STATUS_CFG_CHANGED));
5755 udelay(5);
5756 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5757 MAC_STATUS_CFG_CHANGED |
5758 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5759 break;
5760 }
5761
5762 mac_status = tr32(MAC_STATUS);
5763 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5764 current_link_up = false;
5765 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5766 tp->serdes_counter == 0) {
5767 tw32_f(MAC_MODE, (tp->mac_mode |
5768 MAC_MODE_SEND_CONFIGS));
5769 udelay(1);
5770 tw32_f(MAC_MODE, tp->mac_mode);
5771 }
5772 }
5773
5774 if (current_link_up) {
5775 tp->link_config.active_speed = SPEED_1000;
5776 tp->link_config.active_duplex = DUPLEX_FULL;
5777 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5778 LED_CTRL_LNKLED_OVERRIDE |
5779 LED_CTRL_1000MBPS_ON));
5780 } else {
5781 tp->link_config.active_speed = SPEED_UNKNOWN;
5782 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5783 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784 LED_CTRL_LNKLED_OVERRIDE |
5785 LED_CTRL_TRAFFIC_OVERRIDE));
5786 }
5787
5788 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5789 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5790 if (orig_pause_cfg != now_pause_cfg ||
5791 orig_active_speed != tp->link_config.active_speed ||
5792 orig_active_duplex != tp->link_config.active_duplex)
5793 tg3_link_report(tp);
5794 }
5795
5796 return 0;
5797 }
5798
5799 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5800 {
5801 int err = 0;
5802 u32 bmsr, bmcr;
5803 u32 current_speed = SPEED_UNKNOWN;
5804 u8 current_duplex = DUPLEX_UNKNOWN;
5805 bool current_link_up = false;
5806 u32 local_adv = 0, remote_adv = 0, sgsr;
5807
5808 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5809 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5810 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5811 (sgsr & SERDES_TG3_SGMII_MODE)) {
5812
5813 if (force_reset)
5814 tg3_phy_reset(tp);
5815
5816 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5817
5818 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5819 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5820 } else {
5821 current_link_up = true;
5822 if (sgsr & SERDES_TG3_SPEED_1000) {
5823 current_speed = SPEED_1000;
5824 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5825 } else if (sgsr & SERDES_TG3_SPEED_100) {
5826 current_speed = SPEED_100;
5827 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5828 } else {
5829 current_speed = SPEED_10;
5830 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5831 }
5832
5833 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5834 current_duplex = DUPLEX_FULL;
5835 else
5836 current_duplex = DUPLEX_HALF;
5837 }
5838
5839 tw32_f(MAC_MODE, tp->mac_mode);
5840 udelay(40);
5841
5842 tg3_clear_mac_status(tp);
5843
5844 goto fiber_setup_done;
5845 }
5846
5847 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5848 tw32_f(MAC_MODE, tp->mac_mode);
5849 udelay(40);
5850
5851 tg3_clear_mac_status(tp);
5852
5853 if (force_reset)
5854 tg3_phy_reset(tp);
5855
5856 tp->link_config.rmt_adv = 0;
5857
5858 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5859 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5860 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5861 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5862 bmsr |= BMSR_LSTATUS;
5863 else
5864 bmsr &= ~BMSR_LSTATUS;
5865 }
5866
5867 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5868
5869 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5870 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5871 /* do nothing, just check for link up at the end */
5872 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5873 u32 adv, newadv;
5874
5875 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5876 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5877 ADVERTISE_1000XPAUSE |
5878 ADVERTISE_1000XPSE_ASYM |
5879 ADVERTISE_SLCT);
5880
5881 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5882 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5883
5884 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5885 tg3_writephy(tp, MII_ADVERTISE, newadv);
5886 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5887 tg3_writephy(tp, MII_BMCR, bmcr);
5888
5889 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5890 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5891 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5892
5893 return err;
5894 }
5895 } else {
5896 u32 new_bmcr;
5897
5898 bmcr &= ~BMCR_SPEED1000;
5899 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5900
5901 if (tp->link_config.duplex == DUPLEX_FULL)
5902 new_bmcr |= BMCR_FULLDPLX;
5903
5904 if (new_bmcr != bmcr) {
5905 /* BMCR_SPEED1000 is a reserved bit that needs
5906 * to be set on write.
5907 */
5908 new_bmcr |= BMCR_SPEED1000;
5909
5910 /* Force a linkdown */
5911 if (tp->link_up) {
5912 u32 adv;
5913
5914 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5915 adv &= ~(ADVERTISE_1000XFULL |
5916 ADVERTISE_1000XHALF |
5917 ADVERTISE_SLCT);
5918 tg3_writephy(tp, MII_ADVERTISE, adv);
5919 tg3_writephy(tp, MII_BMCR, bmcr |
5920 BMCR_ANRESTART |
5921 BMCR_ANENABLE);
5922 udelay(10);
5923 tg3_carrier_off(tp);
5924 }
5925 tg3_writephy(tp, MII_BMCR, new_bmcr);
5926 bmcr = new_bmcr;
5927 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5928 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5929 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5930 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5931 bmsr |= BMSR_LSTATUS;
5932 else
5933 bmsr &= ~BMSR_LSTATUS;
5934 }
5935 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5936 }
5937 }
5938
5939 if (bmsr & BMSR_LSTATUS) {
5940 current_speed = SPEED_1000;
5941 current_link_up = true;
5942 if (bmcr & BMCR_FULLDPLX)
5943 current_duplex = DUPLEX_FULL;
5944 else
5945 current_duplex = DUPLEX_HALF;
5946
5947 if (bmcr & BMCR_ANENABLE) {
5948 u32 common;
5949
5950 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5951 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5952 common = local_adv & remote_adv;
5953 if (common & (ADVERTISE_1000XHALF |
5954 ADVERTISE_1000XFULL)) {
5955 if (common & ADVERTISE_1000XFULL)
5956 current_duplex = DUPLEX_FULL;
5957 else
5958 current_duplex = DUPLEX_HALF;
5959
5960 tp->link_config.rmt_adv =
5961 mii_adv_to_ethtool_adv_x(remote_adv);
5962 } else if (!tg3_flag(tp, 5780_CLASS)) {
5963 /* Link is up via parallel detect */
5964 } else {
5965 current_link_up = false;
5966 }
5967 }
5968 }
5969
5970 fiber_setup_done:
5971 if (current_link_up && current_duplex == DUPLEX_FULL)
5972 tg3_setup_flow_control(tp, local_adv, remote_adv);
5973
5974 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5975 if (tp->link_config.active_duplex == DUPLEX_HALF)
5976 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5977
5978 tw32_f(MAC_MODE, tp->mac_mode);
5979 udelay(40);
5980
5981 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5982
5983 tp->link_config.active_speed = current_speed;
5984 tp->link_config.active_duplex = current_duplex;
5985
5986 tg3_test_and_report_link_chg(tp, current_link_up);
5987 return err;
5988 }
5989
5990 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5991 {
5992 if (tp->serdes_counter) {
5993 /* Give autoneg time to complete. */
5994 tp->serdes_counter--;
5995 return;
5996 }
5997
5998 if (!tp->link_up &&
5999 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6000 u32 bmcr;
6001
6002 tg3_readphy(tp, MII_BMCR, &bmcr);
6003 if (bmcr & BMCR_ANENABLE) {
6004 u32 phy1, phy2;
6005
6006 /* Select shadow register 0x1f */
6007 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6008 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6009
6010 /* Select expansion interrupt status register */
6011 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6012 MII_TG3_DSP_EXP1_INT_STAT);
6013 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6014 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6015
6016 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6017 /* We have signal detect and are not receiving
6018 * config code words, so the link is up by
6019 * parallel detection.
6020 */
6021
6022 bmcr &= ~BMCR_ANENABLE;
6023 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6024 tg3_writephy(tp, MII_BMCR, bmcr);
6025 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6026 }
6027 }
6028 } else if (tp->link_up &&
6029 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6030 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6031 u32 phy2;
6032
6033 /* Select expansion interrupt status register */
6034 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6035 MII_TG3_DSP_EXP1_INT_STAT);
6036 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6037 if (phy2 & 0x20) {
6038 u32 bmcr;
6039
6040 /* Config code words received, turn on autoneg. */
6041 tg3_readphy(tp, MII_BMCR, &bmcr);
6042 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6043
6044 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6045
6046 }
6047 }
6048 }
6049
6050 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6051 {
6052 u32 val;
6053 int err;
6054
6055 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6056 err = tg3_setup_fiber_phy(tp, force_reset);
6057 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6058 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6059 else
6060 err = tg3_setup_copper_phy(tp, force_reset);
6061
6062 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6063 u32 scale;
6064
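		/* The MAC clock on CHIPREV_5784_AX varies with link speed;
		 * retune the GRC timer prescaler to the reported clock
		 * (62.5 MHz, 6.25 MHz, or otherwise the 12.5 MHz class) so
		 * internal timers keep a steady tick (an interpretation; the
		 * exact divider encoding is hardware-defined).
		 */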
6065 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6066 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6067 scale = 65;
6068 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6069 scale = 6;
6070 else
6071 scale = 12;
6072
6073 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6074 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6075 tw32(GRC_MISC_CFG, val);
6076 }
6077
6078 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6079 (6 << TX_LENGTHS_IPG_SHIFT);
6080 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6081 tg3_asic_rev(tp) == ASIC_REV_5762)
6082 val |= tr32(MAC_TX_LENGTHS) &
6083 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6084 TX_LENGTHS_CNT_DWN_VAL_MSK);
6085
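	/* Half-duplex gigabit requires a longer slot time for carrier
	 * extension (IEEE 802.3 specifies a 4096-bit-time slot there),
	 * hence the larger value below; other link modes use the default.
	 */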
6086 if (tp->link_config.active_speed == SPEED_1000 &&
6087 tp->link_config.active_duplex == DUPLEX_HALF)
6088 tw32(MAC_TX_LENGTHS, val |
6089 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6090 else
6091 tw32(MAC_TX_LENGTHS, val |
6092 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6093
6094 if (!tg3_flag(tp, 5705_PLUS)) {
6095 if (tp->link_up) {
6096 tw32(HOSTCC_STAT_COAL_TICKS,
6097 tp->coal.stats_block_coalesce_usecs);
6098 } else {
6099 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6100 }
6101 }
6102
6103 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6104 val = tr32(PCIE_PWR_MGMT_THRESH);
6105 if (!tp->link_up)
6106 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6107 tp->pwrmgmt_thresh;
6108 else
6109 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6110 tw32(PCIE_PWR_MGMT_THRESH, val);
6111 }
6112
6113 return err;
6114 }
6115
6116 /* tp->lock must be held */
6117 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6118 {
6119 u64 stamp;
6120
6121 ptp_read_system_prets(sts);
6122 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6123 ptp_read_system_postts(sts);
6124 stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6125
6126 return stamp;
6127 }
6128
6129 /* tp->lock must be held */
6130 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6131 {
6132 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6133
6134 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6135 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6136 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6137 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6138 }
6139
6140 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6141 static inline void tg3_full_unlock(struct tg3 *tp);
6142 static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
6143 {
6144 struct tg3 *tp = netdev_priv(dev);
6145
6146 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE;
6147
6148 if (tg3_flag(tp, PTP_CAPABLE)) {
6149 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6150 SOF_TIMESTAMPING_RX_HARDWARE |
6151 SOF_TIMESTAMPING_RAW_HARDWARE;
6152 }
6153
6154 if (tp->ptp_clock)
6155 info->phc_index = ptp_clock_index(tp->ptp_clock);
6156
6157 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6158
6159 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6160 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6161 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6162 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6163 return 0;
6164 }
6165
6166 static int tg3_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
6167 {
6168 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6169 u64 correction;
6170 bool neg_adj;
6171
6172 /* Frequency adjustment is performed in hardware with a 24-bit
6173 * accumulator and a programmable correction value. On each clock
6174 * cycle, the correction value is added to the accumulator; when it
6175 * overflows, the time counter is incremented/decremented.
6176 */
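	/* Worked example (illustrative, not from the original source):
	 * with a 24-bit accumulator, a correction value of 1 overflows
	 * once every 2^24 clocks, nudging the counter by about
	 * 1e6 / 2^24 ~= 0.06 ppm. diff_by_scaled_ppm() converts scaled_ppm
	 * (ppm with a 16-bit fractional part) into that correction value
	 * and returns true when the adjustment is negative.
	 */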
6177 neg_adj = diff_by_scaled_ppm(1 << 24, scaled_ppm, &correction);
6178
6179 tg3_full_lock(tp, 0);
6180
6181 if (correction)
6182 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6183 TG3_EAV_REF_CLK_CORRECT_EN |
6184 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) |
6185 ((u32)correction & TG3_EAV_REF_CLK_CORRECT_MASK));
6186 else
6187 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6188
6189 tg3_full_unlock(tp);
6190
6191 return 0;
6192 }
6193
6194 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6195 {
6196 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6197
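	/* The delta is accumulated in software and folded into reads
	 * (see tg3_ptp_gettimex()) instead of rewriting the free-running
	 * hardware counter on every adjustment.
	 */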
6198 tg3_full_lock(tp, 0);
6199 tp->ptp_adjust += delta;
6200 tg3_full_unlock(tp);
6201
6202 return 0;
6203 }
6204
6205 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6206 struct ptp_system_timestamp *sts)
6207 {
6208 u64 ns;
6209 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6210
6211 tg3_full_lock(tp, 0);
6212 ns = tg3_refclk_read(tp, sts);
6213 ns += tp->ptp_adjust;
6214 tg3_full_unlock(tp);
6215
6216 *ts = ns_to_timespec64(ns);
6217
6218 return 0;
6219 }
6220
6221 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6222 const struct timespec64 *ts)
6223 {
6224 u64 ns;
6225 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6226
6227 ns = timespec64_to_ns(ts);
6228
6229 tg3_full_lock(tp, 0);
6230 tg3_refclk_write(tp, ns);
6231 tp->ptp_adjust = 0;
6232 tg3_full_unlock(tp);
6233
6234 return 0;
6235 }
6236
6237 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6238 struct ptp_clock_request *rq, int on)
6239 {
6240 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241 u32 clock_ctl;
6242 int rval = 0;
6243
6244 switch (rq->type) {
6245 case PTP_CLK_REQ_PEROUT:
6246 /* Reject requests with unsupported flags */
6247 if (rq->perout.flags)
6248 return -EOPNOTSUPP;
6249
6250 if (rq->perout.index != 0)
6251 return -EINVAL;
6252
6253 tg3_full_lock(tp, 0);
6254 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6255 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6256
6257 if (on) {
6258 u64 nsec;
6259
6260 nsec = rq->perout.start.sec * 1000000000ULL +
6261 rq->perout.start.nsec;
6262
6263 if (rq->perout.period.sec || rq->perout.period.nsec) {
6264 netdev_warn(tp->dev,
6265 "Device supports only a one-shot timesync output, period must be 0\n");
6266 rval = -EINVAL;
6267 goto err_out;
6268 }
6269
6270 if (nsec & (1ULL << 63)) {
6271 netdev_warn(tp->dev,
6272 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6273 rval = -EINVAL;
6274 goto err_out;
6275 }
6276
6277 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6278 tw32(TG3_EAV_WATCHDOG0_MSB,
6279 TG3_EAV_WATCHDOG0_EN |
6280 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6281
6282 tw32(TG3_EAV_REF_CLCK_CTL,
6283 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6284 } else {
6285 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6286 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6287 }
6288
6289 err_out:
6290 tg3_full_unlock(tp);
6291 return rval;
6292
6293 default:
6294 break;
6295 }
6296
6297 return -EOPNOTSUPP;
6298 }
6299
6300 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6301 struct skb_shared_hwtstamps *timestamp)
6302 {
6303 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6304 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6305 tp->ptp_adjust);
6306 }
6307
6308 static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
6309 {
6310 *hwclock = tr32(TG3_TX_TSTAMP_LSB);
6311 *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6312 }
6313
6314 static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
6315 {
6316 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6317 struct skb_shared_hwtstamps timestamp;
6318 u64 hwclock;
6319
6320 if (tp->ptp_txts_retrycnt > 2)
6321 goto done;
6322
6323 tg3_read_tx_tstamp(tp, &hwclock);
6324
6325 if (hwclock != tp->pre_tx_ts) {
6326 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6327 skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
6328 goto done;
6329 }
6330 tp->ptp_txts_retrycnt++;
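	/* Timestamp not latched yet: a positive return value asks the PTP
	 * core to run this worker again after that many jiffies; the
	 * negative return below stops the rescheduling once we are done.
	 */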
6331 return HZ / 10;
6332 done:
6333 dev_consume_skb_any(tp->tx_tstamp_skb);
6334 tp->tx_tstamp_skb = NULL;
6335 tp->ptp_txts_retrycnt = 0;
6336 tp->pre_tx_ts = 0;
6337 return -1;
6338 }
6339
6340 static const struct ptp_clock_info tg3_ptp_caps = {
6341 .owner = THIS_MODULE,
6342 .name = "tg3 clock",
6343 .max_adj = 250000000,
6344 .n_alarm = 0,
6345 .n_ext_ts = 0,
6346 .n_per_out = 1,
6347 .n_pins = 0,
6348 .pps = 0,
6349 .adjfine = tg3_ptp_adjfine,
6350 .adjtime = tg3_ptp_adjtime,
6351 .do_aux_work = tg3_ptp_ts_aux_work,
6352 .gettimex64 = tg3_ptp_gettimex,
6353 .settime64 = tg3_ptp_settime,
6354 .enable = tg3_ptp_enable,
6355 };
6356
6357 /* tp->lock must be held */
6358 static void tg3_ptp_init(struct tg3 *tp)
6359 {
6360 if (!tg3_flag(tp, PTP_CAPABLE))
6361 return;
6362
6363 /* Initialize the hardware clock to the system time. */
6364 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6365 tp->ptp_adjust = 0;
6366 tp->ptp_info = tg3_ptp_caps;
6367 }
6368
6369 /* tp->lock must be held */
6370 static void tg3_ptp_resume(struct tg3 *tp)
6371 {
6372 if (!tg3_flag(tp, PTP_CAPABLE))
6373 return;
6374
6375 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6376 tp->ptp_adjust = 0;
6377 }
6378
6379 static void tg3_ptp_fini(struct tg3 *tp)
6380 {
6381 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6382 return;
6383
6384 ptp_clock_unregister(tp->ptp_clock);
6385 tp->ptp_clock = NULL;
6386 tp->ptp_adjust = 0;
6387 dev_consume_skb_any(tp->tx_tstamp_skb);
6388 tp->tx_tstamp_skb = NULL;
6389 }
6390
6391 static inline int tg3_irq_sync(struct tg3 *tp)
6392 {
6393 return tp->irq_sync;
6394 }
6395
6396 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6397 {
6398 int i;
6399
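	/* Advance the destination pointer by 'off' as well, so the value
	 * of the register at address (off + i) always lands at index
	 * (off + i) / 4 of the caller's regs[] buffer.
	 */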
6400 dst = (u32 *)((u8 *)dst + off);
6401 for (i = 0; i < len; i += sizeof(u32))
6402 *dst++ = tr32(off + i);
6403 }
6404
6405 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6406 {
6407 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6408 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6409 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6410 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6411 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6412 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6413 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6414 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6415 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6416 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6417 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6418 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6419 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6420 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6421 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6422 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6423 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6424 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6425 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6426
6427 if (tg3_flag(tp, SUPPORT_MSIX))
6428 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6429
6430 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6431 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6432 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6433 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6434 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6435 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6436 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6437 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6438
6439 if (!tg3_flag(tp, 5705_PLUS)) {
6440 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6441 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6442 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6443 }
6444
6445 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6446 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6447 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6448 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6449 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6450
6451 if (tg3_flag(tp, NVRAM))
6452 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6453 }
6454
6455 static void tg3_dump_state(struct tg3 *tp)
6456 {
6457 int i;
6458 u32 *regs;
6459
6460 /* If it is a PCI error, all registers will read 0xffff;
6461 * don't dump them out, just report the error and return.
6462 */
6463 if (tp->pdev->error_state != pci_channel_io_normal) {
6464 netdev_err(tp->dev, "PCI channel ERROR!\n");
6465 return;
6466 }
6467
6468 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6469 if (!regs)
6470 return;
6471
6472 if (tg3_flag(tp, PCI_EXPRESS)) {
6473 /* Read up to but not including private PCI registers */
6474 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6475 regs[i / sizeof(u32)] = tr32(i);
6476 } else
6477 tg3_dump_legacy_regs(tp, regs);
6478
6479 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6480 if (!regs[i + 0] && !regs[i + 1] &&
6481 !regs[i + 2] && !regs[i + 3])
6482 continue;
6483
6484 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6485 i * 4,
6486 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6487 }
6488
6489 kfree(regs);
6490
6491 for (i = 0; i < tp->irq_cnt; i++) {
6492 struct tg3_napi *tnapi = &tp->napi[i];
6493
6494 /* SW status block */
6495 netdev_err(tp->dev,
6496 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6497 i,
6498 tnapi->hw_status->status,
6499 tnapi->hw_status->status_tag,
6500 tnapi->hw_status->rx_jumbo_consumer,
6501 tnapi->hw_status->rx_consumer,
6502 tnapi->hw_status->rx_mini_consumer,
6503 tnapi->hw_status->idx[0].rx_producer,
6504 tnapi->hw_status->idx[0].tx_consumer);
6505
6506 netdev_err(tp->dev,
6507 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6508 i,
6509 tnapi->last_tag, tnapi->last_irq_tag,
6510 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6511 tnapi->rx_rcb_ptr,
6512 tnapi->prodring.rx_std_prod_idx,
6513 tnapi->prodring.rx_std_cons_idx,
6514 tnapi->prodring.rx_jmb_prod_idx,
6515 tnapi->prodring.rx_jmb_cons_idx);
6516 }
6517 }
6518
6519 /* This is called whenever we suspect that the system chipset is re-
6520 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6521 * is bogus tx completions. We try to recover by setting the
6522 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6523 * in the workqueue.
6524 */
6525 static void tg3_tx_recover(struct tg3 *tp)
6526 {
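	/* If the mailbox write-reorder workaround is already in effect,
	 * bogus completions must have another cause and this recovery
	 * cannot help, so treat that as a hard bug.
	 */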
6527 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6528 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6529
6530 netdev_warn(tp->dev,
6531 "The system may be re-ordering memory-mapped I/O "
6532 "cycles to the network device, attempting to recover. "
6533 "Please report the problem to the driver maintainer "
6534 "and include system chipset information.\n");
6535
6536 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6537 }
6538
6539 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6540 {
6541 /* Tell compiler to fetch tx indices from memory. */
6542 barrier();
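	/* The ring arithmetic wraps naturally, e.g. (illustrative values)
	 * tx_prod = 5 and tx_cons = 510 on a 512-entry ring gives
	 * (5 - 510) & 511 = 7 descriptors still in flight.
	 */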
6543 return tnapi->tx_pending -
6544 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6545 }
6546
6547 /* Tigon3 never reports partial packet sends. So we do not
6548 * need special logic to handle SKBs that have not had all
6549 * of their frags sent yet, like SunGEM does.
6550 */
6551 static void tg3_tx(struct tg3_napi *tnapi)
6552 {
6553 struct tg3 *tp = tnapi->tp;
6554 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6555 u32 sw_idx = tnapi->tx_cons;
6556 struct netdev_queue *txq;
6557 int index = tnapi - tp->napi;
6558 unsigned int pkts_compl = 0, bytes_compl = 0;
6559
6560 if (tg3_flag(tp, ENABLE_TSS))
6561 index--;
6562
6563 txq = netdev_get_tx_queue(tp->dev, index);
6564
6565 while (sw_idx != hw_idx) {
6566 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6567 bool complete_skb_later = false;
6568 struct sk_buff *skb = ri->skb;
6569 int i, tx_bug = 0;
6570
6571 if (unlikely(skb == NULL)) {
6572 tg3_tx_recover(tp);
6573 return;
6574 }
6575
6576 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6577 struct skb_shared_hwtstamps timestamp;
6578 u64 hwclock;
6579
6580 tg3_read_tx_tstamp(tp, &hwclock);
6581 if (hwclock != tp->pre_tx_ts) {
6582 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6583 skb_tstamp_tx(skb, &timestamp);
6584 tp->pre_tx_ts = 0;
6585 } else {
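				/* Timestamp not latched yet: stash the skb
				 * and let tg3_ptp_ts_aux_work() poll for it;
				 * the skb is consumed there instead of below.
				 */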
6586 tp->tx_tstamp_skb = skb;
6587 complete_skb_later = true;
6588 }
6589 }
6590
6591 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6592 skb_headlen(skb), DMA_TO_DEVICE);
6593
6594 ri->skb = NULL;
6595
6596 while (ri->fragmented) {
6597 ri->fragmented = false;
6598 sw_idx = NEXT_TX(sw_idx);
6599 ri = &tnapi->tx_buffers[sw_idx];
6600 }
6601
6602 sw_idx = NEXT_TX(sw_idx);
6603
6604 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6605 ri = &tnapi->tx_buffers[sw_idx];
6606 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6607 tx_bug = 1;
6608
6609 dma_unmap_page(&tp->pdev->dev,
6610 dma_unmap_addr(ri, mapping),
6611 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6612 DMA_TO_DEVICE);
6613
6614 while (ri->fragmented) {
6615 ri->fragmented = false;
6616 sw_idx = NEXT_TX(sw_idx);
6617 ri = &tnapi->tx_buffers[sw_idx];
6618 }
6619
6620 sw_idx = NEXT_TX(sw_idx);
6621 }
6622
6623 pkts_compl++;
6624 bytes_compl += skb->len;
6625
6626 if (!complete_skb_later)
6627 dev_consume_skb_any(skb);
6628 else
6629 ptp_schedule_worker(tp->ptp_clock, 0);
6630
6631 if (unlikely(tx_bug)) {
6632 tg3_tx_recover(tp);
6633 return;
6634 }
6635 }
6636
6637 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6638
6639 tnapi->tx_cons = sw_idx;
6640
6641 /* Need to make the tx_cons update visible to __tg3_start_xmit()
6642 * before checking for netif_queue_stopped(). Without the
6643 * memory barrier, there is a small possibility that __tg3_start_xmit()
6644 * will miss it and cause the queue to be stopped forever.
6645 */
6646 smp_mb();
6647
6648 if (unlikely(netif_tx_queue_stopped(txq) &&
6649 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6650 __netif_tx_lock(txq, smp_processor_id());
6651 if (netif_tx_queue_stopped(txq) &&
6652 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6653 netif_tx_wake_queue(txq);
6654 __netif_tx_unlock(txq);
6655 }
6656 }
6657
6658 static void tg3_frag_free(bool is_frag, void *data)
6659 {
6660 if (is_frag)
6661 skb_free_frag(data);
6662 else
6663 kfree(data);
6664 }
6665
6666 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6667 {
6668 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6669 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6670
6671 if (!ri->data)
6672 return;
6673
6674 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6675 DMA_FROM_DEVICE);
6676 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6677 ri->data = NULL;
6678 }
6679
6680
6681 /* Returns size of skb allocated or < 0 on error.
6682 *
6683 * We only need to fill in the address because the other members
6684 * of the RX descriptor are invariant, see tg3_init_rings.
6685 *
6686 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6687 * posting buffers we only dirty the first cache line of the RX
6688 * descriptor (containing the address). Whereas for the RX status
6689 * buffers the cpu only reads the last cacheline of the RX descriptor
6690 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6691 */
6692 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6693 u32 opaque_key, u32 dest_idx_unmasked,
6694 unsigned int *frag_size)
6695 {
6696 struct tg3_rx_buffer_desc *desc;
6697 struct ring_info *map;
6698 u8 *data;
6699 dma_addr_t mapping;
6700 int skb_size, data_size, dest_idx;
6701
6702 switch (opaque_key) {
6703 case RXD_OPAQUE_RING_STD:
6704 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6705 desc = &tpr->rx_std[dest_idx];
6706 map = &tpr->rx_std_buffers[dest_idx];
6707 data_size = tp->rx_pkt_map_sz;
6708 break;
6709
6710 case RXD_OPAQUE_RING_JUMBO:
6711 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6712 desc = &tpr->rx_jmb[dest_idx].std;
6713 map = &tpr->rx_jmb_buffers[dest_idx];
6714 data_size = TG3_RX_JMB_MAP_SZ;
6715 break;
6716
6717 default:
6718 return -EINVAL;
6719 }
6720
6721 /* Do not overwrite any of the map or rp information
6722 * until we are sure we can commit to a new buffer.
6723 *
6724 * Callers depend upon this behavior and assume that
6725 * we leave everything unchanged if we fail.
6726 */
6727 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6728 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6729 if (skb_size <= PAGE_SIZE) {
6730 data = napi_alloc_frag(skb_size);
6731 *frag_size = skb_size;
6732 } else {
6733 data = kmalloc(skb_size, GFP_ATOMIC);
6734 *frag_size = 0;
6735 }
6736 if (!data)
6737 return -ENOMEM;
6738
6739 mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6740 data_size, DMA_FROM_DEVICE);
6741 if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6742 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6743 return -EIO;
6744 }
6745
6746 map->data = data;
6747 dma_unmap_addr_set(map, mapping, mapping);
6748
6749 desc->addr_hi = ((u64)mapping >> 32);
6750 desc->addr_lo = ((u64)mapping & 0xffffffff);
6751
6752 return data_size;
6753 }
6754
6755 /* We only need to move the address over because the other
6756 * members of the RX descriptor are invariant. See notes above
6757 * tg3_alloc_rx_data for full details.
6758 */
6759 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6760 struct tg3_rx_prodring_set *dpr,
6761 u32 opaque_key, int src_idx,
6762 u32 dest_idx_unmasked)
6763 {
6764 struct tg3 *tp = tnapi->tp;
6765 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6766 struct ring_info *src_map, *dest_map;
6767 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6768 int dest_idx;
6769
6770 switch (opaque_key) {
6771 case RXD_OPAQUE_RING_STD:
6772 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6773 dest_desc = &dpr->rx_std[dest_idx];
6774 dest_map = &dpr->rx_std_buffers[dest_idx];
6775 src_desc = &spr->rx_std[src_idx];
6776 src_map = &spr->rx_std_buffers[src_idx];
6777 break;
6778
6779 case RXD_OPAQUE_RING_JUMBO:
6780 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6781 dest_desc = &dpr->rx_jmb[dest_idx].std;
6782 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6783 src_desc = &spr->rx_jmb[src_idx].std;
6784 src_map = &spr->rx_jmb_buffers[src_idx];
6785 break;
6786
6787 default:
6788 return;
6789 }
6790
6791 dest_map->data = src_map->data;
6792 dma_unmap_addr_set(dest_map, mapping,
6793 dma_unmap_addr(src_map, mapping));
6794 dest_desc->addr_hi = src_desc->addr_hi;
6795 dest_desc->addr_lo = src_desc->addr_lo;
6796
6797 /* Ensure that the update to the skb happens after the physical
6798 * addresses have been transferred to the new BD location.
6799 */
6800 smp_wmb();
6801
6802 src_map->data = NULL;
6803 }
6804
6805 /* The RX ring scheme is composed of multiple rings which post fresh
6806 * buffers to the chip, and one special ring the chip uses to report
6807 * status back to the host.
6808 *
6809 * The special ring reports the status of received packets to the
6810 * host. The chip does not write into the original descriptor the
6811 * RX buffer was obtained from. The chip simply takes the original
6812 * descriptor as provided by the host, updates the status and length
6813 * field, then writes this into the next status ring entry.
6814 *
6815 * Each ring the host uses to post buffers to the chip is described
6816 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
6817 * it is first placed into the on-chip ram. When the packet's length
6818 * is known, it walks down the TG3_BDINFO entries to select the ring.
6819 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6820 * which is within the range of the new packet's length is chosen.
6821 *
6822 * The "separate ring for rx status" scheme may sound queer, but it makes
6823 * sense from a cache coherency perspective. If only the host writes
6824 * to the buffer post rings, and only the chip writes to the rx status
6825 * rings, then cache lines never move beyond shared-modified state.
6826 * If both the host and chip were to write into the same ring, cache line
6827 * eviction could occur since both entities want it in an exclusive state.
6828 */
6829 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6830 {
6831 struct tg3 *tp = tnapi->tp;
6832 u32 work_mask, rx_std_posted = 0;
6833 u32 std_prod_idx, jmb_prod_idx;
6834 u32 sw_idx = tnapi->rx_rcb_ptr;
6835 u16 hw_idx;
6836 int received;
6837 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6838
6839 hw_idx = *(tnapi->rx_rcb_prod_idx);
6840 /*
6841 * We need to order the read of hw_idx and the read of
6842 * the opaque cookie.
6843 */
6844 rmb();
6845 work_mask = 0;
6846 received = 0;
6847 std_prod_idx = tpr->rx_std_prod_idx;
6848 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6849 while (sw_idx != hw_idx && budget > 0) {
6850 struct ring_info *ri;
6851 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6852 unsigned int len;
6853 struct sk_buff *skb;
6854 dma_addr_t dma_addr;
6855 u32 opaque_key, desc_idx, *post_ptr;
6856 u8 *data;
6857 u64 tstamp = 0;
6858
6859 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6860 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6861 if (opaque_key == RXD_OPAQUE_RING_STD) {
6862 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6863 dma_addr = dma_unmap_addr(ri, mapping);
6864 data = ri->data;
6865 post_ptr = &std_prod_idx;
6866 rx_std_posted++;
6867 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6868 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6869 dma_addr = dma_unmap_addr(ri, mapping);
6870 data = ri->data;
6871 post_ptr = &jmb_prod_idx;
6872 } else
6873 goto next_pkt_nopost;
6874
6875 work_mask |= opaque_key;
6876
6877 if (desc->err_vlan & RXD_ERR_MASK) {
6878 drop_it:
6879 tg3_recycle_rx(tnapi, tpr, opaque_key,
6880 desc_idx, *post_ptr);
6881 drop_it_no_recycle:
6882 /* Other statistics are tracked by the card. */
6883 tnapi->rx_dropped++;
6884 goto next_pkt;
6885 }
6886
6887 prefetch(data + TG3_RX_OFFSET(tp));
6888 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6889 ETH_FCS_LEN;
6890
6891 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6892 RXD_FLAG_PTPSTAT_PTPV1 ||
6893 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6894 RXD_FLAG_PTPSTAT_PTPV2) {
6895 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6896 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6897 }
6898
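		/* Copy-break: large packets hand the DMA buffer directly to
		 * the stack via build_skb() and a fresh buffer is posted,
		 * while small packets are copied into a new skb so the
		 * original buffer can be recycled in place.
		 */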
6899 if (len > TG3_RX_COPY_THRESH(tp)) {
6900 int skb_size;
6901 unsigned int frag_size;
6902
6903 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6904 *post_ptr, &frag_size);
6905 if (skb_size < 0)
6906 goto drop_it;
6907
6908 dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6909 DMA_FROM_DEVICE);
6910
6911 /* Ensure that the update to the data happens
6912 * after the usage of the old DMA mapping.
6913 */
6914 smp_wmb();
6915
6916 ri->data = NULL;
6917
6918 if (frag_size)
6919 skb = build_skb(data, frag_size);
6920 else
6921 skb = slab_build_skb(data);
6922 if (!skb) {
6923 tg3_frag_free(frag_size != 0, data);
6924 goto drop_it_no_recycle;
6925 }
6926 skb_reserve(skb, TG3_RX_OFFSET(tp));
6927 } else {
6928 tg3_recycle_rx(tnapi, tpr, opaque_key,
6929 desc_idx, *post_ptr);
6930
6931 skb = netdev_alloc_skb(tp->dev,
6932 len + TG3_RAW_IP_ALIGN);
6933 if (skb == NULL)
6934 goto drop_it_no_recycle;
6935
6936 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6937 dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6938 DMA_FROM_DEVICE);
6939 memcpy(skb->data,
6940 data + TG3_RX_OFFSET(tp),
6941 len);
6942 dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6943 len, DMA_FROM_DEVICE);
6944 }
6945
6946 skb_put(skb, len);
6947 if (tstamp)
6948 tg3_hwclock_to_timestamp(tp, tstamp,
6949 skb_hwtstamps(skb));
6950
6951 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6952 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6953 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6954 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6955 skb->ip_summed = CHECKSUM_UNNECESSARY;
6956 else
6957 skb_checksum_none_assert(skb);
6958
6959 skb->protocol = eth_type_trans(skb, tp->dev);
6960
6961 if (len > (tp->dev->mtu + ETH_HLEN) &&
6962 skb->protocol != htons(ETH_P_8021Q) &&
6963 skb->protocol != htons(ETH_P_8021AD)) {
6964 dev_kfree_skb_any(skb);
6965 goto drop_it_no_recycle;
6966 }
6967
6968 if (desc->type_flags & RXD_FLAG_VLAN &&
6969 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6970 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6971 desc->err_vlan & RXD_VLAN_MASK);
6972
6973 napi_gro_receive(&tnapi->napi, skb);
6974
6975 received++;
6976 budget--;
6977
6978 next_pkt:
6979 (*post_ptr)++;
6980
6981 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6982 tpr->rx_std_prod_idx = std_prod_idx &
6983 tp->rx_std_ring_mask;
6984 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6985 tpr->rx_std_prod_idx);
6986 work_mask &= ~RXD_OPAQUE_RING_STD;
6987 rx_std_posted = 0;
6988 }
6989 next_pkt_nopost:
6990 sw_idx++;
6991 sw_idx &= tp->rx_ret_ring_mask;
6992
6993 /* Refresh hw_idx to see if there is new work */
6994 if (sw_idx == hw_idx) {
6995 hw_idx = *(tnapi->rx_rcb_prod_idx);
6996 rmb();
6997 }
6998 }
6999
7000 /* ACK the status ring. */
7001 tnapi->rx_rcb_ptr = sw_idx;
7002 tw32_rx_mbox(tnapi->consmbox, sw_idx);
7003
7004 /* Refill RX ring(s). */
7005 if (!tg3_flag(tp, ENABLE_RSS)) {
7006 /* Sync BD data before updating mailbox */
7007 wmb();
7008
7009 if (work_mask & RXD_OPAQUE_RING_STD) {
7010 tpr->rx_std_prod_idx = std_prod_idx &
7011 tp->rx_std_ring_mask;
7012 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7013 tpr->rx_std_prod_idx);
7014 }
7015 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
7016 tpr->rx_jmb_prod_idx = jmb_prod_idx &
7017 tp->rx_jmb_ring_mask;
7018 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7019 tpr->rx_jmb_prod_idx);
7020 }
7021 } else if (work_mask) {
7022 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7023 * updated before the producer indices can be updated.
7024 */
7025 smp_wmb();
7026
7027 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7028 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7029
7030 if (tnapi != &tp->napi[1]) {
7031 tp->rx_refill = true;
7032 napi_schedule(&tp->napi[1].napi);
7033 }
7034 }
7035
7036 return received;
7037 }
7038
7039 static void tg3_poll_link(struct tg3 *tp)
7040 {
7041 /* handle link change and other phy events */
7042 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7043 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7044
7045 if (sblk->status & SD_STATUS_LINK_CHG) {
7046 sblk->status = SD_STATUS_UPDATED |
7047 (sblk->status & ~SD_STATUS_LINK_CHG);
7048 spin_lock(&tp->lock);
7049 if (tg3_flag(tp, USE_PHYLIB)) {
7050 tw32_f(MAC_STATUS,
7051 (MAC_STATUS_SYNC_CHANGED |
7052 MAC_STATUS_CFG_CHANGED |
7053 MAC_STATUS_MI_COMPLETION |
7054 MAC_STATUS_LNKSTATE_CHANGED));
7055 udelay(40);
7056 } else
7057 tg3_setup_phy(tp, false);
7058 spin_unlock(&tp->lock);
7059 }
7060 }
7061 }
7062
7063 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7064 struct tg3_rx_prodring_set *dpr,
7065 struct tg3_rx_prodring_set *spr)
7066 {
7067 u32 si, di, cpycnt, src_prod_idx;
7068 int i, err = 0;
7069
7070 while (1) {
7071 src_prod_idx = spr->rx_std_prod_idx;
7072
7073 /* Make sure updates to the rx_std_buffers[] entries and the
7074 * standard producer index are seen in the correct order.
7075 */
7076 smp_rmb();
7077
7078 if (spr->rx_std_cons_idx == src_prod_idx)
7079 break;
7080
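		/* Copy as many contiguous descriptors as possible in one go:
		 * bounded by the distance to the source producer (allowing
		 * for ring wrap) and by the room left before the destination
		 * index wraps.
		 */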
7081 if (spr->rx_std_cons_idx < src_prod_idx)
7082 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7083 else
7084 cpycnt = tp->rx_std_ring_mask + 1 -
7085 spr->rx_std_cons_idx;
7086
7087 cpycnt = min(cpycnt,
7088 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7089
7090 si = spr->rx_std_cons_idx;
7091 di = dpr->rx_std_prod_idx;
7092
7093 for (i = di; i < di + cpycnt; i++) {
7094 if (dpr->rx_std_buffers[i].data) {
7095 cpycnt = i - di;
7096 err = -ENOSPC;
7097 break;
7098 }
7099 }
7100
7101 if (!cpycnt)
7102 break;
7103
7104 /* Ensure that updates to the rx_std_buffers ring and the
7105 * shadowed hardware producer ring from tg3_recycle_skb() are
7106 * ordered correctly WRT the skb check above.
7107 */
7108 smp_rmb();
7109
7110 memcpy(&dpr->rx_std_buffers[di],
7111 &spr->rx_std_buffers[si],
7112 cpycnt * sizeof(struct ring_info));
7113
7114 for (i = 0; i < cpycnt; i++, di++, si++) {
7115 struct tg3_rx_buffer_desc *sbd, *dbd;
7116 sbd = &spr->rx_std[si];
7117 dbd = &dpr->rx_std[di];
7118 dbd->addr_hi = sbd->addr_hi;
7119 dbd->addr_lo = sbd->addr_lo;
7120 }
7121
7122 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7123 tp->rx_std_ring_mask;
7124 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7125 tp->rx_std_ring_mask;
7126 }
7127
7128 while (1) {
7129 src_prod_idx = spr->rx_jmb_prod_idx;
7130
7131 /* Make sure updates to the rx_jmb_buffers[] entries and
7132 * the jumbo producer index are seen in the correct order.
7133 */
7134 smp_rmb();
7135
7136 if (spr->rx_jmb_cons_idx == src_prod_idx)
7137 break;
7138
7139 if (spr->rx_jmb_cons_idx < src_prod_idx)
7140 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7141 else
7142 cpycnt = tp->rx_jmb_ring_mask + 1 -
7143 spr->rx_jmb_cons_idx;
7144
7145 cpycnt = min(cpycnt,
7146 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7147
7148 si = spr->rx_jmb_cons_idx;
7149 di = dpr->rx_jmb_prod_idx;
7150
7151 for (i = di; i < di + cpycnt; i++) {
7152 if (dpr->rx_jmb_buffers[i].data) {
7153 cpycnt = i - di;
7154 err = -ENOSPC;
7155 break;
7156 }
7157 }
7158
7159 if (!cpycnt)
7160 break;
7161
7162 /* Ensure that updates to the rx_jmb_buffers ring and the
7163 * shadowed hardware producer ring from tg3_recycle_skb() are
7164 * ordered correctly WRT the skb check above.
7165 */
7166 smp_rmb();
7167
7168 memcpy(&dpr->rx_jmb_buffers[di],
7169 &spr->rx_jmb_buffers[si],
7170 cpycnt * sizeof(struct ring_info));
7171
7172 for (i = 0; i < cpycnt; i++, di++, si++) {
7173 struct tg3_rx_buffer_desc *sbd, *dbd;
7174 sbd = &spr->rx_jmb[si].std;
7175 dbd = &dpr->rx_jmb[di].std;
7176 dbd->addr_hi = sbd->addr_hi;
7177 dbd->addr_lo = sbd->addr_lo;
7178 }
7179
7180 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7181 tp->rx_jmb_ring_mask;
7182 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7183 tp->rx_jmb_ring_mask;
7184 }
7185
7186 return err;
7187 }
7188
7189 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7190 {
7191 struct tg3 *tp = tnapi->tp;
7192
7193 /* run TX completion thread */
7194 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7195 tg3_tx(tnapi);
7196 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7197 return work_done;
7198 }
7199
7200 if (!tnapi->rx_rcb_prod_idx)
7201 return work_done;
7202
7203 /* run RX thread, within the bounds set by NAPI.
7204 * All RX "locking" is done by ensuring outside
7205 * code synchronizes with tg3->napi.poll()
7206 */
7207 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7208 work_done += tg3_rx(tnapi, budget - work_done);
7209
7210 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7211 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7212 int i, err = 0;
7213 u32 std_prod_idx = dpr->rx_std_prod_idx;
7214 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7215
7216 tp->rx_refill = false;
7217 for (i = 1; i <= tp->rxq_cnt; i++)
7218 err |= tg3_rx_prodring_xfer(tp, dpr,
7219 &tp->napi[i].prodring);
7220
7221 wmb();
7222
7223 if (std_prod_idx != dpr->rx_std_prod_idx)
7224 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7225 dpr->rx_std_prod_idx);
7226
7227 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7228 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7229 dpr->rx_jmb_prod_idx);
7230
7231 if (err)
7232 tw32_f(HOSTCC_MODE, tp->coal_now);
7233 }
7234
7235 return work_done;
7236 }
7237
7238 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7239 {
7240 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7241 schedule_work(&tp->reset_task);
7242 }
7243
7244 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7245 {
7246 if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7247 cancel_work_sync(&tp->reset_task);
7248 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7249 }
7250
7251 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7252 {
7253 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7254 struct tg3 *tp = tnapi->tp;
7255 int work_done = 0;
7256 struct tg3_hw_status *sblk = tnapi->hw_status;
7257
7258 while (1) {
7259 work_done = tg3_poll_work(tnapi, work_done, budget);
7260
7261 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7262 goto tx_recovery;
7263
7264 if (unlikely(work_done >= budget))
7265 break;
7266
7267 /* tp->last_tag is used in tg3_int_reenable() below
7268 * to tell the hw how much work has been processed,
7269 * so we must read it before checking for more work.
7270 */
7271 tnapi->last_tag = sblk->status_tag;
7272 tnapi->last_irq_tag = tnapi->last_tag;
7273 rmb();
7274
7275 /* check for RX/TX work to do */
7276 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7277 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7278
7279 /* This test is not race free, but looping again
7280 * reduces the number of interrupts.
7281 */
7282 if (tnapi == &tp->napi[1] && tp->rx_refill)
7283 continue;
7284
7285 napi_complete_done(napi, work_done);
7286 /* Reenable interrupts. */
7287 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7288
7289 /* This test is synchronized by napi_schedule()
7290 * and napi_complete() to close the race condition.
7291 */
7292 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7293 tw32(HOSTCC_MODE, tp->coalesce_mode |
7294 HOSTCC_MODE_ENABLE |
7295 tnapi->coal_now);
7296 }
7297 break;
7298 }
7299 }
7300
7301 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7302 return work_done;
7303
7304 tx_recovery:
7305 /* work_done is guaranteed to be less than budget. */
7306 napi_complete(napi);
7307 tg3_reset_task_schedule(tp);
7308 return work_done;
7309 }
7310
7311 static void tg3_process_error(struct tg3 *tp)
7312 {
7313 u32 val;
7314 bool real_error = false;
7315
7316 if (tg3_flag(tp, ERROR_PROCESSED))
7317 return;
7318
7319 /* Check Flow Attention register */
7320 val = tr32(HOSTCC_FLOW_ATTN);
7321 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7322 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7323 real_error = true;
7324 }
7325
7326 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7327 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7328 real_error = true;
7329 }
7330
7331 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7332 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7333 real_error = true;
7334 }
7335
7336 if (!real_error)
7337 return;
7338
7339 tg3_dump_state(tp);
7340
7341 tg3_flag_set(tp, ERROR_PROCESSED);
7342 tg3_reset_task_schedule(tp);
7343 }
7344
7345 static int tg3_poll(struct napi_struct *napi, int budget)
7346 {
7347 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7348 struct tg3 *tp = tnapi->tp;
7349 int work_done = 0;
7350 struct tg3_hw_status *sblk = tnapi->hw_status;
7351
7352 while (1) {
7353 if (sblk->status & SD_STATUS_ERROR)
7354 tg3_process_error(tp);
7355
7356 tg3_poll_link(tp);
7357
7358 work_done = tg3_poll_work(tnapi, work_done, budget);
7359
7360 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7361 goto tx_recovery;
7362
7363 if (unlikely(work_done >= budget))
7364 break;
7365
7366 if (tg3_flag(tp, TAGGED_STATUS)) {
7367 /* tp->last_tag is used in tg3_int_reenable() below
7368 * to tell the hw how much work has been processed,
7369 * so we must read it before checking for more work.
7370 */
7371 tnapi->last_tag = sblk->status_tag;
7372 tnapi->last_irq_tag = tnapi->last_tag;
7373 rmb();
7374 } else
7375 sblk->status &= ~SD_STATUS_UPDATED;
7376
7377 if (likely(!tg3_has_work(tnapi))) {
7378 napi_complete_done(napi, work_done);
7379 tg3_int_reenable(tnapi);
7380 break;
7381 }
7382 }
7383
7384 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7385 return work_done;
7386
7387 tx_recovery:
7388 /* work_done is guaranteed to be less than budget. */
7389 napi_complete(napi);
7390 tg3_reset_task_schedule(tp);
7391 return work_done;
7392 }
7393
7394 static void tg3_napi_disable(struct tg3 *tp)
7395 {
7396 int txq_idx = tp->txq_cnt - 1;
7397 int rxq_idx = tp->rxq_cnt - 1;
7398 struct tg3_napi *tnapi;
7399 int i;
7400
7401 for (i = tp->irq_cnt - 1; i >= 0; i--) {
7402 tnapi = &tp->napi[i];
7403 if (tnapi->tx_buffers) {
7404 netif_queue_set_napi(tp->dev, txq_idx,
7405 NETDEV_QUEUE_TYPE_TX, NULL);
7406 txq_idx--;
7407 }
7408 if (tnapi->rx_rcb) {
7409 netif_queue_set_napi(tp->dev, rxq_idx,
7410 NETDEV_QUEUE_TYPE_RX, NULL);
7411 rxq_idx--;
7412 }
7413 napi_disable(&tnapi->napi);
7414 }
7415 }
7416
7417 static void tg3_napi_enable(struct tg3 *tp)
7418 {
7419 int txq_idx = 0, rxq_idx = 0;
7420 struct tg3_napi *tnapi;
7421 int i;
7422
7423 for (i = 0; i < tp->irq_cnt; i++) {
7424 tnapi = &tp->napi[i];
7425 napi_enable_locked(&tnapi->napi);
7426 if (tnapi->tx_buffers) {
7427 netif_queue_set_napi(tp->dev, txq_idx,
7428 NETDEV_QUEUE_TYPE_TX,
7429 &tnapi->napi);
7430 txq_idx++;
7431 }
7432 if (tnapi->rx_rcb) {
7433 netif_queue_set_napi(tp->dev, rxq_idx,
7434 NETDEV_QUEUE_TYPE_RX,
7435 &tnapi->napi);
7436 rxq_idx++;
7437 }
7438 }
7439 }
7440
7441 static void tg3_napi_init(struct tg3 *tp)
7442 {
7443 int i;
7444
7445 for (i = 0; i < tp->irq_cnt; i++) {
7446 netif_napi_add_locked(tp->dev, &tp->napi[i].napi,
7447 i ? tg3_poll_msix : tg3_poll);
7448 netif_napi_set_irq_locked(&tp->napi[i].napi,
7449 tp->napi[i].irq_vec);
7450 }
7451 }
7452
7453 static void tg3_napi_fini(struct tg3 *tp)
7454 {
7455 int i;
7456
7457 for (i = 0; i < tp->irq_cnt; i++)
7458 netif_napi_del(&tp->napi[i].napi);
7459 }
7460
7461 static inline void tg3_netif_stop(struct tg3 *tp)
7462 {
7463 netif_trans_update(tp->dev); /* prevent tx timeout */
7464 tg3_napi_disable(tp);
7465 netif_carrier_off(tp->dev);
7466 netif_tx_disable(tp->dev);
7467 }
7468
7469 /* tp->lock must be held */
7470 static inline void tg3_netif_start(struct tg3 *tp)
7471 {
7472 tg3_ptp_resume(tp);
7473
7474 /* NOTE: unconditional netif_tx_wake_all_queues is only
7475 * appropriate so long as all callers are assured to
7476 * have free tx slots (such as after tg3_init_hw)
7477 */
7478 netif_tx_wake_all_queues(tp->dev);
7479
7480 if (tp->link_up)
7481 netif_carrier_on(tp->dev);
7482
7483 tg3_napi_enable(tp);
7484 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7485 tg3_enable_ints(tp);
7486 }
7487
7488 static void tg3_irq_quiesce(struct tg3 *tp)
7489 __releases(tp->lock)
7490 __acquires(tp->lock)
7491 {
7492 int i;
7493
7494 BUG_ON(tp->irq_sync);
7495
7496 tp->irq_sync = 1;
7497 smp_mb();
7498
7499 spin_unlock_bh(&tp->lock);
7500
7501 for (i = 0; i < tp->irq_cnt; i++)
7502 synchronize_irq(tp->napi[i].irq_vec);
7503
7504 spin_lock_bh(&tp->lock);
7505 }
7506
7507 /* Fully shut down all tg3 driver activity elsewhere in the system.
7508 * If irq_sync is non-zero, the IRQ handlers are synchronized with as
7509 * well. Most of the time this is only necessary when shutting down
7510 * the device.
7511 */
7512 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7513 {
7514 spin_lock_bh(&tp->lock);
7515 if (irq_sync)
7516 tg3_irq_quiesce(tp);
7517 }
7518
7519 static inline void tg3_full_unlock(struct tg3 *tp)
7520 {
7521 spin_unlock_bh(&tp->lock);
7522 }
7523
7524 /* One-shot MSI handler - Chip automatically disables interrupt
7525 * after sending MSI so driver doesn't have to do it.
7526 */
7527 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7528 {
7529 struct tg3_napi *tnapi = dev_id;
7530 struct tg3 *tp = tnapi->tp;
7531
7532 prefetch(tnapi->hw_status);
7533 if (tnapi->rx_rcb)
7534 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7535
7536 if (likely(!tg3_irq_sync(tp)))
7537 napi_schedule(&tnapi->napi);
7538
7539 return IRQ_HANDLED;
7540 }
7541
7542 /* MSI ISR - No need to check for interrupt sharing and no need to
7543 * flush status block and interrupt mailbox. PCI ordering rules
7544 * guarantee that MSI will arrive after the status block.
7545 */
7546 static irqreturn_t tg3_msi(int irq, void *dev_id)
7547 {
7548 struct tg3_napi *tnapi = dev_id;
7549 struct tg3 *tp = tnapi->tp;
7550
7551 prefetch(tnapi->hw_status);
7552 if (tnapi->rx_rcb)
7553 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7554 /*
7555 * Writing any value to intr-mbox-0 clears PCI INTA# and
7556 * chip-internal interrupt pending events.
7557 * Writing non-zero to intr-mbox-0 additionally tells the
7558 * NIC to stop sending us irqs, engaging "in-intr-handler"
7559 * event coalescing.
7560 */
7561 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7562 if (likely(!tg3_irq_sync(tp)))
7563 napi_schedule(&tnapi->napi);
7564
7565 return IRQ_RETVAL(1);
7566 }
7567
7568 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7569 {
7570 struct tg3_napi *tnapi = dev_id;
7571 struct tg3 *tp = tnapi->tp;
7572 struct tg3_hw_status *sblk = tnapi->hw_status;
7573 unsigned int handled = 1;
7574
7575 /* In INTx mode, it is possible for the interrupt to arrive at
7576 * the CPU before the status block posted prior to the interrupt is visible.
7577 * Reading the PCI State register will confirm whether the
7578 * interrupt is ours and will flush the status block.
7579 */
7580 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7581 if (tg3_flag(tp, CHIP_RESETTING) ||
7582 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7583 handled = 0;
7584 goto out;
7585 }
7586 }
7587
7588 /*
7589 * Writing any value to intr-mbox-0 clears PCI INTA# and
7590 * chip-internal interrupt pending events.
7591 * Writing non-zero to intr-mbox-0 additionally tells the
7592 * NIC to stop sending us irqs, engaging "in-intr-handler"
7593 * event coalescing.
7594 *
7595 * Flush the mailbox to de-assert the IRQ immediately to prevent
7596 * spurious interrupts. The flush impacts performance but
7597 * excessive spurious interrupts can be worse in some cases.
7598 */
7599 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7600 if (tg3_irq_sync(tp))
7601 goto out;
7602 sblk->status &= ~SD_STATUS_UPDATED;
7603 if (likely(tg3_has_work(tnapi))) {
7604 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7605 napi_schedule(&tnapi->napi);
7606 } else {
7607 		/* No work; shared interrupt, perhaps?  Re-enable
7608 		 * interrupts, and flush that PCI write.
7609 		 */
7610 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7611 0x00000000);
7612 }
7613 out:
7614 return IRQ_RETVAL(handled);
7615 }
7616
7617 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7618 {
7619 struct tg3_napi *tnapi = dev_id;
7620 struct tg3 *tp = tnapi->tp;
7621 struct tg3_hw_status *sblk = tnapi->hw_status;
7622 unsigned int handled = 1;
7623
7624 	/* In INTx mode, it is possible for the interrupt to arrive at
7625 	 * the CPU before the status block write that was posted prior to it.
7626 * Reading the PCI State register will confirm whether the
7627 * interrupt is ours and will flush the status block.
7628 */
7629 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7630 if (tg3_flag(tp, CHIP_RESETTING) ||
7631 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7632 handled = 0;
7633 goto out;
7634 }
7635 }
7636
7637 /*
7638 	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7639 	 * chip-internal interrupt pending events.
7640 	 * Writing non-zero to intr-mbox-0 additionally tells the
7641 	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7642 * event coalescing.
7643 *
7644 * Flush the mailbox to de-assert the IRQ immediately to prevent
7645 * spurious interrupts. The flush impacts performance but
7646 * excessive spurious interrupts can be worse in some cases.
7647 */
7648 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7649
7650 /*
7651 * In a shared interrupt configuration, sometimes other devices'
7652 * interrupts will scream. We record the current status tag here
7653 * so that the above check can report that the screaming interrupts
7654 * are unhandled. Eventually they will be silenced.
7655 */
7656 tnapi->last_irq_tag = sblk->status_tag;
7657
7658 if (tg3_irq_sync(tp))
7659 goto out;
7660
7661 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7662
7663 napi_schedule(&tnapi->napi);
7664
7665 out:
7666 return IRQ_RETVAL(handled);
7667 }
7668
7669 /* ISR for interrupt test */
7670 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7671 {
7672 struct tg3_napi *tnapi = dev_id;
7673 struct tg3 *tp = tnapi->tp;
7674 struct tg3_hw_status *sblk = tnapi->hw_status;
7675
7676 if ((sblk->status & SD_STATUS_UPDATED) ||
7677 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7678 tg3_disable_ints(tp);
7679 return IRQ_RETVAL(1);
7680 }
7681 return IRQ_RETVAL(0);
7682 }
7683
7684 #ifdef CONFIG_NET_POLL_CONTROLLER
7685 static void tg3_poll_controller(struct net_device *dev)
7686 {
7687 int i;
7688 struct tg3 *tp = netdev_priv(dev);
7689
7690 if (tg3_irq_sync(tp))
7691 return;
7692
7693 for (i = 0; i < tp->irq_cnt; i++)
7694 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7695 }
7696 #endif
7697
7698 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7699 {
7700 struct tg3 *tp = netdev_priv(dev);
7701
7702 if (netif_msg_tx_err(tp)) {
7703 netdev_err(dev, "transmit timed out, resetting\n");
7704 tg3_dump_state(tp);
7705 }
7706
7707 tg3_reset_task_schedule(tp);
7708 }
7709
7710 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7711 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7712 {
7713 u32 base = (u32) mapping & 0xffffffff;
7714
7715 return base + len + 8 < base;
7716 }
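
/* Worked example of the unsigned wraparound trick above: for a buffer
 * whose low 32 address bits are 0xfffffff0 with len = 32, base + len + 8
 * computes to 0x100000018, which truncates to 0x18 < base, so the test
 * reports a 4GB-boundary crossing. The extra 8 bytes are assumed here to
 * be guard slack for buffers ending just short of the boundary.
 */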
7717
7718 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7719  * of any 4GB boundaries: 4G, 8G, etc.
7720 */
7721 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7722 u32 len, u32 mss)
7723 {
7724 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7725 u32 base = (u32) mapping & 0xffffffff;
7726
7727 return ((base + len + (mss & 0x3fff)) < base);
7728 }
7729 return 0;
7730 }
7731
7732 /* Test for DMA addresses > 40-bit */
7733 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7734 int len)
7735 {
7736 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7737 if (tg3_flag(tp, 40BIT_DMA_BUG))
7738 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7739 return 0;
7740 #else
7741 return 0;
7742 #endif
7743 }
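
/* Example (hypothetical values): with 40BIT_DMA_BUG set, a mapping of
 * 0xfffffff000 with len = 0x2000 ends at 0x10000001000, which is above
 * DMA_BIT_MASK(40) = 0xffffffffff, so the test fires and the buffer is
 * later bounced via tigon3_dma_hwbug_workaround().
 */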
7744
7745 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7746 dma_addr_t mapping, u32 len, u32 flags,
7747 u32 mss, u32 vlan)
7748 {
7749 txbd->addr_hi = ((u64) mapping >> 32);
7750 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7751 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7752 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7753 }
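
/* Packing sketch (illustrative values): a 64-byte frame at DMA address
 * 0x123456780 with flags = TXD_FLAG_END and no mss/vlan would be encoded
 * roughly as
 *
 *	txbd->addr_hi   = 0x00000001;
 *	txbd->addr_lo   = 0x23456780;
 *	txbd->len_flags = (64 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *	txbd->vlan_tag  = 0;
 */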
7754
7755 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7756 dma_addr_t map, u32 len, u32 flags,
7757 u32 mss, u32 vlan)
7758 {
7759 struct tg3 *tp = tnapi->tp;
7760 bool hwbug = false;
7761
7762 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7763 hwbug = true;
7764
7765 if (tg3_4g_overflow_test(map, len))
7766 hwbug = true;
7767
7768 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7769 hwbug = true;
7770
7771 if (tg3_40bit_overflow_test(tp, map, len))
7772 hwbug = true;
7773
7774 if (tp->dma_limit) {
7775 u32 prvidx = *entry;
7776 u32 tmp_flag = flags & ~TXD_FLAG_END;
7777 while (len > tp->dma_limit && *budget) {
7778 u32 frag_len = tp->dma_limit;
7779 len -= tp->dma_limit;
7780
7781 			/* Avoid the 8-byte DMA problem */
7782 if (len <= 8) {
7783 len += tp->dma_limit / 2;
7784 frag_len = tp->dma_limit / 2;
7785 }
7786
7787 tnapi->tx_buffers[*entry].fragmented = true;
7788
7789 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7790 frag_len, tmp_flag, mss, vlan);
7791 *budget -= 1;
7792 prvidx = *entry;
7793 *entry = NEXT_TX(*entry);
7794
7795 map += frag_len;
7796 }
7797
7798 if (len) {
7799 if (*budget) {
7800 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7801 len, flags, mss, vlan);
7802 *budget -= 1;
7803 *entry = NEXT_TX(*entry);
7804 } else {
7805 hwbug = true;
7806 tnapi->tx_buffers[prvidx].fragmented = false;
7807 }
7808 }
7809 } else {
7810 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7811 len, flags, mss, vlan);
7812 *entry = NEXT_TX(*entry);
7813 }
7814
7815 return hwbug;
7816 }
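
/* Splitting sketch (assuming tp->dma_limit = 4096): an 8200-byte mapping
 * would naively split as 4096 + 4096 + 8, but the 8-byte tail would trip
 * SHORT_DMA_BUG, so the second chunk is halved and the BDs become
 * 4096 + 2048 + 2056. Each emitted BD consumes one descriptor and one
 * unit of *budget.
 */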
7817
7818 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7819 {
7820 int i;
7821 struct sk_buff *skb;
7822 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7823
7824 skb = txb->skb;
7825 txb->skb = NULL;
7826
7827 dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7828 skb_headlen(skb), DMA_TO_DEVICE);
7829
7830 while (txb->fragmented) {
7831 txb->fragmented = false;
7832 entry = NEXT_TX(entry);
7833 txb = &tnapi->tx_buffers[entry];
7834 }
7835
7836 for (i = 0; i <= last; i++) {
7837 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7838
7839 entry = NEXT_TX(entry);
7840 txb = &tnapi->tx_buffers[entry];
7841
7842 dma_unmap_page(&tnapi->tp->pdev->dev,
7843 dma_unmap_addr(txb, mapping),
7844 skb_frag_size(frag), DMA_TO_DEVICE);
7845
7846 while (txb->fragmented) {
7847 txb->fragmented = false;
7848 entry = NEXT_TX(entry);
7849 txb = &tnapi->tx_buffers[entry];
7850 }
7851 }
7852 }
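
/* Call-pattern note: last = skb_shinfo(skb)->nr_frags - 1 unmaps the
 * linear head plus every fragment (see tg3_free_rings()), while last = -1
 * unmaps only the head, as tigon3_dma_hwbug_workaround() does for its
 * single linear copy.
 */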
7853
7854 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7855 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7856 struct sk_buff **pskb,
7857 u32 *entry, u32 *budget,
7858 u32 base_flags, u32 mss, u32 vlan)
7859 {
7860 struct tg3 *tp = tnapi->tp;
7861 struct sk_buff *new_skb, *skb = *pskb;
7862 dma_addr_t new_addr = 0;
7863 int ret = 0;
7864
7865 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7866 new_skb = skb_copy(skb, GFP_ATOMIC);
7867 else {
7868 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7869
7870 new_skb = skb_copy_expand(skb,
7871 skb_headroom(skb) + more_headroom,
7872 skb_tailroom(skb), GFP_ATOMIC);
7873 }
7874
7875 if (!new_skb) {
7876 ret = -1;
7877 } else {
7878 /* New SKB is guaranteed to be linear. */
7879 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7880 new_skb->len, DMA_TO_DEVICE);
7881 /* Make sure the mapping succeeded */
7882 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7883 dev_kfree_skb_any(new_skb);
7884 ret = -1;
7885 } else {
7886 u32 save_entry = *entry;
7887
7888 base_flags |= TXD_FLAG_END;
7889
7890 tnapi->tx_buffers[*entry].skb = new_skb;
7891 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7892 mapping, new_addr);
7893
7894 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7895 new_skb->len, base_flags,
7896 mss, vlan)) {
7897 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7898 dev_kfree_skb_any(new_skb);
7899 ret = -1;
7900 }
7901 }
7902 }
7903
7904 dev_consume_skb_any(skb);
7905 *pskb = new_skb;
7906 return ret;
7907 }
7908
7909 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7910 {
7911 /* Check if we will never have enough descriptors,
7912 	 * as gso_segs can exceed the current ring size.
7913 */
7914 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7915 }
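
/* Example: with tx_pending = 512, the workaround below is attempted only
 * when gso_segs < 170 (512 / 3), matching the worst-case estimate of
 * three descriptors per segment used in tg3_tso_bug().
 */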
7916
7917 static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
7918
7919 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7920 * indicated in tg3_tx_frag_set()
7921 */
7922 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7923 struct netdev_queue *txq, struct sk_buff *skb)
7924 {
7925 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7926 struct sk_buff *segs, *seg, *next;
7927
7928 /* Estimate the number of fragments in the worst case */
7929 if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7930 netif_tx_stop_queue(txq);
7931
7932 		/* netif_tx_stop_queue() must be done before checking
7933 		 * tx index in tg3_tx_avail() below, because in
7934 * tg3_tx(), we update tx index before checking for
7935 * netif_tx_queue_stopped().
7936 */
7937 smp_mb();
7938 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7939 return NETDEV_TX_BUSY;
7940
7941 netif_tx_wake_queue(txq);
7942 }
7943
7944 segs = skb_gso_segment(skb, tp->dev->features &
7945 ~(NETIF_F_TSO | NETIF_F_TSO6));
7946 if (IS_ERR(segs) || !segs) {
7947 tnapi->tx_dropped++;
7948 goto tg3_tso_bug_end;
7949 }
7950
7951 skb_list_walk_safe(segs, seg, next) {
7952 skb_mark_not_on_list(seg);
7953 __tg3_start_xmit(seg, tp->dev);
7954 }
7955
7956 tg3_tso_bug_end:
7957 dev_consume_skb_any(skb);
7958
7959 return NETDEV_TX_OK;
7960 }
7961
7962 /* hard_start_xmit for all devices */
7963 static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7964 {
7965 struct tg3 *tp = netdev_priv(dev);
7966 u32 len, entry, base_flags, mss, vlan = 0;
7967 u32 budget;
7968 int i = -1, would_hit_hwbug;
7969 dma_addr_t mapping;
7970 struct tg3_napi *tnapi;
7971 struct netdev_queue *txq;
7972 unsigned int last;
7973 struct iphdr *iph = NULL;
7974 struct tcphdr *tcph = NULL;
7975 __sum16 tcp_csum = 0, ip_csum = 0;
7976 __be16 ip_tot_len = 0;
7977
7978 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7979 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7980 if (tg3_flag(tp, ENABLE_TSS))
7981 tnapi++;
7982
7983 budget = tg3_tx_avail(tnapi);
7984
7985 	/* We are running in BH-disabled context with netif_tx_lock
7986 	 * held, and TX reclaim runs via tp->napi.poll inside of a software
7987 	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7988 * no IRQ context deadlocks to worry about either. Rejoice!
7989 */
7990 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7991 if (!netif_tx_queue_stopped(txq)) {
7992 netif_tx_stop_queue(txq);
7993
7994 /* This is a hard error, log it. */
7995 netdev_err(dev,
7996 "BUG! Tx Ring full when queue awake!\n");
7997 }
7998 return NETDEV_TX_BUSY;
7999 }
8000
8001 entry = tnapi->tx_prod;
8002 base_flags = 0;
8003
8004 mss = skb_shinfo(skb)->gso_size;
8005 if (mss) {
8006 u32 tcp_opt_len, hdr_len;
8007
8008 if (skb_cow_head(skb, 0))
8009 goto drop;
8010
8011 iph = ip_hdr(skb);
8012 tcp_opt_len = tcp_optlen(skb);
8013
8014 hdr_len = skb_tcp_all_headers(skb) - ETH_HLEN;
8015
8016 		/* HW/FW cannot correctly segment packets that have been
8017 * vlan encapsulated.
8018 */
8019 if (skb->protocol == htons(ETH_P_8021Q) ||
8020 skb->protocol == htons(ETH_P_8021AD)) {
8021 if (tg3_tso_bug_gso_check(tnapi, skb))
8022 return tg3_tso_bug(tp, tnapi, txq, skb);
8023 goto drop;
8024 }
8025
8026 if (!skb_is_gso_v6(skb)) {
8027 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
8028 tg3_flag(tp, TSO_BUG)) {
8029 if (tg3_tso_bug_gso_check(tnapi, skb))
8030 return tg3_tso_bug(tp, tnapi, txq, skb);
8031 goto drop;
8032 }
8033 ip_csum = iph->check;
8034 ip_tot_len = iph->tot_len;
8035 iph->check = 0;
8036 iph->tot_len = htons(mss + hdr_len);
8037 }
8038
8039 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
8040 TXD_FLAG_CPU_POST_DMA);
8041
8042 tcph = tcp_hdr(skb);
8043 tcp_csum = tcph->check;
8044
8045 if (tg3_flag(tp, HW_TSO_1) ||
8046 tg3_flag(tp, HW_TSO_2) ||
8047 tg3_flag(tp, HW_TSO_3)) {
8048 tcph->check = 0;
8049 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
8050 } else {
8051 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
8052 0, IPPROTO_TCP, 0);
8053 }
8054
8055 if (tg3_flag(tp, HW_TSO_3)) {
8056 mss |= (hdr_len & 0xc) << 12;
8057 if (hdr_len & 0x10)
8058 base_flags |= 0x00000010;
8059 base_flags |= (hdr_len & 0x3e0) << 5;
8060 } else if (tg3_flag(tp, HW_TSO_2))
8061 mss |= hdr_len << 9;
8062 else if (tg3_flag(tp, HW_TSO_1) ||
8063 tg3_asic_rev(tp) == ASIC_REV_5705) {
8064 if (tcp_opt_len || iph->ihl > 5) {
8065 int tsflags;
8066
8067 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8068 mss |= (tsflags << 11);
8069 }
8070 } else {
8071 if (tcp_opt_len || iph->ihl > 5) {
8072 int tsflags;
8073
8074 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8075 base_flags |= tsflags << 12;
8076 }
8077 }
8078 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8079 		/* HW/FW cannot correctly checksum packets that have been
8080 * vlan encapsulated.
8081 */
8082 if (skb->protocol == htons(ETH_P_8021Q) ||
8083 skb->protocol == htons(ETH_P_8021AD)) {
8084 if (skb_checksum_help(skb))
8085 goto drop;
8086 } else {
8087 base_flags |= TXD_FLAG_TCPUDP_CSUM;
8088 }
8089 }
8090
8091 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8092 !mss && skb->len > VLAN_ETH_FRAME_LEN)
8093 base_flags |= TXD_FLAG_JMB_PKT;
8094
8095 if (skb_vlan_tag_present(skb)) {
8096 base_flags |= TXD_FLAG_VLAN;
8097 vlan = skb_vlan_tag_get(skb);
8098 }
8099
8100 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8101 tg3_flag(tp, TX_TSTAMP_EN)) {
8102 tg3_full_lock(tp, 0);
8103 if (!tp->pre_tx_ts) {
8104 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8105 base_flags |= TXD_FLAG_HWTSTAMP;
8106 tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
8107 }
8108 tg3_full_unlock(tp);
8109 }
8110
8111 len = skb_headlen(skb);
8112
8113 mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8114 DMA_TO_DEVICE);
8115 if (dma_mapping_error(&tp->pdev->dev, mapping))
8116 goto drop;
8117
8119 tnapi->tx_buffers[entry].skb = skb;
8120 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8121
8122 would_hit_hwbug = 0;
8123
8124 if (tg3_flag(tp, 5701_DMA_BUG))
8125 would_hit_hwbug = 1;
8126
8127 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8128 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8129 mss, vlan)) {
8130 would_hit_hwbug = 1;
8131 } else if (skb_shinfo(skb)->nr_frags > 0) {
8132 u32 tmp_mss = mss;
8133
8134 if (!tg3_flag(tp, HW_TSO_1) &&
8135 !tg3_flag(tp, HW_TSO_2) &&
8136 !tg3_flag(tp, HW_TSO_3))
8137 tmp_mss = 0;
8138
8139 /* Now loop through additional data
8140 * fragments, and queue them.
8141 */
8142 last = skb_shinfo(skb)->nr_frags - 1;
8143 for (i = 0; i <= last; i++) {
8144 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8145
8146 len = skb_frag_size(frag);
8147 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8148 len, DMA_TO_DEVICE);
8149
8150 tnapi->tx_buffers[entry].skb = NULL;
8151 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8152 mapping);
8153 if (dma_mapping_error(&tp->pdev->dev, mapping))
8154 goto dma_error;
8155
8156 if (!budget ||
8157 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8158 len, base_flags |
8159 ((i == last) ? TXD_FLAG_END : 0),
8160 tmp_mss, vlan)) {
8161 would_hit_hwbug = 1;
8162 break;
8163 }
8164 }
8165 }
8166
8167 if (would_hit_hwbug) {
8168 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8169
8170 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8171 /* If it's a TSO packet, do GSO instead of
8172 * allocating and copying to a large linear SKB
8173 */
8174 if (ip_tot_len) {
8175 iph->check = ip_csum;
8176 iph->tot_len = ip_tot_len;
8177 }
8178 tcph->check = tcp_csum;
8179 return tg3_tso_bug(tp, tnapi, txq, skb);
8180 }
8181
8182 /* If the workaround fails due to memory/mapping
8183 * failure, silently drop this packet.
8184 */
8185 entry = tnapi->tx_prod;
8186 budget = tg3_tx_avail(tnapi);
8187 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8188 base_flags, mss, vlan))
8189 goto drop_nofree;
8190 }
8191
8192 skb_tx_timestamp(skb);
8193 netdev_tx_sent_queue(txq, skb->len);
8194
8195 /* Sync BD data before updating mailbox */
8196 wmb();
8197
8198 tnapi->tx_prod = entry;
8199 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8200 netif_tx_stop_queue(txq);
8201
8202 		/* netif_tx_stop_queue() must be done before checking
8203 		 * tx index in tg3_tx_avail() below, because in
8204 * tg3_tx(), we update tx index before checking for
8205 * netif_tx_queue_stopped().
8206 */
8207 smp_mb();
8208 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8209 netif_tx_wake_queue(txq);
8210 }
8211
8212 return NETDEV_TX_OK;
8213
8214 dma_error:
8215 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8216 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8217 drop:
8218 dev_kfree_skb_any(skb);
8219 drop_nofree:
8220 tnapi->tx_dropped++;
8221 return NETDEV_TX_OK;
8222 }
8223
8224 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8225 {
8226 struct netdev_queue *txq;
8227 u16 skb_queue_mapping;
8228 netdev_tx_t ret;
8229
8230 skb_queue_mapping = skb_get_queue_mapping(skb);
8231 txq = netdev_get_tx_queue(dev, skb_queue_mapping);
8232
8233 ret = __tg3_start_xmit(skb, dev);
8234
8235 /* Notify the hardware that packets are ready by updating the TX ring
8236 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
8237 * the hardware for every packet. To guarantee forward progress the TX
8238 * ring must be drained when it is full as indicated by
8239 * netif_xmit_stopped(). This needs to happen even when the current
8240 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
8241 * queued by previous __tg3_start_xmit() calls might get stuck in
8242 * the queue forever.
8243 */
8244 if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8245 struct tg3_napi *tnapi;
8246 struct tg3 *tp;
8247
8248 tp = netdev_priv(dev);
8249 tnapi = &tp->napi[skb_queue_mapping];
8250
8251 if (tg3_flag(tp, ENABLE_TSS))
8252 tnapi++;
8253
8254 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
8255 }
8256
8257 return ret;
8258 }
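
/* Doorbell batching sketch (illustrative): for a three-skb burst where
 * the stack indicates more packets are pending, only the final call
 * writes the producer mailbox:
 *
 *	tg3_start_xmit(skb1, dev);  // netdev_xmit_more() true  -> no doorbell
 *	tg3_start_xmit(skb2, dev);  // netdev_xmit_more() true  -> no doorbell
 *	tg3_start_xmit(skb3, dev);  // netdev_xmit_more() false -> tw32_tx_mbox()
 */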
8259
8260 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8261 {
8262 if (enable) {
8263 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8264 MAC_MODE_PORT_MODE_MASK);
8265
8266 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8267
8268 if (!tg3_flag(tp, 5705_PLUS))
8269 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8270
8271 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8272 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8273 else
8274 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8275 } else {
8276 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8277
8278 if (tg3_flag(tp, 5705_PLUS) ||
8279 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8280 tg3_asic_rev(tp) == ASIC_REV_5700)
8281 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8282 }
8283
8284 tw32(MAC_MODE, tp->mac_mode);
8285 udelay(40);
8286 }
8287
8288 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8289 {
8290 u32 val, bmcr, mac_mode, ptest = 0;
8291
8292 tg3_phy_toggle_apd(tp, false);
8293 tg3_phy_toggle_automdix(tp, false);
8294
8295 if (extlpbk && tg3_phy_set_extloopbk(tp))
8296 return -EIO;
8297
8298 bmcr = BMCR_FULLDPLX;
8299 switch (speed) {
8300 case SPEED_10:
8301 break;
8302 case SPEED_100:
8303 bmcr |= BMCR_SPEED100;
8304 break;
8305 case SPEED_1000:
8306 default:
8307 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8308 speed = SPEED_100;
8309 bmcr |= BMCR_SPEED100;
8310 } else {
8311 speed = SPEED_1000;
8312 bmcr |= BMCR_SPEED1000;
8313 }
8314 }
8315
8316 if (extlpbk) {
8317 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8318 tg3_readphy(tp, MII_CTRL1000, &val);
8319 val |= CTL1000_AS_MASTER |
8320 CTL1000_ENABLE_MASTER;
8321 tg3_writephy(tp, MII_CTRL1000, val);
8322 } else {
8323 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8324 MII_TG3_FET_PTEST_TRIM_2;
8325 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8326 }
8327 } else
8328 bmcr |= BMCR_LOOPBACK;
8329
8330 tg3_writephy(tp, MII_BMCR, bmcr);
8331
8332 /* The write needs to be flushed for the FETs */
8333 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8334 tg3_readphy(tp, MII_BMCR, &bmcr);
8335
8336 udelay(40);
8337
8338 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8339 tg3_asic_rev(tp) == ASIC_REV_5785) {
8340 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8341 MII_TG3_FET_PTEST_FRC_TX_LINK |
8342 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8343
8344 /* The write needs to be flushed for the AC131 */
8345 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8346 }
8347
8348 	/* Reset to prevent losing the first rx packet intermittently */
8349 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8350 tg3_flag(tp, 5780_CLASS)) {
8351 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8352 udelay(10);
8353 tw32_f(MAC_RX_MODE, tp->rx_mode);
8354 }
8355
8356 mac_mode = tp->mac_mode &
8357 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8358 if (speed == SPEED_1000)
8359 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8360 else
8361 mac_mode |= MAC_MODE_PORT_MODE_MII;
8362
8363 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8364 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8365
8366 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8367 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8368 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8369 mac_mode |= MAC_MODE_LINK_POLARITY;
8370
8371 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8372 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8373 }
8374
8375 tw32(MAC_MODE, mac_mode);
8376 udelay(40);
8377
8378 return 0;
8379 }
8380
8381 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8382 {
8383 struct tg3 *tp = netdev_priv(dev);
8384
8385 if (features & NETIF_F_LOOPBACK) {
8386 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8387 return;
8388
8389 spin_lock_bh(&tp->lock);
8390 tg3_mac_loopback(tp, true);
8391 netif_carrier_on(tp->dev);
8392 spin_unlock_bh(&tp->lock);
8393 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8394 } else {
8395 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8396 return;
8397
8398 spin_lock_bh(&tp->lock);
8399 tg3_mac_loopback(tp, false);
8400 /* Force link status check */
8401 tg3_setup_phy(tp, true);
8402 spin_unlock_bh(&tp->lock);
8403 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8404 }
8405 }
8406
8407 static netdev_features_t tg3_fix_features(struct net_device *dev,
8408 netdev_features_t features)
8409 {
8410 struct tg3 *tp = netdev_priv(dev);
8411
8412 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8413 features &= ~NETIF_F_ALL_TSO;
8414
8415 return features;
8416 }
8417
8418 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8419 {
8420 netdev_features_t changed = dev->features ^ features;
8421
8422 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8423 tg3_set_loopback(dev, features);
8424
8425 return 0;
8426 }
8427
8428 static void tg3_rx_prodring_free(struct tg3 *tp,
8429 struct tg3_rx_prodring_set *tpr)
8430 {
8431 int i;
8432
8433 if (tpr != &tp->napi[0].prodring) {
8434 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8435 i = (i + 1) & tp->rx_std_ring_mask)
8436 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8437 tp->rx_pkt_map_sz);
8438
8439 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8440 for (i = tpr->rx_jmb_cons_idx;
8441 i != tpr->rx_jmb_prod_idx;
8442 i = (i + 1) & tp->rx_jmb_ring_mask) {
8443 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8444 TG3_RX_JMB_MAP_SZ);
8445 }
8446 }
8447
8448 return;
8449 }
8450
8451 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8452 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8453 tp->rx_pkt_map_sz);
8454
8455 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8456 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8457 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8458 TG3_RX_JMB_MAP_SZ);
8459 }
8460 }
8461
8462 /* Initialize rx rings for packet processing.
8463 *
8464 * The chip has been shut down and the driver detached from
8465  * the networking core, so no interrupts or new tx packets will
8466 * end up in the driver. tp->{tx,}lock are held and thus
8467 * we may not sleep.
8468 */
8469 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8470 struct tg3_rx_prodring_set *tpr)
8471 {
8472 u32 i, rx_pkt_dma_sz;
8473
8474 tpr->rx_std_cons_idx = 0;
8475 tpr->rx_std_prod_idx = 0;
8476 tpr->rx_jmb_cons_idx = 0;
8477 tpr->rx_jmb_prod_idx = 0;
8478
8479 if (tpr != &tp->napi[0].prodring) {
8480 memset(&tpr->rx_std_buffers[0], 0,
8481 TG3_RX_STD_BUFF_RING_SIZE(tp));
8482 if (tpr->rx_jmb_buffers)
8483 memset(&tpr->rx_jmb_buffers[0], 0,
8484 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8485 goto done;
8486 }
8487
8488 /* Zero out all descriptors. */
8489 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8490
8491 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8492 if (tg3_flag(tp, 5780_CLASS) &&
8493 tp->dev->mtu > ETH_DATA_LEN)
8494 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8495 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8496
8497 	/* Initialize invariants of the rings; we only set this
8498 * stuff once. This works because the card does not
8499 * write into the rx buffer posting rings.
8500 */
8501 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8502 struct tg3_rx_buffer_desc *rxd;
8503
8504 rxd = &tpr->rx_std[i];
8505 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8506 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8507 rxd->opaque = (RXD_OPAQUE_RING_STD |
8508 (i << RXD_OPAQUE_INDEX_SHIFT));
8509 }
8510
8511 /* Now allocate fresh SKBs for each rx ring. */
8512 for (i = 0; i < tp->rx_pending; i++) {
8513 unsigned int frag_size;
8514
8515 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8516 &frag_size) < 0) {
8517 netdev_warn(tp->dev,
8518 "Using a smaller RX standard ring. Only "
8519 "%d out of %d buffers were allocated "
8520 "successfully\n", i, tp->rx_pending);
8521 if (i == 0)
8522 goto initfail;
8523 tp->rx_pending = i;
8524 break;
8525 }
8526 }
8527
8528 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8529 goto done;
8530
8531 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8532
8533 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8534 goto done;
8535
8536 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8537 struct tg3_rx_buffer_desc *rxd;
8538
8539 rxd = &tpr->rx_jmb[i].std;
8540 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8541 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8542 RXD_FLAG_JUMBO;
8543 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8544 (i << RXD_OPAQUE_INDEX_SHIFT));
8545 }
8546
8547 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8548 unsigned int frag_size;
8549
8550 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8551 &frag_size) < 0) {
8552 netdev_warn(tp->dev,
8553 "Using a smaller RX jumbo ring. Only %d "
8554 "out of %d buffers were allocated "
8555 "successfully\n", i, tp->rx_jumbo_pending);
8556 if (i == 0)
8557 goto initfail;
8558 tp->rx_jumbo_pending = i;
8559 break;
8560 }
8561 }
8562
8563 done:
8564 return 0;
8565
8566 initfail:
8567 tg3_rx_prodring_free(tp, tpr);
8568 return -ENOMEM;
8569 }
8570
8571 static void tg3_rx_prodring_fini(struct tg3 *tp,
8572 struct tg3_rx_prodring_set *tpr)
8573 {
8574 kfree(tpr->rx_std_buffers);
8575 tpr->rx_std_buffers = NULL;
8576 kfree(tpr->rx_jmb_buffers);
8577 tpr->rx_jmb_buffers = NULL;
8578 if (tpr->rx_std) {
8579 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8580 tpr->rx_std, tpr->rx_std_mapping);
8581 tpr->rx_std = NULL;
8582 }
8583 if (tpr->rx_jmb) {
8584 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8585 tpr->rx_jmb, tpr->rx_jmb_mapping);
8586 tpr->rx_jmb = NULL;
8587 }
8588 }
8589
8590 static int tg3_rx_prodring_init(struct tg3 *tp,
8591 struct tg3_rx_prodring_set *tpr)
8592 {
8593 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8594 GFP_KERNEL);
8595 if (!tpr->rx_std_buffers)
8596 return -ENOMEM;
8597
8598 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8599 TG3_RX_STD_RING_BYTES(tp),
8600 &tpr->rx_std_mapping,
8601 GFP_KERNEL);
8602 if (!tpr->rx_std)
8603 goto err_out;
8604
8605 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8606 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8607 GFP_KERNEL);
8608 if (!tpr->rx_jmb_buffers)
8609 goto err_out;
8610
8611 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8612 TG3_RX_JMB_RING_BYTES(tp),
8613 &tpr->rx_jmb_mapping,
8614 GFP_KERNEL);
8615 if (!tpr->rx_jmb)
8616 goto err_out;
8617 }
8618
8619 return 0;
8620
8621 err_out:
8622 tg3_rx_prodring_fini(tp, tpr);
8623 return -ENOMEM;
8624 }
8625
8626 /* Free up pending packets in all rx/tx rings.
8627 *
8628 * The chip has been shut down and the driver detached from
8629  * the networking core, so no interrupts or new tx packets will
8630 * end up in the driver. tp->{tx,}lock is not held and we are not
8631 * in an interrupt context and thus may sleep.
8632 */
8633 static void tg3_free_rings(struct tg3 *tp)
8634 {
8635 int i, j;
8636
8637 for (j = 0; j < tp->irq_cnt; j++) {
8638 struct tg3_napi *tnapi = &tp->napi[j];
8639
8640 tg3_rx_prodring_free(tp, &tnapi->prodring);
8641
8642 if (!tnapi->tx_buffers)
8643 continue;
8644
8645 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8646 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8647
8648 if (!skb)
8649 continue;
8650
8651 tg3_tx_skb_unmap(tnapi, i,
8652 skb_shinfo(skb)->nr_frags - 1);
8653
8654 dev_consume_skb_any(skb);
8655 }
8656 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8657 }
8658 }
8659
8660 /* Initialize tx/rx rings for packet processing.
8661 *
8662 * The chip has been shut down and the driver detached from
8663  * the networking core, so no interrupts or new tx packets will
8664 * end up in the driver. tp->{tx,}lock are held and thus
8665 * we may not sleep.
8666 */
8667 static int tg3_init_rings(struct tg3 *tp)
8668 {
8669 int i;
8670
8671 /* Free up all the SKBs. */
8672 tg3_free_rings(tp);
8673
8674 for (i = 0; i < tp->irq_cnt; i++) {
8675 struct tg3_napi *tnapi = &tp->napi[i];
8676
8677 tnapi->last_tag = 0;
8678 tnapi->last_irq_tag = 0;
8679 tnapi->hw_status->status = 0;
8680 tnapi->hw_status->status_tag = 0;
8681 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8682
8683 tnapi->tx_prod = 0;
8684 tnapi->tx_cons = 0;
8685 if (tnapi->tx_ring)
8686 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8687
8688 tnapi->rx_rcb_ptr = 0;
8689 if (tnapi->rx_rcb)
8690 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8691
8692 if (tnapi->prodring.rx_std &&
8693 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8694 tg3_free_rings(tp);
8695 return -ENOMEM;
8696 }
8697 }
8698
8699 return 0;
8700 }
8701
8702 static void tg3_mem_tx_release(struct tg3 *tp)
8703 {
8704 int i;
8705
8706 for (i = 0; i < tp->irq_max; i++) {
8707 struct tg3_napi *tnapi = &tp->napi[i];
8708
8709 if (tnapi->tx_ring) {
8710 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8711 tnapi->tx_ring, tnapi->tx_desc_mapping);
8712 tnapi->tx_ring = NULL;
8713 }
8714
8715 kfree(tnapi->tx_buffers);
8716 tnapi->tx_buffers = NULL;
8717 }
8718 }
8719
8720 static int tg3_mem_tx_acquire(struct tg3 *tp)
8721 {
8722 int i;
8723 struct tg3_napi *tnapi = &tp->napi[0];
8724
8725 /* If multivector TSS is enabled, vector 0 does not handle
8726 * tx interrupts. Don't allocate any resources for it.
8727 */
8728 if (tg3_flag(tp, ENABLE_TSS))
8729 tnapi++;
8730
8731 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8732 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8733 sizeof(struct tg3_tx_ring_info),
8734 GFP_KERNEL);
8735 if (!tnapi->tx_buffers)
8736 goto err_out;
8737
8738 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8739 TG3_TX_RING_BYTES,
8740 &tnapi->tx_desc_mapping,
8741 GFP_KERNEL);
8742 if (!tnapi->tx_ring)
8743 goto err_out;
8744 }
8745
8746 return 0;
8747
8748 err_out:
8749 tg3_mem_tx_release(tp);
8750 return -ENOMEM;
8751 }
8752
8753 static void tg3_mem_rx_release(struct tg3 *tp)
8754 {
8755 int i;
8756
8757 for (i = 0; i < tp->irq_max; i++) {
8758 struct tg3_napi *tnapi = &tp->napi[i];
8759
8760 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8761
8762 if (!tnapi->rx_rcb)
8763 continue;
8764
8765 dma_free_coherent(&tp->pdev->dev,
8766 TG3_RX_RCB_RING_BYTES(tp),
8767 tnapi->rx_rcb,
8768 tnapi->rx_rcb_mapping);
8769 tnapi->rx_rcb = NULL;
8770 }
8771 }
8772
8773 static int tg3_mem_rx_acquire(struct tg3 *tp)
8774 {
8775 unsigned int i, limit;
8776
8777 limit = tp->rxq_cnt;
8778
8779 /* If RSS is enabled, we need a (dummy) producer ring
8780 * set on vector zero. This is the true hw prodring.
8781 */
8782 if (tg3_flag(tp, ENABLE_RSS))
8783 limit++;
8784
8785 for (i = 0; i < limit; i++) {
8786 struct tg3_napi *tnapi = &tp->napi[i];
8787
8788 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8789 goto err_out;
8790
8791 /* If multivector RSS is enabled, vector 0
8792 * does not handle rx or tx interrupts.
8793 * Don't allocate any resources for it.
8794 */
8795 if (!i && tg3_flag(tp, ENABLE_RSS))
8796 continue;
8797
8798 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8799 TG3_RX_RCB_RING_BYTES(tp),
8800 &tnapi->rx_rcb_mapping,
8801 GFP_KERNEL);
8802 if (!tnapi->rx_rcb)
8803 goto err_out;
8804 }
8805
8806 return 0;
8807
8808 err_out:
8809 tg3_mem_rx_release(tp);
8810 return -ENOMEM;
8811 }
8812
8813 /*
8814 * Must not be invoked with interrupt sources disabled and
8815  * the hardware shut down.
8816 */
8817 static void tg3_free_consistent(struct tg3 *tp)
8818 {
8819 int i;
8820
8821 for (i = 0; i < tp->irq_cnt; i++) {
8822 struct tg3_napi *tnapi = &tp->napi[i];
8823
8824 if (tnapi->hw_status) {
8825 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8826 tnapi->hw_status,
8827 tnapi->status_mapping);
8828 tnapi->hw_status = NULL;
8829 }
8830 }
8831
8832 tg3_mem_rx_release(tp);
8833 tg3_mem_tx_release(tp);
8834
8835 /* tp->hw_stats can be referenced safely:
8836 * 1. under rtnl_lock
8837 * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8838 */
8839 if (tp->hw_stats) {
8840 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8841 tp->hw_stats, tp->stats_mapping);
8842 tp->hw_stats = NULL;
8843 }
8844 }
8845
8846 /*
8847 * Must not be invoked with interrupt sources disabled and
8848  * the hardware shut down.  Can sleep.
8849 */
8850 static int tg3_alloc_consistent(struct tg3 *tp)
8851 {
8852 int i;
8853
8854 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8855 sizeof(struct tg3_hw_stats),
8856 &tp->stats_mapping, GFP_KERNEL);
8857 if (!tp->hw_stats)
8858 goto err_out;
8859
8860 for (i = 0; i < tp->irq_cnt; i++) {
8861 struct tg3_napi *tnapi = &tp->napi[i];
8862 struct tg3_hw_status *sblk;
8863
8864 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8865 TG3_HW_STATUS_SIZE,
8866 &tnapi->status_mapping,
8867 GFP_KERNEL);
8868 if (!tnapi->hw_status)
8869 goto err_out;
8870
8871 sblk = tnapi->hw_status;
8872
8873 if (tg3_flag(tp, ENABLE_RSS)) {
8874 u16 *prodptr = NULL;
8875
8876 /*
8877 * When RSS is enabled, the status block format changes
8878 * slightly. The "rx_jumbo_consumer", "reserved",
8879 * and "rx_mini_consumer" members get mapped to the
8880 * other three rx return ring producer indexes.
8881 */
8882 switch (i) {
8883 case 1:
8884 prodptr = &sblk->idx[0].rx_producer;
8885 break;
8886 case 2:
8887 prodptr = &sblk->rx_jumbo_consumer;
8888 break;
8889 case 3:
8890 prodptr = &sblk->reserved;
8891 break;
8892 case 4:
8893 prodptr = &sblk->rx_mini_consumer;
8894 break;
8895 }
8896 tnapi->rx_rcb_prod_idx = prodptr;
8897 } else {
8898 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8899 }
8900 }
8901
8902 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8903 goto err_out;
8904
8905 return 0;
8906
8907 err_out:
8908 tg3_free_consistent(tp);
8909 return -ENOMEM;
8910 }
8911
8912 #define MAX_WAIT_CNT 1000
8913
8914 /* To stop a block, clear the enable bit and poll until it
8915 * clears. tp->lock is held.
8916 */
8917 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8918 {
8919 unsigned int i;
8920 u32 val;
8921
8922 if (tg3_flag(tp, 5705_PLUS)) {
8923 switch (ofs) {
8924 case RCVLSC_MODE:
8925 case DMAC_MODE:
8926 case MBFREE_MODE:
8927 case BUFMGR_MODE:
8928 case MEMARB_MODE:
8929 /* We can't enable/disable these bits of the
8930 			 * 5705/5750; just report success.
8931 */
8932 return 0;
8933
8934 default:
8935 break;
8936 }
8937 }
8938
8939 val = tr32(ofs);
8940 val &= ~enable_bit;
8941 tw32_f(ofs, val);
8942
8943 for (i = 0; i < MAX_WAIT_CNT; i++) {
8944 if (pci_channel_offline(tp->pdev)) {
8945 dev_err(&tp->pdev->dev,
8946 "tg3_stop_block device offline, "
8947 "ofs=%lx enable_bit=%x\n",
8948 ofs, enable_bit);
8949 return -ENODEV;
8950 }
8951
8952 udelay(100);
8953 val = tr32(ofs);
8954 if ((val & enable_bit) == 0)
8955 break;
8956 }
8957
8958 if (i == MAX_WAIT_CNT && !silent) {
8959 dev_err(&tp->pdev->dev,
8960 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8961 ofs, enable_bit);
8962 return -ENODEV;
8963 }
8964
8965 return 0;
8966 }
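
/* Timing note: the poll above allows each block MAX_WAIT_CNT * 100us,
 * i.e. 1000 * 100us = 100ms, to acknowledge the disable before giving
 * up with -ENODEV.
 */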
8967
8968 /* tp->lock is held. */
8969 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8970 {
8971 int i, err;
8972
8973 tg3_disable_ints(tp);
8974
8975 if (pci_channel_offline(tp->pdev)) {
8976 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8977 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8978 err = -ENODEV;
8979 goto err_no_dev;
8980 }
8981
8982 tp->rx_mode &= ~RX_MODE_ENABLE;
8983 tw32_f(MAC_RX_MODE, tp->rx_mode);
8984 udelay(10);
8985
8986 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8987 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8988 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8989 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8990 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8991 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8992
8993 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8994 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8995 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8996 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8997 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8998 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8999 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
9000
9001 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
9002 tw32_f(MAC_MODE, tp->mac_mode);
9003 udelay(40);
9004
9005 tp->tx_mode &= ~TX_MODE_ENABLE;
9006 tw32_f(MAC_TX_MODE, tp->tx_mode);
9007
9008 for (i = 0; i < MAX_WAIT_CNT; i++) {
9009 udelay(100);
9010 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
9011 break;
9012 }
9013 if (i >= MAX_WAIT_CNT) {
9014 dev_err(&tp->pdev->dev,
9015 "%s timed out, TX_MODE_ENABLE will not clear "
9016 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
9017 err |= -ENODEV;
9018 }
9019
9020 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
9021 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
9022 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
9023
9024 tw32(FTQ_RESET, 0xffffffff);
9025 tw32(FTQ_RESET, 0x00000000);
9026
9027 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
9028 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
9029
9030 err_no_dev:
9031 for (i = 0; i < tp->irq_cnt; i++) {
9032 struct tg3_napi *tnapi = &tp->napi[i];
9033 if (tnapi->hw_status)
9034 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9035 }
9036
9037 return err;
9038 }
9039
9040 /* Save PCI command register before chip reset */
9041 static void tg3_save_pci_state(struct tg3 *tp)
9042 {
9043 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
9044 }
9045
9046 /* Restore PCI state after chip reset */
9047 static void tg3_restore_pci_state(struct tg3 *tp)
9048 {
9049 u32 val;
9050
9051 /* Re-enable indirect register accesses. */
9052 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
9053 tp->misc_host_ctrl);
9054
9055 /* Set MAX PCI retry to zero. */
9056 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
9057 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9058 tg3_flag(tp, PCIX_MODE))
9059 val |= PCISTATE_RETRY_SAME_DMA;
9060 /* Allow reads and writes to the APE register and memory space. */
9061 if (tg3_flag(tp, ENABLE_APE))
9062 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9063 PCISTATE_ALLOW_APE_SHMEM_WR |
9064 PCISTATE_ALLOW_APE_PSPACE_WR;
9065 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
9066
9067 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
9068
9069 if (!tg3_flag(tp, PCI_EXPRESS)) {
9070 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
9071 tp->pci_cacheline_sz);
9072 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
9073 tp->pci_lat_timer);
9074 }
9075
9076 /* Make sure PCI-X relaxed ordering bit is clear. */
9077 if (tg3_flag(tp, PCIX_MODE)) {
9078 u16 pcix_cmd;
9079
9080 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9081 &pcix_cmd);
9082 pcix_cmd &= ~PCI_X_CMD_ERO;
9083 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9084 pcix_cmd);
9085 }
9086
9087 if (tg3_flag(tp, 5780_CLASS)) {
9088
9089 /* Chip reset on 5780 will reset MSI enable bit,
9090 	 * so we need to restore it.
9091 */
9092 if (tg3_flag(tp, USING_MSI)) {
9093 u16 ctrl;
9094
9095 pci_read_config_word(tp->pdev,
9096 tp->msi_cap + PCI_MSI_FLAGS,
9097 &ctrl);
9098 pci_write_config_word(tp->pdev,
9099 tp->msi_cap + PCI_MSI_FLAGS,
9100 ctrl | PCI_MSI_FLAGS_ENABLE);
9101 val = tr32(MSGINT_MODE);
9102 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9103 }
9104 }
9105 }
9106
9107 static void tg3_override_clk(struct tg3 *tp)
9108 {
9109 u32 val;
9110
9111 switch (tg3_asic_rev(tp)) {
9112 case ASIC_REV_5717:
9113 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9114 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9115 TG3_CPMU_MAC_ORIDE_ENABLE);
9116 break;
9117
9118 case ASIC_REV_5719:
9119 case ASIC_REV_5720:
9120 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9121 break;
9122
9123 default:
9124 return;
9125 }
9126 }
9127
9128 static void tg3_restore_clk(struct tg3 *tp)
9129 {
9130 u32 val;
9131
9132 switch (tg3_asic_rev(tp)) {
9133 case ASIC_REV_5717:
9134 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9135 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9136 val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9137 break;
9138
9139 case ASIC_REV_5719:
9140 case ASIC_REV_5720:
9141 val = tr32(TG3_CPMU_CLCK_ORIDE);
9142 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9143 break;
9144
9145 default:
9146 return;
9147 }
9148 }
9149
9150 /* tp->lock is held. */
9151 static int tg3_chip_reset(struct tg3 *tp)
9152 __releases(tp->lock)
9153 __acquires(tp->lock)
9154 {
9155 u32 val;
9156 void (*write_op)(struct tg3 *, u32, u32);
9157 int i, err;
9158
9159 if (!pci_device_is_present(tp->pdev))
9160 return -ENODEV;
9161
9162 tg3_nvram_lock(tp);
9163
9164 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9165
9166 /* No matching tg3_nvram_unlock() after this because
9167 * chip reset below will undo the nvram lock.
9168 */
9169 tp->nvram_lock_cnt = 0;
9170
9171 /* GRC_MISC_CFG core clock reset will clear the memory
9172 * enable bit in PCI register 4 and the MSI enable bit
9173 * on some chips, so we save relevant registers here.
9174 */
9175 tg3_save_pci_state(tp);
9176
9177 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9178 tg3_flag(tp, 5755_PLUS))
9179 tw32(GRC_FASTBOOT_PC, 0);
9180
9181 /*
9182 * We must avoid the readl() that normally takes place.
9183 * It locks machines, causes machine checks, and other
9184 * fun things. So, temporarily disable the 5701
9185 * hardware workaround, while we do the reset.
9186 */
9187 write_op = tp->write32;
9188 if (write_op == tg3_write_flush_reg32)
9189 tp->write32 = tg3_write32;
9190
9191 /* Prevent the irq handler from reading or writing PCI registers
9192 * during chip reset when the memory enable bit in the PCI command
9193 	 * register may be cleared.  The chip does not generate interrupts
9194 	 * at this time, but the irq handler may still be called due to irq
9195 * sharing or irqpoll.
9196 */
9197 tg3_flag_set(tp, CHIP_RESETTING);
9198 for (i = 0; i < tp->irq_cnt; i++) {
9199 struct tg3_napi *tnapi = &tp->napi[i];
9200 if (tnapi->hw_status) {
9201 tnapi->hw_status->status = 0;
9202 tnapi->hw_status->status_tag = 0;
9203 }
9204 tnapi->last_tag = 0;
9205 tnapi->last_irq_tag = 0;
9206 }
9207 smp_mb();
9208
9209 tg3_full_unlock(tp);
9210
9211 for (i = 0; i < tp->irq_cnt; i++)
9212 synchronize_irq(tp->napi[i].irq_vec);
9213
9214 tg3_full_lock(tp, 0);
9215
9216 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9217 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9218 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9219 }
9220
9221 /* do the reset */
9222 val = GRC_MISC_CFG_CORECLK_RESET;
9223
9224 if (tg3_flag(tp, PCI_EXPRESS)) {
9225 /* Force PCIe 1.0a mode */
9226 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9227 !tg3_flag(tp, 57765_PLUS) &&
9228 tr32(TG3_PCIE_PHY_TSTCTL) ==
9229 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9230 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9231
9232 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9233 tw32(GRC_MISC_CFG, (1 << 29));
9234 val |= (1 << 29);
9235 }
9236 }
9237
9238 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9239 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9240 tw32(GRC_VCPU_EXT_CTRL,
9241 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9242 }
9243
9244 /* Set the clock to the highest frequency to avoid timeouts. With link
9245 * aware mode, the clock speed could be slow and bootcode does not
9246 * complete within the expected time. Override the clock to allow the
9247 * bootcode to finish sooner and then restore it.
9248 */
9249 tg3_override_clk(tp);
9250
9251 	/* Manage gphy power for all CPMU-absent PCIe devices. */
9252 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9253 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9254
9255 tw32(GRC_MISC_CFG, val);
9256
9257 /* restore 5701 hardware bug workaround write method */
9258 tp->write32 = write_op;
9259
9260 /* Unfortunately, we have to delay before the PCI read back.
9261 	 * Some 575X chips will not even respond to a PCI cfg access
9262 * when the reset command is given to the chip.
9263 *
9264 * How do these hardware designers expect things to work
9265 * properly if the PCI write is posted for a long period
9266 * of time? It is always necessary to have some method by
9267 	 * which a register read back can occur to push out the
9268 	 * write which does the reset.
9269 *
9270 * For most tg3 variants the trick below was working.
9271 * Ho hum...
9272 */
9273 udelay(120);
9274
9275 /* Flush PCI posted writes. The normal MMIO registers
9276 * are inaccessible at this time so this is the only
9277 	 * way to do this reliably (actually, this is no longer
9278 * the case, see above). I tried to use indirect
9279 * register read/write but this upset some 5701 variants.
9280 */
9281 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9282
9283 udelay(120);
9284
9285 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9286 u16 val16;
9287
9288 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9289 int j;
9290 u32 cfg_val;
9291
9292 /* Wait for link training to complete. */
9293 for (j = 0; j < 5000; j++)
9294 udelay(100);
9295
9296 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9297 pci_write_config_dword(tp->pdev, 0xc4,
9298 cfg_val | (1 << 15));
9299 }
9300
9301 /* Clear the "no snoop" and "relaxed ordering" bits. */
9302 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9303 /*
9304 * Older PCIe devices only support the 128 byte
9305 * MPS setting. Enforce the restriction.
9306 */
9307 if (!tg3_flag(tp, CPMU_PRESENT))
9308 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9309 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9310
9311 /* Clear error status */
9312 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9313 PCI_EXP_DEVSTA_CED |
9314 PCI_EXP_DEVSTA_NFED |
9315 PCI_EXP_DEVSTA_FED |
9316 PCI_EXP_DEVSTA_URD);
9317 }
9318
9319 tg3_restore_pci_state(tp);
9320
9321 tg3_flag_clear(tp, CHIP_RESETTING);
9322 tg3_flag_clear(tp, ERROR_PROCESSED);
9323
9324 val = 0;
9325 if (tg3_flag(tp, 5780_CLASS))
9326 val = tr32(MEMARB_MODE);
9327 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9328
9329 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9330 tg3_stop_fw(tp);
9331 tw32(0x5000, 0x400);
9332 }
9333
9334 if (tg3_flag(tp, IS_SSB_CORE)) {
9335 /*
9336 		 * BCM4785: In order to avoid repercussions from using the
9337 		 * potentially defective internal ROM, stop the Rx RISC CPU,
9338 		 * which is not needed for normal operation.
9339 */
9340 tg3_stop_fw(tp);
9341 tg3_halt_cpu(tp, RX_CPU_BASE);
9342 }
9343
9344 err = tg3_poll_fw(tp);
9345 if (err)
9346 return err;
9347
9348 tw32(GRC_MODE, tp->grc_mode);
9349
9350 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9351 val = tr32(0xc4);
9352
9353 tw32(0xc4, val | (1 << 15));
9354 }
9355
9356 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9357 tg3_asic_rev(tp) == ASIC_REV_5705) {
9358 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9359 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9360 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9361 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9362 }
9363
9364 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9365 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9366 val = tp->mac_mode;
9367 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9368 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9369 val = tp->mac_mode;
9370 } else
9371 val = 0;
9372
9373 tw32_f(MAC_MODE, val);
9374 udelay(40);
9375
9376 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9377
9378 tg3_mdio_start(tp);
9379
9380 if (tg3_flag(tp, PCI_EXPRESS) &&
9381 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9382 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9383 !tg3_flag(tp, 57765_PLUS)) {
9384 val = tr32(0x7c00);
9385
9386 tw32(0x7c00, val | (1 << 25));
9387 }
9388
9389 tg3_restore_clk(tp);
9390
9391 /* Increase the core clock speed to fix tx timeout issue for 5762
9392 * with 100Mbps link speed.
9393 */
9394 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9395 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9396 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9397 TG3_CPMU_MAC_ORIDE_ENABLE);
9398 }
9399
9400 /* Reprobe ASF enable state. */
9401 tg3_flag_clear(tp, ENABLE_ASF);
9402 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9403 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9404
9405 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9406 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9407 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9408 u32 nic_cfg;
9409
9410 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9411 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9412 tg3_flag_set(tp, ENABLE_ASF);
9413 tp->last_event_jiffies = jiffies;
9414 if (tg3_flag(tp, 5750_PLUS))
9415 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9416
9417 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9418 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9419 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9420 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9421 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9422 }
9423 }
9424
9425 return 0;
9426 }
9427
9428 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9429 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9430 static void __tg3_set_rx_mode(struct net_device *);
9431
9432 /* tp->lock is held. */
9433 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9434 {
9435 int err, i;
9436
9437 tg3_stop_fw(tp);
9438
9439 tg3_write_sig_pre_reset(tp, kind);
9440
9441 tg3_abort_hw(tp, silent);
9442 err = tg3_chip_reset(tp);
9443
9444 __tg3_set_mac_addr(tp, false);
9445
9446 tg3_write_sig_legacy(tp, kind);
9447 tg3_write_sig_post_reset(tp, kind);
9448
9449 if (tp->hw_stats) {
9450 /* Save the stats across chip resets... */
9451 tg3_get_nstats(tp, &tp->net_stats_prev);
9452 tg3_get_estats(tp, &tp->estats_prev);
9453
9454 /* And make sure the next sample is new data */
9455 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9456
9457 for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
9458 struct tg3_napi *tnapi = &tp->napi[i];
9459
9460 tnapi->rx_dropped = 0;
9461 tnapi->tx_dropped = 0;
9462 }
9463 }
9464
9465 return err;
9466 }
9467
9468 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9469 {
9470 struct tg3 *tp = netdev_priv(dev);
9471 struct sockaddr *addr = p;
9472 int err = 0;
9473 bool skip_mac_1 = false;
9474
9475 if (!is_valid_ether_addr(addr->sa_data))
9476 return -EADDRNOTAVAIL;
9477
9478 eth_hw_addr_set(dev, addr->sa_data);
9479
9480 if (!netif_running(dev))
9481 return 0;
9482
9483 if (tg3_flag(tp, ENABLE_ASF)) {
9484 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9485
9486 addr0_high = tr32(MAC_ADDR_0_HIGH);
9487 addr0_low = tr32(MAC_ADDR_0_LOW);
9488 addr1_high = tr32(MAC_ADDR_1_HIGH);
9489 addr1_low = tr32(MAC_ADDR_1_LOW);
9490
9491 /* Skip MAC addr 1 if ASF is using it. */
9492 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9493 !(addr1_high == 0 && addr1_low == 0))
9494 skip_mac_1 = true;
9495 }
9496 spin_lock_bh(&tp->lock);
9497 __tg3_set_mac_addr(tp, skip_mac_1);
9498 __tg3_set_rx_mode(dev);
9499 spin_unlock_bh(&tp->lock);
9500
9501 return err;
9502 }
9503
9504 /* tp->lock is held. */
9505 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9506 dma_addr_t mapping, u32 maxlen_flags,
9507 u32 nic_addr)
9508 {
9509 tg3_write_mem(tp,
9510 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9511 ((u64) mapping >> 32));
9512 tg3_write_mem(tp,
9513 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9514 ((u64) mapping & 0xffffffff));
9515 tg3_write_mem(tp,
9516 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9517 maxlen_flags);
9518
9519 if (!tg3_flag(tp, 5705_PLUS))
9520 tg3_write_mem(tp,
9521 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9522 nic_addr);
9523 }
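
/* Layout sketch of one BD info block in NIC SRAM, as written above
 * (offsets per the TG3_BDINFO_* constants, assumed from tg3.h):
 *
 *	+0x0	TG3_BDINFO_HOST_ADDR	host ring DMA address (hi, then lo)
 *	+0x8	TG3_BDINFO_MAXLEN_FLAGS	ring size and flags
 *	+0xc	TG3_BDINFO_NIC_ADDR	NIC-local ring address (pre-5705 only)
 */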
9524
9525
9526 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9527 {
9528 int i = 0;
9529
9530 if (!tg3_flag(tp, ENABLE_TSS)) {
9531 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9532 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9533 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9534 } else {
9535 tw32(HOSTCC_TXCOL_TICKS, 0);
9536 tw32(HOSTCC_TXMAX_FRAMES, 0);
9537 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9538
9539 for (; i < tp->txq_cnt; i++) {
9540 u32 reg;
9541
9542 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9543 tw32(reg, ec->tx_coalesce_usecs);
9544 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9545 tw32(reg, ec->tx_max_coalesced_frames);
9546 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9547 tw32(reg, ec->tx_max_coalesced_frames_irq);
9548 }
9549 }
9550
9551 for (; i < tp->irq_max - 1; i++) {
9552 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9553 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9554 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9555 }
9556 }
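/* Note on the register math above (taken from the loop itself): each
 * vector's host-coalescing register block is 0x18 bytes wide, so the
 * TXCOL_TICKS register for vector n (n >= 1) lives at
 * HOSTCC_TXCOL_TICKS_VEC1 + (n - 1) * 0x18.  tg3_coal_rx_init() below
 * walks the RX registers with the same stride.
 */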
9557
9558 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9559 {
9560 int i = 0;
9561 u32 limit = tp->rxq_cnt;
9562
9563 if (!tg3_flag(tp, ENABLE_RSS)) {
9564 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9565 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9566 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9567 limit--;
9568 } else {
9569 tw32(HOSTCC_RXCOL_TICKS, 0);
9570 tw32(HOSTCC_RXMAX_FRAMES, 0);
9571 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9572 }
9573
9574 for (; i < limit; i++) {
9575 u32 reg;
9576
9577 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9578 tw32(reg, ec->rx_coalesce_usecs);
9579 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9580 tw32(reg, ec->rx_max_coalesced_frames);
9581 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9582 tw32(reg, ec->rx_max_coalesced_frames_irq);
9583 }
9584
9585 for (; i < tp->irq_max - 1; i++) {
9586 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9587 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9588 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9589 }
9590 }
9591
9592 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9593 {
9594 tg3_coal_tx_init(tp, ec);
9595 tg3_coal_rx_init(tp, ec);
9596
9597 if (!tg3_flag(tp, 5705_PLUS)) {
9598 u32 val = ec->stats_block_coalesce_usecs;
9599
9600 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9601 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9602
9603 if (!tp->link_up)
9604 val = 0;
9605
9606 tw32(HOSTCC_STAT_COAL_TICKS, val);
9607 }
9608 }
9609
9610 /* tp->lock is held. */
9611 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9612 {
9613 u32 txrcb, limit;
9614
9615 /* Disable all transmit rings but the first. */
9616 if (!tg3_flag(tp, 5705_PLUS))
9617 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9618 else if (tg3_flag(tp, 5717_PLUS))
9619 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9620 else if (tg3_flag(tp, 57765_CLASS) ||
9621 tg3_asic_rev(tp) == ASIC_REV_5762)
9622 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9623 else
9624 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9625
9626 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9627 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9628 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9629 BDINFO_FLAGS_DISABLED);
9630 }
9631
9632 /* tp->lock is held. */
9633 static void tg3_tx_rcbs_init(struct tg3 *tp)
9634 {
9635 int i = 0;
9636 u32 txrcb = NIC_SRAM_SEND_RCB;
9637
9638 if (tg3_flag(tp, ENABLE_TSS))
9639 i++;
9640
9641 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9642 struct tg3_napi *tnapi = &tp->napi[i];
9643
9644 if (!tnapi->tx_ring)
9645 continue;
9646
9647 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9648 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9649 NIC_SRAM_TX_BUFFER_DESC);
9650 }
9651 }
9652
9653 /* tp->lock is held. */
9654 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9655 {
9656 u32 rxrcb, limit;
9657
9658 /* Disable all receive return rings but the first. */
9659 if (tg3_flag(tp, 5717_PLUS))
9660 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9661 else if (!tg3_flag(tp, 5705_PLUS))
9662 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9663 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9664 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9665 tg3_flag(tp, 57765_CLASS))
9666 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9667 else
9668 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9669
9670 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9671 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9672 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9673 BDINFO_FLAGS_DISABLED);
9674 }
9675
9676 /* tp->lock is held. */
9677 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9678 {
9679 int i = 0;
9680 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9681
9682 if (tg3_flag(tp, ENABLE_RSS))
9683 i++;
9684
9685 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9686 struct tg3_napi *tnapi = &tp->napi[i];
9687
9688 if (!tnapi->rx_rcb)
9689 continue;
9690
9691 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9692 (tp->rx_ret_ring_mask + 1) <<
9693 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9694 }
9695 }
9696
9697 /* tp->lock is held. */
9698 static void tg3_rings_reset(struct tg3 *tp)
9699 {
9700 int i;
9701 u32 stblk;
9702 struct tg3_napi *tnapi = &tp->napi[0];
9703
9704 tg3_tx_rcbs_disable(tp);
9705
9706 tg3_rx_ret_rcbs_disable(tp);
9707
9708 /* Disable interrupts */
9709 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9710 tp->napi[0].chk_msi_cnt = 0;
9711 tp->napi[0].last_rx_cons = 0;
9712 tp->napi[0].last_tx_cons = 0;
9713
9714 /* Zero mailbox registers. */
9715 if (tg3_flag(tp, SUPPORT_MSIX)) {
9716 for (i = 1; i < tp->irq_max; i++) {
9717 tp->napi[i].tx_prod = 0;
9718 tp->napi[i].tx_cons = 0;
9719 if (tg3_flag(tp, ENABLE_TSS))
9720 tw32_mailbox(tp->napi[i].prodmbox, 0);
9721 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9722 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9723 tp->napi[i].chk_msi_cnt = 0;
9724 tp->napi[i].last_rx_cons = 0;
9725 tp->napi[i].last_tx_cons = 0;
9726 }
9727 if (!tg3_flag(tp, ENABLE_TSS))
9728 tw32_mailbox(tp->napi[0].prodmbox, 0);
9729 } else {
9730 tp->napi[0].tx_prod = 0;
9731 tp->napi[0].tx_cons = 0;
9732 tw32_mailbox(tp->napi[0].prodmbox, 0);
9733 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9734 }
9735
9736 /* Make sure the NIC-based send BD rings are disabled. */
9737 if (!tg3_flag(tp, 5705_PLUS)) {
9738 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9739 for (i = 0; i < 16; i++)
9740 tw32_tx_mbox(mbox + i * 8, 0);
9741 }
9742
9743 /* Clear status block in ram. */
9744 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9745
9746 /* Set status block DMA address */
9747 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9748 ((u64) tnapi->status_mapping >> 32));
9749 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9750 ((u64) tnapi->status_mapping & 0xffffffff));
9751
9752 stblk = HOSTCC_STATBLCK_RING1;
9753
9754 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9755 u64 mapping = (u64)tnapi->status_mapping;
9756 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9757 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9758 stblk += 8;
9759
9760 /* Clear status block in ram. */
9761 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9762 }
9763
9764 tg3_tx_rcbs_init(tp);
9765 tg3_rx_ret_rcbs_init(tp);
9766 }
9767
9768 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9769 {
9770 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9771
9772 if (!tg3_flag(tp, 5750_PLUS) ||
9773 tg3_flag(tp, 5780_CLASS) ||
9774 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9775 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9776 tg3_flag(tp, 57765_PLUS))
9777 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9778 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9779 tg3_asic_rev(tp) == ASIC_REV_5787)
9780 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9781 else
9782 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9783
9784 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9785 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9786
9787 val = min(nic_rep_thresh, host_rep_thresh);
9788 tw32(RCVBDI_STD_THRESH, val);
9789
9790 if (tg3_flag(tp, 57765_PLUS))
9791 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9792
9793 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9794 return;
9795
9796 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9797
9798 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9799
9800 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9801 tw32(RCVBDI_JUMBO_THRESH, val);
9802
9803 if (tg3_flag(tp, 57765_PLUS))
9804 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9805 }
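/* Worked example with hypothetical numbers: if bdcache_maxcnt = 128,
 * tp->rx_std_max_post = 32 and tp->rx_pending = 200, then
 * nic_rep_thresh = min(128 / 2, 32) = 32 and host_rep_thresh =
 * max(200 / 8, 1) = 25, so RCVBDI_STD_THRESH is written with
 * min(32, 25) = 25.
 */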
9806
9807 static inline u32 calc_crc(unsigned char *buf, int len)
9808 {
9809 return ~crc32(~0, buf, len);
9810 }
9811
9812 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9813 {
9814 /* accept or reject all multicast frames */
9815 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9816 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9817 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9818 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9819 }
9820
9821 static void __tg3_set_rx_mode(struct net_device *dev)
9822 {
9823 struct tg3 *tp = netdev_priv(dev);
9824 u32 rx_mode;
9825
9826 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9827 RX_MODE_KEEP_VLAN_TAG);
9828
9829 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9830 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9831 * flag clear.
9832 */
9833 if (!tg3_flag(tp, ENABLE_ASF))
9834 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9835 #endif
9836
9837 if (dev->flags & IFF_PROMISC) {
9838 /* Promiscuous mode. */
9839 rx_mode |= RX_MODE_PROMISC;
9840 } else if (dev->flags & IFF_ALLMULTI) {
9841 /* Accept all multicast. */
9842 tg3_set_multi(tp, 1);
9843 } else if (netdev_mc_empty(dev)) {
9844 /* Reject all multicast. */
9845 tg3_set_multi(tp, 0);
9846 } else {
9847 /* Accept one or more multicast addresses. */
9848 struct netdev_hw_addr *ha;
9849 u32 mc_filter[4] = { 0, };
9850 u32 regidx;
9851 u32 bit;
9852 u32 crc;
9853
9854 netdev_for_each_mc_addr(ha, dev) {
9855 crc = calc_crc(ha->addr, ETH_ALEN);
9856 bit = ~crc & 0x7f;
9857 regidx = (bit & 0x60) >> 5;
9858 bit &= 0x1f;
9859 mc_filter[regidx] |= (1 << bit);
9860 }
9861
9862 tw32(MAC_HASH_REG_0, mc_filter[0]);
9863 tw32(MAC_HASH_REG_1, mc_filter[1]);
9864 tw32(MAC_HASH_REG_2, mc_filter[2]);
9865 tw32(MAC_HASH_REG_3, mc_filter[3]);
9866 }
9867
9868 if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9869 rx_mode |= RX_MODE_PROMISC;
9870 } else if (!(dev->flags & IFF_PROMISC)) {
9871 /* Add all entries to the mac addr filter list */
9872 int i = 0;
9873 struct netdev_hw_addr *ha;
9874
9875 netdev_for_each_uc_addr(ha, dev) {
9876 __tg3_set_one_mac_addr(tp, ha->addr,
9877 i + TG3_UCAST_ADDR_IDX(tp));
9878 i++;
9879 }
9880 }
9881
9882 if (rx_mode != tp->rx_mode) {
9883 tp->rx_mode = rx_mode;
9884 tw32_f(MAC_RX_MODE, rx_mode);
9885 udelay(10);
9886 }
9887 }
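/* Illustrative sketch, not used by the driver: given a multicast
 * address, compute which MAC_HASH_REG_n register and which bit the
 * loop in __tg3_set_rx_mode() above would set.  The hardware hashes
 * on the low 7 bits of the inverted CRC-32: bits 6:5 select one of
 * the four 32-bit hash registers, bits 4:0 the bit within it.
 */
static inline void tg3_mc_hash_example(unsigned char *addr,
				       u32 *regidx, u32 *bitpos)
{
	u32 bit = ~calc_crc(addr, ETH_ALEN) & 0x7f;

	*regidx = (bit & 0x60) >> 5;	/* which MAC_HASH_REG_n */
	*bitpos = bit & 0x1f;		/* which bit within it */
}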
9888
9889 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9890 {
9891 int i;
9892
9893 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9894 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9895 }
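/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so with
 * e.g. qcnt = 4 the default table cycles 0, 1, 2, 3, 0, 1, ...,
 * spreading flows evenly across the RX queues.
 */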
9896
9897 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9898 {
9899 int i;
9900
9901 if (!tg3_flag(tp, SUPPORT_MSIX))
9902 return;
9903
9904 if (tp->rxq_cnt == 1) {
9905 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9906 return;
9907 }
9908
9909 /* Validate table against current IRQ count */
9910 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9911 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9912 break;
9913 }
9914
9915 if (i != TG3_RSS_INDIR_TBL_SIZE)
9916 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9917 }
9918
9919 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9920 {
9921 int i = 0;
9922 u32 reg = MAC_RSS_INDIR_TBL_0;
9923
9924 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9925 u32 val = tp->rss_ind_tbl[i];
9926 i++;
9927 for (; i % 8; i++) {
9928 val <<= 4;
9929 val |= tp->rss_ind_tbl[i];
9930 }
9931 tw32(reg, val);
9932 reg += 4;
9933 }
9934 }
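/* Worked example: the loop above packs eight 4-bit table entries per
 * 32-bit register, first entry in the most significant nibble, so
 * entries {1, 2, 3, 0, 1, 2, 3, 0} are written as 0x12301230.
 */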
9935
9936 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9937 {
9938 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9939 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9940 else
9941 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9942 }
9943
9944 /* tp->lock is held. */
9945 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9946 {
9947 u32 val, rdmac_mode;
9948 int i, err, limit;
9949 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9950
9951 tg3_disable_ints(tp);
9952
9953 tg3_stop_fw(tp);
9954
9955 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9956
9957 if (tg3_flag(tp, INIT_COMPLETE))
9958 tg3_abort_hw(tp, 1);
9959
9960 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9961 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9962 tg3_phy_pull_config(tp);
9963 tg3_eee_pull_config(tp, NULL);
9964 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9965 }
9966
9967 /* Enable MAC control of LPI */
9968 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9969 tg3_setup_eee(tp);
9970
9971 if (reset_phy)
9972 tg3_phy_reset(tp);
9973
9974 err = tg3_chip_reset(tp);
9975 if (err)
9976 return err;
9977
9978 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9979
9980 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9981 val = tr32(TG3_CPMU_CTRL);
9982 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9983 tw32(TG3_CPMU_CTRL, val);
9984
9985 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9986 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9987 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9988 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9989
9990 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9991 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9992 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9993 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9994
9995 val = tr32(TG3_CPMU_HST_ACC);
9996 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9997 val |= CPMU_HST_ACC_MACCLK_6_25;
9998 tw32(TG3_CPMU_HST_ACC, val);
9999 }
10000
10001 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
10002 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
10003 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
10004 PCIE_PWR_MGMT_L1_THRESH_4MS;
10005 tw32(PCIE_PWR_MGMT_THRESH, val);
10006
10007 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
10008 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
10009
10010 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
10011
10012 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
10013 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
10014 }
10015
10016 if (tg3_flag(tp, L1PLLPD_EN)) {
10017 u32 grc_mode = tr32(GRC_MODE);
10018
10019 /* Access the lower 1K of PL PCIE block registers. */
10020 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10021 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10022
10023 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
10024 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
10025 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
10026
10027 tw32(GRC_MODE, grc_mode);
10028 }
10029
10030 if (tg3_flag(tp, 57765_CLASS)) {
10031 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
10032 u32 grc_mode = tr32(GRC_MODE);
10033
10034 /* Access the lower 1K of PL PCIE block registers. */
10035 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10036 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
10037
10038 val = tr32(TG3_PCIE_TLDLPL_PORT +
10039 TG3_PCIE_PL_LO_PHYCTL5);
10040 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
10041 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
10042
10043 tw32(GRC_MODE, grc_mode);
10044 }
10045
10046 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
10047 u32 grc_mode;
10048
10049 /* Fix transmit hangs */
10050 val = tr32(TG3_CPMU_PADRNG_CTL);
10051 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
10052 tw32(TG3_CPMU_PADRNG_CTL, val);
10053
10054 grc_mode = tr32(GRC_MODE);
10055
10056 /* Access the lower 1K of DL PCIE block registers. */
10057 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
10058 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
10059
10060 val = tr32(TG3_PCIE_TLDLPL_PORT +
10061 TG3_PCIE_DL_LO_FTSMAX);
10062 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
10063 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
10064 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
10065
10066 tw32(GRC_MODE, grc_mode);
10067 }
10068
10069 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
10070 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
10071 val |= CPMU_LSPD_10MB_MACCLK_6_25;
10072 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
10073 }
10074
10075 /* This works around an issue with Athlon chipsets on
10076 * B3 tigon3 silicon. This bit has no effect on any
10077 * other revision. But do not set this on PCI Express
10078 * chips and don't even touch the clocks if the CPMU is present.
10079 */
10080 if (!tg3_flag(tp, CPMU_PRESENT)) {
10081 if (!tg3_flag(tp, PCI_EXPRESS))
10082 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10083 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10084 }
10085
10086 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10087 tg3_flag(tp, PCIX_MODE)) {
10088 val = tr32(TG3PCI_PCISTATE);
10089 val |= PCISTATE_RETRY_SAME_DMA;
10090 tw32(TG3PCI_PCISTATE, val);
10091 }
10092
10093 if (tg3_flag(tp, ENABLE_APE)) {
10094 /* Allow reads and writes to the
10095 * APE register and memory space.
10096 */
10097 val = tr32(TG3PCI_PCISTATE);
10098 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10099 PCISTATE_ALLOW_APE_SHMEM_WR |
10100 PCISTATE_ALLOW_APE_PSPACE_WR;
10101 tw32(TG3PCI_PCISTATE, val);
10102 }
10103
10104 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10105 /* Enable some hw fixes. */
10106 val = tr32(TG3PCI_MSI_DATA);
10107 val |= (1 << 26) | (1 << 28) | (1 << 29);
10108 tw32(TG3PCI_MSI_DATA, val);
10109 }
10110
10111 /* Descriptor ring init may make accesses to the
10112 * NIC SRAM area to set up the TX descriptors, so we
10113 * can only do this after the hardware has been
10114 * successfully reset.
10115 */
10116 err = tg3_init_rings(tp);
10117 if (err)
10118 return err;
10119
10120 if (tg3_flag(tp, 57765_PLUS)) {
10121 val = tr32(TG3PCI_DMA_RW_CTRL) &
10122 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10123 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10124 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10125 if (!tg3_flag(tp, 57765_CLASS) &&
10126 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10127 tg3_asic_rev(tp) != ASIC_REV_5762)
10128 val |= DMA_RWCTRL_TAGGED_STAT_WA;
10129 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10130 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10131 tg3_asic_rev(tp) != ASIC_REV_5761) {
10132 /* This value is determined during the probe time DMA
10133 * engine test, tg3_test_dma.
10134 */
10135 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10136 }
10137
10138 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10139 GRC_MODE_4X_NIC_SEND_RINGS |
10140 GRC_MODE_NO_TX_PHDR_CSUM |
10141 GRC_MODE_NO_RX_PHDR_CSUM);
10142 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10143
10144 /* Pseudo-header checksum is done by hardware logic and not
10145 * the offload processors, so make the chip do the pseudo-
10146 * header checksums on receive. For transmit it is more
10147 * convenient to do the pseudo-header checksum in software
10148 * as Linux does that on transmit for us in all cases.
10149 */
10150 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10151
10152 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10153 if (tp->rxptpctl)
10154 tw32(TG3_RX_PTP_CTL,
10155 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10156
10157 if (tg3_flag(tp, PTP_CAPABLE))
10158 val |= GRC_MODE_TIME_SYNC_ENABLE;
10159
10160 tw32(GRC_MODE, tp->grc_mode | val);
10161
10162 /* On one AMD platform, MRRS is restricted to 4000 because of a
10163 * south bridge limitation. As a workaround, the driver sets MRRS
10164 * to 2048 instead of the default 4096.
10165 */
10166 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10167 tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10168 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10169 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10170 }
10171
10172 /* Set up the timer prescaler register. The clock is always 66 MHz. */
10173 val = tr32(GRC_MISC_CFG);
10174 val &= ~0xff;
10175 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10176 tw32(GRC_MISC_CFG, val);
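/* A prescaler value of 65 presumably divides the 66 MHz clock by
 * 65 + 1 = 66, giving the chip's timer a 1 MHz (1 us) tick.
 */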
10177
10178 /* Initialize MBUF/DESC pool. */
10179 if (tg3_flag(tp, 5750_PLUS)) {
10180 /* Do nothing. */
10181 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10182 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10183 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10184 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10185 else
10186 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10187 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10188 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10189 } else if (tg3_flag(tp, TSO_CAPABLE)) {
10190 int fw_len;
10191
10192 fw_len = tp->fw_len;
10193 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10194 tw32(BUFMGR_MB_POOL_ADDR,
10195 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10196 tw32(BUFMGR_MB_POOL_SIZE,
10197 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10198 }
10199
10200 if (tp->dev->mtu <= ETH_DATA_LEN) {
10201 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10202 tp->bufmgr_config.mbuf_read_dma_low_water);
10203 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10204 tp->bufmgr_config.mbuf_mac_rx_low_water);
10205 tw32(BUFMGR_MB_HIGH_WATER,
10206 tp->bufmgr_config.mbuf_high_water);
10207 } else {
10208 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10209 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10210 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10211 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10212 tw32(BUFMGR_MB_HIGH_WATER,
10213 tp->bufmgr_config.mbuf_high_water_jumbo);
10214 }
10215 tw32(BUFMGR_DMA_LOW_WATER,
10216 tp->bufmgr_config.dma_low_water);
10217 tw32(BUFMGR_DMA_HIGH_WATER,
10218 tp->bufmgr_config.dma_high_water);
10219
10220 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10221 if (tg3_asic_rev(tp) == ASIC_REV_5719)
10222 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10223 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10224 tg3_asic_rev(tp) == ASIC_REV_5762 ||
10225 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10226 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10227 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10228 tw32(BUFMGR_MODE, val);
10229 for (i = 0; i < 2000; i++) {
10230 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10231 break;
10232 udelay(10);
10233 }
10234 if (i >= 2000) {
10235 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10236 return -ENODEV;
10237 }
10238
10239 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10240 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10241
10242 tg3_setup_rxbd_thresholds(tp);
10243
10244 /* Initialize TG3_BDINFO's at:
10245 * RCVDBDI_STD_BD: standard eth size rx ring
10246 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10247 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10248 *
10249 * like so:
10250 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10251 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10252 * ring attribute flags
10253 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10254 *
10255 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10256 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10257 *
10258 * The size of each ring is fixed in the firmware, but the location is
10259 * configurable.
10260 */
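/* For example, a maxlen of 512 with no flag bits set is encoded as
 * 512 << 16 == 0x02000000 in TG3_BDINFO_MAXLEN_FLAGS.
 */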
10261 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10262 ((u64) tpr->rx_std_mapping >> 32));
10263 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10264 ((u64) tpr->rx_std_mapping & 0xffffffff));
10265 if (!tg3_flag(tp, 5717_PLUS))
10266 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10267 NIC_SRAM_RX_BUFFER_DESC);
10268
10269 /* Disable the mini ring */
10270 if (!tg3_flag(tp, 5705_PLUS))
10271 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10272 BDINFO_FLAGS_DISABLED);
10273
10274 /* Program the jumbo buffer descriptor ring control
10275 * blocks on those devices that have them.
10276 */
10277 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10278 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10279
10280 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10281 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10282 ((u64) tpr->rx_jmb_mapping >> 32));
10283 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10284 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10285 val = TG3_RX_JMB_RING_SIZE(tp) <<
10286 BDINFO_FLAGS_MAXLEN_SHIFT;
10287 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10288 val | BDINFO_FLAGS_USE_EXT_RECV);
10289 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10290 tg3_flag(tp, 57765_CLASS) ||
10291 tg3_asic_rev(tp) == ASIC_REV_5762)
10292 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10293 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10294 } else {
10295 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10296 BDINFO_FLAGS_DISABLED);
10297 }
10298
10299 if (tg3_flag(tp, 57765_PLUS)) {
10300 val = TG3_RX_STD_RING_SIZE(tp);
10301 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10302 val |= (TG3_RX_STD_DMA_SZ << 2);
10303 } else
10304 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10305 } else
10306 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10307
10308 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10309
10310 tpr->rx_std_prod_idx = tp->rx_pending;
10311 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10312
10313 tpr->rx_jmb_prod_idx =
10314 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10315 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10316
10317 tg3_rings_reset(tp);
10318
10319 /* Initialize MAC address and backoff seed. */
10320 __tg3_set_mac_addr(tp, false);
10321
10322 /* MTU + ethernet header + FCS + optional VLAN tag */
10323 tw32(MAC_RX_MTU_SIZE,
10324 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10325
10326 /* The slot time is changed by tg3_setup_phy if we
10327 * run at gigabit with half duplex.
10328 */
10329 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10330 (6 << TX_LENGTHS_IPG_SHIFT) |
10331 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10332
10333 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10334 tg3_asic_rev(tp) == ASIC_REV_5762)
10335 val |= tr32(MAC_TX_LENGTHS) &
10336 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10337 TX_LENGTHS_CNT_DWN_VAL_MSK);
10338
10339 tw32(MAC_TX_LENGTHS, val);
10340
10341 /* Receive rules. */
10342 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10343 tw32(RCVLPC_CONFIG, 0x0181);
10344
10345 /* Calculate the RDMAC_MODE setting early, as we need it to
10346 * determine the RCVLPC_STATE_ENABLE mask.
10347 */
10348 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10349 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10350 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10351 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10352 RDMAC_MODE_LNGREAD_ENAB);
10353
10354 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10355 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10356
10357 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10358 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10359 tg3_asic_rev(tp) == ASIC_REV_57780)
10360 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10361 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10362 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10363
10364 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10365 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10366 if (tg3_flag(tp, TSO_CAPABLE)) {
10367 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10368 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10369 !tg3_flag(tp, IS_5788)) {
10370 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10371 }
10372 }
10373
10374 if (tg3_flag(tp, PCI_EXPRESS))
10375 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10376
10377 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10378 tp->dma_limit = 0;
10379 if (tp->dev->mtu <= ETH_DATA_LEN) {
10380 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10381 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10382 }
10383 }
10384
10385 if (tg3_flag(tp, HW_TSO_1) ||
10386 tg3_flag(tp, HW_TSO_2) ||
10387 tg3_flag(tp, HW_TSO_3))
10388 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10389
10390 if (tg3_flag(tp, 57765_PLUS) ||
10391 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10392 tg3_asic_rev(tp) == ASIC_REV_57780)
10393 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10394
10395 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10396 tg3_asic_rev(tp) == ASIC_REV_5762)
10397 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10398
10399 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10400 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10401 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10402 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10403 tg3_flag(tp, 57765_PLUS)) {
10404 u32 tgtreg;
10405
10406 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10407 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10408 else
10409 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10410
10411 val = tr32(tgtreg);
10412 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10413 tg3_asic_rev(tp) == ASIC_REV_5762) {
10414 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10415 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10416 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10417 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10418 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10419 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10420 }
10421 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10422 }
10423
10424 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10425 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10426 tg3_asic_rev(tp) == ASIC_REV_5762) {
10427 u32 tgtreg;
10428
10429 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10430 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10431 else
10432 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10433
10434 val = tr32(tgtreg);
10435 tw32(tgtreg, val |
10436 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10437 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10438 }
10439
10440 /* Receive/send statistics. */
10441 if (tg3_flag(tp, 5750_PLUS)) {
10442 val = tr32(RCVLPC_STATS_ENABLE);
10443 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10444 tw32(RCVLPC_STATS_ENABLE, val);
10445 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10446 tg3_flag(tp, TSO_CAPABLE)) {
10447 val = tr32(RCVLPC_STATS_ENABLE);
10448 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10449 tw32(RCVLPC_STATS_ENABLE, val);
10450 } else {
10451 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10452 }
10453 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10454 tw32(SNDDATAI_STATSENAB, 0xffffff);
10455 tw32(SNDDATAI_STATSCTRL,
10456 (SNDDATAI_SCTRL_ENABLE |
10457 SNDDATAI_SCTRL_FASTUPD));
10458
10459 /* Setup host coalescing engine. */
10460 tw32(HOSTCC_MODE, 0);
10461 for (i = 0; i < 2000; i++) {
10462 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10463 break;
10464 udelay(10);
10465 }
10466
10467 __tg3_set_coalesce(tp, &tp->coal);
10468
10469 if (!tg3_flag(tp, 5705_PLUS)) {
10470 /* Status/statistics block address. See tg3_timer,
10471 * the tg3_periodic_fetch_stats call there, and
10472 * tg3_get_stats to see how this works for 5705/5750 chips.
10473 */
10474 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10475 ((u64) tp->stats_mapping >> 32));
10476 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10477 ((u64) tp->stats_mapping & 0xffffffff));
10478 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10479
10480 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10481
10482 /* Clear statistics and status block memory areas */
10483 for (i = NIC_SRAM_STATS_BLK;
10484 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10485 i += sizeof(u32)) {
10486 tg3_write_mem(tp, i, 0);
10487 udelay(40);
10488 }
10489 }
10490
10491 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10492
10493 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10494 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10495 if (!tg3_flag(tp, 5705_PLUS))
10496 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10497
10498 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10499 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10500 /* reset to prevent losing 1st rx packet intermittently */
10501 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10502 udelay(10);
10503 }
10504
10505 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10506 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10507 MAC_MODE_FHDE_ENABLE;
10508 if (tg3_flag(tp, ENABLE_APE))
10509 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10510 if (!tg3_flag(tp, 5705_PLUS) &&
10511 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10512 tg3_asic_rev(tp) != ASIC_REV_5700)
10513 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10514 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10515 udelay(40);
10516
10517 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10518 * If TG3_FLAG_IS_NIC is zero, we should read the
10519 * register to preserve the GPIO settings for LOMs. The GPIOs,
10520 * whether used as inputs or outputs, are set by boot code after
10521 * reset.
10522 */
10523 if (!tg3_flag(tp, IS_NIC)) {
10524 u32 gpio_mask;
10525
10526 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10527 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10528 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10529
10530 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10531 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10532 GRC_LCLCTRL_GPIO_OUTPUT3;
10533
10534 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10535 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10536
10537 tp->grc_local_ctrl &= ~gpio_mask;
10538 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10539
10540 /* GPIO1 must be driven high for eeprom write protect */
10541 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10542 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10543 GRC_LCLCTRL_GPIO_OUTPUT1);
10544 }
10545 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10546 udelay(100);
10547
10548 if (tg3_flag(tp, USING_MSIX)) {
10549 val = tr32(MSGINT_MODE);
10550 val |= MSGINT_MODE_ENABLE;
10551 if (tp->irq_cnt > 1)
10552 val |= MSGINT_MODE_MULTIVEC_EN;
10553 if (!tg3_flag(tp, 1SHOT_MSI))
10554 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10555 tw32(MSGINT_MODE, val);
10556 }
10557
10558 if (!tg3_flag(tp, 5705_PLUS)) {
10559 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10560 udelay(40);
10561 }
10562
10563 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10564 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10565 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10566 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10567 WDMAC_MODE_LNGREAD_ENAB);
10568
10569 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10570 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10571 if (tg3_flag(tp, TSO_CAPABLE) &&
10572 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10573 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10574 /* nothing */
10575 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10576 !tg3_flag(tp, IS_5788)) {
10577 val |= WDMAC_MODE_RX_ACCEL;
10578 }
10579 }
10580
10581 /* Enable host coalescing bug fix */
10582 if (tg3_flag(tp, 5755_PLUS))
10583 val |= WDMAC_MODE_STATUS_TAG_FIX;
10584
10585 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10586 val |= WDMAC_MODE_BURST_ALL_DATA;
10587
10588 tw32_f(WDMAC_MODE, val);
10589 udelay(40);
10590
10591 if (tg3_flag(tp, PCIX_MODE)) {
10592 u16 pcix_cmd;
10593
10594 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10595 &pcix_cmd);
10596 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10597 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10598 pcix_cmd |= PCI_X_CMD_READ_2K;
10599 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10600 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10601 pcix_cmd |= PCI_X_CMD_READ_2K;
10602 }
10603 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10604 pcix_cmd);
10605 }
10606
10607 tw32_f(RDMAC_MODE, rdmac_mode);
10608 udelay(40);
10609
10610 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10611 tg3_asic_rev(tp) == ASIC_REV_5720) {
10612 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10613 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10614 break;
10615 }
10616 if (i < TG3_NUM_RDMA_CHANNELS) {
10617 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10618 val |= tg3_lso_rd_dma_workaround_bit(tp);
10619 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10620 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10621 }
10622 }
10623
10624 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10625 if (!tg3_flag(tp, 5705_PLUS))
10626 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10627
10628 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10629 tw32(SNDDATAC_MODE,
10630 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10631 else
10632 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10633
10634 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10635 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10636 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10637 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10638 val |= RCVDBDI_MODE_LRG_RING_SZ;
10639 tw32(RCVDBDI_MODE, val);
10640 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10641 if (tg3_flag(tp, HW_TSO_1) ||
10642 tg3_flag(tp, HW_TSO_2) ||
10643 tg3_flag(tp, HW_TSO_3))
10644 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10645 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10646 if (tg3_flag(tp, ENABLE_TSS))
10647 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10648 tw32(SNDBDI_MODE, val);
10649 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10650
10651 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10652 err = tg3_load_5701_a0_firmware_fix(tp);
10653 if (err)
10654 return err;
10655 }
10656
10657 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10658 /* Ignore any errors from the firmware download. If the download
10659 * fails, the device will operate with EEE disabled.
10660 */
10661 tg3_load_57766_firmware(tp);
10662 }
10663
10664 if (tg3_flag(tp, TSO_CAPABLE)) {
10665 err = tg3_load_tso_firmware(tp);
10666 if (err)
10667 return err;
10668 }
10669
10670 tp->tx_mode = TX_MODE_ENABLE;
10671
10672 if (tg3_flag(tp, 5755_PLUS) ||
10673 tg3_asic_rev(tp) == ASIC_REV_5906)
10674 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10675
10676 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10677 tg3_asic_rev(tp) == ASIC_REV_5762) {
10678 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10679 tp->tx_mode &= ~val;
10680 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10681 }
10682
10683 tw32_f(MAC_TX_MODE, tp->tx_mode);
10684 udelay(100);
10685
10686 if (tg3_flag(tp, ENABLE_RSS)) {
10687 u32 rss_key[10];
10688
10689 tg3_rss_write_indir_tbl(tp);
10690
10691 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10692
10693 for (i = 0; i < 10 ; i++)
10694 tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10695 }
10696
10697 tp->rx_mode = RX_MODE_ENABLE;
10698 if (tg3_flag(tp, 5755_PLUS))
10699 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10700
10701 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10702 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10703
10704 if (tg3_flag(tp, ENABLE_RSS))
10705 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10706 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10707 RX_MODE_RSS_IPV6_HASH_EN |
10708 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10709 RX_MODE_RSS_IPV4_HASH_EN |
10710 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10711
10712 tw32_f(MAC_RX_MODE, tp->rx_mode);
10713 udelay(10);
10714
10715 tw32(MAC_LED_CTRL, tp->led_ctrl);
10716
10717 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10718 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10719 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10720 udelay(10);
10721 }
10722 tw32_f(MAC_RX_MODE, tp->rx_mode);
10723 udelay(10);
10724
10725 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10726 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10727 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10728 /* Set the drive transmission level to 1.2V, but
10729 * only if the signal pre-emphasis bit is not set. */
10730 val = tr32(MAC_SERDES_CFG);
10731 val &= 0xfffff000;
10732 val |= 0x880;
10733 tw32(MAC_SERDES_CFG, val);
10734 }
10735 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10736 tw32(MAC_SERDES_CFG, 0x616000);
10737 }
10738
10739 /* Prevent chip from dropping frames when flow control
10740 * is enabled.
10741 */
10742 if (tg3_flag(tp, 57765_CLASS))
10743 val = 1;
10744 else
10745 val = 2;
10746 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10747
10748 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10749 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10750 /* Use hardware link auto-negotiation */
10751 tg3_flag_set(tp, HW_AUTONEG);
10752 }
10753
10754 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10755 tg3_asic_rev(tp) == ASIC_REV_5714) {
10756 u32 tmp;
10757
10758 tmp = tr32(SERDES_RX_CTRL);
10759 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10760 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10761 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10762 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10763 }
10764
10765 if (!tg3_flag(tp, USE_PHYLIB)) {
10766 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10767 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10768
10769 err = tg3_setup_phy(tp, false);
10770 if (err)
10771 return err;
10772
10773 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10774 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10775 u32 tmp;
10776
10777 /* Clear CRC stats. */
10778 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10779 tg3_writephy(tp, MII_TG3_TEST1,
10780 tmp | MII_TG3_TEST1_CRC_EN);
10781 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10782 }
10783 }
10784 }
10785
10786 __tg3_set_rx_mode(tp->dev);
10787
10788 /* Initialize receive rules. */
10789 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10790 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10791 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10792 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10793
10794 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10795 limit = 8;
10796 else
10797 limit = 16;
10798 if (tg3_flag(tp, ENABLE_ASF))
10799 limit -= 4;
10800 switch (limit) {
10801 case 16:
10802 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10803 fallthrough;
10804 case 15:
10805 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10806 fallthrough;
10807 case 14:
10808 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10809 fallthrough;
10810 case 13:
10811 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10812 fallthrough;
10813 case 12:
10814 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10815 fallthrough;
10816 case 11:
10817 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10818 fallthrough;
10819 case 10:
10820 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10821 fallthrough;
10822 case 9:
10823 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10824 fallthrough;
10825 case 8:
10826 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10827 fallthrough;
10828 case 7:
10829 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10830 fallthrough;
10831 case 6:
10832 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10833 fallthrough;
10834 case 5:
10835 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10836 fallthrough;
10837 case 4:
10838 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10839 case 3:
10840 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10841 case 2:
10842 case 1:
10843
10844 default:
10845 break;
10846 }
10847
10848 if (tg3_flag(tp, ENABLE_APE))
10849 /* Write our heartbeat update interval to APE. */
10850 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10851 APE_HOST_HEARTBEAT_INT_5SEC);
10852
10853 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10854
10855 return 0;
10856 }
10857
10858 /* Called at device open time to get the chip ready for
10859 * packet processing. Invoked with tp->lock held.
10860 */
10861 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10862 {
10863 /* The chip may have just been powered on. If so, the boot code may still
10864 * be running initialization. Wait for it to finish to avoid races in
10865 * accessing the hardware.
10866 */
10867 tg3_enable_register_access(tp);
10868 tg3_poll_fw(tp);
10869
10870 tg3_switch_clocks(tp);
10871
10872 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10873
10874 return tg3_reset_hw(tp, reset_phy);
10875 }
10876
10877 #ifdef CONFIG_TIGON3_HWMON
10878 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10879 {
10880 u32 off, len = TG3_OCIR_LEN;
10881 int i;
10882
10883 for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10884 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10885
10886 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10887 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10888 memset(ocir, 0, len);
10889 }
10890 }
10891
10892 /* sysfs attributes for hwmon */
10893 static ssize_t tg3_show_temp(struct device *dev,
10894 struct device_attribute *devattr, char *buf)
10895 {
10896 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10897 struct tg3 *tp = dev_get_drvdata(dev);
10898 u32 temperature;
10899
10900 spin_lock_bh(&tp->lock);
10901 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10902 sizeof(temperature));
10903 spin_unlock_bh(&tp->lock);
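	/* hwmon sysfs expects millidegrees Celsius; the APE scratchpad
	 * supplies whole degrees, hence the multiplication by 1000.
	 */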
10904 return sprintf(buf, "%u\n", temperature * 1000);
10905 }
10906
10907
10908 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10909 TG3_TEMP_SENSOR_OFFSET);
10910 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10911 TG3_TEMP_CAUTION_OFFSET);
10912 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10913 TG3_TEMP_MAX_OFFSET);
10914
10915 static struct attribute *tg3_attrs[] = {
10916 &sensor_dev_attr_temp1_input.dev_attr.attr,
10917 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10918 &sensor_dev_attr_temp1_max.dev_attr.attr,
10919 NULL
10920 };
10921 ATTRIBUTE_GROUPS(tg3);
10922
10923 static void tg3_hwmon_close(struct tg3 *tp)
10924 {
10925 if (tp->hwmon_dev) {
10926 hwmon_device_unregister(tp->hwmon_dev);
10927 tp->hwmon_dev = NULL;
10928 }
10929 }
10930
10931 static void tg3_hwmon_open(struct tg3 *tp)
10932 {
10933 int i;
10934 u32 size = 0;
10935 struct pci_dev *pdev = tp->pdev;
10936 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10937
10938 tg3_sd_scan_scratchpad(tp, ocirs);
10939
10940 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10941 if (!ocirs[i].src_data_length)
10942 continue;
10943
10944 size += ocirs[i].src_hdr_length;
10945 size += ocirs[i].src_data_length;
10946 }
10947
10948 if (!size)
10949 return;
10950
10951 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10952 tp, tg3_groups);
10953 if (IS_ERR(tp->hwmon_dev)) {
10954 tp->hwmon_dev = NULL;
10955 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10956 }
10957 }
10958 #else
10959 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10960 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10961 #endif /* CONFIG_TIGON3_HWMON */
10962
10963
10964 #define TG3_STAT_ADD32(PSTAT, REG) \
10965 do { u32 __val = tr32(REG); \
10966 (PSTAT)->low += __val; \
10967 if ((PSTAT)->low < __val) \
10968 (PSTAT)->high += 1; \
10969 } while (0)
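/* Function-form sketch of TG3_STAT_ADD32, illustrative only and
 * assuming the tg3_stat64_t {high, low} pair type from tg3.h: the
 * hardware counters are only 32 bits wide, so each reading is folded
 * into a 64-bit software counter with a hand-rolled carry.
 */
static inline void tg3_stat_add32_sketch(struct tg3 *tp,
					 tg3_stat64_t *pstat, u32 reg)
{
	u32 val = tr32(reg);

	pstat->low += val;
	if (pstat->low < val)	/* unsigned wrap means a carry occurred */
		pstat->high += 1;
}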
10970
10971 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10972 {
10973 struct tg3_hw_stats *sp = tp->hw_stats;
10974
10975 if (!tp->link_up)
10976 return;
10977
10978 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10979 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10980 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10981 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10982 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10983 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10984 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10985 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10986 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10987 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10988 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10989 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10990 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10991 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10992 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10993 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10994 u32 val;
10995
10996 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10997 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10998 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10999 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
11000 }
11001
11002 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
11003 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
11004 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
11005 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
11006 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
11007 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
11008 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
11009 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
11010 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
11011 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
11012 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
11013 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
11014 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
11015 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
11016
11017 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
11018 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
11019 tg3_asic_rev(tp) != ASIC_REV_5762 &&
11020 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
11021 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
11022 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
11023 } else {
11024 u32 val = tr32(HOSTCC_FLOW_ATTN);
11025 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
11026 if (val) {
11027 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
11028 sp->rx_discards.low += val;
11029 if (sp->rx_discards.low < val)
11030 sp->rx_discards.high += 1;
11031 }
11032 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
11033 }
11034 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
11035 }
11036
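/* Workaround for lost MSIs: if a vector still has work pending but its
 * consumer indices have not moved since the last timer tick, the
 * interrupt was probably dropped.  Allow one tick of grace, then call
 * the MSI handler by hand to restart processing.
 */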
11037 static void tg3_chk_missed_msi(struct tg3 *tp)
11038 {
11039 u32 i;
11040
11041 for (i = 0; i < tp->irq_cnt; i++) {
11042 struct tg3_napi *tnapi = &tp->napi[i];
11043
11044 if (tg3_has_work(tnapi)) {
11045 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
11046 tnapi->last_tx_cons == tnapi->tx_cons) {
11047 if (tnapi->chk_msi_cnt < 1) {
11048 tnapi->chk_msi_cnt++;
11049 return;
11050 }
11051 tg3_msi(0, tnapi);
11052 }
11053 }
11054 tnapi->chk_msi_cnt = 0;
11055 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
11056 tnapi->last_tx_cons = tnapi->tx_cons;
11057 }
11058 }
11059
11060 static void tg3_timer(struct timer_list *t)
11061 {
11062 struct tg3 *tp = timer_container_of(tp, t, timer);
11063
11064 spin_lock(&tp->lock);
11065
11066 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
11067 spin_unlock(&tp->lock);
11068 goto restart_timer;
11069 }
11070
11071 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
11072 tg3_flag(tp, 57765_CLASS))
11073 tg3_chk_missed_msi(tp);
11074
11075 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
11076 /* BCM4785: Flush posted writes from GbE to host memory. */
11077 tr32(HOSTCC_MODE);
11078 }
11079
11080 if (!tg3_flag(tp, TAGGED_STATUS)) {
11081 /* All of this garbage is because, when using non-tagged
11082 * IRQ status, the mailbox/status_block protocol the chip
11083 * uses with the CPU is race prone.
11084 */
11085 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11086 tw32(GRC_LOCAL_CTRL,
11087 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11088 } else {
11089 tw32(HOSTCC_MODE, tp->coalesce_mode |
11090 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11091 }
11092
11093 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11094 spin_unlock(&tp->lock);
11095 tg3_reset_task_schedule(tp);
11096 goto restart_timer;
11097 }
11098 }
11099
11100 /* This part only runs once per second. */
11101 if (!--tp->timer_counter) {
11102 if (tg3_flag(tp, 5705_PLUS))
11103 tg3_periodic_fetch_stats(tp);
11104
11105 if (tp->setlpicnt && !--tp->setlpicnt)
11106 tg3_phy_eee_enable(tp);
11107
11108 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11109 u32 mac_stat;
11110 int phy_event;
11111
11112 mac_stat = tr32(MAC_STATUS);
11113
11114 phy_event = 0;
11115 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11116 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11117 phy_event = 1;
11118 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11119 phy_event = 1;
11120
11121 if (phy_event)
11122 tg3_setup_phy(tp, false);
11123 } else if (tg3_flag(tp, POLL_SERDES)) {
11124 u32 mac_stat = tr32(MAC_STATUS);
11125 int need_setup = 0;
11126
11127 if (tp->link_up &&
11128 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11129 need_setup = 1;
11130 }
11131 if (!tp->link_up &&
11132 (mac_stat & (MAC_STATUS_PCS_SYNCED |
11133 MAC_STATUS_SIGNAL_DET))) {
11134 need_setup = 1;
11135 }
11136 if (need_setup) {
11137 if (!tp->serdes_counter) {
11138 tw32_f(MAC_MODE,
11139 (tp->mac_mode &
11140 ~MAC_MODE_PORT_MODE_MASK));
11141 udelay(40);
11142 tw32_f(MAC_MODE, tp->mac_mode);
11143 udelay(40);
11144 }
11145 tg3_setup_phy(tp, false);
11146 }
11147 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11148 tg3_flag(tp, 5780_CLASS)) {
11149 tg3_serdes_parallel_detect(tp);
11150 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11151 u32 cpmu = tr32(TG3_CPMU_STATUS);
11152 bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11153 TG3_CPMU_STATUS_LINK_MASK);
11154
11155 if (link_up != tp->link_up)
11156 tg3_setup_phy(tp, false);
11157 }
11158
11159 tp->timer_counter = tp->timer_multiplier;
11160 }
11161
11162 /* Heartbeat is only sent once every 2 seconds.
11163 *
11164 * The heartbeat is to tell the ASF firmware that the host
11165 * driver is still alive. In the event that the OS crashes,
11166 * ASF needs to reset the hardware to free up the FIFO space
11167 * that may be filled with rx packets destined for the host.
11168 * If the FIFO is full, ASF will no longer function properly.
11169 *
11170 * Unintended resets have been reported on real-time kernels
11171 * where the timer doesn't run on time. Netpoll will also have
11172 * the same problem.
11173 *
11174 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11175 * to check the ring condition when the heartbeat is expiring
11176 * before doing the reset. This will prevent most unintended
11177 * resets.
11178 */
11179 if (!--tp->asf_counter) {
11180 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11181 tg3_wait_for_event_ack(tp);
11182
11183 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11184 FWCMD_NICDRV_ALIVE3);
11185 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11186 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11187 TG3_FW_UPDATE_TIMEOUT_SEC);
11188
11189 tg3_generate_fw_event(tp);
11190 }
11191 tp->asf_counter = tp->asf_multiplier;
11192 }
11193
11194 /* Update the APE heartbeat every 5 seconds. */
11195 tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11196
11197 spin_unlock(&tp->lock);
11198
11199 restart_timer:
11200 tp->timer.expires = jiffies + tp->timer_offset;
11201 add_timer(&tp->timer);
11202 }
11203
11204 static void tg3_timer_init(struct tg3 *tp)
11205 {
11206 if (tg3_flag(tp, TAGGED_STATUS) &&
11207 tg3_asic_rev(tp) != ASIC_REV_5717 &&
11208 !tg3_flag(tp, 57765_CLASS))
11209 tp->timer_offset = HZ;
11210 else
11211 tp->timer_offset = HZ / 10;
11212
11213 BUG_ON(tp->timer_offset > HZ);
11214
11215 tp->timer_multiplier = (HZ / tp->timer_offset);
11216 tp->asf_multiplier = (HZ / tp->timer_offset) *
11217 TG3_FW_UPDATE_FREQ_SEC;
11218
11219 timer_setup(&tp->timer, tg3_timer, 0);
11220 }
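/* Worked example, assuming HZ = 1000: with tagged status the timer
 * fires once a second (timer_offset = HZ, timer_multiplier = 1);
 * otherwise it fires every 100 ms (HZ / 10) with timer_multiplier =
 * 10, so the once-per-second work in tg3_timer() still runs on every
 * 10th tick.
 */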
11221
11222 static void tg3_timer_start(struct tg3 *tp)
11223 {
11224 tp->asf_counter = tp->asf_multiplier;
11225 tp->timer_counter = tp->timer_multiplier;
11226
11227 tp->timer.expires = jiffies + tp->timer_offset;
11228 add_timer(&tp->timer);
11229 }
11230
11231 static void tg3_timer_stop(struct tg3 *tp)
11232 {
11233 timer_delete_sync(&tp->timer);
11234 }
11235
11236 /* Restart hardware after configuration changes, self-test, etc.
11237 * Invoked with tp->lock held.
11238 */
11239 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11240 __releases(tp->lock)
11241 __acquires(tp->lock)
11242 __releases(tp->dev->lock)
11243 __acquires(tp->dev->lock)
11244 {
11245 int err;
11246
11247 err = tg3_init_hw(tp, reset_phy);
11248 if (err) {
11249 netdev_err(tp->dev,
11250 "Failed to re-initialize device, aborting\n");
11251 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11252 tg3_full_unlock(tp);
11253 tg3_timer_stop(tp);
11254 tp->irq_sync = 0;
11255 tg3_napi_enable(tp);
11256 netdev_unlock(tp->dev);
11257 dev_close(tp->dev);
11258 netdev_lock(tp->dev);
11259 tg3_full_lock(tp, 0);
11260 }
11261 return err;
11262 }
11263
11264 static void tg3_reset_task(struct work_struct *work)
11265 {
11266 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11267 int err;
11268
11269 rtnl_lock();
11270 tg3_full_lock(tp, 0);
11271
11272 if (tp->pcierr_recovery || !netif_running(tp->dev) ||
11273 tp->pdev->error_state != pci_channel_io_normal) {
11274 tg3_flag_clear(tp, RESET_TASK_PENDING);
11275 tg3_full_unlock(tp);
11276 rtnl_unlock();
11277 return;
11278 }
11279
11280 tg3_full_unlock(tp);
11281
11282 tg3_phy_stop(tp);
11283
11284 tg3_netif_stop(tp);
11285
11286 netdev_lock(tp->dev);
11287 tg3_full_lock(tp, 1);
11288
11289 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11290 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11291 tp->write32_rx_mbox = tg3_write_flush_reg32;
11292 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11293 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11294 }
11295
11296 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11297 err = tg3_init_hw(tp, true);
11298 if (err) {
11299 tg3_full_unlock(tp);
11300 tp->irq_sync = 0;
11301 tg3_napi_enable(tp);
11302 /* Clear this flag so that tg3_reset_task_cancel() will not
11303 * call cancel_work_sync() and wait forever.
11304 */
11305 tg3_flag_clear(tp, RESET_TASK_PENDING);
11306 netdev_unlock(tp->dev);
11307 dev_close(tp->dev);
11308 goto out;
11309 }
11310
11311 tg3_netif_start(tp);
11312 tg3_full_unlock(tp);
11313 netdev_unlock(tp->dev);
11314 tg3_phy_start(tp);
11315 tg3_flag_clear(tp, RESET_TASK_PENDING);
11316 out:
11317 rtnl_unlock();
11318 }
11319
11320 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11321 {
11322 irq_handler_t fn;
11323 unsigned long flags;
11324 char *name;
11325 struct tg3_napi *tnapi = &tp->napi[irq_num];
11326
11327 if (tp->irq_cnt == 1)
11328 name = tp->dev->name;
11329 else {
11330 name = &tnapi->irq_lbl[0];
11331 if (tnapi->tx_buffers && tnapi->rx_rcb)
11332 snprintf(name, sizeof(tnapi->irq_lbl),
11333 "%s-txrx-%d", tp->dev->name, irq_num);
11334 else if (tnapi->tx_buffers)
11335 snprintf(name, sizeof(tnapi->irq_lbl),
11336 "%s-tx-%d", tp->dev->name, irq_num);
11337 else if (tnapi->rx_rcb)
11338 snprintf(name, sizeof(tnapi->irq_lbl),
11339 "%s-rx-%d", tp->dev->name, irq_num);
11340 else
11341 snprintf(name, sizeof(tnapi->irq_lbl),
11342 "%s-%d", tp->dev->name, irq_num);
11343 }
11344
11345 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11346 fn = tg3_msi;
11347 if (tg3_flag(tp, 1SHOT_MSI))
11348 fn = tg3_msi_1shot;
11349 flags = 0;
11350 } else {
11351 fn = tg3_interrupt;
11352 if (tg3_flag(tp, TAGGED_STATUS))
11353 fn = tg3_interrupt_tagged;
11354 flags = IRQF_SHARED;
11355 }
11356
11357 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11358 }
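
/* Illustrative example (actual names depend on the device and on which
 * rings each vector owns): with MSI-X on a device named "eth0", a
 * vector whose tnapi has both tx_buffers and rx_rcb registers as
 * "eth0-txrx-1", an rx-only vector as "eth0-rx-2", and a vector with
 * neither ring (e.g. one servicing only link events) as "eth0-0".
 * These are the labels that appear in /proc/interrupts.
 */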
11359
11360 static int tg3_test_interrupt(struct tg3 *tp)
11361 {
11362 struct tg3_napi *tnapi = &tp->napi[0];
11363 struct net_device *dev = tp->dev;
11364 int err, i, intr_ok = 0;
11365 u32 val;
11366
11367 if (!netif_running(dev))
11368 return -ENODEV;
11369
11370 tg3_disable_ints(tp);
11371
11372 free_irq(tnapi->irq_vec, tnapi);
11373
11374 /*
11375 * Turn off MSI one shot mode. Otherwise this test has no way
11376 * to observe whether the interrupt was delivered.
11377 */
11378 if (tg3_flag(tp, 57765_PLUS)) {
11379 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11380 tw32(MSGINT_MODE, val);
11381 }
11382
11383 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11384 IRQF_SHARED, dev->name, tnapi);
11385 if (err)
11386 return err;
11387
11388 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11389 tg3_enable_ints(tp);
11390
11391 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11392 tnapi->coal_now);
11393
11394 for (i = 0; i < 5; i++) {
11395 u32 int_mbox, misc_host_ctrl;
11396
11397 int_mbox = tr32_mailbox(tnapi->int_mbox);
11398 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11399
11400 if ((int_mbox != 0) ||
11401 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11402 intr_ok = 1;
11403 break;
11404 }
11405
11406 if (tg3_flag(tp, 57765_PLUS) &&
11407 tnapi->hw_status->status_tag != tnapi->last_tag)
11408 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11409
11410 msleep(10);
11411 }
11412
11413 tg3_disable_ints(tp);
11414
11415 free_irq(tnapi->irq_vec, tnapi);
11416
11417 err = tg3_request_irq(tp, 0);
11418
11419 if (err)
11420 return err;
11421
11422 if (intr_ok) {
11423 /* Reenable MSI one shot mode. */
11424 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11425 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11426 tw32(MSGINT_MODE, val);
11427 }
11428 return 0;
11429 }
11430
11431 return -EIO;
11432 }
11433
11434 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
11435 * INTx mode is successfully restored.
11436 */
11437 static int tg3_test_msi(struct tg3 *tp)
11438 {
11439 int err;
11440 u16 pci_cmd;
11441
11442 if (!tg3_flag(tp, USING_MSI))
11443 return 0;
11444
11445 /* Turn off SERR reporting in case MSI terminates with Master
11446 * Abort.
11447 */
11448 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11449 pci_write_config_word(tp->pdev, PCI_COMMAND,
11450 pci_cmd & ~PCI_COMMAND_SERR);
11451
11452 err = tg3_test_interrupt(tp);
11453
11454 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11455
11456 if (!err)
11457 return 0;
11458
11459 /* other failures */
11460 if (err != -EIO)
11461 return err;
11462
11463 /* MSI test failed, go back to INTx mode */
11464 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11465 "to INTx mode. Please report this failure to the PCI "
11466 "maintainer and include system chipset information\n");
11467
11468 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11469
11470 pci_disable_msi(tp->pdev);
11471
11472 tg3_flag_clear(tp, USING_MSI);
11473 tp->napi[0].irq_vec = tp->pdev->irq;
11474
11475 err = tg3_request_irq(tp, 0);
11476 if (err)
11477 return err;
11478
11479 /* Need to reset the chip because the MSI cycle may have terminated
11480 * with Master Abort.
11481 */
11482 tg3_full_lock(tp, 1);
11483
11484 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11485 err = tg3_init_hw(tp, true);
11486
11487 tg3_full_unlock(tp);
11488
11489 if (err)
11490 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11491
11492 return err;
11493 }
11494
11495 static int tg3_request_firmware(struct tg3 *tp)
11496 {
11497 const struct tg3_firmware_hdr *fw_hdr;
11498
11499 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11500 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11501 tp->fw_needed);
11502 return -ENOENT;
11503 }
11504
11505 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11506
11507 /* Firmware blob starts with version numbers, followed by
11508 * start address and _full_ length including BSS sections
11509 * (which must be longer than the actual data, of course).
11510 */
11511
11512 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11513 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11514 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11515 tp->fw_len, tp->fw_needed);
11516 release_firmware(tp->fw);
11517 tp->fw = NULL;
11518 return -EINVAL;
11519 }
11520
11521 /* We no longer need firmware; we have it. */
11522 tp->fw_needed = NULL;
11523 return 0;
11524 }
11525
11526 static u32 tg3_irq_count(struct tg3 *tp)
11527 {
11528 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11529
11530 if (irq_cnt > 1) {
11531 /* We want as many rx rings enabled as there are cpus.
11532 * In multiqueue MSI-X mode, the first MSI-X vector
11533 * only deals with link interrupts, etc, so we add
11534 * one to the number of vectors we are requesting.
11535 */
11536 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11537 }
11538
11539 return irq_cnt;
11540 }
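
/* Worked example (illustrative): on a 4-CPU system with rxq_cnt == 4
 * and txq_cnt == 1, max(rxq_cnt, txq_cnt) == 4 > 1, so tg3_irq_count()
 * requests min(4 + 1, tp->irq_max) vectors -- four ring vectors plus
 * vector 0 for link interrupts.
 */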
11541
11542 static bool tg3_enable_msix(struct tg3 *tp)
11543 {
11544 int i, rc;
11545 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11546
11547 tp->txq_cnt = tp->txq_req;
11548 tp->rxq_cnt = tp->rxq_req;
11549 if (!tp->rxq_cnt)
11550 tp->rxq_cnt = netif_get_num_default_rss_queues();
11551 if (tp->rxq_cnt > tp->rxq_max)
11552 tp->rxq_cnt = tp->rxq_max;
11553
11554 /* Disable multiple TX rings by default. Simple round-robin hardware
11555 * scheduling of the TX rings can cause starvation of rings with
11556 * small packets when other rings have TSO or jumbo packets.
11557 */
11558 if (!tp->txq_req)
11559 tp->txq_cnt = 1;
11560
11561 tp->irq_cnt = tg3_irq_count(tp);
11562
11563 for (i = 0; i < tp->irq_max; i++) {
11564 msix_ent[i].entry = i;
11565 msix_ent[i].vector = 0;
11566 }
11567
11568 rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11569 if (rc < 0) {
11570 return false;
11571 } else if (rc < tp->irq_cnt) {
11572 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11573 tp->irq_cnt, rc);
11574 tp->irq_cnt = rc;
11575 tp->rxq_cnt = max(rc - 1, 1);
11576 if (tp->txq_cnt)
11577 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11578 }
11579
11580 for (i = 0; i < tp->irq_max; i++)
11581 tp->napi[i].irq_vec = msix_ent[i].vector;
11582
11583 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11584 pci_disable_msix(tp->pdev);
11585 return false;
11586 }
11587
11588 if (tp->irq_cnt == 1)
11589 return true;
11590
11591 tg3_flag_set(tp, ENABLE_RSS);
11592
11593 if (tp->txq_cnt > 1)
11594 tg3_flag_set(tp, ENABLE_TSS);
11595
11596 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11597
11598 return true;
11599 }
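
/* Example of the shortfall path above (illustrative): if five vectors
 * are requested but pci_enable_msix_range() grants only three,
 * tp->irq_cnt becomes 3 and tp->rxq_cnt becomes max(3 - 1, 1) == 2,
 * since vector 0 is reserved for link events; txq_cnt is then
 * re-clamped to min(rxq_cnt, txq_max).
 */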
11600
11601 static void tg3_ints_init(struct tg3 *tp)
11602 {
11603 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11604 !tg3_flag(tp, TAGGED_STATUS)) {
11605 /* All MSI supporting chips should support tagged
11606 * status. Assert that this is the case.
11607 */
11608 netdev_warn(tp->dev,
11609 "MSI without TAGGED_STATUS? Not using MSI\n");
11610 goto defcfg;
11611 }
11612
11613 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11614 tg3_flag_set(tp, USING_MSIX);
11615 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11616 tg3_flag_set(tp, USING_MSI);
11617
11618 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11619 u32 msi_mode = tr32(MSGINT_MODE);
11620 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11621 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11622 if (!tg3_flag(tp, 1SHOT_MSI))
11623 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11624 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11625 }
11626 defcfg:
11627 if (!tg3_flag(tp, USING_MSIX)) {
11628 tp->irq_cnt = 1;
11629 tp->napi[0].irq_vec = tp->pdev->irq;
11630 }
11631
11632 if (tp->irq_cnt == 1) {
11633 tp->txq_cnt = 1;
11634 tp->rxq_cnt = 1;
11635 netif_set_real_num_tx_queues(tp->dev, 1);
11636 netif_set_real_num_rx_queues(tp->dev, 1);
11637 }
11638 }
11639
11640 static void tg3_ints_fini(struct tg3 *tp)
11641 {
11642 if (tg3_flag(tp, USING_MSIX))
11643 pci_disable_msix(tp->pdev);
11644 else if (tg3_flag(tp, USING_MSI))
11645 pci_disable_msi(tp->pdev);
11646 tg3_flag_clear(tp, USING_MSI);
11647 tg3_flag_clear(tp, USING_MSIX);
11648 tg3_flag_clear(tp, ENABLE_RSS);
11649 tg3_flag_clear(tp, ENABLE_TSS);
11650 }
11651
11652 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11653 bool init)
11654 {
11655 struct net_device *dev = tp->dev;
11656 int i, err;
11657
11658 /*
11659 * Set up interrupts first so we know how
11660 * many NAPI resources to allocate
11661 */
11662 tg3_ints_init(tp);
11663
11664 tg3_rss_check_indir_tbl(tp);
11665
11666 /* The placement of this call is tied
11667 * to the setup and use of Host TX descriptors.
11668 */
11669 err = tg3_alloc_consistent(tp);
11670 if (err)
11671 goto out_ints_fini;
11672
11673 netdev_lock(dev);
11674 tg3_napi_init(tp);
11675
11676 tg3_napi_enable(tp);
11677 netdev_unlock(dev);
11678
11679 for (i = 0; i < tp->irq_cnt; i++) {
11680 err = tg3_request_irq(tp, i);
11681 if (err) {
11682 for (i--; i >= 0; i--) {
11683 struct tg3_napi *tnapi = &tp->napi[i];
11684
11685 free_irq(tnapi->irq_vec, tnapi);
11686 }
11687 goto out_napi_fini;
11688 }
11689 }
11690
11691 tg3_full_lock(tp, 0);
11692
11693 if (init)
11694 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11695
11696 err = tg3_init_hw(tp, reset_phy);
11697 if (err) {
11698 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11699 tg3_free_rings(tp);
11700 }
11701
11702 tg3_full_unlock(tp);
11703
11704 if (err)
11705 goto out_free_irq;
11706
11707 if (test_irq && tg3_flag(tp, USING_MSI)) {
11708 err = tg3_test_msi(tp);
11709
11710 if (err) {
11711 tg3_full_lock(tp, 0);
11712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11713 tg3_free_rings(tp);
11714 tg3_full_unlock(tp);
11715
11716 goto out_napi_fini;
11717 }
11718
11719 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11720 u32 val = tr32(PCIE_TRANSACTION_CFG);
11721
11722 tw32(PCIE_TRANSACTION_CFG,
11723 val | PCIE_TRANS_CFG_1SHOT_MSI);
11724 }
11725 }
11726
11727 tg3_phy_start(tp);
11728
11729 tg3_hwmon_open(tp);
11730
11731 tg3_full_lock(tp, 0);
11732
11733 tg3_timer_start(tp);
11734 tg3_flag_set(tp, INIT_COMPLETE);
11735 tg3_enable_ints(tp);
11736
11737 tg3_ptp_resume(tp);
11738
11739 tg3_full_unlock(tp);
11740
11741 netif_tx_start_all_queues(dev);
11742
11743 /*
11744 * Reset the loopback feature if it was turned on while the device was
11745 * down, to make sure that it is configured properly now.
11746 */
11747 if (dev->features & NETIF_F_LOOPBACK)
11748 tg3_set_loopback(dev, dev->features);
11749
11750 return 0;
11751
11752 out_free_irq:
11753 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11754 struct tg3_napi *tnapi = &tp->napi[i];
11755 free_irq(tnapi->irq_vec, tnapi);
11756 }
11757
11758 out_napi_fini:
11759 tg3_napi_disable(tp);
11760 tg3_napi_fini(tp);
11761 tg3_free_consistent(tp);
11762
11763 out_ints_fini:
11764 tg3_ints_fini(tp);
11765
11766 return err;
11767 }
11768
11769 static void tg3_stop(struct tg3 *tp)
11770 {
11771 int i;
11772
11773 tg3_reset_task_cancel(tp);
11774 tg3_netif_stop(tp);
11775
11776 tg3_timer_stop(tp);
11777
11778 tg3_hwmon_close(tp);
11779
11780 tg3_phy_stop(tp);
11781
11782 tg3_full_lock(tp, 1);
11783
11784 tg3_disable_ints(tp);
11785
11786 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11787 tg3_free_rings(tp);
11788 tg3_flag_clear(tp, INIT_COMPLETE);
11789
11790 tg3_full_unlock(tp);
11791
11792 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11793 struct tg3_napi *tnapi = &tp->napi[i];
11794 free_irq(tnapi->irq_vec, tnapi);
11795 }
11796
11797 tg3_ints_fini(tp);
11798
11799 tg3_napi_fini(tp);
11800
11801 tg3_free_consistent(tp);
11802 }
11803
11804 static int tg3_open(struct net_device *dev)
11805 {
11806 struct tg3 *tp = netdev_priv(dev);
11807 int err;
11808
11809 if (tp->pcierr_recovery) {
11810 netdev_err(dev, "Failed to open device. PCI error recovery "
11811 "in progress\n");
11812 return -EAGAIN;
11813 }
11814
11815 if (tp->fw_needed) {
11816 err = tg3_request_firmware(tp);
11817 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11818 if (err) {
11819 netdev_warn(tp->dev, "EEE capability disabled\n");
11820 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11821 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11822 netdev_warn(tp->dev, "EEE capability restored\n");
11823 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11824 }
11825 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11826 if (err)
11827 return err;
11828 } else if (err) {
11829 netdev_warn(tp->dev, "TSO capability disabled\n");
11830 tg3_flag_clear(tp, TSO_CAPABLE);
11831 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11832 netdev_notice(tp->dev, "TSO capability restored\n");
11833 tg3_flag_set(tp, TSO_CAPABLE);
11834 }
11835 }
11836
11837 tg3_carrier_off(tp);
11838
11839 err = tg3_power_up(tp);
11840 if (err)
11841 return err;
11842
11843 tg3_full_lock(tp, 0);
11844
11845 tg3_disable_ints(tp);
11846 tg3_flag_clear(tp, INIT_COMPLETE);
11847
11848 tg3_full_unlock(tp);
11849
11850 err = tg3_start(tp,
11851 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11852 true, true);
11853 if (err) {
11854 tg3_frob_aux_power(tp, false);
11855 pci_set_power_state(tp->pdev, PCI_D3hot);
11856 }
11857
11858 return err;
11859 }
11860
11861 static int tg3_close(struct net_device *dev)
11862 {
11863 struct tg3 *tp = netdev_priv(dev);
11864
11865 if (tp->pcierr_recovery) {
11866 netdev_err(dev, "Failed to close device. PCI error recovery "
11867 "in progress\n");
11868 return -EAGAIN;
11869 }
11870
11871 tg3_stop(tp);
11872
11873 if (pci_device_is_present(tp->pdev)) {
11874 tg3_power_down_prepare(tp);
11875
11876 tg3_carrier_off(tp);
11877 }
11878 return 0;
11879 }
11880
11881 static inline u64 get_stat64(tg3_stat64_t *val)
11882 {
11883 return ((u64)val->high << 32) | ((u64)val->low);
11884 }
11885
11886 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11887 {
11888 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11889
11890 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11891 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11892 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11893 u32 val;
11894
11895 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11896 tg3_writephy(tp, MII_TG3_TEST1,
11897 val | MII_TG3_TEST1_CRC_EN);
11898 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11899 } else
11900 val = 0;
11901
11902 tp->phy_crc_errors += val;
11903
11904 return tp->phy_crc_errors;
11905 }
11906
11907 return get_stat64(&hw_stats->rx_fcs_errors);
11908 }
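
/* Note on the 5700/5701 copper path above (an inference from the
 * accumulation): MII_TG3_RXR_COUNTERS appears to be a clear-on-read
 * counter, so its value is folded into tp->phy_crc_errors on every
 * call rather than reported directly.
 */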
11909
11910 #define ESTAT_ADD(member) \
11911 estats->member = old_estats->member + \
11912 get_stat64(&hw_stats->member)
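
/* For reference, ESTAT_ADD(rx_octets) expands to
 *
 * estats->rx_octets = old_estats->rx_octets +
 * get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved in tp->estats_prev
 * plus the live hardware counter.
 */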
11913
11914 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11915 {
11916 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11917 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11918
11919 ESTAT_ADD(rx_octets);
11920 ESTAT_ADD(rx_fragments);
11921 ESTAT_ADD(rx_ucast_packets);
11922 ESTAT_ADD(rx_mcast_packets);
11923 ESTAT_ADD(rx_bcast_packets);
11924 ESTAT_ADD(rx_fcs_errors);
11925 ESTAT_ADD(rx_align_errors);
11926 ESTAT_ADD(rx_xon_pause_rcvd);
11927 ESTAT_ADD(rx_xoff_pause_rcvd);
11928 ESTAT_ADD(rx_mac_ctrl_rcvd);
11929 ESTAT_ADD(rx_xoff_entered);
11930 ESTAT_ADD(rx_frame_too_long_errors);
11931 ESTAT_ADD(rx_jabbers);
11932 ESTAT_ADD(rx_undersize_packets);
11933 ESTAT_ADD(rx_in_length_errors);
11934 ESTAT_ADD(rx_out_length_errors);
11935 ESTAT_ADD(rx_64_or_less_octet_packets);
11936 ESTAT_ADD(rx_65_to_127_octet_packets);
11937 ESTAT_ADD(rx_128_to_255_octet_packets);
11938 ESTAT_ADD(rx_256_to_511_octet_packets);
11939 ESTAT_ADD(rx_512_to_1023_octet_packets);
11940 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11941 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11942 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11943 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11944 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11945
11946 ESTAT_ADD(tx_octets);
11947 ESTAT_ADD(tx_collisions);
11948 ESTAT_ADD(tx_xon_sent);
11949 ESTAT_ADD(tx_xoff_sent);
11950 ESTAT_ADD(tx_flow_control);
11951 ESTAT_ADD(tx_mac_errors);
11952 ESTAT_ADD(tx_single_collisions);
11953 ESTAT_ADD(tx_mult_collisions);
11954 ESTAT_ADD(tx_deferred);
11955 ESTAT_ADD(tx_excessive_collisions);
11956 ESTAT_ADD(tx_late_collisions);
11957 ESTAT_ADD(tx_collide_2times);
11958 ESTAT_ADD(tx_collide_3times);
11959 ESTAT_ADD(tx_collide_4times);
11960 ESTAT_ADD(tx_collide_5times);
11961 ESTAT_ADD(tx_collide_6times);
11962 ESTAT_ADD(tx_collide_7times);
11963 ESTAT_ADD(tx_collide_8times);
11964 ESTAT_ADD(tx_collide_9times);
11965 ESTAT_ADD(tx_collide_10times);
11966 ESTAT_ADD(tx_collide_11times);
11967 ESTAT_ADD(tx_collide_12times);
11968 ESTAT_ADD(tx_collide_13times);
11969 ESTAT_ADD(tx_collide_14times);
11970 ESTAT_ADD(tx_collide_15times);
11971 ESTAT_ADD(tx_ucast_packets);
11972 ESTAT_ADD(tx_mcast_packets);
11973 ESTAT_ADD(tx_bcast_packets);
11974 ESTAT_ADD(tx_carrier_sense_errors);
11975 ESTAT_ADD(tx_discards);
11976 ESTAT_ADD(tx_errors);
11977
11978 ESTAT_ADD(dma_writeq_full);
11979 ESTAT_ADD(dma_write_prioq_full);
11980 ESTAT_ADD(rxbds_empty);
11981 ESTAT_ADD(rx_discards);
11982 ESTAT_ADD(rx_errors);
11983 ESTAT_ADD(rx_threshold_hit);
11984
11985 ESTAT_ADD(dma_readq_full);
11986 ESTAT_ADD(dma_read_prioq_full);
11987 ESTAT_ADD(tx_comp_queue_full);
11988
11989 ESTAT_ADD(ring_set_send_prod_index);
11990 ESTAT_ADD(ring_status_update);
11991 ESTAT_ADD(nic_irqs);
11992 ESTAT_ADD(nic_avoided_irqs);
11993 ESTAT_ADD(nic_tx_threshold_hit);
11994
11995 ESTAT_ADD(mbuf_lwm_thresh_hit);
11996 }
11997
11998 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11999 {
12000 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
12001 struct tg3_hw_stats *hw_stats = tp->hw_stats;
12002 unsigned long rx_dropped;
12003 unsigned long tx_dropped;
12004 int i;
12005
12006 stats->rx_packets = old_stats->rx_packets +
12007 get_stat64(&hw_stats->rx_ucast_packets) +
12008 get_stat64(&hw_stats->rx_mcast_packets) +
12009 get_stat64(&hw_stats->rx_bcast_packets);
12010
12011 stats->tx_packets = old_stats->tx_packets +
12012 get_stat64(&hw_stats->tx_ucast_packets) +
12013 get_stat64(&hw_stats->tx_mcast_packets) +
12014 get_stat64(&hw_stats->tx_bcast_packets);
12015
12016 stats->rx_bytes = old_stats->rx_bytes +
12017 get_stat64(&hw_stats->rx_octets);
12018 stats->tx_bytes = old_stats->tx_bytes +
12019 get_stat64(&hw_stats->tx_octets);
12020
12021 stats->rx_errors = old_stats->rx_errors +
12022 get_stat64(&hw_stats->rx_errors);
12023 stats->tx_errors = old_stats->tx_errors +
12024 get_stat64(&hw_stats->tx_errors) +
12025 get_stat64(&hw_stats->tx_mac_errors) +
12026 get_stat64(&hw_stats->tx_carrier_sense_errors) +
12027 get_stat64(&hw_stats->tx_discards);
12028
12029 stats->multicast = old_stats->multicast +
12030 get_stat64(&hw_stats->rx_mcast_packets);
12031 stats->collisions = old_stats->collisions +
12032 get_stat64(&hw_stats->tx_collisions);
12033
12034 stats->rx_length_errors = old_stats->rx_length_errors +
12035 get_stat64(&hw_stats->rx_frame_too_long_errors) +
12036 get_stat64(&hw_stats->rx_undersize_packets);
12037
12038 stats->rx_frame_errors = old_stats->rx_frame_errors +
12039 get_stat64(&hw_stats->rx_align_errors);
12040 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
12041 get_stat64(&hw_stats->tx_discards);
12042 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
12043 get_stat64(&hw_stats->tx_carrier_sense_errors);
12044
12045 stats->rx_crc_errors = old_stats->rx_crc_errors +
12046 tg3_calc_crc_errors(tp);
12047
12048 stats->rx_missed_errors = old_stats->rx_missed_errors +
12049 get_stat64(&hw_stats->rx_discards);
12050
12051 /* Aggregate per-queue counters. The per-queue counters are updated
12052 * by a single writer, race-free. The result computed by this loop
12053 * might not be 100% accurate (counters can be updated in the middle of
12054 * the loop) but the next tg3_get_nstats() will recompute the current
12055 * value so it is acceptable.
12056 *
12057 * Note that these counters wrap around at 4G on 32bit machines.
12058 */
12059 rx_dropped = (unsigned long)(old_stats->rx_dropped);
12060 tx_dropped = (unsigned long)(old_stats->tx_dropped);
12061
12062 for (i = 0; i < tp->irq_cnt; i++) {
12063 struct tg3_napi *tnapi = &tp->napi[i];
12064
12065 rx_dropped += tnapi->rx_dropped;
12066 tx_dropped += tnapi->tx_dropped;
12067 }
12068
12069 stats->rx_dropped = rx_dropped;
12070 stats->tx_dropped = tx_dropped;
12071 }
12072
12073 static int tg3_get_regs_len(struct net_device *dev)
12074 {
12075 return TG3_REG_BLK_SIZE;
12076 }
12077
12078 static void tg3_get_regs(struct net_device *dev,
12079 struct ethtool_regs *regs, void *_p)
12080 {
12081 struct tg3 *tp = netdev_priv(dev);
12082
12083 regs->version = 0;
12084
12085 memset(_p, 0, TG3_REG_BLK_SIZE);
12086
12087 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12088 return;
12089
12090 tg3_full_lock(tp, 0);
12091
12092 tg3_dump_legacy_regs(tp, (u32 *)_p);
12093
12094 tg3_full_unlock(tp);
12095 }
12096
12097 static int tg3_get_eeprom_len(struct net_device *dev)
12098 {
12099 struct tg3 *tp = netdev_priv(dev);
12100
12101 return tp->nvram_size;
12102 }
12103
12104 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12105 {
12106 struct tg3 *tp = netdev_priv(dev);
12107 int ret, cpmu_restore = 0;
12108 u8 *pd;
12109 u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
12110 __be32 val;
12111
12112 if (tg3_flag(tp, NO_NVRAM))
12113 return -EINVAL;
12114
12115 offset = eeprom->offset;
12116 len = eeprom->len;
12117 eeprom->len = 0;
12118
12119 eeprom->magic = TG3_EEPROM_MAGIC;
12120
12121 /* Override clock, link aware and link idle modes */
12122 if (tg3_flag(tp, CPMU_PRESENT)) {
12123 cpmu_val = tr32(TG3_CPMU_CTRL);
12124 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12125 CPMU_CTRL_LINK_IDLE_MODE)) {
12126 tw32(TG3_CPMU_CTRL, cpmu_val &
12127 ~(CPMU_CTRL_LINK_AWARE_MODE |
12128 CPMU_CTRL_LINK_IDLE_MODE));
12129 cpmu_restore = 1;
12130 }
12131 }
12132 tg3_override_clk(tp);
12133
12134 if (offset & 3) {
12135 /* adjustments to start on required 4 byte boundary */
12136 b_offset = offset & 3;
12137 b_count = 4 - b_offset;
12138 if (b_count > len) {
12139 /* i.e. offset=1 len=2 */
12140 b_count = len;
12141 }
12142 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12143 if (ret)
12144 goto eeprom_done;
12145 memcpy(data, ((char *)&val) + b_offset, b_count);
12146 len -= b_count;
12147 offset += b_count;
12148 eeprom->len += b_count;
12149 }
12150
12151 /* read bytes up to the last 4 byte boundary */
12152 pd = &data[eeprom->len];
12153 for (i = 0; i < (len - (len & 3)); i += 4) {
12154 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12155 if (ret) {
12156 if (i)
12157 i -= 4;
12158 eeprom->len += i;
12159 goto eeprom_done;
12160 }
12161 memcpy(pd + i, &val, 4);
12162 if (need_resched()) {
12163 if (signal_pending(current)) {
12164 eeprom->len += i;
12165 ret = -EINTR;
12166 goto eeprom_done;
12167 }
12168 cond_resched();
12169 }
12170 }
12171 eeprom->len += i;
12172
12173 if (len & 3) {
12174 /* read last bytes not ending on 4 byte boundary */
12175 pd = &data[eeprom->len];
12176 b_count = len & 3;
12177 b_offset = offset + len - b_count;
12178 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12179 if (ret)
12180 goto eeprom_done;
12181 memcpy(pd, &val, b_count);
12182 eeprom->len += b_count;
12183 }
12184 ret = 0;
12185
12186 eeprom_done:
12187 /* Restore clock, link aware and link idle modes */
12188 tg3_restore_clk(tp);
12189 if (cpmu_restore)
12190 tw32(TG3_CPMU_CTRL, cpmu_val);
12191
12192 return ret;
12193 }
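
/* Worked example of the alignment handling above (illustrative): a
 * request with offset == 6 and len == 9 is served as
 * 1) head: b_offset == 2, b_count == 2 -- read the word at offset 4
 * and copy bytes 6-7;
 * 2) body: one aligned tg3_nvram_read_be32() at offset 8;
 * 3) tail: b_count == 3 -- read the word at offset 12 and copy
 * bytes 12-14.
 * eeprom->len accumulates 2 + 4 + 3 == 9 bytes.
 */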
12194
12195 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12196 {
12197 struct tg3 *tp = netdev_priv(dev);
12198 int ret;
12199 u32 offset, len, b_offset, odd_len;
12200 u8 *buf;
12201 __be32 start = 0, end;
12202
12203 if (tg3_flag(tp, NO_NVRAM) ||
12204 eeprom->magic != TG3_EEPROM_MAGIC)
12205 return -EINVAL;
12206
12207 offset = eeprom->offset;
12208 len = eeprom->len;
12209
12210 if ((b_offset = (offset & 3))) {
12211 /* adjustments to start on required 4 byte boundary */
12212 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12213 if (ret)
12214 return ret;
12215 len += b_offset;
12216 offset &= ~3;
12217 if (len < 4)
12218 len = 4;
12219 }
12220
12221 odd_len = 0;
12222 if (len & 3) {
12223 /* adjustments to end on required 4 byte boundary */
12224 odd_len = 1;
12225 len = (len + 3) & ~3;
12226 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12227 if (ret)
12228 return ret;
12229 }
12230
12231 buf = data;
12232 if (b_offset || odd_len) {
12233 buf = kmalloc(len, GFP_KERNEL);
12234 if (!buf)
12235 return -ENOMEM;
12236 if (b_offset)
12237 memcpy(buf, &start, 4);
12238 if (odd_len)
12239 memcpy(buf+len-4, &end, 4);
12240 memcpy(buf + b_offset, data, eeprom->len);
12241 }
12242
12243 ret = tg3_nvram_write_block(tp, offset, len, buf);
12244
12245 if (buf != data)
12246 kfree(buf);
12247
12248 return ret;
12249 }
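
/* Worked example of the read-modify-write above (illustrative):
 * writing 6 bytes at offset == 5 gives b_offset == 1, so the word at
 * offset 4 is read into 'start' and (offset, len) become (4, 7).
 * Since len & 3 == 3, len is rounded up to 8 and the word at
 * 4 + 8 - 4 == 8 is read into 'end'. The bounce buffer then holds the
 * two boundary words with the caller's data copied over the middle
 * (memcpy(buf + 1, data, 6)), and the aligned 8-byte block is written
 * back with tg3_nvram_write_block().
 */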
12250
12251 static int tg3_get_link_ksettings(struct net_device *dev,
12252 struct ethtool_link_ksettings *cmd)
12253 {
12254 struct tg3 *tp = netdev_priv(dev);
12255 u32 supported, advertising;
12256
12257 if (tg3_flag(tp, USE_PHYLIB)) {
12258 struct phy_device *phydev;
12259 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12260 return -EAGAIN;
12261 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12262 phy_ethtool_ksettings_get(phydev, cmd);
12263
12264 return 0;
12265 }
12266
12267 supported = (SUPPORTED_Autoneg);
12268
12269 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12270 supported |= (SUPPORTED_1000baseT_Half |
12271 SUPPORTED_1000baseT_Full);
12272
12273 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12274 supported |= (SUPPORTED_100baseT_Half |
12275 SUPPORTED_100baseT_Full |
12276 SUPPORTED_10baseT_Half |
12277 SUPPORTED_10baseT_Full |
12278 SUPPORTED_TP);
12279 cmd->base.port = PORT_TP;
12280 } else {
12281 supported |= SUPPORTED_FIBRE;
12282 cmd->base.port = PORT_FIBRE;
12283 }
12284 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12285 supported);
12286
12287 advertising = tp->link_config.advertising;
12288 if (tg3_flag(tp, PAUSE_AUTONEG)) {
12289 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12290 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12291 advertising |= ADVERTISED_Pause;
12292 } else {
12293 advertising |= ADVERTISED_Pause |
12294 ADVERTISED_Asym_Pause;
12295 }
12296 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12297 advertising |= ADVERTISED_Asym_Pause;
12298 }
12299 }
12300 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12301 advertising);
12302
12303 if (netif_running(dev) && tp->link_up) {
12304 cmd->base.speed = tp->link_config.active_speed;
12305 cmd->base.duplex = tp->link_config.active_duplex;
12306 ethtool_convert_legacy_u32_to_link_mode(
12307 cmd->link_modes.lp_advertising,
12308 tp->link_config.rmt_adv);
12309
12310 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12311 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12312 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12313 else
12314 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12315 }
12316 } else {
12317 cmd->base.speed = SPEED_UNKNOWN;
12318 cmd->base.duplex = DUPLEX_UNKNOWN;
12319 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12320 }
12321 cmd->base.phy_address = tp->phy_addr;
12322 cmd->base.autoneg = tp->link_config.autoneg;
12323 return 0;
12324 }
12325
12326 static int tg3_set_link_ksettings(struct net_device *dev,
12327 const struct ethtool_link_ksettings *cmd)
12328 {
12329 struct tg3 *tp = netdev_priv(dev);
12330 u32 speed = cmd->base.speed;
12331 u32 advertising;
12332
12333 if (tg3_flag(tp, USE_PHYLIB)) {
12334 struct phy_device *phydev;
12335 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12336 return -EAGAIN;
12337 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12338 return phy_ethtool_ksettings_set(phydev, cmd);
12339 }
12340
12341 if (cmd->base.autoneg != AUTONEG_ENABLE &&
12342 cmd->base.autoneg != AUTONEG_DISABLE)
12343 return -EINVAL;
12344
12345 if (cmd->base.autoneg == AUTONEG_DISABLE &&
12346 cmd->base.duplex != DUPLEX_FULL &&
12347 cmd->base.duplex != DUPLEX_HALF)
12348 return -EINVAL;
12349
12350 ethtool_convert_link_mode_to_legacy_u32(&advertising,
12351 cmd->link_modes.advertising);
12352
12353 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12354 u32 mask = ADVERTISED_Autoneg |
12355 ADVERTISED_Pause |
12356 ADVERTISED_Asym_Pause;
12357
12358 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12359 mask |= ADVERTISED_1000baseT_Half |
12360 ADVERTISED_1000baseT_Full;
12361
12362 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12363 mask |= ADVERTISED_100baseT_Half |
12364 ADVERTISED_100baseT_Full |
12365 ADVERTISED_10baseT_Half |
12366 ADVERTISED_10baseT_Full |
12367 ADVERTISED_TP;
12368 else
12369 mask |= ADVERTISED_FIBRE;
12370
12371 if (advertising & ~mask)
12372 return -EINVAL;
12373
12374 mask &= (ADVERTISED_1000baseT_Half |
12375 ADVERTISED_1000baseT_Full |
12376 ADVERTISED_100baseT_Half |
12377 ADVERTISED_100baseT_Full |
12378 ADVERTISED_10baseT_Half |
12379 ADVERTISED_10baseT_Full);
12380
12381 advertising &= mask;
12382 } else {
12383 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12384 if (speed != SPEED_1000)
12385 return -EINVAL;
12386
12387 if (cmd->base.duplex != DUPLEX_FULL)
12388 return -EINVAL;
12389 } else {
12390 if (speed != SPEED_100 &&
12391 speed != SPEED_10)
12392 return -EINVAL;
12393 }
12394 }
12395
12396 tg3_full_lock(tp, 0);
12397
12398 tp->link_config.autoneg = cmd->base.autoneg;
12399 if (cmd->base.autoneg == AUTONEG_ENABLE) {
12400 tp->link_config.advertising = (advertising |
12401 ADVERTISED_Autoneg);
12402 tp->link_config.speed = SPEED_UNKNOWN;
12403 tp->link_config.duplex = DUPLEX_UNKNOWN;
12404 } else {
12405 tp->link_config.advertising = 0;
12406 tp->link_config.speed = speed;
12407 tp->link_config.duplex = cmd->base.duplex;
12408 }
12409
12410 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12411
12412 tg3_warn_mgmt_link_flap(tp);
12413
12414 if (netif_running(dev))
12415 tg3_setup_phy(tp, true);
12416
12417 tg3_full_unlock(tp);
12418
12419 return 0;
12420 }
12421
12422 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12423 {
12424 struct tg3 *tp = netdev_priv(dev);
12425
12426 strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12427 strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12428 strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12429 }
12430
12431 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12432 {
12433 struct tg3 *tp = netdev_priv(dev);
12434
12435 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12436 wol->supported = WAKE_MAGIC;
12437 else
12438 wol->supported = 0;
12439 wol->wolopts = 0;
12440 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12441 wol->wolopts = WAKE_MAGIC;
12442 memset(&wol->sopass, 0, sizeof(wol->sopass));
12443 }
12444
12445 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12446 {
12447 struct tg3 *tp = netdev_priv(dev);
12448 struct device *dp = &tp->pdev->dev;
12449
12450 if (wol->wolopts & ~WAKE_MAGIC)
12451 return -EINVAL;
12452 if ((wol->wolopts & WAKE_MAGIC) &&
12453 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12454 return -EINVAL;
12455
12456 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12457
12458 if (device_may_wakeup(dp))
12459 tg3_flag_set(tp, WOL_ENABLE);
12460 else
12461 tg3_flag_clear(tp, WOL_ENABLE);
12462
12463 return 0;
12464 }
12465
12466 static u32 tg3_get_msglevel(struct net_device *dev)
12467 {
12468 struct tg3 *tp = netdev_priv(dev);
12469 return tp->msg_enable;
12470 }
12471
12472 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12473 {
12474 struct tg3 *tp = netdev_priv(dev);
12475 tp->msg_enable = value;
12476 }
12477
12478 static int tg3_nway_reset(struct net_device *dev)
12479 {
12480 struct tg3 *tp = netdev_priv(dev);
12481 int r;
12482
12483 if (!netif_running(dev))
12484 return -EAGAIN;
12485
12486 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12487 return -EINVAL;
12488
12489 tg3_warn_mgmt_link_flap(tp);
12490
12491 if (tg3_flag(tp, USE_PHYLIB)) {
12492 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12493 return -EAGAIN;
12494 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12495 } else {
12496 u32 bmcr;
12497
12498 spin_lock_bh(&tp->lock);
12499 r = -EINVAL;
12500 tg3_readphy(tp, MII_BMCR, &bmcr);
12501 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12502 ((bmcr & BMCR_ANENABLE) ||
12503 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12504 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12505 BMCR_ANENABLE);
12506 r = 0;
12507 }
12508 spin_unlock_bh(&tp->lock);
12509 }
12510
12511 return r;
12512 }
12513
12514 static void tg3_get_ringparam(struct net_device *dev,
12515 struct ethtool_ringparam *ering,
12516 struct kernel_ethtool_ringparam *kernel_ering,
12517 struct netlink_ext_ack *extack)
12518 {
12519 struct tg3 *tp = netdev_priv(dev);
12520
12521 ering->rx_max_pending = tp->rx_std_ring_mask;
12522 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12523 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12524 else
12525 ering->rx_jumbo_max_pending = 0;
12526
12527 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12528
12529 ering->rx_pending = tp->rx_pending;
12530 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12531 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12532 else
12533 ering->rx_jumbo_pending = 0;
12534
12535 ering->tx_pending = tp->napi[0].tx_pending;
12536 }
12537
12538 static int tg3_set_ringparam(struct net_device *dev,
12539 struct ethtool_ringparam *ering,
12540 struct kernel_ethtool_ringparam *kernel_ering,
12541 struct netlink_ext_ack *extack)
12542 {
12543 struct tg3 *tp = netdev_priv(dev);
12544 int i, irq_sync = 0, err = 0;
12545 bool reset_phy = false;
12546
12547 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12548 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12549 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12550 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12551 (tg3_flag(tp, TSO_BUG) &&
12552 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12553 return -EINVAL;
12554
12555 if (netif_running(dev)) {
12556 tg3_phy_stop(tp);
12557 tg3_netif_stop(tp);
12558 irq_sync = 1;
12559 }
12560
12561 netdev_lock(dev);
12562 tg3_full_lock(tp, irq_sync);
12563
12564 tp->rx_pending = ering->rx_pending;
12565
12566 if (tg3_flag(tp, MAX_RXPEND_64) &&
12567 tp->rx_pending > 63)
12568 tp->rx_pending = 63;
12569
12570 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12571 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12572
12573 for (i = 0; i < tp->irq_max; i++)
12574 tp->napi[i].tx_pending = ering->tx_pending;
12575
12576 if (netif_running(dev)) {
12577 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12578 /* Reset PHY to avoid PHY lock up */
12579 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12580 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12581 tg3_asic_rev(tp) == ASIC_REV_5720)
12582 reset_phy = true;
12583
12584 err = tg3_restart_hw(tp, reset_phy);
12585 if (!err)
12586 tg3_netif_start(tp);
12587 }
12588
12589 tg3_full_unlock(tp);
12590 netdev_unlock(dev);
12591
12592 if (irq_sync && !err)
12593 tg3_phy_start(tp);
12594
12595 return err;
12596 }
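
/* Note on the validation at the top of tg3_set_ringparam() (values
 * are illustrative; MAX_SKB_FRAGS is configuration-dependent):
 * tx_pending must exceed MAX_SKB_FRAGS, presumably so a maximally
 * fragmented skb always fits. With MAX_SKB_FRAGS == 17 the smallest
 * accepted tx_pending is 18, and 3 * 17 + 1 == 52 on TSO_BUG chips.
 */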
12597
12598 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12599 {
12600 struct tg3 *tp = netdev_priv(dev);
12601
12602 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12603
12604 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12605 epause->rx_pause = 1;
12606 else
12607 epause->rx_pause = 0;
12608
12609 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12610 epause->tx_pause = 1;
12611 else
12612 epause->tx_pause = 0;
12613 }
12614
12615 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12616 {
12617 struct tg3 *tp = netdev_priv(dev);
12618 int err = 0;
12619 bool reset_phy = false;
12620
12621 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12622 tg3_warn_mgmt_link_flap(tp);
12623
12624 if (tg3_flag(tp, USE_PHYLIB)) {
12625 struct phy_device *phydev;
12626
12627 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12628
12629 if (!phy_validate_pause(phydev, epause))
12630 return -EINVAL;
12631
12632 tp->link_config.flowctrl = 0;
12633 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12634 if (epause->rx_pause) {
12635 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12636
12637 if (epause->tx_pause) {
12638 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12639 }
12640 } else if (epause->tx_pause) {
12641 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12642 }
12643
12644 if (epause->autoneg)
12645 tg3_flag_set(tp, PAUSE_AUTONEG);
12646 else
12647 tg3_flag_clear(tp, PAUSE_AUTONEG);
12648
12649 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12650 if (phydev->autoneg) {
12651 /* phy_set_asym_pause() will
12652 * renegotiate the link to inform our
12653 * link partner of our flow control
12654 * settings, even if the flow control
12655 * is forced. Let tg3_adjust_link()
12656 * do the final flow control setup.
12657 */
12658 return 0;
12659 }
12660
12661 if (!epause->autoneg)
12662 tg3_setup_flow_control(tp, 0, 0);
12663 }
12664 } else {
12665 int irq_sync = 0;
12666
12667 if (netif_running(dev)) {
12668 tg3_netif_stop(tp);
12669 irq_sync = 1;
12670 }
12671
12672 netdev_lock(dev);
12673 tg3_full_lock(tp, irq_sync);
12674
12675 if (epause->autoneg)
12676 tg3_flag_set(tp, PAUSE_AUTONEG);
12677 else
12678 tg3_flag_clear(tp, PAUSE_AUTONEG);
12679 if (epause->rx_pause)
12680 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12681 else
12682 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12683 if (epause->tx_pause)
12684 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12685 else
12686 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12687
12688 if (netif_running(dev)) {
12689 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12690 /* Reset PHY to avoid PHY lock up */
12691 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12692 tg3_asic_rev(tp) == ASIC_REV_5719 ||
12693 tg3_asic_rev(tp) == ASIC_REV_5720)
12694 reset_phy = true;
12695
12696 err = tg3_restart_hw(tp, reset_phy);
12697 if (!err)
12698 tg3_netif_start(tp);
12699 }
12700
12701 tg3_full_unlock(tp);
12702 netdev_unlock(dev);
12703 }
12704
12705 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12706
12707 return err;
12708 }
12709
12710 static int tg3_get_sset_count(struct net_device *dev, int sset)
12711 {
12712 switch (sset) {
12713 case ETH_SS_TEST:
12714 return TG3_NUM_TEST;
12715 case ETH_SS_STATS:
12716 return TG3_NUM_STATS;
12717 default:
12718 return -EOPNOTSUPP;
12719 }
12720 }
12721
12722 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12723 u32 *rules __always_unused)
12724 {
12725 struct tg3 *tp = netdev_priv(dev);
12726
12727 if (!tg3_flag(tp, SUPPORT_MSIX))
12728 return -EOPNOTSUPP;
12729
12730 switch (info->cmd) {
12731 case ETHTOOL_GRXRINGS:
12732 if (netif_running(tp->dev))
12733 info->data = tp->rxq_cnt;
12734 else {
12735 info->data = num_online_cpus();
12736 if (info->data > TG3_RSS_MAX_NUM_QS)
12737 info->data = TG3_RSS_MAX_NUM_QS;
12738 }
12739
12740 return 0;
12741
12742 default:
12743 return -EOPNOTSUPP;
12744 }
12745 }
12746
12747 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12748 {
12749 u32 size = 0;
12750 struct tg3 *tp = netdev_priv(dev);
12751
12752 if (tg3_flag(tp, SUPPORT_MSIX))
12753 size = TG3_RSS_INDIR_TBL_SIZE;
12754
12755 return size;
12756 }
12757
12758 static int tg3_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh)
12759 {
12760 struct tg3 *tp = netdev_priv(dev);
12761 int i;
12762
12763 rxfh->hfunc = ETH_RSS_HASH_TOP;
12764 if (!rxfh->indir)
12765 return 0;
12766
12767 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12768 rxfh->indir[i] = tp->rss_ind_tbl[i];
12769
12770 return 0;
12771 }
12772
12773 static int tg3_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
12774 struct netlink_ext_ack *extack)
12775 {
12776 struct tg3 *tp = netdev_priv(dev);
12777 size_t i;
12778
12779 /* We require at least one supported parameter to be changed and no
12780 * change in any of the unsupported parameters
12781 */
12782 if (rxfh->key ||
12783 (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
12784 rxfh->hfunc != ETH_RSS_HASH_TOP))
12785 return -EOPNOTSUPP;
12786
12787 if (!rxfh->indir)
12788 return 0;
12789
12790 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12791 tp->rss_ind_tbl[i] = rxfh->indir[i];
12792
12793 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12794 return 0;
12795
12796 /* It is legal to write the indirection
12797 * table while the device is running.
12798 */
12799 tg3_full_lock(tp, 0);
12800 tg3_rss_write_indir_tbl(tp);
12801 tg3_full_unlock(tp);
12802
12803 return 0;
12804 }
12805
12806 static void tg3_get_channels(struct net_device *dev,
12807 struct ethtool_channels *channel)
12808 {
12809 struct tg3 *tp = netdev_priv(dev);
12810 u32 deflt_qs = netif_get_num_default_rss_queues();
12811
12812 channel->max_rx = tp->rxq_max;
12813 channel->max_tx = tp->txq_max;
12814
12815 if (netif_running(dev)) {
12816 channel->rx_count = tp->rxq_cnt;
12817 channel->tx_count = tp->txq_cnt;
12818 } else {
12819 if (tp->rxq_req)
12820 channel->rx_count = tp->rxq_req;
12821 else
12822 channel->rx_count = min(deflt_qs, tp->rxq_max);
12823
12824 if (tp->txq_req)
12825 channel->tx_count = tp->txq_req;
12826 else
12827 channel->tx_count = min(deflt_qs, tp->txq_max);
12828 }
12829 }
12830
12831 static int tg3_set_channels(struct net_device *dev,
12832 struct ethtool_channels *channel)
12833 {
12834 struct tg3 *tp = netdev_priv(dev);
12835
12836 if (!tg3_flag(tp, SUPPORT_MSIX))
12837 return -EOPNOTSUPP;
12838
12839 if (channel->rx_count > tp->rxq_max ||
12840 channel->tx_count > tp->txq_max)
12841 return -EINVAL;
12842
12843 tp->rxq_req = channel->rx_count;
12844 tp->txq_req = channel->tx_count;
12845
12846 if (!netif_running(dev))
12847 return 0;
12848
12849 tg3_stop(tp);
12850
12851 tg3_carrier_off(tp);
12852
12853 tg3_start(tp, true, false, false);
12854
12855 return 0;
12856 }
12857
12858 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12859 {
12860 switch (stringset) {
12861 case ETH_SS_STATS:
12862 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12863 break;
12864 case ETH_SS_TEST:
12865 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12866 break;
12867 default:
12868 WARN_ON(1); /* we need a WARN() */
12869 break;
12870 }
12871 }
12872
12873 static int tg3_set_phys_id(struct net_device *dev,
12874 enum ethtool_phys_id_state state)
12875 {
12876 struct tg3 *tp = netdev_priv(dev);
12877
12878 switch (state) {
12879 case ETHTOOL_ID_ACTIVE:
12880 return 1; /* cycle on/off once per second */
12881
12882 case ETHTOOL_ID_ON:
12883 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12884 LED_CTRL_1000MBPS_ON |
12885 LED_CTRL_100MBPS_ON |
12886 LED_CTRL_10MBPS_ON |
12887 LED_CTRL_TRAFFIC_OVERRIDE |
12888 LED_CTRL_TRAFFIC_BLINK |
12889 LED_CTRL_TRAFFIC_LED);
12890 break;
12891
12892 case ETHTOOL_ID_OFF:
12893 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12894 LED_CTRL_TRAFFIC_OVERRIDE);
12895 break;
12896
12897 case ETHTOOL_ID_INACTIVE:
12898 tw32(MAC_LED_CTRL, tp->led_ctrl);
12899 break;
12900 }
12901
12902 return 0;
12903 }
12904
12905 static void tg3_get_ethtool_stats(struct net_device *dev,
12906 struct ethtool_stats *estats, u64 *tmp_stats)
12907 {
12908 struct tg3 *tp = netdev_priv(dev);
12909
12910 if (tp->hw_stats)
12911 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12912 else
12913 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12914 }
12915
12916 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12917 {
12918 int i;
12919 __be32 *buf;
12920 u32 offset = 0, len = 0;
12921 u32 magic, val;
12922
12923 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12924 return NULL;
12925
12926 if (magic == TG3_EEPROM_MAGIC) {
12927 for (offset = TG3_NVM_DIR_START;
12928 offset < TG3_NVM_DIR_END;
12929 offset += TG3_NVM_DIRENT_SIZE) {
12930 if (tg3_nvram_read(tp, offset, &val))
12931 return NULL;
12932
12933 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12934 TG3_NVM_DIRTYPE_EXTVPD)
12935 break;
12936 }
12937
12938 if (offset != TG3_NVM_DIR_END) {
12939 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12940 if (tg3_nvram_read(tp, offset + 4, &offset))
12941 return NULL;
12942
12943 offset = tg3_nvram_logical_addr(tp, offset);
12944 }
12945
12946 if (!offset || !len) {
12947 offset = TG3_NVM_VPD_OFF;
12948 len = TG3_NVM_VPD_LEN;
12949 }
12950
12951 buf = kmalloc(len, GFP_KERNEL);
12952 if (!buf)
12953 return NULL;
12954
12955 for (i = 0; i < len; i += 4) {
12956 /* The data is in little-endian format in NVRAM.
12957 * Use the big-endian read routines to preserve
12958 * the byte order as it exists in NVRAM.
12959 */
12960 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12961 goto error;
12962 }
12963 *vpdlen = len;
12964 } else {
12965 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12966 if (IS_ERR(buf))
12967 return NULL;
12968 }
12969
12970 return buf;
12971
12972 error:
12973 kfree(buf);
12974 return NULL;
12975 }
12976
12977 #define NVRAM_TEST_SIZE 0x100
12978 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12979 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12980 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12981 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12982 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12983 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12984 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12985 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12986
12987 static int tg3_test_nvram(struct tg3 *tp)
12988 {
12989 u32 csum, magic;
12990 __be32 *buf;
12991 int i, j, k, err = 0, size;
12992 unsigned int len;
12993
12994 if (tg3_flag(tp, NO_NVRAM))
12995 return 0;
12996
12997 if (tg3_nvram_read(tp, 0, &magic) != 0)
12998 return -EIO;
12999
13000 if (magic == TG3_EEPROM_MAGIC)
13001 size = NVRAM_TEST_SIZE;
13002 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
13003 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
13004 TG3_EEPROM_SB_FORMAT_1) {
13005 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
13006 case TG3_EEPROM_SB_REVISION_0:
13007 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
13008 break;
13009 case TG3_EEPROM_SB_REVISION_2:
13010 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
13011 break;
13012 case TG3_EEPROM_SB_REVISION_3:
13013 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
13014 break;
13015 case TG3_EEPROM_SB_REVISION_4:
13016 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
13017 break;
13018 case TG3_EEPROM_SB_REVISION_5:
13019 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
13020 break;
13021 case TG3_EEPROM_SB_REVISION_6:
13022 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
13023 break;
13024 default:
13025 return -EIO;
13026 }
13027 } else
13028 return 0;
13029 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13030 size = NVRAM_SELFBOOT_HW_SIZE;
13031 else
13032 return -EIO;
13033
13034 buf = kmalloc(size, GFP_KERNEL);
13035 if (buf == NULL)
13036 return -ENOMEM;
13037
13038 err = -EIO;
13039 for (i = 0, j = 0; i < size; i += 4, j++) {
13040 err = tg3_nvram_read_be32(tp, i, &buf[j]);
13041 if (err)
13042 break;
13043 }
13044 if (i < size)
13045 goto out;
13046
13047 /* Selfboot format */
13048 magic = be32_to_cpu(buf[0]);
13049 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
13050 TG3_EEPROM_MAGIC_FW) {
13051 u8 *buf8 = (u8 *) buf, csum8 = 0;
13052
13053 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
13054 TG3_EEPROM_SB_REVISION_2) {
13055 /* For rev 2, the csum doesn't include the MBA. */
13056 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
13057 csum8 += buf8[i];
13058 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
13059 csum8 += buf8[i];
13060 } else {
13061 for (i = 0; i < size; i++)
13062 csum8 += buf8[i];
13063 }
13064
13065 if (csum8 == 0) {
13066 err = 0;
13067 goto out;
13068 }
13069
13070 err = -EIO;
13071 goto out;
13072 }
13073
13074 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
13075 TG3_EEPROM_MAGIC_HW) {
13076 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
13077 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
13078 u8 *buf8 = (u8 *) buf;
13079
13080 /* Separate the parity bits and the data bytes. */
13081 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
13082 if ((i == 0) || (i == 8)) {
13083 int l;
13084 u8 msk;
13085
13086 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
13087 parity[k++] = buf8[i] & msk;
13088 i++;
13089 } else if (i == 16) {
13090 int l;
13091 u8 msk;
13092
13093 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
13094 parity[k++] = buf8[i] & msk;
13095 i++;
13096
13097 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
13098 parity[k++] = buf8[i] & msk;
13099 i++;
13100 }
13101 data[j++] = buf8[i];
13102 }
13103
13104 err = -EIO;
13105 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
13106 u8 hw8 = hweight8(data[i]);
13107
13108 if ((hw8 & 0x1) && parity[i])
13109 goto out;
13110 else if (!(hw8 & 0x1) && !parity[i])
13111 goto out;
13112 }
13113 err = 0;
13114 goto out;
13115 }
13116
13117 err = -EIO;
13118
13119 /* Bootstrap checksum at offset 0x10 */
13120 csum = calc_crc((unsigned char *) buf, 0x10);
13121
13122 /* The type of buf is __be32 *, but this value is __le32 */
13123 if (csum != le32_to_cpu((__force __le32)buf[0x10 / 4]))
13124 goto out;
13125
13126 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13127 csum = calc_crc((unsigned char *)&buf[0x74 / 4], 0x88);
13128
13129 /* The type of buf is __be32 *, but this value is __le32 */
13130 if (csum != le32_to_cpu((__force __le32)buf[0xfc / 4]))
13131 goto out;
13132
13133 kfree(buf);
13134
13135 buf = tg3_vpd_readblock(tp, &len);
13136 if (!buf)
13137 return -ENOMEM;
13138
13139 err = pci_vpd_check_csum(buf, len);
13140 /* go on if no checksum found */
13141 if (err == 1)
13142 err = 0;
13143 out:
13144 kfree(buf);
13145 return err;
13146 }
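
/* Note on the selfboot-HW parity loop above (an inference from its
 * pass/fail conditions): a byte passes only when hweight8(data[i])
 * plus its stored parity bit is odd, i.e. the format uses odd parity.
 */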
13147
13148 #define TG3_SERDES_TIMEOUT_SEC 2
13149 #define TG3_COPPER_TIMEOUT_SEC 6
13150
13151 static int tg3_test_link(struct tg3 *tp)
13152 {
13153 int i, max;
13154
13155 if (!netif_running(tp->dev))
13156 return -ENODEV;
13157
13158 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13159 max = TG3_SERDES_TIMEOUT_SEC;
13160 else
13161 max = TG3_COPPER_TIMEOUT_SEC;
13162
13163 for (i = 0; i < max; i++) {
13164 if (tp->link_up)
13165 return 0;
13166
13167 if (msleep_interruptible(1000))
13168 break;
13169 }
13170
13171 return -EIO;
13172 }
13173
13174 /* Only test the commonly used registers */
13175 static int tg3_test_registers(struct tg3 *tp)
13176 {
13177 int i, is_5705, is_5750;
13178 u32 offset, read_mask, write_mask, val, save_val, read_val;
13179 static struct {
13180 u16 offset;
13181 u16 flags;
13182 #define TG3_FL_5705 0x1
13183 #define TG3_FL_NOT_5705 0x2
13184 #define TG3_FL_NOT_5788 0x4
13185 #define TG3_FL_NOT_5750 0x8
13186 u32 read_mask;
13187 u32 write_mask;
13188 } reg_tbl[] = {
13189 /* MAC Control Registers */
13190 { MAC_MODE, TG3_FL_NOT_5705,
13191 0x00000000, 0x00ef6f8c },
13192 { MAC_MODE, TG3_FL_5705,
13193 0x00000000, 0x01ef6b8c },
13194 { MAC_STATUS, TG3_FL_NOT_5705,
13195 0x03800107, 0x00000000 },
13196 { MAC_STATUS, TG3_FL_5705,
13197 0x03800100, 0x00000000 },
13198 { MAC_ADDR_0_HIGH, 0x0000,
13199 0x00000000, 0x0000ffff },
13200 { MAC_ADDR_0_LOW, 0x0000,
13201 0x00000000, 0xffffffff },
13202 { MAC_RX_MTU_SIZE, 0x0000,
13203 0x00000000, 0x0000ffff },
13204 { MAC_TX_MODE, 0x0000,
13205 0x00000000, 0x00000070 },
13206 { MAC_TX_LENGTHS, 0x0000,
13207 0x00000000, 0x00003fff },
13208 { MAC_RX_MODE, TG3_FL_NOT_5705,
13209 0x00000000, 0x000007fc },
13210 { MAC_RX_MODE, TG3_FL_5705,
13211 0x00000000, 0x000007dc },
13212 { MAC_HASH_REG_0, 0x0000,
13213 0x00000000, 0xffffffff },
13214 { MAC_HASH_REG_1, 0x0000,
13215 0x00000000, 0xffffffff },
13216 { MAC_HASH_REG_2, 0x0000,
13217 0x00000000, 0xffffffff },
13218 { MAC_HASH_REG_3, 0x0000,
13219 0x00000000, 0xffffffff },
13220
13221 /* Receive Data and Receive BD Initiator Control Registers. */
13222 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13223 0x00000000, 0xffffffff },
13224 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13225 0x00000000, 0xffffffff },
13226 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13227 0x00000000, 0x00000003 },
13228 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13229 0x00000000, 0xffffffff },
13230 { RCVDBDI_STD_BD+0, 0x0000,
13231 0x00000000, 0xffffffff },
13232 { RCVDBDI_STD_BD+4, 0x0000,
13233 0x00000000, 0xffffffff },
13234 { RCVDBDI_STD_BD+8, 0x0000,
13235 0x00000000, 0xffff0002 },
13236 { RCVDBDI_STD_BD+0xc, 0x0000,
13237 0x00000000, 0xffffffff },
13238
13239 /* Receive BD Initiator Control Registers. */
13240 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13241 0x00000000, 0xffffffff },
13242 { RCVBDI_STD_THRESH, TG3_FL_5705,
13243 0x00000000, 0x000003ff },
13244 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13245 0x00000000, 0xffffffff },
13246
13247 /* Host Coalescing Control Registers. */
13248 { HOSTCC_MODE, TG3_FL_NOT_5705,
13249 0x00000000, 0x00000004 },
13250 { HOSTCC_MODE, TG3_FL_5705,
13251 0x00000000, 0x000000f6 },
13252 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13253 0x00000000, 0xffffffff },
13254 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13255 0x00000000, 0x000003ff },
13256 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13257 0x00000000, 0xffffffff },
13258 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13259 0x00000000, 0x000003ff },
13260 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13261 0x00000000, 0xffffffff },
13262 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13263 0x00000000, 0x000000ff },
13264 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13265 0x00000000, 0xffffffff },
13266 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13267 0x00000000, 0x000000ff },
13268 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13269 0x00000000, 0xffffffff },
13270 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13271 0x00000000, 0xffffffff },
13272 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13273 0x00000000, 0xffffffff },
13274 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13275 0x00000000, 0x000000ff },
13276 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13277 0x00000000, 0xffffffff },
13278 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13279 0x00000000, 0x000000ff },
13280 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13281 0x00000000, 0xffffffff },
13282 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13283 0x00000000, 0xffffffff },
13284 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13285 0x00000000, 0xffffffff },
13286 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13287 0x00000000, 0xffffffff },
13288 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13289 0x00000000, 0xffffffff },
13290 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13291 0xffffffff, 0x00000000 },
13292 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13293 0xffffffff, 0x00000000 },
13294
13295 /* Buffer Manager Control Registers. */
13296 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13297 0x00000000, 0x007fff80 },
13298 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13299 0x00000000, 0x007fffff },
13300 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13301 0x00000000, 0x0000003f },
13302 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13303 0x00000000, 0x000001ff },
13304 { BUFMGR_MB_HIGH_WATER, 0x0000,
13305 0x00000000, 0x000001ff },
13306 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13307 0xffffffff, 0x00000000 },
13308 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13309 0xffffffff, 0x00000000 },
13310
13311 /* Mailbox Registers */
13312 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13313 0x00000000, 0x000001ff },
13314 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13315 0x00000000, 0x000001ff },
13316 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13317 0x00000000, 0x000007ff },
13318 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13319 0x00000000, 0x000001ff },
13320
13321 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13322 };
13323
13324 is_5705 = is_5750 = 0;
13325 if (tg3_flag(tp, 5705_PLUS)) {
13326 is_5705 = 1;
13327 if (tg3_flag(tp, 5750_PLUS))
13328 is_5750 = 1;
13329 }
13330
13331 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13332 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13333 continue;
13334
13335 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13336 continue;
13337
13338 if (tg3_flag(tp, IS_5788) &&
13339 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13340 continue;
13341
13342 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13343 continue;
13344
13345 offset = (u32) reg_tbl[i].offset;
13346 read_mask = reg_tbl[i].read_mask;
13347 write_mask = reg_tbl[i].write_mask;
13348
13349 /* Save the original register content */
13350 save_val = tr32(offset);
13351
13352 /* Determine the read-only value. */
13353 read_val = save_val & read_mask;
13354
13355 /* Write zero to the register, then make sure the read-only bits
13356 * are not changed and the read/write bits are all zeros.
13357 */
13358 tw32(offset, 0);
13359
13360 val = tr32(offset);
13361
13362 /* Test the read-only and read/write bits. */
13363 if (((val & read_mask) != read_val) || (val & write_mask))
13364 goto out;
13365
13366 /* Write ones to all the bits defined by RdMask and WrMask, then
13367 * make sure the read-only bits are not changed and the
13368 * read/write bits are all ones.
13369 */
13370 tw32(offset, read_mask | write_mask);
13371
13372 val = tr32(offset);
13373
13374 /* Test the read-only bits. */
13375 if ((val & read_mask) != read_val)
13376 goto out;
13377
13378 /* Test the read/write bits. */
13379 if ((val & write_mask) != write_mask)
13380 goto out;
13381
13382 tw32(offset, save_val);
13383 }
13384
13385 return 0;
13386
13387 out:
13388 if (netif_msg_hw(tp))
13389 netdev_err(tp->dev,
13390 "Register test failed at offset %x\n", offset);
13391 tw32(offset, save_val);
13392 return -EIO;
13393 }
13394
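/* Walk a window of internal chip memory, writing each test pattern
 * to every 32-bit word and reading it back; any mismatch fails the
 * memory test.
 */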
13395 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13396 {
13397 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13398 int i;
13399 u32 j;
13400
13401 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13402 for (j = 0; j < len; j += 4) {
13403 u32 val;
13404
13405 tg3_write_mem(tp, offset + j, test_pattern[i]);
13406 tg3_read_mem(tp, offset + j, &val);
13407 if (val != test_pattern[i])
13408 return -EIO;
13409 }
13410 }
13411 return 0;
13412 }
13413
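/* The testable internal SRAM windows differ per ASIC generation,
 * so each family gets its own { offset, len } table, terminated by
 * an offset of 0xffffffff.
 */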
13414 static int tg3_test_memory(struct tg3 *tp)
13415 {
13416 static struct mem_entry {
13417 u32 offset;
13418 u32 len;
13419 } mem_tbl_570x[] = {
13420 { 0x00000000, 0x00b50},
13421 { 0x00002000, 0x1c000},
13422 { 0xffffffff, 0x00000}
13423 }, mem_tbl_5705[] = {
13424 { 0x00000100, 0x0000c},
13425 { 0x00000200, 0x00008},
13426 { 0x00004000, 0x00800},
13427 { 0x00006000, 0x01000},
13428 { 0x00008000, 0x02000},
13429 { 0x00010000, 0x0e000},
13430 { 0xffffffff, 0x00000}
13431 }, mem_tbl_5755[] = {
13432 { 0x00000200, 0x00008},
13433 { 0x00004000, 0x00800},
13434 { 0x00006000, 0x00800},
13435 { 0x00008000, 0x02000},
13436 { 0x00010000, 0x0c000},
13437 { 0xffffffff, 0x00000}
13438 }, mem_tbl_5906[] = {
13439 { 0x00000200, 0x00008},
13440 { 0x00004000, 0x00400},
13441 { 0x00006000, 0x00400},
13442 { 0x00008000, 0x01000},
13443 { 0x00010000, 0x01000},
13444 { 0xffffffff, 0x00000}
13445 }, mem_tbl_5717[] = {
13446 { 0x00000200, 0x00008},
13447 { 0x00010000, 0x0a000},
13448 { 0x00020000, 0x13c00},
13449 { 0xffffffff, 0x00000}
13450 }, mem_tbl_57765[] = {
13451 { 0x00000200, 0x00008},
13452 { 0x00004000, 0x00800},
13453 { 0x00006000, 0x09800},
13454 { 0x00010000, 0x0a000},
13455 { 0xffffffff, 0x00000}
13456 };
13457 struct mem_entry *mem_tbl;
13458 int err = 0;
13459 int i;
13460
13461 if (tg3_flag(tp, 5717_PLUS))
13462 mem_tbl = mem_tbl_5717;
13463 else if (tg3_flag(tp, 57765_CLASS) ||
13464 tg3_asic_rev(tp) == ASIC_REV_5762)
13465 mem_tbl = mem_tbl_57765;
13466 else if (tg3_flag(tp, 5755_PLUS))
13467 mem_tbl = mem_tbl_5755;
13468 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13469 mem_tbl = mem_tbl_5906;
13470 else if (tg3_flag(tp, 5705_PLUS))
13471 mem_tbl = mem_tbl_5705;
13472 else
13473 mem_tbl = mem_tbl_570x;
13474
13475 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13476 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13477 if (err)
13478 break;
13479 }
13480
13481 return err;
13482 }
13483
13484 #define TG3_TSO_MSS 500
13485
13486 #define TG3_TSO_IP_HDR_LEN 20
13487 #define TG3_TSO_TCP_HDR_LEN 20
13488 #define TG3_TSO_TCP_OPT_LEN 12
13489
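/* Canned frame body used by the TSO loopback test: an Ethernet
 * type field (0x0800) followed by an IPv4 header (10.0.0.1 ->
 * 10.0.0.2, protocol TCP) and a TCP header carrying a 12-byte
 * timestamp option.  The IP total length is patched in at run
 * time, and the TCP checksum is cleared when the hardware is
 * expected to compute it.
 */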
13490 static const u8 tg3_tso_header[] = {
13491 0x08, 0x00,
13492 0x45, 0x00, 0x00, 0x00,
13493 0x00, 0x00, 0x40, 0x00,
13494 0x40, 0x06, 0x00, 0x00,
13495 0x0a, 0x00, 0x00, 0x01,
13496 0x0a, 0x00, 0x00, 0x02,
13497 0x0d, 0x00, 0xe0, 0x00,
13498 0x00, 0x00, 0x01, 0x00,
13499 0x00, 0x00, 0x02, 0x00,
13500 0x80, 0x10, 0x10, 0x00,
13501 0x14, 0x09, 0x00, 0x00,
13502 0x01, 0x01, 0x08, 0x0a,
13503 0x11, 0x11, 0x11, 0x11,
13504 0x11, 0x11, 0x11, 0x11,
13505 };
13506
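/* Core loopback engine: build a test frame (optionally a TSO
 * super-frame), DMA-map it, post a single transmit descriptor and
 * kick the mailbox, then poll the status block until the TX
 * consumer and RX producer indices show the packet completed the
 * round trip.  The received buffer(s) are finally compared byte
 * for byte against the pattern written at transmit time.
 */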
13507 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13508 {
13509 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13510 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13511 u32 budget;
13512 struct sk_buff *skb;
13513 u8 *tx_data, *rx_data;
13514 dma_addr_t map;
13515 int num_pkts, tx_len, rx_len, i, err;
13516 struct tg3_rx_buffer_desc *desc;
13517 struct tg3_napi *tnapi, *rnapi;
13518 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13519
13520 tnapi = &tp->napi[0];
13521 rnapi = &tp->napi[0];
13522 if (tp->irq_cnt > 1) {
13523 if (tg3_flag(tp, ENABLE_RSS))
13524 rnapi = &tp->napi[1];
13525 if (tg3_flag(tp, ENABLE_TSS))
13526 tnapi = &tp->napi[1];
13527 }
13528 coal_now = tnapi->coal_now | rnapi->coal_now;
13529
13530 err = -EIO;
13531
13532 tx_len = pktsz;
13533 skb = netdev_alloc_skb(tp->dev, tx_len);
13534 if (!skb)
13535 return -ENOMEM;
13536
13537 tx_data = skb_put(skb, tx_len);
13538 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13539 memset(tx_data + ETH_ALEN, 0x0, 8);
13540
13541 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13542
13543 if (tso_loopback) {
13544 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13545
13546 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13547 TG3_TSO_TCP_OPT_LEN;
13548
13549 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13550 sizeof(tg3_tso_header));
13551 mss = TG3_TSO_MSS;
13552
13553 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13554 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13555
13556 /* Set the total length field in the IP header */
13557 iph->tot_len = htons((u16)(mss + hdr_len));
13558
13559 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13560 TXD_FLAG_CPU_POST_DMA);
13561
13562 if (tg3_flag(tp, HW_TSO_1) ||
13563 tg3_flag(tp, HW_TSO_2) ||
13564 tg3_flag(tp, HW_TSO_3)) {
13565 struct tcphdr *th;
13566 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13567 th = (struct tcphdr *)&tx_data[val];
13568 th->check = 0;
13569 } else
13570 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13571
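/* The header length (20 + 20 + 12 = 52 bytes here) is encoded
 * differently by each hardware TSO generation: HW_TSO_3 scatters
 * it across the mss field and base_flags, HW_TSO_2 packs it into
 * the upper bits of the mss field, and the older engines encode
 * only the TCP option length.
 */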
13572 if (tg3_flag(tp, HW_TSO_3)) {
13573 mss |= (hdr_len & 0xc) << 12;
13574 if (hdr_len & 0x10)
13575 base_flags |= 0x00000010;
13576 base_flags |= (hdr_len & 0x3e0) << 5;
13577 } else if (tg3_flag(tp, HW_TSO_2))
13578 mss |= hdr_len << 9;
13579 else if (tg3_flag(tp, HW_TSO_1) ||
13580 tg3_asic_rev(tp) == ASIC_REV_5705) {
13581 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13582 } else {
13583 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13584 }
13585
13586 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13587 } else {
13588 num_pkts = 1;
13589 data_off = ETH_HLEN;
13590
13591 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13592 tx_len > VLAN_ETH_FRAME_LEN)
13593 base_flags |= TXD_FLAG_JMB_PKT;
13594 }
13595
13596 for (i = data_off; i < tx_len; i++)
13597 tx_data[i] = (u8) (i & 0xff);
13598
13599 map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13600 if (dma_mapping_error(&tp->pdev->dev, map)) {
13601 dev_kfree_skb(skb);
13602 return -EIO;
13603 }
13604
13605 val = tnapi->tx_prod;
13606 tnapi->tx_buffers[val].skb = skb;
13607 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13608
13609 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13610 rnapi->coal_now);
13611
13612 udelay(10);
13613
13614 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13615
13616 budget = tg3_tx_avail(tnapi);
13617 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13618 base_flags | TXD_FLAG_END, mss, 0)) {
13619 tnapi->tx_buffers[val].skb = NULL;
13620 dev_kfree_skb(skb);
13621 return -EIO;
13622 }
13623
13624 tnapi->tx_prod++;
13625
13626 /* Sync BD data before updating mailbox */
13627 wmb();
13628
13629 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13630 tr32_mailbox(tnapi->prodmbox);
13631
13632 udelay(10);
13633
13634 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13635 for (i = 0; i < 35; i++) {
13636 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13637 coal_now);
13638
13639 udelay(10);
13640
13641 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13642 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13643 if ((tx_idx == tnapi->tx_prod) &&
13644 (rx_idx == (rx_start_idx + num_pkts)))
13645 break;
13646 }
13647
13648 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13649 dev_kfree_skb(skb);
13650
13651 if (tx_idx != tnapi->tx_prod)
13652 goto out;
13653
13654 if (rx_idx != rx_start_idx + num_pkts)
13655 goto out;
13656
13657 val = data_off;
13658 while (rx_idx != rx_start_idx) {
13659 desc = &rnapi->rx_rcb[rx_start_idx++];
13660 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13661 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13662
13663 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13664 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13665 goto out;
13666
13667 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13668 - ETH_FCS_LEN;
13669
13670 if (!tso_loopback) {
13671 if (rx_len != tx_len)
13672 goto out;
13673
13674 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13675 if (opaque_key != RXD_OPAQUE_RING_STD)
13676 goto out;
13677 } else {
13678 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13679 goto out;
13680 }
13681 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13682 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13683 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13684 goto out;
13685 }
13686
13687 if (opaque_key == RXD_OPAQUE_RING_STD) {
13688 rx_data = tpr->rx_std_buffers[desc_idx].data;
13689 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13690 mapping);
13691 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13692 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13693 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13694 mapping);
13695 } else
13696 goto out;
13697
13698 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13699 DMA_FROM_DEVICE);
13700
13701 rx_data += TG3_RX_OFFSET(tp);
13702 for (i = data_off; i < rx_len; i++, val++) {
13703 if (*(rx_data + i) != (u8) (val & 0xff))
13704 goto out;
13705 }
13706 }
13707
13708 err = 0;
13709
13710 /* tg3_free_rings will unmap and free the rx_data */
13711 out:
13712 return err;
13713 }
13714
13715 #define TG3_STD_LOOPBACK_FAILED 1
13716 #define TG3_JMB_LOOPBACK_FAILED 2
13717 #define TG3_TSO_LOOPBACK_FAILED 4
13718 #define TG3_LOOPBACK_FAILED \
13719 (TG3_STD_LOOPBACK_FAILED | \
13720 TG3_JMB_LOOPBACK_FAILED | \
13721 TG3_TSO_LOOPBACK_FAILED)
13722
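/* Run the standard, TSO and jumbo loopback variants in up to three
 * modes: internal MAC loopback, internal PHY loopback and, on
 * request, external PHY loopback (which requires a loopback plug).
 * Each mode accumulates the per-variant failure bits above in its
 * own u64 result slot.
 */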
13723 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13724 {
13725 int err = -EIO;
13726 u32 eee_cap;
13727 u32 jmb_pkt_sz = 9000;
13728
13729 if (tp->dma_limit)
13730 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13731
13732 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13733 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13734
13735 if (!netif_running(tp->dev)) {
13736 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13737 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13738 if (do_extlpbk)
13739 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13740 goto done;
13741 }
13742
13743 err = tg3_reset_hw(tp, true);
13744 if (err) {
13745 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13746 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13747 if (do_extlpbk)
13748 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13749 goto done;
13750 }
13751
13752 if (tg3_flag(tp, ENABLE_RSS)) {
13753 int i;
13754
13755 /* Reroute all rx packets to the 1st queue */
13756 for (i = MAC_RSS_INDIR_TBL_0;
13757 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13758 tw32(i, 0x0);
13759 }
13760
13761 /* HW errata - mac loopback fails in some cases on 5780.
13762 * Normal traffic and PHY loopback are not affected by
13763 * errata. Also, the MAC loopback test is deprecated for
13764 * all newer ASIC revisions.
13765 */
13766 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13767 !tg3_flag(tp, CPMU_PRESENT)) {
13768 tg3_mac_loopback(tp, true);
13769
13770 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13771 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13772
13773 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13774 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13775 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13776
13777 tg3_mac_loopback(tp, false);
13778 }
13779
13780 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13781 !tg3_flag(tp, USE_PHYLIB)) {
13782 int i;
13783
13784 tg3_phy_lpbk_set(tp, 0, false);
13785
13786 /* Wait for link */
13787 for (i = 0; i < 100; i++) {
13788 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13789 break;
13790 mdelay(1);
13791 }
13792
13793 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13794 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13795 if (tg3_flag(tp, TSO_CAPABLE) &&
13796 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13797 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13798 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13799 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13800 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13801
13802 if (do_extlpbk) {
13803 tg3_phy_lpbk_set(tp, 0, true);
13804
13805 /* All link indications report up, but the hardware
13806 * isn't really ready for about 20 msec. Double it
13807 * to be sure.
13808 */
13809 mdelay(40);
13810
13811 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13812 data[TG3_EXT_LOOPB_TEST] |=
13813 TG3_STD_LOOPBACK_FAILED;
13814 if (tg3_flag(tp, TSO_CAPABLE) &&
13815 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13816 data[TG3_EXT_LOOPB_TEST] |=
13817 TG3_TSO_LOOPBACK_FAILED;
13818 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13819 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13820 data[TG3_EXT_LOOPB_TEST] |=
13821 TG3_JMB_LOOPBACK_FAILED;
13822 }
13823
13824 /* Re-enable gphy autopowerdown. */
13825 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13826 tg3_phy_toggle_apd(tp, true);
13827 }
13828
13829 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13830 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13831
13832 done:
13833 tp->phy_flags |= eee_cap;
13834
13835 return err;
13836 }
13837
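/* ethtool self-test entry point.  The NVRAM and link checks run
 * with the interface live; the remaining tests are offline only,
 * as they halt the chip and require a full hardware restart
 * afterwards.
 */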
13838 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13839 u64 *data)
13840 {
13841 struct tg3 *tp = netdev_priv(dev);
13842 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13843
13844 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13845 if (tg3_power_up(tp)) {
13846 etest->flags |= ETH_TEST_FL_FAILED;
13847 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13848 return;
13849 }
13850 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13851 }
13852
13853 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13854
13855 if (tg3_test_nvram(tp) != 0) {
13856 etest->flags |= ETH_TEST_FL_FAILED;
13857 data[TG3_NVRAM_TEST] = 1;
13858 }
13859 if (!doextlpbk && tg3_test_link(tp)) {
13860 etest->flags |= ETH_TEST_FL_FAILED;
13861 data[TG3_LINK_TEST] = 1;
13862 }
13863 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13864 int err, err2 = 0, irq_sync = 0;
13865
13866 if (netif_running(dev)) {
13867 tg3_phy_stop(tp);
13868 tg3_netif_stop(tp);
13869 irq_sync = 1;
13870 }
13871
13872 tg3_full_lock(tp, irq_sync);
13873 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13874 err = tg3_nvram_lock(tp);
13875 tg3_halt_cpu(tp, RX_CPU_BASE);
13876 if (!tg3_flag(tp, 5705_PLUS))
13877 tg3_halt_cpu(tp, TX_CPU_BASE);
13878 if (!err)
13879 tg3_nvram_unlock(tp);
13880
13881 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13882 tg3_phy_reset(tp);
13883
13884 if (tg3_test_registers(tp) != 0) {
13885 etest->flags |= ETH_TEST_FL_FAILED;
13886 data[TG3_REGISTER_TEST] = 1;
13887 }
13888
13889 if (tg3_test_memory(tp) != 0) {
13890 etest->flags |= ETH_TEST_FL_FAILED;
13891 data[TG3_MEMORY_TEST] = 1;
13892 }
13893
13894 if (doextlpbk)
13895 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13896
13897 if (tg3_test_loopback(tp, data, doextlpbk))
13898 etest->flags |= ETH_TEST_FL_FAILED;
13899
13900 tg3_full_unlock(tp);
13901
13902 if (tg3_test_interrupt(tp) != 0) {
13903 etest->flags |= ETH_TEST_FL_FAILED;
13904 data[TG3_INTERRUPT_TEST] = 1;
13905 }
13906
13907 netdev_lock(dev);
13908 tg3_full_lock(tp, 0);
13909
13910 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13911 if (netif_running(dev)) {
13912 tg3_flag_set(tp, INIT_COMPLETE);
13913 err2 = tg3_restart_hw(tp, true);
13914 if (!err2)
13915 tg3_netif_start(tp);
13916 }
13917
13918 tg3_full_unlock(tp);
13919 netdev_unlock(dev);
13920
13921 if (irq_sync && !err2)
13922 tg3_phy_start(tp);
13923 }
13924 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13925 tg3_power_down_prepare(tp);
13926
13927 }
13928
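/* Translate a generic hardware timestamping request into the
 * chip's RX PTP control word and the TX timestamp enable flag.
 * Filter modes the hardware cannot express exactly are rejected
 * with -ERANGE rather than silently widened.
 */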
13929 static int tg3_hwtstamp_set(struct net_device *dev,
13930 struct kernel_hwtstamp_config *stmpconf,
13931 struct netlink_ext_ack *extack)
13932 {
13933 struct tg3 *tp = netdev_priv(dev);
13934
13935 if (!tg3_flag(tp, PTP_CAPABLE))
13936 return -EOPNOTSUPP;
13937
13938 if (stmpconf->tx_type != HWTSTAMP_TX_ON &&
13939 stmpconf->tx_type != HWTSTAMP_TX_OFF)
13940 return -ERANGE;
13941
13942 switch (stmpconf->rx_filter) {
13943 case HWTSTAMP_FILTER_NONE:
13944 tp->rxptpctl = 0;
13945 break;
13946 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13947 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13948 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13949 break;
13950 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13951 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13952 TG3_RX_PTP_CTL_SYNC_EVNT;
13953 break;
13954 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13955 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13956 TG3_RX_PTP_CTL_DELAY_REQ;
13957 break;
13958 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13959 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13960 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13961 break;
13962 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13963 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13964 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13965 break;
13966 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13967 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13968 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13969 break;
13970 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13971 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13972 TG3_RX_PTP_CTL_SYNC_EVNT;
13973 break;
13974 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13975 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13976 TG3_RX_PTP_CTL_SYNC_EVNT;
13977 break;
13978 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13979 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13980 TG3_RX_PTP_CTL_SYNC_EVNT;
13981 break;
13982 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13983 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13984 TG3_RX_PTP_CTL_DELAY_REQ;
13985 break;
13986 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13987 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13988 TG3_RX_PTP_CTL_DELAY_REQ;
13989 break;
13990 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13991 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13992 TG3_RX_PTP_CTL_DELAY_REQ;
13993 break;
13994 default:
13995 return -ERANGE;
13996 }
13997
13998 if (netif_running(dev) && tp->rxptpctl)
13999 tw32(TG3_RX_PTP_CTL,
14000 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
14001
14002 if (stmpconf->tx_type == HWTSTAMP_TX_ON)
14003 tg3_flag_set(tp, TX_TSTAMP_EN);
14004 else
14005 tg3_flag_clear(tp, TX_TSTAMP_EN);
14006
14007 return 0;
14008 }
14009
14010 static int tg3_hwtstamp_get(struct net_device *dev,
14011 struct kernel_hwtstamp_config *stmpconf)
14012 {
14013 struct tg3 *tp = netdev_priv(dev);
14014
14015 if (!tg3_flag(tp, PTP_CAPABLE))
14016 return -EOPNOTSUPP;
14017
14018 stmpconf->flags = 0;
14019 stmpconf->tx_type = tg3_flag(tp, TX_TSTAMP_EN) ?
14020 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
14021
14022 switch (tp->rxptpctl) {
14023 case 0:
14024 stmpconf->rx_filter = HWTSTAMP_FILTER_NONE;
14025 break;
14026 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
14027 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
14028 break;
14029 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14030 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
14031 break;
14032 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14033 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
14034 break;
14035 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14036 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
14037 break;
14038 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14039 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
14040 break;
14041 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
14042 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
14043 break;
14044 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14045 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
14046 break;
14047 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14048 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
14049 break;
14050 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
14051 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
14052 break;
14053 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14054 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
14055 break;
14056 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14057 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
14058 break;
14059 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
14060 stmpconf->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
14061 break;
14062 default:
14063 WARN_ON_ONCE(1);
14064 return -ERANGE;
14065 }
14066
14067 return 0;
14068 }
14069
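/* MII ioctl handler.  With phylib attached, the request is
 * forwarded to the PHY device; otherwise the MDIO read/write is
 * performed directly under tp->lock.  Serdes configurations have
 * no MDIO PHY to talk to, so those requests fall through to
 * -EOPNOTSUPP.
 */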
14070 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
14071 {
14072 struct mii_ioctl_data *data = if_mii(ifr);
14073 struct tg3 *tp = netdev_priv(dev);
14074 int err;
14075
14076 if (tg3_flag(tp, USE_PHYLIB)) {
14077 struct phy_device *phydev;
14078 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
14079 return -EAGAIN;
14080 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
14081 return phy_mii_ioctl(phydev, ifr, cmd);
14082 }
14083
14084 switch (cmd) {
14085 case SIOCGMIIPHY:
14086 data->phy_id = tp->phy_addr;
14087
14088 fallthrough;
14089 case SIOCGMIIREG: {
14090 u32 mii_regval;
14091
14092 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14093 break; /* We have no PHY */
14094
14095 if (!netif_running(dev))
14096 return -EAGAIN;
14097
14098 spin_lock_bh(&tp->lock);
14099 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14100 data->reg_num & 0x1f, &mii_regval);
14101 spin_unlock_bh(&tp->lock);
14102
14103 data->val_out = mii_regval;
14104
14105 return err;
14106 }
14107
14108 case SIOCSMIIREG:
14109 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14110 break; /* We have no PHY */
14111
14112 if (!netif_running(dev))
14113 return -EAGAIN;
14114
14115 spin_lock_bh(&tp->lock);
14116 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14117 data->reg_num & 0x1f, data->val_in);
14118 spin_unlock_bh(&tp->lock);
14119
14120 return err;
14121
14122 default:
14123 /* do nothing */
14124 break;
14125 }
14126 return -EOPNOTSUPP;
14127 }
14128
14129 static int tg3_get_coalesce(struct net_device *dev,
14130 struct ethtool_coalesce *ec,
14131 struct kernel_ethtool_coalesce *kernel_coal,
14132 struct netlink_ext_ack *extack)
14133 {
14134 struct tg3 *tp = netdev_priv(dev);
14135
14136 memcpy(ec, &tp->coal, sizeof(*ec));
14137 return 0;
14138 }
14139
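/* Validate and apply interrupt coalescing parameters.  The per-IRQ
 * tick limits and the statistics block interval are only adjustable
 * on pre-5705 parts; on 5705+ chips those limits stay zero, so any
 * nonzero request for them fails the range checks below.
 */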
14140 static int tg3_set_coalesce(struct net_device *dev,
14141 struct ethtool_coalesce *ec,
14142 struct kernel_ethtool_coalesce *kernel_coal,
14143 struct netlink_ext_ack *extack)
14144 {
14145 struct tg3 *tp = netdev_priv(dev);
14146 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14147 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14148
14149 if (!tg3_flag(tp, 5705_PLUS)) {
14150 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14151 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14152 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14153 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14154 }
14155
14156 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14157 (!ec->rx_coalesce_usecs) ||
14158 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14159 (!ec->tx_coalesce_usecs) ||
14160 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14161 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14162 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14163 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14164 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14165 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14166 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14167 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14168 return -EINVAL;
14169
14170 /* Only copy relevant parameters, ignore all others. */
14171 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14172 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14173 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14174 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14175 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14176 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14177 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14178 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14179 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14180
14181 if (netif_running(dev)) {
14182 tg3_full_lock(tp, 0);
14183 __tg3_set_coalesce(tp, &tp->coal);
14184 tg3_full_unlock(tp);
14185 }
14186 return 0;
14187 }
14188
14189 static int tg3_set_eee(struct net_device *dev, struct ethtool_keee *edata)
14190 {
14191 struct tg3 *tp = netdev_priv(dev);
14192
14193 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14194 netdev_warn(tp->dev, "Board does not support EEE!\n");
14195 return -EOPNOTSUPP;
14196 }
14197
14198 if (!linkmode_equal(edata->advertised, tp->eee.advertised)) {
14199 netdev_warn(tp->dev,
14200 "Direct manipulation of EEE advertisement is not supported\n");
14201 return -EINVAL;
14202 }
14203
14204 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14205 netdev_warn(tp->dev,
14206 "Maximal Tx Lpi timer supported is %#x(u)\n",
14207 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14208 return -EINVAL;
14209 }
14210
14211 tp->eee.eee_enabled = edata->eee_enabled;
14212 tp->eee.tx_lpi_enabled = edata->tx_lpi_enabled;
14213 tp->eee.tx_lpi_timer = edata->tx_lpi_timer;
14214
14215 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14216 tg3_warn_mgmt_link_flap(tp);
14217
14218 if (netif_running(tp->dev)) {
14219 tg3_full_lock(tp, 0);
14220 tg3_setup_eee(tp);
14221 tg3_phy_reset(tp);
14222 tg3_full_unlock(tp);
14223 }
14224
14225 return 0;
14226 }
14227
14228 static int tg3_get_eee(struct net_device *dev, struct ethtool_keee *edata)
14229 {
14230 struct tg3 *tp = netdev_priv(dev);
14231
14232 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14233 netdev_warn(tp->dev,
14234 "Board does not support EEE!\n");
14235 return -EOPNOTSUPP;
14236 }
14237
14238 *edata = tp->eee;
14239 return 0;
14240 }
14241
14242 static const struct ethtool_ops tg3_ethtool_ops = {
14243 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14244 ETHTOOL_COALESCE_MAX_FRAMES |
14245 ETHTOOL_COALESCE_USECS_IRQ |
14246 ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14247 ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14248 .get_drvinfo = tg3_get_drvinfo,
14249 .get_regs_len = tg3_get_regs_len,
14250 .get_regs = tg3_get_regs,
14251 .get_wol = tg3_get_wol,
14252 .set_wol = tg3_set_wol,
14253 .get_msglevel = tg3_get_msglevel,
14254 .set_msglevel = tg3_set_msglevel,
14255 .nway_reset = tg3_nway_reset,
14256 .get_link = ethtool_op_get_link,
14257 .get_eeprom_len = tg3_get_eeprom_len,
14258 .get_eeprom = tg3_get_eeprom,
14259 .set_eeprom = tg3_set_eeprom,
14260 .get_ringparam = tg3_get_ringparam,
14261 .set_ringparam = tg3_set_ringparam,
14262 .get_pauseparam = tg3_get_pauseparam,
14263 .set_pauseparam = tg3_set_pauseparam,
14264 .self_test = tg3_self_test,
14265 .get_strings = tg3_get_strings,
14266 .set_phys_id = tg3_set_phys_id,
14267 .get_ethtool_stats = tg3_get_ethtool_stats,
14268 .get_coalesce = tg3_get_coalesce,
14269 .set_coalesce = tg3_set_coalesce,
14270 .get_sset_count = tg3_get_sset_count,
14271 .get_rxnfc = tg3_get_rxnfc,
14272 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
14273 .get_rxfh = tg3_get_rxfh,
14274 .set_rxfh = tg3_set_rxfh,
14275 .get_channels = tg3_get_channels,
14276 .set_channels = tg3_set_channels,
14277 .get_ts_info = tg3_get_ts_info,
14278 .get_eee = tg3_get_eee,
14279 .set_eee = tg3_set_eee,
14280 .get_link_ksettings = tg3_get_link_ksettings,
14281 .set_link_ksettings = tg3_set_link_ksettings,
14282 };
14283
14284 static void tg3_get_stats64(struct net_device *dev,
14285 struct rtnl_link_stats64 *stats)
14286 {
14287 struct tg3 *tp = netdev_priv(dev);
14288
14289 spin_lock_bh(&tp->lock);
14290 if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14291 *stats = tp->net_stats_prev;
14292 spin_unlock_bh(&tp->lock);
14293 return;
14294 }
14295
14296 tg3_get_nstats(tp, stats);
14297 spin_unlock_bh(&tp->lock);
14298 }
14299
14300 static void tg3_set_rx_mode(struct net_device *dev)
14301 {
14302 struct tg3 *tp = netdev_priv(dev);
14303
14304 if (!netif_running(dev))
14305 return;
14306
14307 tg3_full_lock(tp, 0);
14308 __tg3_set_rx_mode(dev);
14309 tg3_full_unlock(tp);
14310 }
14311
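/* Record the new MTU and toggle the jumbo ring accordingly.  The
 * 5780 class cannot do TSO and jumbo frames at the same time, so
 * TSO capability is dropped whenever a jumbo MTU is configured and
 * restored when the MTU returns to standard size.
 */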
14312 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14313 int new_mtu)
14314 {
14315 WRITE_ONCE(dev->mtu, new_mtu);
14316
14317 if (new_mtu > ETH_DATA_LEN) {
14318 if (tg3_flag(tp, 5780_CLASS)) {
14319 netdev_update_features(dev);
14320 tg3_flag_clear(tp, TSO_CAPABLE);
14321 } else {
14322 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14323 }
14324 } else {
14325 if (tg3_flag(tp, 5780_CLASS)) {
14326 tg3_flag_set(tp, TSO_CAPABLE);
14327 netdev_update_features(dev);
14328 }
14329 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14330 }
14331 }
14332
14333 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14334 {
14335 struct tg3 *tp = netdev_priv(dev);
14336 int err;
14337 bool reset_phy = false;
14338
14339 if (!netif_running(dev)) {
14340 /* We'll just catch it later when the
14341 * device is brought up.
14342 */
14343 tg3_set_mtu(dev, tp, new_mtu);
14344 return 0;
14345 }
14346
14347 tg3_phy_stop(tp);
14348
14349 tg3_netif_stop(tp);
14350
14351 tg3_set_mtu(dev, tp, new_mtu);
14352
14353 netdev_lock(dev);
14354 tg3_full_lock(tp, 1);
14355
14356 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14357
14358 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14359 * breaks all requests to 256 bytes.
14360 */
14361 if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14362 tg3_asic_rev(tp) == ASIC_REV_5717 ||
14363 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14364 tg3_asic_rev(tp) == ASIC_REV_5720)
14365 reset_phy = true;
14366
14367 err = tg3_restart_hw(tp, reset_phy);
14368
14369 if (!err)
14370 tg3_netif_start(tp);
14371
14372 tg3_full_unlock(tp);
14373 netdev_unlock(dev);
14374
14375 if (!err)
14376 tg3_phy_start(tp);
14377
14378 return err;
14379 }
14380
14381 static const struct net_device_ops tg3_netdev_ops = {
14382 .ndo_open = tg3_open,
14383 .ndo_stop = tg3_close,
14384 .ndo_start_xmit = tg3_start_xmit,
14385 .ndo_get_stats64 = tg3_get_stats64,
14386 .ndo_validate_addr = eth_validate_addr,
14387 .ndo_set_rx_mode = tg3_set_rx_mode,
14388 .ndo_set_mac_address = tg3_set_mac_addr,
14389 .ndo_eth_ioctl = tg3_ioctl,
14390 .ndo_tx_timeout = tg3_tx_timeout,
14391 .ndo_change_mtu = tg3_change_mtu,
14392 .ndo_fix_features = tg3_fix_features,
14393 .ndo_set_features = tg3_set_features,
14394 #ifdef CONFIG_NET_POLL_CONTROLLER
14395 .ndo_poll_controller = tg3_poll_controller,
14396 #endif
14397 .ndo_hwtstamp_get = tg3_hwtstamp_get,
14398 .ndo_hwtstamp_set = tg3_hwtstamp_set,
14399 };
14400
14401 static void tg3_get_eeprom_size(struct tg3 *tp)
14402 {
14403 u32 cursize, val, magic;
14404
14405 tp->nvram_size = EEPROM_CHIP_SIZE;
14406
14407 if (tg3_nvram_read(tp, 0, &magic) != 0)
14408 return;
14409
14410 if ((magic != TG3_EEPROM_MAGIC) &&
14411 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14412 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14413 return;
14414
14415 /*
14416 * Size the chip by reading offsets at increasing powers of two.
14417 * When we encounter our validation signature, we know the addressing
14418 * has wrapped around, and thus have our chip size.
14419 */
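/* For example, on a hypothetical 512-byte part the read at offset
 * 0x200 wraps around to offset 0 and returns the magic value again,
 * so the loop below exits with cursize == 0x200.
 */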
14420 cursize = 0x10;
14421
14422 while (cursize < tp->nvram_size) {
14423 if (tg3_nvram_read(tp, cursize, &val) != 0)
14424 return;
14425
14426 if (val == magic)
14427 break;
14428
14429 cursize <<= 1;
14430 }
14431
14432 tp->nvram_size = cursize;
14433 }
14434
14435 static void tg3_get_nvram_size(struct tg3 *tp)
14436 {
14437 u32 val;
14438
14439 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14440 return;
14441
14442 /* Selfboot format */
14443 if (val != TG3_EEPROM_MAGIC) {
14444 tg3_get_eeprom_size(tp);
14445 return;
14446 }
14447
14448 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14449 if (val != 0) {
14450 /* This is confusing. We want to operate on the
14451 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14452 * call will read from NVRAM and byteswap the data
14453 * according to the byteswapping settings for all
14454 * other register accesses. This ensures the data we
14455 * want will always reside in the lower 16-bits.
14456 * However, the data in NVRAM is in LE format, which
14457 * means the data from the NVRAM read will always be
14458 * opposite the endianness of the CPU. The 16-bit
14459 * byteswap then brings the data to CPU endianness.
14460 */
14461 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14462 return;
14463 }
14464 }
14465 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14466 }
14467
14468 static void tg3_get_nvram_info(struct tg3 *tp)
14469 {
14470 u32 nvcfg1;
14471
14472 nvcfg1 = tr32(NVRAM_CFG1);
14473 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14474 tg3_flag_set(tp, FLASH);
14475 } else {
14476 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14477 tw32(NVRAM_CFG1, nvcfg1);
14478 }
14479
14480 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14481 tg3_flag(tp, 5780_CLASS)) {
14482 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14483 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14484 tp->nvram_jedecnum = JEDEC_ATMEL;
14485 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14486 tg3_flag_set(tp, NVRAM_BUFFERED);
14487 break;
14488 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14489 tp->nvram_jedecnum = JEDEC_ATMEL;
14490 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14491 break;
14492 case FLASH_VENDOR_ATMEL_EEPROM:
14493 tp->nvram_jedecnum = JEDEC_ATMEL;
14494 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14495 tg3_flag_set(tp, NVRAM_BUFFERED);
14496 break;
14497 case FLASH_VENDOR_ST:
14498 tp->nvram_jedecnum = JEDEC_ST;
14499 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14500 tg3_flag_set(tp, NVRAM_BUFFERED);
14501 break;
14502 case FLASH_VENDOR_SAIFUN:
14503 tp->nvram_jedecnum = JEDEC_SAIFUN;
14504 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14505 break;
14506 case FLASH_VENDOR_SST_SMALL:
14507 case FLASH_VENDOR_SST_LARGE:
14508 tp->nvram_jedecnum = JEDEC_SST;
14509 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14510 break;
14511 }
14512 } else {
14513 tp->nvram_jedecnum = JEDEC_ATMEL;
14514 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14515 tg3_flag_set(tp, NVRAM_BUFFERED);
14516 }
14517 }
14518
14519 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14520 {
14521 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14522 case FLASH_5752PAGE_SIZE_256:
14523 tp->nvram_pagesize = 256;
14524 break;
14525 case FLASH_5752PAGE_SIZE_512:
14526 tp->nvram_pagesize = 512;
14527 break;
14528 case FLASH_5752PAGE_SIZE_1K:
14529 tp->nvram_pagesize = 1024;
14530 break;
14531 case FLASH_5752PAGE_SIZE_2K:
14532 tp->nvram_pagesize = 2048;
14533 break;
14534 case FLASH_5752PAGE_SIZE_4K:
14535 tp->nvram_pagesize = 4096;
14536 break;
14537 case FLASH_5752PAGE_SIZE_264:
14538 tp->nvram_pagesize = 264;
14539 break;
14540 case FLASH_5752PAGE_SIZE_528:
14541 tp->nvram_pagesize = 528;
14542 break;
14543 }
14544 }
14545
14546 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14547 {
14548 u32 nvcfg1;
14549
14550 nvcfg1 = tr32(NVRAM_CFG1);
14551
14552 /* NVRAM protection for TPM */
14553 if (nvcfg1 & (1 << 27))
14554 tg3_flag_set(tp, PROTECTED_NVRAM);
14555
14556 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14557 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14558 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14559 tp->nvram_jedecnum = JEDEC_ATMEL;
14560 tg3_flag_set(tp, NVRAM_BUFFERED);
14561 break;
14562 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14563 tp->nvram_jedecnum = JEDEC_ATMEL;
14564 tg3_flag_set(tp, NVRAM_BUFFERED);
14565 tg3_flag_set(tp, FLASH);
14566 break;
14567 case FLASH_5752VENDOR_ST_M45PE10:
14568 case FLASH_5752VENDOR_ST_M45PE20:
14569 case FLASH_5752VENDOR_ST_M45PE40:
14570 tp->nvram_jedecnum = JEDEC_ST;
14571 tg3_flag_set(tp, NVRAM_BUFFERED);
14572 tg3_flag_set(tp, FLASH);
14573 break;
14574 }
14575
14576 if (tg3_flag(tp, FLASH)) {
14577 tg3_nvram_get_pagesize(tp, nvcfg1);
14578 } else {
14579 /* For eeprom, set pagesize to maximum eeprom size */
14580 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14581
14582 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14583 tw32(NVRAM_CFG1, nvcfg1);
14584 }
14585 }
14586
14587 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14588 {
14589 u32 nvcfg1, protect = 0;
14590
14591 nvcfg1 = tr32(NVRAM_CFG1);
14592
14593 /* NVRAM protection for TPM */
14594 if (nvcfg1 & (1 << 27)) {
14595 tg3_flag_set(tp, PROTECTED_NVRAM);
14596 protect = 1;
14597 }
14598
14599 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14600 switch (nvcfg1) {
14601 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14602 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14603 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14604 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14605 tp->nvram_jedecnum = JEDEC_ATMEL;
14606 tg3_flag_set(tp, NVRAM_BUFFERED);
14607 tg3_flag_set(tp, FLASH);
14608 tp->nvram_pagesize = 264;
14609 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14610 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14611 tp->nvram_size = (protect ? 0x3e200 :
14612 TG3_NVRAM_SIZE_512KB);
14613 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14614 tp->nvram_size = (protect ? 0x1f200 :
14615 TG3_NVRAM_SIZE_256KB);
14616 else
14617 tp->nvram_size = (protect ? 0x1f200 :
14618 TG3_NVRAM_SIZE_128KB);
14619 break;
14620 case FLASH_5752VENDOR_ST_M45PE10:
14621 case FLASH_5752VENDOR_ST_M45PE20:
14622 case FLASH_5752VENDOR_ST_M45PE40:
14623 tp->nvram_jedecnum = JEDEC_ST;
14624 tg3_flag_set(tp, NVRAM_BUFFERED);
14625 tg3_flag_set(tp, FLASH);
14626 tp->nvram_pagesize = 256;
14627 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14628 tp->nvram_size = (protect ?
14629 TG3_NVRAM_SIZE_64KB :
14630 TG3_NVRAM_SIZE_128KB);
14631 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14632 tp->nvram_size = (protect ?
14633 TG3_NVRAM_SIZE_64KB :
14634 TG3_NVRAM_SIZE_256KB);
14635 else
14636 tp->nvram_size = (protect ?
14637 TG3_NVRAM_SIZE_128KB :
14638 TG3_NVRAM_SIZE_512KB);
14639 break;
14640 }
14641 }
14642
14643 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14644 {
14645 u32 nvcfg1;
14646
14647 nvcfg1 = tr32(NVRAM_CFG1);
14648
14649 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14650 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14651 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14652 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14653 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14654 tp->nvram_jedecnum = JEDEC_ATMEL;
14655 tg3_flag_set(tp, NVRAM_BUFFERED);
14656 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14657
14658 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14659 tw32(NVRAM_CFG1, nvcfg1);
14660 break;
14661 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14662 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14663 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14664 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14665 tp->nvram_jedecnum = JEDEC_ATMEL;
14666 tg3_flag_set(tp, NVRAM_BUFFERED);
14667 tg3_flag_set(tp, FLASH);
14668 tp->nvram_pagesize = 264;
14669 break;
14670 case FLASH_5752VENDOR_ST_M45PE10:
14671 case FLASH_5752VENDOR_ST_M45PE20:
14672 case FLASH_5752VENDOR_ST_M45PE40:
14673 tp->nvram_jedecnum = JEDEC_ST;
14674 tg3_flag_set(tp, NVRAM_BUFFERED);
14675 tg3_flag_set(tp, FLASH);
14676 tp->nvram_pagesize = 256;
14677 break;
14678 }
14679 }
14680
14681 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14682 {
14683 u32 nvcfg1, protect = 0;
14684
14685 nvcfg1 = tr32(NVRAM_CFG1);
14686
14687 /* NVRAM protection for TPM */
14688 if (nvcfg1 & (1 << 27)) {
14689 tg3_flag_set(tp, PROTECTED_NVRAM);
14690 protect = 1;
14691 }
14692
14693 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14694 switch (nvcfg1) {
14695 case FLASH_5761VENDOR_ATMEL_ADB021D:
14696 case FLASH_5761VENDOR_ATMEL_ADB041D:
14697 case FLASH_5761VENDOR_ATMEL_ADB081D:
14698 case FLASH_5761VENDOR_ATMEL_ADB161D:
14699 case FLASH_5761VENDOR_ATMEL_MDB021D:
14700 case FLASH_5761VENDOR_ATMEL_MDB041D:
14701 case FLASH_5761VENDOR_ATMEL_MDB081D:
14702 case FLASH_5761VENDOR_ATMEL_MDB161D:
14703 tp->nvram_jedecnum = JEDEC_ATMEL;
14704 tg3_flag_set(tp, NVRAM_BUFFERED);
14705 tg3_flag_set(tp, FLASH);
14706 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14707 tp->nvram_pagesize = 256;
14708 break;
14709 case FLASH_5761VENDOR_ST_A_M45PE20:
14710 case FLASH_5761VENDOR_ST_A_M45PE40:
14711 case FLASH_5761VENDOR_ST_A_M45PE80:
14712 case FLASH_5761VENDOR_ST_A_M45PE16:
14713 case FLASH_5761VENDOR_ST_M_M45PE20:
14714 case FLASH_5761VENDOR_ST_M_M45PE40:
14715 case FLASH_5761VENDOR_ST_M_M45PE80:
14716 case FLASH_5761VENDOR_ST_M_M45PE16:
14717 tp->nvram_jedecnum = JEDEC_ST;
14718 tg3_flag_set(tp, NVRAM_BUFFERED);
14719 tg3_flag_set(tp, FLASH);
14720 tp->nvram_pagesize = 256;
14721 break;
14722 }
14723
14724 if (protect) {
14725 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14726 } else {
14727 switch (nvcfg1) {
14728 case FLASH_5761VENDOR_ATMEL_ADB161D:
14729 case FLASH_5761VENDOR_ATMEL_MDB161D:
14730 case FLASH_5761VENDOR_ST_A_M45PE16:
14731 case FLASH_5761VENDOR_ST_M_M45PE16:
14732 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14733 break;
14734 case FLASH_5761VENDOR_ATMEL_ADB081D:
14735 case FLASH_5761VENDOR_ATMEL_MDB081D:
14736 case FLASH_5761VENDOR_ST_A_M45PE80:
14737 case FLASH_5761VENDOR_ST_M_M45PE80:
14738 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14739 break;
14740 case FLASH_5761VENDOR_ATMEL_ADB041D:
14741 case FLASH_5761VENDOR_ATMEL_MDB041D:
14742 case FLASH_5761VENDOR_ST_A_M45PE40:
14743 case FLASH_5761VENDOR_ST_M_M45PE40:
14744 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14745 break;
14746 case FLASH_5761VENDOR_ATMEL_ADB021D:
14747 case FLASH_5761VENDOR_ATMEL_MDB021D:
14748 case FLASH_5761VENDOR_ST_A_M45PE20:
14749 case FLASH_5761VENDOR_ST_M_M45PE20:
14750 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14751 break;
14752 }
14753 }
14754 }
14755
14756 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14757 {
14758 tp->nvram_jedecnum = JEDEC_ATMEL;
14759 tg3_flag_set(tp, NVRAM_BUFFERED);
14760 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14761 }
14762
14763 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14764 {
14765 u32 nvcfg1;
14766
14767 nvcfg1 = tr32(NVRAM_CFG1);
14768
14769 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14770 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14771 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14772 tp->nvram_jedecnum = JEDEC_ATMEL;
14773 tg3_flag_set(tp, NVRAM_BUFFERED);
14774 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14775
14776 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14777 tw32(NVRAM_CFG1, nvcfg1);
14778 return;
14779 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14780 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14781 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14782 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14783 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14784 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14785 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14786 tp->nvram_jedecnum = JEDEC_ATMEL;
14787 tg3_flag_set(tp, NVRAM_BUFFERED);
14788 tg3_flag_set(tp, FLASH);
14789
14790 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14791 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14792 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14793 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14794 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14795 break;
14796 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14797 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14798 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14799 break;
14800 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14801 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14802 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14803 break;
14804 }
14805 break;
14806 case FLASH_5752VENDOR_ST_M45PE10:
14807 case FLASH_5752VENDOR_ST_M45PE20:
14808 case FLASH_5752VENDOR_ST_M45PE40:
14809 tp->nvram_jedecnum = JEDEC_ST;
14810 tg3_flag_set(tp, NVRAM_BUFFERED);
14811 tg3_flag_set(tp, FLASH);
14812
14813 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14814 case FLASH_5752VENDOR_ST_M45PE10:
14815 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14816 break;
14817 case FLASH_5752VENDOR_ST_M45PE20:
14818 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14819 break;
14820 case FLASH_5752VENDOR_ST_M45PE40:
14821 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14822 break;
14823 }
14824 break;
14825 default:
14826 tg3_flag_set(tp, NO_NVRAM);
14827 return;
14828 }
14829
14830 tg3_nvram_get_pagesize(tp, nvcfg1);
14831 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14832 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14833 }
14834
14835
14836 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14837 {
14838 u32 nvcfg1;
14839
14840 nvcfg1 = tr32(NVRAM_CFG1);
14841
14842 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14843 case FLASH_5717VENDOR_ATMEL_EEPROM:
14844 case FLASH_5717VENDOR_MICRO_EEPROM:
14845 tp->nvram_jedecnum = JEDEC_ATMEL;
14846 tg3_flag_set(tp, NVRAM_BUFFERED);
14847 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14848
14849 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14850 tw32(NVRAM_CFG1, nvcfg1);
14851 return;
14852 case FLASH_5717VENDOR_ATMEL_MDB011D:
14853 case FLASH_5717VENDOR_ATMEL_ADB011B:
14854 case FLASH_5717VENDOR_ATMEL_ADB011D:
14855 case FLASH_5717VENDOR_ATMEL_MDB021D:
14856 case FLASH_5717VENDOR_ATMEL_ADB021B:
14857 case FLASH_5717VENDOR_ATMEL_ADB021D:
14858 case FLASH_5717VENDOR_ATMEL_45USPT:
14859 tp->nvram_jedecnum = JEDEC_ATMEL;
14860 tg3_flag_set(tp, NVRAM_BUFFERED);
14861 tg3_flag_set(tp, FLASH);
14862
14863 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14864 case FLASH_5717VENDOR_ATMEL_MDB021D:
14865 /* Detect size with tg3_get_nvram_size() */
14866 break;
14867 case FLASH_5717VENDOR_ATMEL_ADB021B:
14868 case FLASH_5717VENDOR_ATMEL_ADB021D:
14869 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14870 break;
14871 default:
14872 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14873 break;
14874 }
14875 break;
14876 case FLASH_5717VENDOR_ST_M_M25PE10:
14877 case FLASH_5717VENDOR_ST_A_M25PE10:
14878 case FLASH_5717VENDOR_ST_M_M45PE10:
14879 case FLASH_5717VENDOR_ST_A_M45PE10:
14880 case FLASH_5717VENDOR_ST_M_M25PE20:
14881 case FLASH_5717VENDOR_ST_A_M25PE20:
14882 case FLASH_5717VENDOR_ST_M_M45PE20:
14883 case FLASH_5717VENDOR_ST_A_M45PE20:
14884 case FLASH_5717VENDOR_ST_25USPT:
14885 case FLASH_5717VENDOR_ST_45USPT:
14886 tp->nvram_jedecnum = JEDEC_ST;
14887 tg3_flag_set(tp, NVRAM_BUFFERED);
14888 tg3_flag_set(tp, FLASH);
14889
14890 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14891 case FLASH_5717VENDOR_ST_M_M25PE20:
14892 case FLASH_5717VENDOR_ST_M_M45PE20:
14893 /* Detect size with tg3_get_nvram_size() */
14894 break;
14895 case FLASH_5717VENDOR_ST_A_M25PE20:
14896 case FLASH_5717VENDOR_ST_A_M45PE20:
14897 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14898 break;
14899 default:
14900 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14901 break;
14902 }
14903 break;
14904 default:
14905 tg3_flag_set(tp, NO_NVRAM);
14906 return;
14907 }
14908
14909 tg3_nvram_get_pagesize(tp, nvcfg1);
14910 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14911 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14912 }
14913
14914 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14915 {
14916 u32 nvcfg1, nvmpinstrp, nv_status;
14917
14918 nvcfg1 = tr32(NVRAM_CFG1);
14919 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14920
14921 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14922 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14923 tg3_flag_set(tp, NO_NVRAM);
14924 return;
14925 }
14926
14927 switch (nvmpinstrp) {
14928 case FLASH_5762_MX25L_100:
14929 case FLASH_5762_MX25L_200:
14930 case FLASH_5762_MX25L_400:
14931 case FLASH_5762_MX25L_800:
14932 case FLASH_5762_MX25L_160_320:
14933 tp->nvram_pagesize = 4096;
14934 tp->nvram_jedecnum = JEDEC_MACRONIX;
14935 tg3_flag_set(tp, NVRAM_BUFFERED);
14936 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14937 tg3_flag_set(tp, FLASH);
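/* The flash size is autosensed: the device-ID field of the
 * autosense status register encodes log2 of the size, which the
 * expression below scales up to bytes (assuming
 * AUTOSENSE_SIZE_IN_MB is the megabyte shift).
 */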
14938 nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14939 tp->nvram_size =
14940 (1 << (nv_status >> AUTOSENSE_DEVID &
14941 AUTOSENSE_DEVID_MASK)
14942 << AUTOSENSE_SIZE_IN_MB);
14943 return;
14944
14945 case FLASH_5762_EEPROM_HD:
14946 nvmpinstrp = FLASH_5720_EEPROM_HD;
14947 break;
14948 case FLASH_5762_EEPROM_LD:
14949 nvmpinstrp = FLASH_5720_EEPROM_LD;
14950 break;
14951 case FLASH_5720VENDOR_M_ST_M45PE20:
14952 /* This pinstrap supports multiple sizes, so force it
14953 * to read the actual size from location 0xf0.
14954 */
14955 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14956 break;
14957 }
14958 }
14959
14960 switch (nvmpinstrp) {
14961 case FLASH_5720_EEPROM_HD:
14962 case FLASH_5720_EEPROM_LD:
14963 tp->nvram_jedecnum = JEDEC_ATMEL;
14964 tg3_flag_set(tp, NVRAM_BUFFERED);
14965
14966 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14967 tw32(NVRAM_CFG1, nvcfg1);
14968 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14969 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14970 else
14971 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14972 return;
14973 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14974 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14975 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14976 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14977 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14978 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14979 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14980 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14981 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14982 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14983 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14984 case FLASH_5720VENDOR_ATMEL_45USPT:
14985 tp->nvram_jedecnum = JEDEC_ATMEL;
14986 tg3_flag_set(tp, NVRAM_BUFFERED);
14987 tg3_flag_set(tp, FLASH);
14988
14989 switch (nvmpinstrp) {
14990 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14991 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14992 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14993 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14994 break;
14995 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14996 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14997 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14998 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14999 break;
15000 case FLASH_5720VENDOR_M_ATMEL_DB081D:
15001 case FLASH_5720VENDOR_A_ATMEL_DB081D:
15002 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15003 break;
15004 default:
15005 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15006 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15007 break;
15008 }
15009 break;
15010 case FLASH_5720VENDOR_M_ST_M25PE10:
15011 case FLASH_5720VENDOR_M_ST_M45PE10:
15012 case FLASH_5720VENDOR_A_ST_M25PE10:
15013 case FLASH_5720VENDOR_A_ST_M45PE10:
15014 case FLASH_5720VENDOR_M_ST_M25PE20:
15015 case FLASH_5720VENDOR_M_ST_M45PE20:
15016 case FLASH_5720VENDOR_A_ST_M25PE20:
15017 case FLASH_5720VENDOR_A_ST_M45PE20:
15018 case FLASH_5720VENDOR_M_ST_M25PE40:
15019 case FLASH_5720VENDOR_M_ST_M45PE40:
15020 case FLASH_5720VENDOR_A_ST_M25PE40:
15021 case FLASH_5720VENDOR_A_ST_M45PE40:
15022 case FLASH_5720VENDOR_M_ST_M25PE80:
15023 case FLASH_5720VENDOR_M_ST_M45PE80:
15024 case FLASH_5720VENDOR_A_ST_M25PE80:
15025 case FLASH_5720VENDOR_A_ST_M45PE80:
15026 case FLASH_5720VENDOR_ST_25USPT:
15027 case FLASH_5720VENDOR_ST_45USPT:
15028 tp->nvram_jedecnum = JEDEC_ST;
15029 tg3_flag_set(tp, NVRAM_BUFFERED);
15030 tg3_flag_set(tp, FLASH);
15031
15032 switch (nvmpinstrp) {
15033 case FLASH_5720VENDOR_M_ST_M25PE20:
15034 case FLASH_5720VENDOR_M_ST_M45PE20:
15035 case FLASH_5720VENDOR_A_ST_M25PE20:
15036 case FLASH_5720VENDOR_A_ST_M45PE20:
15037 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
15038 break;
15039 case FLASH_5720VENDOR_M_ST_M25PE40:
15040 case FLASH_5720VENDOR_M_ST_M45PE40:
15041 case FLASH_5720VENDOR_A_ST_M25PE40:
15042 case FLASH_5720VENDOR_A_ST_M45PE40:
15043 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
15044 break;
15045 case FLASH_5720VENDOR_M_ST_M25PE80:
15046 case FLASH_5720VENDOR_M_ST_M45PE80:
15047 case FLASH_5720VENDOR_A_ST_M25PE80:
15048 case FLASH_5720VENDOR_A_ST_M45PE80:
15049 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
15050 break;
15051 default:
15052 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15053 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
15054 break;
15055 }
15056 break;
15057 default:
15058 tg3_flag_set(tp, NO_NVRAM);
15059 return;
15060 }
15061
15062 tg3_nvram_get_pagesize(tp, nvcfg1);
15063 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
15064 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
15065
15066 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
15067 u32 val;
15068
15069 if (tg3_nvram_read(tp, 0, &val))
15070 return;
15071
15072 if (val != TG3_EEPROM_MAGIC &&
15073 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
15074 tg3_flag_set(tp, NO_NVRAM);
15075 }
15076 }
15077
15078 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
15079 static void tg3_nvram_init(struct tg3 *tp)
15080 {
15081 if (tg3_flag(tp, IS_SSB_CORE)) {
15082 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
15083 tg3_flag_clear(tp, NVRAM);
15084 tg3_flag_clear(tp, NVRAM_BUFFERED);
15085 tg3_flag_set(tp, NO_NVRAM);
15086 return;
15087 }
15088
15089 tw32_f(GRC_EEPROM_ADDR,
15090 (EEPROM_ADDR_FSM_RESET |
15091 (EEPROM_DEFAULT_CLOCK_PERIOD <<
15092 EEPROM_ADDR_CLKPERD_SHIFT)));
15093
15094 msleep(1);
15095
15096 /* Enable seeprom accesses. */
15097 tw32_f(GRC_LOCAL_CTRL,
15098 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15099 udelay(100);
15100
15101 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15102 tg3_asic_rev(tp) != ASIC_REV_5701) {
15103 tg3_flag_set(tp, NVRAM);
15104
15105 if (tg3_nvram_lock(tp)) {
15106 netdev_warn(tp->dev,
15107 "Cannot get nvram lock, %s failed\n",
15108 __func__);
15109 return;
15110 }
15111 tg3_enable_nvram_access(tp);
15112
15113 tp->nvram_size = 0;
15114
15115 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15116 tg3_get_5752_nvram_info(tp);
15117 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15118 tg3_get_5755_nvram_info(tp);
15119 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15120 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15121 tg3_asic_rev(tp) == ASIC_REV_5785)
15122 tg3_get_5787_nvram_info(tp);
15123 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15124 tg3_get_5761_nvram_info(tp);
15125 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15126 tg3_get_5906_nvram_info(tp);
15127 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15128 tg3_flag(tp, 57765_CLASS))
15129 tg3_get_57780_nvram_info(tp);
15130 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15131 tg3_asic_rev(tp) == ASIC_REV_5719)
15132 tg3_get_5717_nvram_info(tp);
15133 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15134 tg3_asic_rev(tp) == ASIC_REV_5762)
15135 tg3_get_5720_nvram_info(tp);
15136 else
15137 tg3_get_nvram_info(tp);
15138
15139 if (tp->nvram_size == 0)
15140 tg3_get_nvram_size(tp);
15141
15142 tg3_disable_nvram_access(tp);
15143 tg3_nvram_unlock(tp);
15144
15145 } else {
15146 tg3_flag_clear(tp, NVRAM);
15147 tg3_flag_clear(tp, NVRAM_BUFFERED);
15148
15149 tg3_get_eeprom_size(tp);
15150 }
15151 }
15152
15153 struct subsys_tbl_ent {
15154 u16 subsys_vendor, subsys_devid;
15155 u32 phy_id;
15156 };
15157
15158 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15159 /* Broadcom boards. */
15160 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15161 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15162 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15163 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15164 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15165 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15166 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15167 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15168 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15169 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15170 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15171 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15172 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15173 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15174 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15175 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15176 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15177 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15178 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15179 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15180 { TG3PCI_SUBVENDOR_ID_BROADCOM,
15181 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15182
15183 /* 3com boards. */
15184 { TG3PCI_SUBVENDOR_ID_3COM,
15185 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15186 { TG3PCI_SUBVENDOR_ID_3COM,
15187 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15188 { TG3PCI_SUBVENDOR_ID_3COM,
15189 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15190 { TG3PCI_SUBVENDOR_ID_3COM,
15191 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15192 { TG3PCI_SUBVENDOR_ID_3COM,
15193 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15194
15195 /* DELL boards. */
15196 { TG3PCI_SUBVENDOR_ID_DELL,
15197 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15198 { TG3PCI_SUBVENDOR_ID_DELL,
15199 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15200 { TG3PCI_SUBVENDOR_ID_DELL,
15201 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15202 { TG3PCI_SUBVENDOR_ID_DELL,
15203 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15204
15205 /* Compaq boards. */
15206 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15207 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15208 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15209 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15210 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15211 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15212 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15213 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15214 { TG3PCI_SUBVENDOR_ID_COMPAQ,
15215 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15216
15217 /* IBM boards. */
15218 { TG3PCI_SUBVENDOR_ID_IBM,
15219 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15220 };
15221
15222 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15223 {
15224 int i;
15225
15226 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15227 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15228 tp->pdev->subsystem_vendor) &&
15229 (subsys_id_to_phy_id[i].subsys_devid ==
15230 tp->pdev->subsystem_device))
15231 return &subsys_id_to_phy_id[i];
15232 }
15233 return NULL;
15234 }
15235
15236 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15237 {
15238 u32 val;
15239
15240 tp->phy_id = TG3_PHY_ID_INVALID;
15241 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15242
15243 /* Assume an onboard device and WOL capable by default. */
15244 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15245 tg3_flag_set(tp, WOL_CAP);
15246
15247 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15248 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15249 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15250 tg3_flag_set(tp, IS_NIC);
15251 }
15252 val = tr32(VCPU_CFGSHDW);
15253 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15254 tg3_flag_set(tp, ASPM_WORKAROUND);
15255 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15256 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15257 tg3_flag_set(tp, WOL_ENABLE);
15258 device_set_wakeup_enable(&tp->pdev->dev, true);
15259 }
15260 goto done;
15261 }
15262
15263 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15264 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15265 u32 nic_cfg, led_cfg;
15266 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15267 u32 nic_phy_id, ver, eeprom_phy_id;
15268 int eeprom_phy_serdes = 0;
15269
15270 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15271 tp->nic_sram_data_cfg = nic_cfg;
15272
15273 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15274 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15275 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15276 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15277 tg3_asic_rev(tp) != ASIC_REV_5703 &&
15278 (ver > 0) && (ver < 0x100))
15279 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15280
15281 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15282 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15283
15284 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15285 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15286 tg3_asic_rev(tp) == ASIC_REV_5720)
15287 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15288
15289 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15290 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15291 eeprom_phy_serdes = 1;
15292
15293 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15294 if (nic_phy_id != 0) {
15295 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15296 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15297
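/* Repack the two halves of the NVRAM PHY ID into the
 * driver's internal phy_id layout; this mirrors the
 * MII_PHYSID1/2 assembly in tg3_phy_probe().
 */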
15298 eeprom_phy_id = (id1 >> 16) << 10;
15299 eeprom_phy_id |= (id2 & 0xfc00) << 16;
15300 eeprom_phy_id |= (id2 & 0x03ff) << 0;
15301 } else
15302 eeprom_phy_id = 0;
15303
15304 tp->phy_id = eeprom_phy_id;
15305 if (eeprom_phy_serdes) {
15306 if (!tg3_flag(tp, 5705_PLUS))
15307 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15308 else
15309 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15310 }
15311
15312 if (tg3_flag(tp, 5750_PLUS))
15313 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15314 SHASTA_EXT_LED_MODE_MASK);
15315 else
15316 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15317
15318 switch (led_cfg) {
15319 default:
15320 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15321 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15322 break;
15323
15324 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15325 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15326 break;
15327
15328 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15329 tp->led_ctrl = LED_CTRL_MODE_MAC;
15330
15331 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15332 * read from some older 5700/5701 bootcode.
15333 */
15334 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15335 tg3_asic_rev(tp) == ASIC_REV_5701)
15336 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15337
15338 break;
15339
15340 case SHASTA_EXT_LED_SHARED:
15341 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15342 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15343 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15344 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15345 LED_CTRL_MODE_PHY_2);
15346
15347 if (tg3_flag(tp, 5717_PLUS) ||
15348 tg3_asic_rev(tp) == ASIC_REV_5762)
15349 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15350 LED_CTRL_BLINK_RATE_MASK;
15351
15352 break;
15353
15354 case SHASTA_EXT_LED_MAC:
15355 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15356 break;
15357
15358 case SHASTA_EXT_LED_COMBO:
15359 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15360 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15361 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15362 LED_CTRL_MODE_PHY_2);
15363 break;
15364
15365 }
15366
15367 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15368 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15369 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15370 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15371
15372 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15373 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15374
15375 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15376 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15377 if ((tp->pdev->subsystem_vendor ==
15378 PCI_VENDOR_ID_ARIMA) &&
15379 (tp->pdev->subsystem_device == 0x205a ||
15380 tp->pdev->subsystem_device == 0x2063))
15381 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15382 } else {
15383 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15384 tg3_flag_set(tp, IS_NIC);
15385 }
15386
15387 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15388 tg3_flag_set(tp, ENABLE_ASF);
15389 if (tg3_flag(tp, 5750_PLUS))
15390 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15391 }
15392
15393 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15394 tg3_flag(tp, 5750_PLUS))
15395 tg3_flag_set(tp, ENABLE_APE);
15396
15397 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15398 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15399 tg3_flag_clear(tp, WOL_CAP);
15400
15401 if (tg3_flag(tp, WOL_CAP) &&
15402 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15403 tg3_flag_set(tp, WOL_ENABLE);
15404 device_set_wakeup_enable(&tp->pdev->dev, true);
15405 }
15406
15407 if (cfg2 & (1 << 17))
15408 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15409
15410 /* Serdes signal pre-emphasis in register 0x590 is set
15411 * by the bootcode if bit 18 is set. */
15412 if (cfg2 & (1 << 18))
15413 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15414
15415 if ((tg3_flag(tp, 57765_PLUS) ||
15416 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15417 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15418 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15419 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15420
15421 if (tg3_flag(tp, PCI_EXPRESS)) {
15422 u32 cfg3;
15423
15424 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15425 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15426 !tg3_flag(tp, 57765_PLUS) &&
15427 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15428 tg3_flag_set(tp, ASPM_WORKAROUND);
15429 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15430 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15431 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15432 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15433 }
15434
15435 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15436 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15437 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15438 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15439 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15440 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15441
15442 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15443 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15444 }
15445 done:
15446 if (tg3_flag(tp, WOL_CAP))
15447 device_set_wakeup_enable(&tp->pdev->dev,
15448 tg3_flag(tp, WOL_ENABLE));
15449 else
15450 device_set_wakeup_capable(&tp->pdev->dev, false);
15451 }
15452
15453 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15454 {
15455 int i, err;
15456 u32 val2, off = offset * 8;
15457
15458 err = tg3_nvram_lock(tp);
15459 if (err)
15460 return err;
15461
15462 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15463 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15464 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15465 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15466 udelay(10);
15467
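/* Poll for up to 1 ms (100 x 10 usec) for the OTP read to
 * complete.
 */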
15468 for (i = 0; i < 100; i++) {
15469 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15470 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15471 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15472 break;
15473 }
15474 udelay(10);
15475 }
15476
15477 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15478
15479 tg3_nvram_unlock(tp);
15480 if (val2 & APE_OTP_STATUS_CMD_DONE)
15481 return 0;
15482
15483 return -EBUSY;
15484 }
15485
15486 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15487 {
15488 int i;
15489 u32 val;
15490
15491 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15492 tw32(OTP_CTRL, cmd);
15493
15494 /* Wait for up to 1 ms for command to execute. */
15495 for (i = 0; i < 100; i++) {
15496 val = tr32(OTP_STATUS);
15497 if (val & OTP_STATUS_CMD_DONE)
15498 break;
15499 udelay(10);
15500 }
15501
15502 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15503 }
15504
15505 /* Read the gphy configuration from the OTP region of the chip. The gphy
15506 * configuration is a 32-bit value that straddles the alignment boundary.
15507 * We do two 32-bit reads and then shift and merge the results.
15508 */
15509 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15510 {
15511 u32 bhalf_otp, thalf_otp;
15512
15513 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15514
15515 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15516 return 0;
15517
15518 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15519
15520 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15521 return 0;
15522
15523 thalf_otp = tr32(OTP_READ_DATA);
15524
15525 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15526
15527 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15528 return 0;
15529
15530 bhalf_otp = tr32(OTP_READ_DATA);
15531
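/* The 32-bit gphy config straddles the two words just read:
 * take the low half of the first word and the high half of
 * the second.
 */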
15532 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15533 }
15534
15535 static void tg3_phy_init_link_config(struct tg3 *tp)
15536 {
15537 u32 adv = ADVERTISED_Autoneg;
15538
15539 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15540 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15541 adv |= ADVERTISED_1000baseT_Half;
15542 adv |= ADVERTISED_1000baseT_Full;
15543 }
15544
15545 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15546 adv |= ADVERTISED_100baseT_Half |
15547 ADVERTISED_100baseT_Full |
15548 ADVERTISED_10baseT_Half |
15549 ADVERTISED_10baseT_Full |
15550 ADVERTISED_TP;
15551 else
15552 adv |= ADVERTISED_FIBRE;
15553
15554 tp->link_config.advertising = adv;
15555 tp->link_config.speed = SPEED_UNKNOWN;
15556 tp->link_config.duplex = DUPLEX_UNKNOWN;
15557 tp->link_config.autoneg = AUTONEG_ENABLE;
15558 tp->link_config.active_speed = SPEED_UNKNOWN;
15559 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15560
15561 tp->old_link = -1;
15562 }
15563
15564 static int tg3_phy_probe(struct tg3 *tp)
15565 {
15566 u32 hw_phy_id_1, hw_phy_id_2;
15567 u32 hw_phy_id, hw_phy_id_masked;
15568 int err;
15569
15570 /* flow control autonegotiation is default behavior */
15571 tg3_flag_set(tp, PAUSE_AUTONEG);
15572 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15573
15574 if (tg3_flag(tp, ENABLE_APE)) {
15575 switch (tp->pci_fn) {
15576 case 0:
15577 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15578 break;
15579 case 1:
15580 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15581 break;
15582 case 2:
15583 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15584 break;
15585 case 3:
15586 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15587 break;
15588 }
15589 }
15590
15591 if (!tg3_flag(tp, ENABLE_ASF) &&
15592 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15593 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15594 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15595 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15596
15597 if (tg3_flag(tp, USE_PHYLIB))
15598 return tg3_phy_init(tp);
15599
15600 /* Reading the PHY ID register can conflict with ASF
15601 * firmware access to the PHY hardware.
15602 */
15603 err = 0;
15604 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15605 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15606 } else {
15607 /* Now read the physical PHY_ID from the chip and verify
15608 * that it is sane. If it doesn't look good, we fall back
15609 * to the PHY_ID found in the eeprom area and, failing
15610 * that, to the hard-coded subsystem-ID table.
15611 */
15612 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15613 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15614
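/* Reassemble MII_PHYSID1/2 into the packed phy_id format
 * used throughout the driver; see the matching repacking of
 * the NVRAM PHY ID in tg3_get_eeprom_hw_cfg().
 */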
15615 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15616 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15617 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15618
15619 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15620 }
15621
15622 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15623 tp->phy_id = hw_phy_id;
15624 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15625 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15626 else
15627 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15628 } else {
15629 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15630 /* Do nothing, phy ID already set up in
15631 * tg3_get_eeprom_hw_cfg().
15632 */
15633 } else {
15634 struct subsys_tbl_ent *p;
15635
15636 /* No eeprom signature? Try the hardcoded
15637 * subsys device table.
15638 */
15639 p = tg3_lookup_by_subsys(tp);
15640 if (p) {
15641 tp->phy_id = p->phy_id;
15642 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15643 /* So far we have seen the IDs 0xbc050cd0,
15644 * 0xbc050f80 and 0xbc050c30 on devices
15645 * connected to a BCM4785, and there are
15646 * probably more. For now, just assume that
15647 * the phy is supported when it is connected
15648 * to an SSB core.
15649 */
15650 return -ENODEV;
15651 }
15652
15653 if (!tp->phy_id ||
15654 tp->phy_id == TG3_PHY_ID_BCM8002)
15655 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15656 }
15657 }
15658
15659 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15660 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15661 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15662 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15663 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15664 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15665 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15666 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15667 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15668 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15669
15670 linkmode_zero(tp->eee.supported);
15671 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
15672 tp->eee.supported);
15673 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
15674 tp->eee.supported);
15675 linkmode_copy(tp->eee.advertised, tp->eee.supported);
15676
15677 tp->eee.eee_enabled = 1;
15678 tp->eee.tx_lpi_enabled = 1;
15679 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15680 }
15681
15682 tg3_phy_init_link_config(tp);
15683
15684 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15685 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15686 !tg3_flag(tp, ENABLE_APE) &&
15687 !tg3_flag(tp, ENABLE_ASF)) {
15688 u32 bmsr, dummy;
15689
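/* The BMSR link status bit is latched low, so read the
 * register twice to get the current link state.
 */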
15690 tg3_readphy(tp, MII_BMSR, &bmsr);
15691 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15692 (bmsr & BMSR_LSTATUS))
15693 goto skip_phy_reset;
15694
15695 err = tg3_phy_reset(tp);
15696 if (err)
15697 return err;
15698
15699 tg3_phy_set_wirespeed(tp);
15700
15701 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15702 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15703 tp->link_config.flowctrl);
15704
15705 tg3_writephy(tp, MII_BMCR,
15706 BMCR_ANENABLE | BMCR_ANRESTART);
15707 }
15708 }
15709
15710 skip_phy_reset:
15711 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15712 err = tg3_init_5401phy_dsp(tp);
15713 if (err)
15714 return err;
15715
15716 err = tg3_init_5401phy_dsp(tp);
15717 }
15718
15719 return err;
15720 }
15721
15722 static void tg3_read_vpd(struct tg3 *tp)
15723 {
15724 u8 *vpd_data;
15725 unsigned int len, vpdlen;
15726 int i;
15727
15728 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15729 if (!vpd_data)
15730 goto out_no_vpd;
15731
15732 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15733 PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15734 if (i < 0)
15735 goto partno;
15736
15737 if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15738 goto partno;
15739
15740 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15741 PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15742 if (i < 0)
15743 goto partno;
15744
15745 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15746 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15747
15748 partno:
15749 i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15750 PCI_VPD_RO_KEYWORD_PARTNO, &len);
15751 if (i < 0)
15752 goto out_not_found;
15753
15754 if (len > TG3_BPN_SIZE)
15755 goto out_not_found;
15756
15757 memcpy(tp->board_part_number, &vpd_data[i], len);
15758
15759 out_not_found:
15760 kfree(vpd_data);
15761 if (tp->board_part_number[0])
15762 return;
15763
15764 out_no_vpd:
15765 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15766 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15767 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15768 strcpy(tp->board_part_number, "BCM5717");
15769 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15770 strcpy(tp->board_part_number, "BCM5718");
15771 else
15772 goto nomatch;
15773 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15774 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15775 strcpy(tp->board_part_number, "BCM57780");
15776 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15777 strcpy(tp->board_part_number, "BCM57760");
15778 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15779 strcpy(tp->board_part_number, "BCM57790");
15780 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15781 strcpy(tp->board_part_number, "BCM57788");
15782 else
15783 goto nomatch;
15784 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15785 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15786 strcpy(tp->board_part_number, "BCM57761");
15787 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15788 strcpy(tp->board_part_number, "BCM57765");
15789 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15790 strcpy(tp->board_part_number, "BCM57781");
15791 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15792 strcpy(tp->board_part_number, "BCM57785");
15793 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15794 strcpy(tp->board_part_number, "BCM57791");
15795 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15796 strcpy(tp->board_part_number, "BCM57795");
15797 else
15798 goto nomatch;
15799 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15800 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15801 strcpy(tp->board_part_number, "BCM57762");
15802 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15803 strcpy(tp->board_part_number, "BCM57766");
15804 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15805 strcpy(tp->board_part_number, "BCM57782");
15806 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15807 strcpy(tp->board_part_number, "BCM57786");
15808 else
15809 goto nomatch;
15810 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15811 strcpy(tp->board_part_number, "BCM95906");
15812 } else {
15813 nomatch:
15814 strcpy(tp->board_part_number, "none");
15815 }
15816 }
15817
15818 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15819 {
15820 u32 val;
15821
15822 if (tg3_nvram_read(tp, offset, &val) ||
15823 (val & 0xfc000000) != 0x0c000000 ||
15824 tg3_nvram_read(tp, offset + 4, &val) ||
15825 val != 0)
15826 return 0;
15827
15828 return 1;
15829 }
15830
15831 static void tg3_read_bc_ver(struct tg3 *tp)
15832 {
15833 u32 val, offset, start, ver_offset;
15834 int i, dst_off;
15835 bool newver = false;
15836
15837 if (tg3_nvram_read(tp, 0xc, &offset) ||
15838 tg3_nvram_read(tp, 0x4, &start))
15839 return;
15840
15841 offset = tg3_nvram_logical_addr(tp, offset);
15842
15843 if (tg3_nvram_read(tp, offset, &val))
15844 return;
15845
15846 if ((val & 0xfc000000) == 0x0c000000) {
15847 if (tg3_nvram_read(tp, offset + 4, &val))
15848 return;
15849
15850 if (val == 0)
15851 newver = true;
15852 }
15853
15854 dst_off = strlen(tp->fw_ver);
15855
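/* Newer images carry a 16-byte version string whose NVRAM
 * offset is stored at offset + 8; older images pack a
 * major.minor pair into the bootcode version word instead.
 */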
15856 if (newver) {
15857 if (TG3_VER_SIZE - dst_off < 16 ||
15858 tg3_nvram_read(tp, offset + 8, &ver_offset))
15859 return;
15860
15861 offset = offset + ver_offset - start;
15862 for (i = 0; i < 16; i += 4) {
15863 __be32 v;
15864 if (tg3_nvram_read_be32(tp, offset + i, &v))
15865 return;
15866
15867 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15868 }
15869 } else {
15870 u32 major, minor;
15871
15872 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15873 return;
15874
15875 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15876 TG3_NVM_BCVER_MAJSFT;
15877 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15878 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15879 "v%d.%02d", major, minor);
15880 }
15881 }
15882
15883 static void tg3_read_hwsb_ver(struct tg3 *tp)
15884 {
15885 u32 val, major, minor;
15886
15887 /* Use native endian representation */
15888 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15889 return;
15890
15891 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15892 TG3_NVM_HWSB_CFG1_MAJSFT;
15893 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15894 TG3_NVM_HWSB_CFG1_MINSFT;
15895
15896 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15897 }
15898
15899 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15900 {
15901 u32 offset, major, minor, build;
15902
15903 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15904
15905 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15906 return;
15907
15908 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15909 case TG3_EEPROM_SB_REVISION_0:
15910 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15911 break;
15912 case TG3_EEPROM_SB_REVISION_2:
15913 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15914 break;
15915 case TG3_EEPROM_SB_REVISION_3:
15916 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15917 break;
15918 case TG3_EEPROM_SB_REVISION_4:
15919 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15920 break;
15921 case TG3_EEPROM_SB_REVISION_5:
15922 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15923 break;
15924 case TG3_EEPROM_SB_REVISION_6:
15925 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15926 break;
15927 default:
15928 return;
15929 }
15930
15931 if (tg3_nvram_read(tp, offset, &val))
15932 return;
15933
15934 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15935 TG3_EEPROM_SB_EDH_BLD_SHFT;
15936 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15937 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15938 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15939
15940 if (minor > 99 || build > 26)
15941 return;
15942
15943 offset = strlen(tp->fw_ver);
15944 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15945 " v%d.%02d", major, minor);
15946
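/* A non-zero build number is appended as a single letter:
 * build 1 becomes 'a', build 2 'b', and so on up to 26
 * ('z'), hence the build > 26 check above.
 */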
15947 if (build > 0) {
15948 offset = strlen(tp->fw_ver);
15949 if (offset < TG3_VER_SIZE - 1)
15950 tp->fw_ver[offset] = 'a' + build - 1;
15951 }
15952 }
15953
15954 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15955 {
15956 u32 val, offset, start;
15957 int i, vlen;
15958
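/* Walk the NVRAM directory looking for the ASF init entry,
 * which points at the management firmware image.
 */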
15959 for (offset = TG3_NVM_DIR_START;
15960 offset < TG3_NVM_DIR_END;
15961 offset += TG3_NVM_DIRENT_SIZE) {
15962 if (tg3_nvram_read(tp, offset, &val))
15963 return;
15964
15965 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15966 break;
15967 }
15968
15969 if (offset == TG3_NVM_DIR_END)
15970 return;
15971
15972 if (!tg3_flag(tp, 5705_PLUS))
15973 start = 0x08000000;
15974 else if (tg3_nvram_read(tp, offset - 4, &start))
15975 return;
15976
15977 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15978 !tg3_fw_img_is_valid(tp, offset) ||
15979 tg3_nvram_read(tp, offset + 8, &val))
15980 return;
15981
15982 offset += val - start;
15983
15984 vlen = strlen(tp->fw_ver);
15985
15986 tp->fw_ver[vlen++] = ',';
15987 tp->fw_ver[vlen++] = ' ';
15988
15989 for (i = 0; i < 4; i++) {
15990 __be32 v;
15991 if (tg3_nvram_read_be32(tp, offset, &v))
15992 return;
15993
15994 offset += sizeof(v);
15995
15996 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15997 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15998 break;
15999 }
16000
16001 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
16002 vlen += sizeof(v);
16003 }
16004 }
16005
16006 static void tg3_probe_ncsi(struct tg3 *tp)
16007 {
16008 u32 apedata;
16009
16010 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
16011 if (apedata != APE_SEG_SIG_MAGIC)
16012 return;
16013
16014 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
16015 if (!(apedata & APE_FW_STATUS_READY))
16016 return;
16017
16018 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
16019 tg3_flag_set(tp, APE_HAS_NCSI);
16020 }
16021
16022 static void tg3_read_dash_ver(struct tg3 *tp)
16023 {
16024 int vlen;
16025 u32 apedata;
16026 char *fwtype;
16027
16028 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
16029
16030 if (tg3_flag(tp, APE_HAS_NCSI))
16031 fwtype = "NCSI";
16032 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
16033 fwtype = "SMASH";
16034 else
16035 fwtype = "DASH";
16036
16037 vlen = strlen(tp->fw_ver);
16038
16039 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
16040 fwtype,
16041 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
16042 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
16043 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
16044 (apedata & APE_FW_VERSION_BLDMSK));
16045 }
16046
16047 static void tg3_read_otp_ver(struct tg3 *tp)
16048 {
16049 u32 val, val2;
16050
16051 if (tg3_asic_rev(tp) != ASIC_REV_5762)
16052 return;
16053
16054 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
16055 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
16056 TG3_OTP_MAGIC0_VALID(val)) {
16057 u64 val64 = (u64) val << 32 | val2;
16058 u32 ver = 0;
16059 int i, vlen;
16060
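/* Scan the OTP value a byte at a time from the low end; the
 * version is the last non-zero byte before the terminator.
 */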
16061 for (i = 0; i < 7; i++) {
16062 if ((val64 & 0xff) == 0)
16063 break;
16064 ver = val64 & 0xff;
16065 val64 >>= 8;
16066 }
16067 vlen = strlen(tp->fw_ver);
16068 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16069 }
16070 }
16071
16072 static void tg3_read_fw_ver(struct tg3 *tp)
16073 {
16074 u32 val;
16075 bool vpd_vers = false;
16076
16077 if (tp->fw_ver[0] != 0)
16078 vpd_vers = true;
16079
16080 if (tg3_flag(tp, NO_NVRAM)) {
16081 strcat(tp->fw_ver, "sb");
16082 tg3_read_otp_ver(tp);
16083 return;
16084 }
16085
16086 if (tg3_nvram_read(tp, 0, &val))
16087 return;
16088
16089 if (val == TG3_EEPROM_MAGIC)
16090 tg3_read_bc_ver(tp);
16091 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16092 tg3_read_sb_ver(tp, val);
16093 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16094 tg3_read_hwsb_ver(tp);
16095
16096 if (tg3_flag(tp, ENABLE_ASF)) {
16097 if (tg3_flag(tp, ENABLE_APE)) {
16098 tg3_probe_ncsi(tp);
16099 if (!vpd_vers)
16100 tg3_read_dash_ver(tp);
16101 } else if (!vpd_vers) {
16102 tg3_read_mgmtfw_ver(tp);
16103 }
16104 }
16105
16106 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16107 }
16108
16109 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16110 {
16111 if (tg3_flag(tp, LRG_PROD_RING_CAP))
16112 return TG3_RX_RET_MAX_SIZE_5717;
16113 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16114 return TG3_RX_RET_MAX_SIZE_5700;
16115 else
16116 return TG3_RX_RET_MAX_SIZE_5705;
16117 }
16118
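/* Host bridges known to reorder posted writes; devices behind
 * these get the MBOX_WRITE_REORDER treatment in
 * tg3_get_invariants() below.
 */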
16119 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16120 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16121 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16122 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16123 { },
16124 };
16125
16126 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16127 {
16128 struct pci_dev *peer;
16129 unsigned int func, devnr = tp->pdev->devfn & ~7;
16130
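/* Both ports of a dual-port device share one PCI slot, so
 * mask off the function bits and probe all eight functions
 * for the sibling device.
 */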
16131 for (func = 0; func < 8; func++) {
16132 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16133 if (peer && peer != tp->pdev)
16134 break;
16135 pci_dev_put(peer);
16136 }
16137 /* The 5704 can be configured in single-port mode; set peer
16138 * to tp->pdev in that case.
16139 */
16140 if (!peer) {
16141 peer = tp->pdev;
16142 return peer;
16143 }
16144
16145 /*
16146 * We don't need to keep the refcount elevated; there's no way
16147 * to remove one half of this device without removing the other
16148 */
16149 pci_dev_put(peer);
16150
16151 return peer;
16152 }
16153
16154 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16155 {
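/* The chip revision normally lives in the upper bits of the
 * misc host control register. Devices that report
 * ASIC_REV_USE_PROD_ID_REG there expose the real revision
 * through a separate product-ID config register instead.
 */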
16156 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16157 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16158 u32 reg;
16159
16160 /* All devices that use the alternate
16161 * ASIC REV location have a CPMU.
16162 */
16163 tg3_flag_set(tp, CPMU_PRESENT);
16164
16165 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16166 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16167 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16168 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16169 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16170 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16171 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16172 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16173 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16174 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16175 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16176 reg = TG3PCI_GEN2_PRODID_ASICREV;
16177 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16178 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16179 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16180 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16181 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16182 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16183 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16184 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16185 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16186 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16187 reg = TG3PCI_GEN15_PRODID_ASICREV;
16188 else
16189 reg = TG3PCI_PRODID_ASICREV;
16190
16191 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16192 }
16193
16194 /* Wrong chip ID in 5752 A0. This code can be removed later
16195 * as A0 is not in production.
16196 */
16197 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16198 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16199
16200 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16201 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16202
16203 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16204 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16205 tg3_asic_rev(tp) == ASIC_REV_5720)
16206 tg3_flag_set(tp, 5717_PLUS);
16207
16208 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16209 tg3_asic_rev(tp) == ASIC_REV_57766)
16210 tg3_flag_set(tp, 57765_CLASS);
16211
16212 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16213 tg3_asic_rev(tp) == ASIC_REV_5762)
16214 tg3_flag_set(tp, 57765_PLUS);
16215
16216 /* Intentionally exclude ASIC_REV_5906 */
16217 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16218 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16219 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16220 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16221 tg3_asic_rev(tp) == ASIC_REV_5785 ||
16222 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16223 tg3_flag(tp, 57765_PLUS))
16224 tg3_flag_set(tp, 5755_PLUS);
16225
16226 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16227 tg3_asic_rev(tp) == ASIC_REV_5714)
16228 tg3_flag_set(tp, 5780_CLASS);
16229
16230 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16231 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16232 tg3_asic_rev(tp) == ASIC_REV_5906 ||
16233 tg3_flag(tp, 5755_PLUS) ||
16234 tg3_flag(tp, 5780_CLASS))
16235 tg3_flag_set(tp, 5750_PLUS);
16236
16237 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16238 tg3_flag(tp, 5750_PLUS))
16239 tg3_flag_set(tp, 5705_PLUS);
16240 }
16241
16242 static bool tg3_10_100_only_device(struct tg3 *tp,
16243 const struct pci_device_id *ent)
16244 {
16245 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16246
16247 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16248 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16249 (tp->phy_flags & TG3_PHYFLG_IS_FET))
16250 return true;
16251
16252 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16253 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16254 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16255 return true;
16256 } else {
16257 return true;
16258 }
16259 }
16260
16261 return false;
16262 }
16263
16264 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16265 {
16266 u32 misc_ctrl_reg;
16267 u32 pci_state_reg, grc_misc_cfg;
16268 u32 val;
16269 u16 pci_cmd;
16270 int err;
16271
16272 /* Force memory write invalidate off. If we leave it on,
16273 * then on 5700_BX chips we have to enable a workaround.
16274 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16275 * to match the cacheline size. The Broadcom driver has this
16276 * workaround but turns MWI off all the time, so it never
16277 * gets used. This suggests that the workaround is insufficient.
16278 */
16279 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16280 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16281 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16282
16283 /* Important! -- Make sure register accesses are byteswapped
16284 * correctly. Also, for those chips that require it, make
16285 * sure that indirect register accesses are enabled before
16286 * the first operation.
16287 */
16288 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16289 &misc_ctrl_reg);
16290 tp->misc_host_ctrl |= (misc_ctrl_reg &
16291 MISC_HOST_CTRL_CHIPREV);
16292 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16293 tp->misc_host_ctrl);
16294
16295 tg3_detect_asic_rev(tp, misc_ctrl_reg);
16296
16297 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16298 * we need to disable memory and use config. cycles
16299 * only to access all registers. The 5702/03 chips
16300 * can mistakenly decode the special cycles from the
16301 * ICH chipsets as memory write cycles, causing corruption
16302 * of register and memory space. Only certain ICH bridges
16303 * will drive special cycles with non-zero data during the
16304 * address phase which can fall within the 5703's address
16305 * range. This is not an ICH bug as the PCI spec allows
16306 * non-zero address during special cycles. However, only
16307 * these ICH bridges are known to drive non-zero addresses
16308 * during special cycles.
16309 *
16310 * Since special cycles do not cross PCI bridges, we only
16311 * enable this workaround if the 5703 is on the secondary
16312 * bus of these ICH bridges.
16313 */
16314 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16315 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16316 static struct tg3_dev_id {
16317 u32 vendor;
16318 u32 device;
16319 u32 rev;
16320 } ich_chipsets[] = {
16321 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16322 PCI_ANY_ID },
16323 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16324 PCI_ANY_ID },
16325 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16326 0xa },
16327 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16328 PCI_ANY_ID },
16329 { },
16330 };
16331 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16332 struct pci_dev *bridge = NULL;
16333
16334 while (pci_id->vendor != 0) {
16335 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16336 bridge);
16337 if (!bridge) {
16338 pci_id++;
16339 continue;
16340 }
16341 if (pci_id->rev != PCI_ANY_ID) {
16342 if (bridge->revision > pci_id->rev)
16343 continue;
16344 }
16345 if (bridge->subordinate &&
16346 (bridge->subordinate->number ==
16347 tp->pdev->bus->number)) {
16348 tg3_flag_set(tp, ICH_WORKAROUND);
16349 pci_dev_put(bridge);
16350 break;
16351 }
16352 }
16353 }
16354
16355 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16356 static struct tg3_dev_id {
16357 u32 vendor;
16358 u32 device;
16359 } bridge_chipsets[] = {
16360 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16361 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16362 { },
16363 };
16364 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16365 struct pci_dev *bridge = NULL;
16366
16367 while (pci_id->vendor != 0) {
16368 bridge = pci_get_device(pci_id->vendor,
16369 pci_id->device,
16370 bridge);
16371 if (!bridge) {
16372 pci_id++;
16373 continue;
16374 }
16375 if (bridge->subordinate &&
16376 (bridge->subordinate->number <=
16377 tp->pdev->bus->number) &&
16378 (bridge->subordinate->busn_res.end >=
16379 tp->pdev->bus->number)) {
16380 tg3_flag_set(tp, 5701_DMA_BUG);
16381 pci_dev_put(bridge);
16382 break;
16383 }
16384 }
16385 }
16386
16387 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16388 * DMA addresses > 40-bit. This bridge may have other additional
16389 * 57xx devices behind it in some 4-port NIC designs for example.
16390 * Any tg3 device found behind the bridge will also need the 40-bit
16391 * DMA workaround.
16392 */
16393 if (tg3_flag(tp, 5780_CLASS)) {
16394 tg3_flag_set(tp, 40BIT_DMA_BUG);
16395 tp->msi_cap = tp->pdev->msi_cap;
16396 } else {
16397 struct pci_dev *bridge = NULL;
16398
16399 do {
16400 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16401 PCI_DEVICE_ID_SERVERWORKS_EPB,
16402 bridge);
16403 if (bridge && bridge->subordinate &&
16404 (bridge->subordinate->number <=
16405 tp->pdev->bus->number) &&
16406 (bridge->subordinate->busn_res.end >=
16407 tp->pdev->bus->number)) {
16408 tg3_flag_set(tp, 40BIT_DMA_BUG);
16409 pci_dev_put(bridge);
16410 break;
16411 }
16412 } while (bridge);
16413 }
16414
16415 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16416 tg3_asic_rev(tp) == ASIC_REV_5714)
16417 tp->pdev_peer = tg3_find_peer(tp);
16418
16419 /* Determine TSO capabilities */
16420 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16421 ; /* Do nothing. HW bug. */
16422 else if (tg3_flag(tp, 57765_PLUS))
16423 tg3_flag_set(tp, HW_TSO_3);
16424 else if (tg3_flag(tp, 5755_PLUS) ||
16425 tg3_asic_rev(tp) == ASIC_REV_5906)
16426 tg3_flag_set(tp, HW_TSO_2);
16427 else if (tg3_flag(tp, 5750_PLUS)) {
16428 tg3_flag_set(tp, HW_TSO_1);
16429 tg3_flag_set(tp, TSO_BUG);
16430 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16431 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16432 tg3_flag_clear(tp, TSO_BUG);
16433 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16434 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16435 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16436 tg3_flag_set(tp, FW_TSO);
16437 tg3_flag_set(tp, TSO_BUG);
16438 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16439 tp->fw_needed = FIRMWARE_TG3TSO5;
16440 else
16441 tp->fw_needed = FIRMWARE_TG3TSO;
16442 }
16443
16444 /* Selectively allow TSO based on operating conditions */
16445 if (tg3_flag(tp, HW_TSO_1) ||
16446 tg3_flag(tp, HW_TSO_2) ||
16447 tg3_flag(tp, HW_TSO_3) ||
16448 tg3_flag(tp, FW_TSO)) {
16449 /* For firmware TSO, assume ASF is disabled.
16450 * We'll disable TSO later if we discover ASF
16451 * is enabled in tg3_get_eeprom_hw_cfg().
16452 */
16453 tg3_flag_set(tp, TSO_CAPABLE);
16454 } else {
16455 tg3_flag_clear(tp, TSO_CAPABLE);
16456 tg3_flag_clear(tp, TSO_BUG);
16457 tp->fw_needed = NULL;
16458 }
16459
16460 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16461 tp->fw_needed = FIRMWARE_TG3;
16462
16463 if (tg3_asic_rev(tp) == ASIC_REV_57766)
16464 tp->fw_needed = FIRMWARE_TG357766;
16465
16466 tp->irq_max = 1;
16467
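/* MSI is nominally available on all 5750+ parts, but the
 * early 5750 AX/BX revisions and single-port 5714 parts up
 * to rev A2 are excluded below, presumably due to chip
 * errata.
 */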
16468 if (tg3_flag(tp, 5750_PLUS)) {
16469 tg3_flag_set(tp, SUPPORT_MSI);
16470 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16471 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16472 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16473 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16474 tp->pdev_peer == tp->pdev))
16475 tg3_flag_clear(tp, SUPPORT_MSI);
16476
16477 if (tg3_flag(tp, 5755_PLUS) ||
16478 tg3_asic_rev(tp) == ASIC_REV_5906) {
16479 tg3_flag_set(tp, 1SHOT_MSI);
16480 }
16481
16482 if (tg3_flag(tp, 57765_PLUS)) {
16483 tg3_flag_set(tp, SUPPORT_MSIX);
16484 tp->irq_max = TG3_IRQ_MAX_VECS;
16485 }
16486 }
16487
16488 tp->txq_max = 1;
16489 tp->rxq_max = 1;
16490 if (tp->irq_max > 1) {
16491 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16492 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16493
16494 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16495 tg3_asic_rev(tp) == ASIC_REV_5720)
16496 tp->txq_max = tp->irq_max - 1;
16497 }
16498
16499 if (tg3_flag(tp, 5755_PLUS) ||
16500 tg3_asic_rev(tp) == ASIC_REV_5906)
16501 tg3_flag_set(tp, SHORT_DMA_BUG);
16502
16503 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16504 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16505
16506 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16507 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16508 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16509 tg3_asic_rev(tp) == ASIC_REV_5762)
16510 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16511
16512 if (tg3_flag(tp, 57765_PLUS) &&
16513 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16514 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16515
16516 if (!tg3_flag(tp, 5705_PLUS) ||
16517 tg3_flag(tp, 5780_CLASS) ||
16518 tg3_flag(tp, USE_JUMBO_BDFLAG))
16519 tg3_flag_set(tp, JUMBO_CAPABLE);
16520
16521 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16522 &pci_state_reg);
16523
16524 if (pci_is_pcie(tp->pdev)) {
16525 u16 lnkctl;
16526
16527 tg3_flag_set(tp, PCI_EXPRESS);
16528
16529 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16530 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16531 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16532 tg3_flag_clear(tp, HW_TSO_2);
16533 tg3_flag_clear(tp, TSO_CAPABLE);
16534 }
16535 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16536 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16537 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16538 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16539 tg3_flag_set(tp, CLKREQ_BUG);
16540 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16541 tg3_flag_set(tp, L1PLLPD_EN);
16542 }
16543 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16544 /* BCM5785 devices are effectively PCIe devices, and should
16545 * follow PCIe codepaths, but do not have a PCIe capabilities
16546 * section.
16547 */
16548 tg3_flag_set(tp, PCI_EXPRESS);
16549 } else if (!tg3_flag(tp, 5705_PLUS) ||
16550 tg3_flag(tp, 5780_CLASS)) {
16551 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16552 if (!tp->pcix_cap) {
16553 dev_err(&tp->pdev->dev,
16554 "Cannot find PCI-X capability, aborting\n");
16555 return -EIO;
16556 }
16557
16558 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16559 tg3_flag_set(tp, PCIX_MODE);
16560 }
16561
16562 /* If we have an AMD 762 or VIA K8T800 chipset, reordering
16563 * of writes to the mailbox registers by the host controller
16564 * can cause major trouble. We read back from every mailbox
16565 * register write to force the writes to be posted to the
16566 * chip in order.
16567 */
16568 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16569 !tg3_flag(tp, PCI_EXPRESS))
16570 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16571
16572 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16573 &tp->pci_cacheline_sz);
16574 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16575 &tp->pci_lat_timer);
16576 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16577 tp->pci_lat_timer < 64) {
16578 tp->pci_lat_timer = 64;
16579 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16580 tp->pci_lat_timer);
16581 }
16582
16583 /* Important! -- It is critical that the PCI-X hw workaround
16584 * situation is decided before the first MMIO register access.
16585 */
16586 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16587 /* 5700 BX chips need to have their TX producer index
16588 * mailboxes written twice to work around a bug.
16589 */
16590 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16591
16592 /* If we are in PCI-X mode, enable register write workaround.
16593 *
16594 * The workaround is to use indirect register accesses
16595 * for all chip writes not to mailbox registers.
16596 */
16597 if (tg3_flag(tp, PCIX_MODE)) {
16598 u32 pm_reg;
16599
16600 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16601
16602 /* The chip can have its power management PCI config
16603 * space registers clobbered due to this bug.
16604 * So explicitly force the chip into D0 here.
16605 */
16606 pci_read_config_dword(tp->pdev,
16607 tp->pdev->pm_cap + PCI_PM_CTRL,
16608 &pm_reg);
16609 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16610 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16611 pci_write_config_dword(tp->pdev,
16612 tp->pdev->pm_cap + PCI_PM_CTRL,
16613 pm_reg);
16614
16615 /* Also, force SERR#/PERR# in PCI command. */
16616 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16617 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16618 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16619 }
16620 }
16621
16622 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16623 tg3_flag_set(tp, PCI_HIGH_SPEED);
16624 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16625 tg3_flag_set(tp, PCI_32BIT);
16626
16627 /* Chip-specific fixup from Broadcom driver */
16628 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16629 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16630 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16631 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16632 }
16633
16634 /* Default fast path register access methods */
16635 tp->read32 = tg3_read32;
16636 tp->write32 = tg3_write32;
16637 tp->read32_mbox = tg3_read32;
16638 tp->write32_mbox = tg3_write32;
16639 tp->write32_tx_mbox = tg3_write32;
16640 tp->write32_rx_mbox = tg3_write32;
16641
16642 /* Various workaround register access methods */
16643 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16644 tp->write32 = tg3_write_indirect_reg32;
16645 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16646 (tg3_flag(tp, PCI_EXPRESS) &&
16647 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16648 /*
16649 * Back to back register writes can cause problems on these
16650 * chips, the workaround is to read back all reg writes
16651 * except those to mailbox regs.
16652 *
16653 * See tg3_write_indirect_reg32().
16654 */
16655 tp->write32 = tg3_write_flush_reg32;
16656 }
16657
16658 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16659 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16660 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16661 tp->write32_rx_mbox = tg3_write_flush_reg32;
16662 }
16663
16664 if (tg3_flag(tp, ICH_WORKAROUND)) {
16665 tp->read32 = tg3_read_indirect_reg32;
16666 tp->write32 = tg3_write_indirect_reg32;
16667 tp->read32_mbox = tg3_read_indirect_mbox;
16668 tp->write32_mbox = tg3_write_indirect_mbox;
16669 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16670 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16671
16672 iounmap(tp->regs);
16673 tp->regs = NULL;
16674
16675 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16676 pci_cmd &= ~PCI_COMMAND_MEMORY;
16677 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16678 }
16679 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16680 tp->read32_mbox = tg3_read32_mbox_5906;
16681 tp->write32_mbox = tg3_write32_mbox_5906;
16682 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16683 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16684 }
16685
16686 if (tp->write32 == tg3_write_indirect_reg32 ||
16687 (tg3_flag(tp, PCIX_MODE) &&
16688 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16689 tg3_asic_rev(tp) == ASIC_REV_5701)))
16690 tg3_flag_set(tp, SRAM_USE_CONFIG);
16691
16692 /* The memory arbiter has to be enabled in order for SRAM accesses
16693 * to succeed. Normally on powerup the tg3 chip firmware will make
16694 * sure it is enabled, but other entities such as system netboot
16695 * code might disable it.
16696 */
16697 val = tr32(MEMARB_MODE);
16698 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16699
16700 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16701 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16702 tg3_flag(tp, 5780_CLASS)) {
16703 if (tg3_flag(tp, PCIX_MODE)) {
16704 pci_read_config_dword(tp->pdev,
16705 tp->pcix_cap + PCI_X_STATUS,
16706 &val);
16707 tp->pci_fn = val & 0x7;
16708 }
16709 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16710 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16711 tg3_asic_rev(tp) == ASIC_REV_5720) {
16712 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16713 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16714 val = tr32(TG3_CPMU_STATUS);
16715
16716 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16717 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16718 else
16719 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16720 TG3_CPMU_STATUS_FSHFT_5719;
16721 }
16722
16723 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16724 tp->write32_tx_mbox = tg3_write_flush_reg32;
16725 tp->write32_rx_mbox = tg3_write_flush_reg32;
16726 }
16727
16728 /* Get eeprom hw config before calling tg3_set_power_state().
16729 * In particular, the TG3_FLAG_IS_NIC flag must be
16730 * determined before calling tg3_set_power_state() so that
16731 * we know whether or not to switch out of Vaux power.
16732 * When the flag is set, it means that GPIO1 is used for eeprom
16733 * write protect and also implies that it is a LOM where GPIOs
16734 * are not used to switch power.
16735 */
16736 tg3_get_eeprom_hw_cfg(tp);
16737
16738 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16739 tg3_flag_clear(tp, TSO_CAPABLE);
16740 tg3_flag_clear(tp, TSO_BUG);
16741 tp->fw_needed = NULL;
16742 }
16743
16744 if (tg3_flag(tp, ENABLE_APE)) {
16745 /* Allow reads and writes to the
16746 * APE register and memory space.
16747 */
16748 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16749 PCISTATE_ALLOW_APE_SHMEM_WR |
16750 PCISTATE_ALLOW_APE_PSPACE_WR;
16751 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16752 pci_state_reg);
16753
16754 tg3_ape_lock_init(tp);
16755 tp->ape_hb_interval =
16756 msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16757 }
16758
16759 /* Set up tp->grc_local_ctrl before calling
16760 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16761 * will bring 5700's external PHY out of reset.
16762 * It is also used as eeprom write protect on LOMs.
16763 */
16764 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16765 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16766 tg3_flag(tp, EEPROM_WRITE_PROT))
16767 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16768 GRC_LCLCTRL_GPIO_OUTPUT1);
16769 /* Unused GPIO3 must be driven as output on 5752 because there
16770 * are no pull-up resistors on unused GPIO pins.
16771 */
16772 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16773 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16774
16775 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16776 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16777 tg3_flag(tp, 57765_CLASS))
16778 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16779
16780 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16782 /* Turn off the debug UART. */
16783 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16784 if (tg3_flag(tp, IS_NIC))
16785 /* Keep VMain power. */
16786 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16787 GRC_LCLCTRL_GPIO_OUTPUT0;
16788 }
16789
16790 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16791 tp->grc_local_ctrl |=
16792 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16793
16794 /* Switch out of Vaux if it is a NIC */
16795 tg3_pwrsrc_switch_to_vmain(tp);
16796
16797 /* Derive initial jumbo mode from MTU assigned in
16798 * ether_setup() via the alloc_etherdev() call
16799 */
16800 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16801 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16802
16803 /* Determine WakeOnLan speed to use. */
16804 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16805 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16806 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16807 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16808 tg3_flag_clear(tp, WOL_SPEED_100MB);
16809 } else {
16810 tg3_flag_set(tp, WOL_SPEED_100MB);
16811 }
16812
16813 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16814 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16815
16816 /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16817 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16818 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16819 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16820 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16821 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16822 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16823 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16824
16825 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16826 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16827 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16828 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16829 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16830
16831 if (tg3_flag(tp, 5705_PLUS) &&
16832 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16833 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16834 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16835 !tg3_flag(tp, 57765_PLUS)) {
16836 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16837 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16838 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16839 tg3_asic_rev(tp) == ASIC_REV_5761) {
16840 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16841 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16842 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16843 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16844 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16845 } else
16846 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16847 }
16848
16849 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16850 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16851 tp->phy_otp = tg3_read_otp_phycfg(tp);
16852 if (tp->phy_otp == 0)
16853 tp->phy_otp = TG3_OTP_DEFAULT;
16854 }
16855
16856 if (tg3_flag(tp, CPMU_PRESENT))
16857 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16858 else
16859 tp->mi_mode = MAC_MI_MODE_BASE;
16860
16861 tp->coalesce_mode = 0;
16862 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16863 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16864 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16865
16866 /* Set these bits to enable statistics workaround. */
16867 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16868 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16869 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16870 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16871 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16872 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16873 }
16874
16875 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16876 tg3_asic_rev(tp) == ASIC_REV_57780)
16877 tg3_flag_set(tp, USE_PHYLIB);
16878
16879 err = tg3_mdio_init(tp);
16880 if (err)
16881 return err;
16882
16883 /* Initialize data/descriptor byte/word swapping. */
16884 val = tr32(GRC_MODE);
16885 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16886 tg3_asic_rev(tp) == ASIC_REV_5762)
16887 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16888 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16889 GRC_MODE_B2HRX_ENABLE |
16890 GRC_MODE_HTX2B_ENABLE |
16891 GRC_MODE_HOST_STACKUP);
16892 else
16893 val &= GRC_MODE_HOST_STACKUP;
16894
16895 tw32(GRC_MODE, val | tp->grc_mode);
16896
16897 tg3_switch_clocks(tp);
16898
16899 /* Clear this out for sanity. */
16900 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16901
16902 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16903 tw32(TG3PCI_REG_BASE_ADDR, 0);
16904
16905 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16906 &pci_state_reg);
16907 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16908 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16909 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16910 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16911 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16912 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16913 void __iomem *sram_base;
16914
16915 /* Write some dummy words into the SRAM status block
16916 * area, see if it reads back correctly. If the return
16917 * value is bad, force enable the PCIX workaround.
16918 */
16919 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16920
16921 writel(0x00000000, sram_base);
16922 writel(0x00000000, sram_base + 4);
16923 writel(0xffffffff, sram_base + 4);
16924 if (readl(sram_base) != 0x00000000)
16925 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16926 }
16927 }
16928
16929 udelay(50);
16930 tg3_nvram_init(tp);
16931
16932 /* If the device has an NVRAM, no need to load patch firmware */
16933 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16934 !tg3_flag(tp, NO_NVRAM))
16935 tp->fw_needed = NULL;
16936
16937 grc_misc_cfg = tr32(GRC_MISC_CFG);
16938 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16939
16940 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16941 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16942 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16943 tg3_flag_set(tp, IS_5788);
16944
16945 if (!tg3_flag(tp, IS_5788) &&
16946 tg3_asic_rev(tp) != ASIC_REV_5700)
16947 tg3_flag_set(tp, TAGGED_STATUS);
16948 if (tg3_flag(tp, TAGGED_STATUS)) {
16949 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16950 HOSTCC_MODE_CLRTICK_TXBD);
16951
16952 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16953 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16954 tp->misc_host_ctrl);
16955 }
16956
16957 /* Preserve the APE MAC_MODE bits */
16958 if (tg3_flag(tp, ENABLE_APE))
16959 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16960 else
16961 tp->mac_mode = 0;
16962
16963 if (tg3_10_100_only_device(tp, ent))
16964 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16965
16966 err = tg3_phy_probe(tp);
16967 if (err) {
16968 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16969 /* ... but do not return immediately ... */
16970 tg3_mdio_fini(tp);
16971 }
16972
16973 tg3_read_vpd(tp);
16974 tg3_read_fw_ver(tp);
16975
16976 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16977 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16978 } else {
16979 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16980 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16981 else
16982 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16983 }
16984
16985 /* 5700 {AX,BX} chips have a broken status block link
16986 * change bit implementation, so we must use the
16987 * status register in those cases.
16988 */
16989 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16990 tg3_flag_set(tp, USE_LINKCHG_REG);
16991 else
16992 tg3_flag_clear(tp, USE_LINKCHG_REG);
16993
16994 /* The led_ctrl is set during tg3_phy_probe; here we might
16995 * have to force the link status polling mechanism based
16996 * upon subsystem IDs.
16997 */
16998 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16999 tg3_asic_rev(tp) == ASIC_REV_5701 &&
17000 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
17001 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
17002 tg3_flag_set(tp, USE_LINKCHG_REG);
17003 }
17004
17005 /* For all SERDES we poll the MAC status register. */
17006 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
17007 tg3_flag_set(tp, POLL_SERDES);
17008 else
17009 tg3_flag_clear(tp, POLL_SERDES);
17010
17011 if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
17012 tg3_flag_set(tp, POLL_CPMU_LINK);
17013
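	/* The 5701 in PCI-X mode cannot DMA rx buffers to 2-byte aligned
	 * addresses, so the usual NET_IP_ALIGN offset has to be dropped
	 * there; on architectures without efficient unaligned loads, the
	 * copy threshold is maxed out below so that every packet is
	 * copied into an aligned skb instead.
	 */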
17014 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
17015 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
17016 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
17017 tg3_flag(tp, PCIX_MODE)) {
17018 tp->rx_offset = NET_SKB_PAD;
17019 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
17020 tp->rx_copy_thresh = ~(u16)0;
17021 #endif
17022 }
17023
17024 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
17025 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
17026 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
17027
17028 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
17029
17030 /* Increment the rx prod index on the rx std ring by at most
17031 * 8 for these chips to workaround hw errata.
17032 */
17033 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
17034 tg3_asic_rev(tp) == ASIC_REV_5752 ||
17035 tg3_asic_rev(tp) == ASIC_REV_5755)
17036 tp->rx_std_max_post = 8;
17037
17038 if (tg3_flag(tp, ASPM_WORKAROUND))
17039 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
17040 PCIE_PWR_MGMT_L1_THRESH_MSK;
17041
17042 return err;
17043 }
17044
17045 static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
17046 {
17047 u32 hi, lo, mac_offset;
17048 int addr_ok = 0;
17049 int err;
17050
17051 if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
17052 return 0;
17053
17054 if (tg3_flag(tp, IS_SSB_CORE)) {
17055 err = ssb_gige_get_macaddr(tp->pdev, addr);
17056 if (!err && is_valid_ether_addr(addr))
17057 return 0;
17058 }
17059
17060 mac_offset = 0x7c;
17061 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
17062 tg3_flag(tp, 5780_CLASS)) {
17063 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
17064 mac_offset = 0xcc;
17065 if (tg3_nvram_lock(tp))
17066 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17067 else
17068 tg3_nvram_unlock(tp);
17069 } else if (tg3_flag(tp, 5717_PLUS)) {
17070 if (tp->pci_fn & 1)
17071 mac_offset = 0xcc;
17072 if (tp->pci_fn > 1)
17073 mac_offset += 0x18c;
17074 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17075 mac_offset = 0x10;
17076
17077 /* First try to get it from MAC address mailbox. */
17078 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
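	/* 0x484b is ASCII "HK", the signature the bootcode leaves in the
	 * upper half of the high mailbox word once it has filled in the
	 * MAC address.
	 */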
17079 if ((hi >> 16) == 0x484b) {
17080 addr[0] = (hi >> 8) & 0xff;
17081 addr[1] = (hi >> 0) & 0xff;
17082
17083 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17084 addr[2] = (lo >> 24) & 0xff;
17085 addr[3] = (lo >> 16) & 0xff;
17086 addr[4] = (lo >> 8) & 0xff;
17087 addr[5] = (lo >> 0) & 0xff;
17088
17089 /* Some old bootcode may report a 0 MAC address in SRAM */
17090 addr_ok = is_valid_ether_addr(addr);
17091 }
17092 if (!addr_ok) {
17093 __be32 be_hi, be_lo;
17094
17095 /* Next, try NVRAM. */
17096 if (!tg3_flag(tp, NO_NVRAM) &&
17097 !tg3_nvram_read_be32(tp, mac_offset + 0, &be_hi) &&
17098 !tg3_nvram_read_be32(tp, mac_offset + 4, &be_lo)) {
17099 memcpy(&addr[0], ((char *)&be_hi) + 2, 2);
17100 memcpy(&addr[2], (char *)&be_lo, sizeof(be_lo));
17101 }
17102 /* Finally just fetch it out of the MAC control regs. */
17103 else {
17104 hi = tr32(MAC_ADDR_0_HIGH);
17105 lo = tr32(MAC_ADDR_0_LOW);
17106
17107 addr[5] = lo & 0xff;
17108 addr[4] = (lo >> 8) & 0xff;
17109 addr[3] = (lo >> 16) & 0xff;
17110 addr[2] = (lo >> 24) & 0xff;
17111 addr[1] = hi & 0xff;
17112 addr[0] = (hi >> 8) & 0xff;
17113 }
17114 }
17115
17116 if (!is_valid_ether_addr(addr))
17117 return -EINVAL;
17118 return 0;
17119 }
17120
17121 #define BOUNDARY_SINGLE_CACHELINE 1
17122 #define BOUNDARY_MULTI_CACHELINE 2
17123
17124 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17125 {
17126 int cacheline_size;
17127 u8 byte;
17128 int goal;
17129
17130 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17131 if (byte == 0)
17132 cacheline_size = 1024;
17133 else
17134 cacheline_size = (int) byte * 4;
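	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so e.g. a raw
	 * value of 16 means a 64-byte cache line; a value of 0 means the
	 * firmware never configured it, and we fall back to 1024 above.
	 */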
17135
17136 /* On 5703 and later chips, the boundary bits have no
17137 * effect.
17138 */
17139 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17140 tg3_asic_rev(tp) != ASIC_REV_5701 &&
17141 !tg3_flag(tp, PCI_EXPRESS))
17142 goto out;
17143
17144 #if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
17145 goal = BOUNDARY_MULTI_CACHELINE;
17146 #else
17147 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17148 goal = BOUNDARY_SINGLE_CACHELINE;
17149 #else
17150 goal = 0;
17151 #endif
17152 #endif
17153
17154 if (tg3_flag(tp, 57765_PLUS)) {
17155 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17156 goto out;
17157 }
17158
17159 if (!goal)
17160 goto out;
17161
17162 /* PCI controllers on most RISC systems tend to disconnect
17163 * when a device tries to burst across a cache-line boundary.
17164 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17165 *
17166 * Unfortunately, for PCI-E there are only limited
17167 * write-side controls for this, and thus for reads
17168 * we will still get the disconnects. We'll also waste
17169 * these PCI cycles for both read and write for chips
17170 * other than 5700 and 5701 which do not implement the
17171 * boundary bits.
17172 */
17173 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17174 switch (cacheline_size) {
17175 case 16:
17176 case 32:
17177 case 64:
17178 case 128:
17179 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17180 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17181 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17182 } else {
17183 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17184 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17185 }
17186 break;
17187
17188 case 256:
17189 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17190 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17191 break;
17192
17193 default:
17194 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17195 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17196 break;
17197 }
17198 } else if (tg3_flag(tp, PCI_EXPRESS)) {
17199 switch (cacheline_size) {
17200 case 16:
17201 case 32:
17202 case 64:
17203 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17204 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17205 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17206 break;
17207 }
17208 fallthrough;
17209 case 128:
17210 default:
17211 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17212 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17213 break;
17214 }
17215 } else {
17216 switch (cacheline_size) {
17217 case 16:
17218 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17219 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17220 DMA_RWCTRL_WRITE_BNDRY_16);
17221 break;
17222 }
17223 fallthrough;
17224 case 32:
17225 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17226 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17227 DMA_RWCTRL_WRITE_BNDRY_32);
17228 break;
17229 }
17230 fallthrough;
17231 case 64:
17232 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17233 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17234 DMA_RWCTRL_WRITE_BNDRY_64);
17235 break;
17236 }
17237 fallthrough;
17238 case 128:
17239 if (goal == BOUNDARY_SINGLE_CACHELINE) {
17240 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17241 DMA_RWCTRL_WRITE_BNDRY_128);
17242 break;
17243 }
17244 fallthrough;
17245 case 256:
17246 val |= (DMA_RWCTRL_READ_BNDRY_256 |
17247 DMA_RWCTRL_WRITE_BNDRY_256);
17248 break;
17249 case 512:
17250 val |= (DMA_RWCTRL_READ_BNDRY_512 |
17251 DMA_RWCTRL_WRITE_BNDRY_512);
17252 break;
17253 case 1024:
17254 default:
17255 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17256 DMA_RWCTRL_WRITE_BNDRY_1024);
17257 break;
17258 }
17259 }
17260
17261 out:
17262 return val;
17263 }
17264
17265 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17266 int size, bool to_device)
17267 {
17268 struct tg3_internal_buffer_desc test_desc;
17269 u32 sram_dma_descs;
17270 int i, ret;
17271
17272 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17273
17274 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17275 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17276 tw32(RDMAC_STATUS, 0);
17277 tw32(WDMAC_STATUS, 0);
17278
17279 tw32(BUFMGR_MODE, 0);
17280 tw32(FTQ_RESET, 0);
17281
17282 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17283 test_desc.addr_lo = buf_dma & 0xffffffff;
17284 test_desc.nic_mbuf = 0x00002100;
17285 test_desc.len = size;
17286
17287 /*
17288 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17289 * the *second* time the tg3 driver was getting loaded after an
17290 * initial scan.
17291 *
17292 * Broadcom tells me:
17293 * ...the DMA engine is connected to the GRC block and a DMA
17294 * reset may affect the GRC block in some unpredictable way...
17295 * The behavior of resets to individual blocks has not been tested.
17296 *
17297 * Broadcom noted the GRC reset will also reset all sub-components.
17298 */
17299 if (to_device) {
17300 test_desc.cqid_sqid = (13 << 8) | 2;
17301
17302 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17303 udelay(40);
17304 } else {
17305 test_desc.cqid_sqid = (16 << 8) | 7;
17306
17307 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17308 udelay(40);
17309 }
17310 test_desc.flags = 0x00000005;
17311
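	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window, then close the window again.
	 */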
17312 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17313 u32 val;
17314
17315 val = *(((u32 *)&test_desc) + i);
17316 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17317 sram_dma_descs + (i * sizeof(u32)));
17318 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17319 }
17320 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17321
17322 if (to_device)
17323 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17324 else
17325 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17326
17327 ret = -ENODEV;
17328 for (i = 0; i < 40; i++) {
17329 u32 val;
17330
17331 if (to_device)
17332 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17333 else
17334 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17335 if ((val & 0xffff) == sram_dma_descs) {
17336 ret = 0;
17337 break;
17338 }
17339
17340 udelay(100);
17341 }
17342
17343 return ret;
17344 }
17345
17346 #define TEST_BUFFER_SIZE 0x2000
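/* 0x2000 bytes == 8 KiB, i.e. 2048 u32 words of test pattern. */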
17347
17348 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17349 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17350 { },
17351 };
17352
17353 static int tg3_test_dma(struct tg3 *tp)
17354 {
17355 dma_addr_t buf_dma;
17356 u32 *buf, saved_dma_rwctrl;
17357 int ret = 0;
17358
17359 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17360 &buf_dma, GFP_KERNEL);
17361 if (!buf) {
17362 ret = -ENOMEM;
17363 goto out_nofree;
17364 }
17365
17366 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17367 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17368
17369 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17370
17371 if (tg3_flag(tp, 57765_PLUS))
17372 goto out;
17373
17374 if (tg3_flag(tp, PCI_EXPRESS)) {
17375 /* DMA read watermark not used on PCIE */
17376 tp->dma_rwctrl |= 0x00180000;
17377 } else if (!tg3_flag(tp, PCIX_MODE)) {
17378 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17379 tg3_asic_rev(tp) == ASIC_REV_5750)
17380 tp->dma_rwctrl |= 0x003f0000;
17381 else
17382 tp->dma_rwctrl |= 0x003f000f;
17383 } else {
17384 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17385 tg3_asic_rev(tp) == ASIC_REV_5704) {
17386 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17387 u32 read_water = 0x7;
17388
17389 /* If the 5704 is behind the EPB bridge, we can
17390 * do the less restrictive ONE_DMA workaround for
17391 * better performance.
17392 */
17393 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17394 tg3_asic_rev(tp) == ASIC_REV_5704)
17395 tp->dma_rwctrl |= 0x8000;
17396 else if (ccval == 0x6 || ccval == 0x7)
17397 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17398
17399 if (tg3_asic_rev(tp) == ASIC_REV_5703)
17400 read_water = 4;
17401 /* Set bit 23 to enable PCIX hw bug fix */
17402 tp->dma_rwctrl |=
17403 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17404 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17405 (1 << 23);
17406 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17407 /* 5780 always in PCIX mode */
17408 tp->dma_rwctrl |= 0x00144000;
17409 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17410 /* 5714 always in PCIX mode */
17411 tp->dma_rwctrl |= 0x00148000;
17412 } else {
17413 tp->dma_rwctrl |= 0x001b000f;
17414 }
17415 }
17416 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17417 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17418
17419 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17420 tg3_asic_rev(tp) == ASIC_REV_5704)
17421 tp->dma_rwctrl &= 0xfffffff0;
17422
17423 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17424 tg3_asic_rev(tp) == ASIC_REV_5701) {
17425 /* Remove this if it causes problems for some boards. */
17426 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17427
17428 /* On 5700/5701 chips, we need to set this bit.
17429 * Otherwise the chip will issue cacheline transactions
17430 * to streamable DMA memory with not all the byte
17431 * enables turned on. This is an error on several
17432 * RISC PCI controllers, in particular sparc64.
17433 *
17434 * On 5703/5704 chips, this bit has been reassigned
17435 * a different meaning. In particular, it is used
17436 * on those chips to enable a PCI-X workaround.
17437 */
17438 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17439 }
17440
17441 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17442
17443
17444 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17445 tg3_asic_rev(tp) != ASIC_REV_5701)
17446 goto out;
17447
17448 /* It is best to perform the DMA test with the maximum write burst size
17449 * to expose the 5700/5701 write DMA bug.
17450 */
17451 saved_dma_rwctrl = tp->dma_rwctrl;
17452 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17453 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17454
17455 while (1) {
17456 u32 *p = buf, i;
17457
17458 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17459 p[i] = i;
17460
17461 /* Send the buffer to the chip. */
17462 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17463 if (ret) {
17464 dev_err(&tp->pdev->dev,
17465 "%s: Buffer write failed. err = %d\n",
17466 __func__, ret);
17467 break;
17468 }
17469
17470 /* Now read it back. */
17471 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17472 if (ret) {
17473 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17474 "err = %d\n", __func__, ret);
17475 break;
17476 }
17477
17478 /* Verify it. */
17479 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17480 if (p[i] == i)
17481 continue;
17482
17483 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17484 DMA_RWCTRL_WRITE_BNDRY_16) {
17485 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17486 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17487 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17488 break;
17489 } else {
17490 dev_err(&tp->pdev->dev,
17491 "%s: Buffer corrupted on read back! "
17492 "(%d != %d)\n", __func__, p[i], i);
17493 ret = -ENODEV;
17494 goto out;
17495 }
17496 }
17497
17498 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17499 /* Success. */
17500 ret = 0;
17501 break;
17502 }
17503 }
17504 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17505 DMA_RWCTRL_WRITE_BNDRY_16) {
17506 /* The DMA test passed without adjusting the DMA boundary;
17507 * now look for chipsets that are known to expose the
17508 * DMA bug without failing the test.
17509 */
17510 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17511 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17512 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17513 } else {
17514 /* Safe to use the calculated DMA boundary. */
17515 tp->dma_rwctrl = saved_dma_rwctrl;
17516 }
17517
17518 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17519 }
17520
17521 out:
17522 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17523 out_nofree:
17524 return ret;
17525 }
17526
17527 static void tg3_init_bufmgr_config(struct tg3 *tp)
17528 {
17529 if (tg3_flag(tp, 57765_PLUS)) {
17530 tp->bufmgr_config.mbuf_read_dma_low_water =
17531 DEFAULT_MB_RDMA_LOW_WATER_5705;
17532 tp->bufmgr_config.mbuf_mac_rx_low_water =
17533 DEFAULT_MB_MACRX_LOW_WATER_57765;
17534 tp->bufmgr_config.mbuf_high_water =
17535 DEFAULT_MB_HIGH_WATER_57765;
17536
17537 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17538 DEFAULT_MB_RDMA_LOW_WATER_5705;
17539 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17540 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17541 tp->bufmgr_config.mbuf_high_water_jumbo =
17542 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17543 } else if (tg3_flag(tp, 5705_PLUS)) {
17544 tp->bufmgr_config.mbuf_read_dma_low_water =
17545 DEFAULT_MB_RDMA_LOW_WATER_5705;
17546 tp->bufmgr_config.mbuf_mac_rx_low_water =
17547 DEFAULT_MB_MACRX_LOW_WATER_5705;
17548 tp->bufmgr_config.mbuf_high_water =
17549 DEFAULT_MB_HIGH_WATER_5705;
17550 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17551 tp->bufmgr_config.mbuf_mac_rx_low_water =
17552 DEFAULT_MB_MACRX_LOW_WATER_5906;
17553 tp->bufmgr_config.mbuf_high_water =
17554 DEFAULT_MB_HIGH_WATER_5906;
17555 }
17556
17557 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17558 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17559 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17560 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17561 tp->bufmgr_config.mbuf_high_water_jumbo =
17562 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17563 } else {
17564 tp->bufmgr_config.mbuf_read_dma_low_water =
17565 DEFAULT_MB_RDMA_LOW_WATER;
17566 tp->bufmgr_config.mbuf_mac_rx_low_water =
17567 DEFAULT_MB_MACRX_LOW_WATER;
17568 tp->bufmgr_config.mbuf_high_water =
17569 DEFAULT_MB_HIGH_WATER;
17570
17571 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17572 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17573 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17574 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17575 tp->bufmgr_config.mbuf_high_water_jumbo =
17576 DEFAULT_MB_HIGH_WATER_JUMBO;
17577 }
17578
17579 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17580 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17581 }
17582
17583 static char *tg3_phy_string(struct tg3 *tp)
17584 {
17585 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17586 case TG3_PHY_ID_BCM5400: return "5400";
17587 case TG3_PHY_ID_BCM5401: return "5401";
17588 case TG3_PHY_ID_BCM5411: return "5411";
17589 case TG3_PHY_ID_BCM5701: return "5701";
17590 case TG3_PHY_ID_BCM5703: return "5703";
17591 case TG3_PHY_ID_BCM5704: return "5704";
17592 case TG3_PHY_ID_BCM5705: return "5705";
17593 case TG3_PHY_ID_BCM5750: return "5750";
17594 case TG3_PHY_ID_BCM5752: return "5752";
17595 case TG3_PHY_ID_BCM5714: return "5714";
17596 case TG3_PHY_ID_BCM5780: return "5780";
17597 case TG3_PHY_ID_BCM5755: return "5755";
17598 case TG3_PHY_ID_BCM5787: return "5787";
17599 case TG3_PHY_ID_BCM5784: return "5784";
17600 case TG3_PHY_ID_BCM5756: return "5722/5756";
17601 case TG3_PHY_ID_BCM5906: return "5906";
17602 case TG3_PHY_ID_BCM5761: return "5761";
17603 case TG3_PHY_ID_BCM5718C: return "5718C";
17604 case TG3_PHY_ID_BCM5718S: return "5718S";
17605 case TG3_PHY_ID_BCM57765: return "57765";
17606 case TG3_PHY_ID_BCM5719C: return "5719C";
17607 case TG3_PHY_ID_BCM5720C: return "5720C";
17608 case TG3_PHY_ID_BCM5762: return "5762C";
17609 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17610 case 0: return "serdes";
17611 default: return "unknown";
17612 }
17613 }
17614
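/* Formats the bus type into @str and returns it; callers are expected
 * to pass a buffer large enough for the longest "PCIX:133MHz:64-bit"
 * style string (tg3_init_one() uses a 40-byte stack buffer).
 */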
17615 static char *tg3_bus_string(struct tg3 *tp, char *str)
17616 {
17617 if (tg3_flag(tp, PCI_EXPRESS)) {
17618 strcpy(str, "PCI Express");
17619 return str;
17620 } else if (tg3_flag(tp, PCIX_MODE)) {
17621 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17622
17623 strcpy(str, "PCIX:");
17624
17625 if ((clock_ctrl == 7) ||
17626 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17627 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17628 strcat(str, "133MHz");
17629 else if (clock_ctrl == 0)
17630 strcat(str, "33MHz");
17631 else if (clock_ctrl == 2)
17632 strcat(str, "50MHz");
17633 else if (clock_ctrl == 4)
17634 strcat(str, "66MHz");
17635 else if (clock_ctrl == 6)
17636 strcat(str, "100MHz");
17637 } else {
17638 strcpy(str, "PCI:");
17639 if (tg3_flag(tp, PCI_HIGH_SPEED))
17640 strcat(str, "66MHz");
17641 else
17642 strcat(str, "33MHz");
17643 }
17644 if (tg3_flag(tp, PCI_32BIT))
17645 strcat(str, ":32-bit");
17646 else
17647 strcat(str, ":64-bit");
17648 return str;
17649 }
17650
17651 static void tg3_init_coal(struct tg3 *tp)
17652 {
17653 struct ethtool_coalesce *ec = &tp->coal;
17654
17655 memset(ec, 0, sizeof(*ec));
17656 ec->cmd = ETHTOOL_GCOALESCE;
17657 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17658 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17659 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17660 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17661 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17662 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17663 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17664 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17665 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17666
17667 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17668 HOSTCC_MODE_CLRTICK_TXBD)) {
17669 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17670 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17671 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17672 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17673 }
17674
17675 if (tg3_flag(tp, 5705_PLUS)) {
17676 ec->rx_coalesce_usecs_irq = 0;
17677 ec->tx_coalesce_usecs_irq = 0;
17678 ec->stats_block_coalesce_usecs = 0;
17679 }
17680 }
17681
17682 static int tg3_init_one(struct pci_dev *pdev,
17683 const struct pci_device_id *ent)
17684 {
17685 struct net_device *dev;
17686 struct tg3 *tp;
17687 int i, err;
17688 u32 sndmbx, rcvmbx, intmbx;
17689 char str[40];
17690 u64 dma_mask, persist_dma_mask;
17691 netdev_features_t features = 0;
17692 u8 addr[ETH_ALEN] __aligned(2);
17693
17694 err = pci_enable_device(pdev);
17695 if (err) {
17696 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17697 return err;
17698 }
17699
17700 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17701 if (err) {
17702 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17703 goto err_out_disable_pdev;
17704 }
17705
17706 pci_set_master(pdev);
17707
17708 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17709 if (!dev) {
17710 err = -ENOMEM;
17711 goto err_out_free_res;
17712 }
17713
17714 SET_NETDEV_DEV(dev, &pdev->dev);
17715
17716 tp = netdev_priv(dev);
17717 tp->pdev = pdev;
17718 tp->dev = dev;
17719 tp->rx_mode = TG3_DEF_RX_MODE;
17720 tp->tx_mode = TG3_DEF_TX_MODE;
17721 tp->irq_sync = 1;
17722 tp->pcierr_recovery = false;
17723
17724 if (tg3_debug > 0)
17725 tp->msg_enable = tg3_debug;
17726 else
17727 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17728
17729 if (pdev_is_ssb_gige_core(pdev)) {
17730 tg3_flag_set(tp, IS_SSB_CORE);
17731 if (ssb_gige_must_flush_posted_writes(pdev))
17732 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17733 if (ssb_gige_one_dma_at_once(pdev))
17734 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17735 if (ssb_gige_have_roboswitch(pdev)) {
17736 tg3_flag_set(tp, USE_PHYLIB);
17737 tg3_flag_set(tp, ROBOSWITCH);
17738 }
17739 if (ssb_gige_is_rgmii(pdev))
17740 tg3_flag_set(tp, RGMII_MODE);
17741 }
17742
17743 /* The word/byte swap controls here control register access byte
17744 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17745 * setting below.
17746 */
17747 tp->misc_host_ctrl =
17748 MISC_HOST_CTRL_MASK_PCI_INT |
17749 MISC_HOST_CTRL_WORD_SWAP |
17750 MISC_HOST_CTRL_INDIR_ACCESS |
17751 MISC_HOST_CTRL_PCISTATE_RW;
17752
17753 /* The NONFRM (non-frame) byte/word swap controls take effect
17754 * on descriptor entries, anything which isn't packet data.
17755 *
17756 * The StrongARM chips on the board (one for tx, one for rx)
17757 * are running in big-endian mode.
17758 */
17759 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17760 GRC_MODE_WSWAP_NONFRM_DATA);
17761 #ifdef __BIG_ENDIAN
17762 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17763 #endif
17764 spin_lock_init(&tp->lock);
17765 spin_lock_init(&tp->indirect_lock);
17766 INIT_WORK(&tp->reset_task, tg3_reset_task);
17767
17768 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17769 if (!tp->regs) {
17770 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17771 err = -ENOMEM;
17772 goto err_out_free_dev;
17773 }
17774
17775 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17776 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17777 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17778 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17779 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17780 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17781 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17782 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17783 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17784 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17785 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17786 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17787 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17788 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17789 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17790 tg3_flag_set(tp, ENABLE_APE);
17791 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17792 if (!tp->aperegs) {
17793 dev_err(&pdev->dev,
17794 "Cannot map APE registers, aborting\n");
17795 err = -ENOMEM;
17796 goto err_out_iounmap;
17797 }
17798 }
17799
17800 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17801 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17802
17803 dev->ethtool_ops = &tg3_ethtool_ops;
17804 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17805 dev->netdev_ops = &tg3_netdev_ops;
17806 dev->irq = pdev->irq;
17807
17808 err = tg3_get_invariants(tp, ent);
17809 if (err) {
17810 dev_err(&pdev->dev,
17811 "Problem fetching invariants of chip, aborting\n");
17812 goto err_out_apeunmap;
17813 }
17814
17815 /* The EPB bridge inside 5714, 5715, and 5780 and any
17816 * device behind the EPB cannot support DMA addresses > 40-bit.
17817 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17818 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17819 * do DMA address check in __tg3_start_xmit().
17820 */
17821 if (tg3_flag(tp, IS_5788))
17822 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17823 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17824 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17825 #ifdef CONFIG_HIGHMEM
17826 dma_mask = DMA_BIT_MASK(64);
17827 #endif
17828 } else
17829 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17830
17831 if (tg3_asic_rev(tp) == ASIC_REV_57766)
17832 persist_dma_mask = DMA_BIT_MASK(31);
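	/* A 31-bit persistent mask keeps coherent (status block and ring)
	 * allocations below 2 GB; the 57766 reportedly cannot DMA such
	 * control data reliably above that boundary.
	 */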
17833
17834 /* Configure DMA attributes. */
17835 if (dma_mask > DMA_BIT_MASK(32)) {
17836 err = dma_set_mask(&pdev->dev, dma_mask);
17837 if (!err) {
17838 features |= NETIF_F_HIGHDMA;
17839 err = dma_set_coherent_mask(&pdev->dev,
17840 persist_dma_mask);
17841 if (err < 0) {
17842 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17843 "DMA for consistent allocations\n");
17844 goto err_out_apeunmap;
17845 }
17846 }
17847 }
17848 if (err || dma_mask == DMA_BIT_MASK(32)) {
17849 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17850 if (err) {
17851 dev_err(&pdev->dev,
17852 "No usable DMA configuration, aborting\n");
17853 goto err_out_apeunmap;
17854 }
17855 }
17856
17857 tg3_init_bufmgr_config(tp);
17858
17859 /* 5700 B0 chips do not support checksumming correctly due
17860 * to hardware bugs.
17861 */
17862 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17863 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17864
17865 if (tg3_flag(tp, 5755_PLUS))
17866 features |= NETIF_F_IPV6_CSUM;
17867 }
17868
17869 /* TSO is on by default on chips that support hardware TSO.
17870 * Firmware TSO on older chips gives lower performance, so it
17871 * is off by default, but can be enabled using ethtool.
17872 */
17873 if ((tg3_flag(tp, HW_TSO_1) ||
17874 tg3_flag(tp, HW_TSO_2) ||
17875 tg3_flag(tp, HW_TSO_3)) &&
17876 (features & NETIF_F_IP_CSUM))
17877 features |= NETIF_F_TSO;
17878 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17879 if (features & NETIF_F_IPV6_CSUM)
17880 features |= NETIF_F_TSO6;
17881 if (tg3_flag(tp, HW_TSO_3) ||
17882 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17883 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17884 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17885 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17886 tg3_asic_rev(tp) == ASIC_REV_57780)
17887 features |= NETIF_F_TSO_ECN;
17888 }
17889
17890 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17891 NETIF_F_HW_VLAN_CTAG_RX;
17892 dev->vlan_features |= features;
17893
17894 /*
17895 * Add loopback capability only for a subset of devices that support
17896 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17897 * loopback for the remaining devices.
17898 */
17899 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17900 !tg3_flag(tp, CPMU_PRESENT))
17901 /* Add the loopback capability */
17902 features |= NETIF_F_LOOPBACK;
17903
17904 dev->hw_features |= features;
17905 dev->priv_flags |= IFF_UNICAST_FLT;
17906
17907 /* MTU range: 60 - 9000 or 1500, depending on hardware */
17908 dev->min_mtu = TG3_MIN_MTU;
17909 dev->max_mtu = TG3_MAX_MTU(tp);
17910
17911 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17912 !tg3_flag(tp, TSO_CAPABLE) &&
17913 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17914 tg3_flag_set(tp, MAX_RXPEND_64);
17915 tp->rx_pending = 63;
17916 }
17917
17918 err = tg3_get_device_address(tp, addr);
17919 if (err) {
17920 dev_err(&pdev->dev,
17921 "Could not obtain valid ethernet address, aborting\n");
17922 goto err_out_apeunmap;
17923 }
17924 eth_hw_addr_set(dev, addr);
17925
17926 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17927 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17928 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17929 for (i = 0; i < tp->irq_max; i++) {
17930 struct tg3_napi *tnapi = &tp->napi[i];
17931
17932 tnapi->tp = tp;
17933 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17934
17935 tnapi->int_mbox = intmbx;
17936 intmbx += 0x8;
17937
17938 tnapi->consmbox = rcvmbx;
17939 tnapi->prodmbox = sndmbx;
17940
17941 if (i)
17942 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17943 else
17944 tnapi->coal_now = HOSTCC_MODE_NOW;
17945
17946 if (!tg3_flag(tp, SUPPORT_MSIX))
17947 break;
17948
17949 /*
17950 * If we support MSIX, we'll be using RSS. If we're using
17951 * RSS, the first vector only handles link interrupts and the
17952 * remaining vectors handle rx and tx interrupts. Reuse the
17953 * mailbox values for the next iteration. The values we set up
17954 * above are still useful for the single vectored mode.
17955 */
17956 if (!i)
17957 continue;
17958
17959 rcvmbx += 0x8;
17960
17961 if (sndmbx & 0x4)
17962 sndmbx -= 0x4;
17963 else
17964 sndmbx += 0xc;
17965 }
17966
17967 /*
17968 * Reset the chip in case a UNDI or EFI driver did not shut down
17969 * DMA cleanly. The self test below will enable the WDMAC, and we
17970 * would then see (spurious) pending DMA on the PCI bus.
17971 */
17972 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17973 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17974 tg3_full_lock(tp, 0);
17975 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17976 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17977 tg3_full_unlock(tp);
17978 }
17979
17980 err = tg3_test_dma(tp);
17981 if (err) {
17982 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17983 goto err_out_apeunmap;
17984 }
17985
17986 tg3_init_coal(tp);
17987
17988 pci_set_drvdata(pdev, dev);
17989
17990 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17991 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17992 tg3_asic_rev(tp) == ASIC_REV_5762)
17993 tg3_flag_set(tp, PTP_CAPABLE);
17994
17995 tg3_timer_init(tp);
17996
17997 tg3_carrier_off(tp);
17998
17999 err = register_netdev(dev);
18000 if (err) {
18001 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
18002 goto err_out_apeunmap;
18003 }
18004
18005 if (tg3_flag(tp, PTP_CAPABLE)) {
18006 tg3_ptp_init(tp);
18007 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
18008 &tp->pdev->dev);
18009 if (IS_ERR(tp->ptp_clock))
18010 tp->ptp_clock = NULL;
18011 }
18012
18013 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
18014 tp->board_part_number,
18015 tg3_chip_rev_id(tp),
18016 tg3_bus_string(tp, str),
18017 dev->dev_addr);
18018
18019 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
18020 char *ethtype;
18021
18022 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
18023 ethtype = "10/100Base-TX";
18024 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
18025 ethtype = "1000Base-SX";
18026 else
18027 ethtype = "10/100/1000Base-T";
18028
18029 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
18030 "(WireSpeed[%d], EEE[%d])\n",
18031 tg3_phy_string(tp), ethtype,
18032 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
18033 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
18034 }
18035
18036 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
18037 (dev->features & NETIF_F_RXCSUM) != 0,
18038 tg3_flag(tp, USE_LINKCHG_REG) != 0,
18039 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
18040 tg3_flag(tp, ENABLE_ASF) != 0,
18041 tg3_flag(tp, TSO_CAPABLE) != 0);
18042 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
18043 tp->dma_rwctrl,
18044 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
18045 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
18046
18047 pci_save_state(pdev);
18048
18049 return 0;
18050
18051 err_out_apeunmap:
18052 if (tp->aperegs) {
18053 iounmap(tp->aperegs);
18054 tp->aperegs = NULL;
18055 }
18056
18057 err_out_iounmap:
18058 if (tp->regs) {
18059 iounmap(tp->regs);
18060 tp->regs = NULL;
18061 }
18062
18063 err_out_free_dev:
18064 free_netdev(dev);
18065
18066 err_out_free_res:
18067 pci_release_regions(pdev);
18068
18069 err_out_disable_pdev:
18070 if (pci_is_enabled(pdev))
18071 pci_disable_device(pdev);
18072 return err;
18073 }
18074
18075 static void tg3_remove_one(struct pci_dev *pdev)
18076 {
18077 struct net_device *dev = pci_get_drvdata(pdev);
18078
18079 if (dev) {
18080 struct tg3 *tp = netdev_priv(dev);
18081
18082 tg3_ptp_fini(tp);
18083
18084 release_firmware(tp->fw);
18085
18086 tg3_reset_task_cancel(tp);
18087
18088 if (tg3_flag(tp, USE_PHYLIB)) {
18089 tg3_phy_fini(tp);
18090 tg3_mdio_fini(tp);
18091 }
18092
18093 unregister_netdev(dev);
18094 if (tp->aperegs) {
18095 iounmap(tp->aperegs);
18096 tp->aperegs = NULL;
18097 }
18098 if (tp->regs) {
18099 iounmap(tp->regs);
18100 tp->regs = NULL;
18101 }
18102 free_netdev(dev);
18103 pci_release_regions(pdev);
18104 pci_disable_device(pdev);
18105 }
18106 }
18107
18108 #ifdef CONFIG_PM_SLEEP
18109 static int tg3_suspend(struct device *device)
18110 {
18111 struct net_device *dev = dev_get_drvdata(device);
18112 struct tg3 *tp = netdev_priv(dev);
18113
18114 rtnl_lock();
18115
18116 if (!netif_running(dev))
18117 goto unlock;
18118
18119 tg3_reset_task_cancel(tp);
18120 tg3_phy_stop(tp);
18121 tg3_netif_stop(tp);
18122
18123 tg3_timer_stop(tp);
18124
18125 tg3_full_lock(tp, 1);
18126 tg3_disable_ints(tp);
18127 tg3_full_unlock(tp);
18128
18129 netif_device_detach(dev);
18130
18131 tg3_full_lock(tp, 0);
18132 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18133 tg3_flag_clear(tp, INIT_COMPLETE);
18134 tg3_full_unlock(tp);
18135
18136 tg3_power_down_prepare(tp);
18137
18138 unlock:
18139 rtnl_unlock();
18140 return 0;
18141 }
18142
18143 static int tg3_resume(struct device *device)
18144 {
18145 struct net_device *dev = dev_get_drvdata(device);
18146 struct tg3 *tp = netdev_priv(dev);
18147 int err = 0;
18148
18149 rtnl_lock();
18150
18151 if (!netif_running(dev))
18152 goto unlock;
18153
18154 netif_device_attach(dev);
18155
18156 netdev_lock(dev);
18157 tg3_full_lock(tp, 0);
18158
18159 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18160
18161 tg3_flag_set(tp, INIT_COMPLETE);
18162 err = tg3_restart_hw(tp,
18163 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18164 if (err)
18165 goto out;
18166
18167 tg3_timer_start(tp);
18168
18169 tg3_netif_start(tp);
18170
18171 out:
18172 tg3_full_unlock(tp);
18173 netdev_unlock(dev);
18174
18175 if (!err)
18176 tg3_phy_start(tp);
18177
18178 unlock:
18179 rtnl_unlock();
18180 return err;
18181 }
18182 #endif /* CONFIG_PM_SLEEP */
18183
18184 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18185
18186 /* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
18187 * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
18188 * be, powered down.
18189 */
18190 static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
18191 {
18192 .matches = {
18193 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18194 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
18195 },
18196 },
18197 {
18198 .matches = {
18199 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18200 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
18201 },
18202 },
18203 {
18204 .matches = {
18205 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18206 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
18207 },
18208 },
18209 {
18210 .matches = {
18211 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18212 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
18213 },
18214 },
18215 {
18216 .matches = {
18217 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18218 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
18219 },
18220 },
18221 {
18222 .matches = {
18223 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
18224 DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
18225 },
18226 },
18227 {}
18228 };
18229
18230 static void tg3_shutdown(struct pci_dev *pdev)
18231 {
18232 struct net_device *dev = pci_get_drvdata(pdev);
18233 struct tg3 *tp = netdev_priv(dev);
18234
18235 tg3_reset_task_cancel(tp);
18236
18237 rtnl_lock();
18238
18239 netif_device_detach(dev);
18240
18241 if (netif_running(dev))
18242 dev_close(dev);
18243
18244 if (system_state == SYSTEM_POWER_OFF)
18245 tg3_power_down(tp);
18246 else if (system_state == SYSTEM_RESTART &&
18247 dmi_first_match(tg3_restart_aer_quirk_table) &&
18248 pdev->current_state != PCI_D3cold &&
18249 pdev->current_state != PCI_UNKNOWN) {
18250 /* Disable PCIe AER on the tg3 to avoid a fatal
18251 * error during this system restart.
18252 */
18253 pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
18254 PCI_EXP_DEVCTL_CERE |
18255 PCI_EXP_DEVCTL_NFERE |
18256 PCI_EXP_DEVCTL_FERE |
18257 PCI_EXP_DEVCTL_URRE);
18258 }
18259
18260 rtnl_unlock();
18261
18262 pci_disable_device(pdev);
18263 }
18264
18265 /**
18266 * tg3_io_error_detected - called when PCI error is detected
18267 * @pdev: Pointer to PCI device
18268 * @state: The current pci connection state
18269 *
18270 * This function is called after a PCI bus error affecting
18271 * this device has been detected.
18272 */
18273 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18274 pci_channel_state_t state)
18275 {
18276 struct net_device *netdev = pci_get_drvdata(pdev);
18277 struct tg3 *tp = netdev_priv(netdev);
18278 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18279
18280 netdev_info(netdev, "PCI I/O error detected\n");
18281
18282 /* Want to make sure that the reset task doesn't run */
18283 tg3_reset_task_cancel(tp);
18284
18285 rtnl_lock();
18286
18287 /* Could be second call or maybe we don't have netdev yet */
18288 if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18289 goto done;
18290
18291 /* We needn't recover from permanent error */
18292 if (state == pci_channel_io_frozen)
18293 tp->pcierr_recovery = true;
18294
18295 tg3_phy_stop(tp);
18296
18297 tg3_netif_stop(tp);
18298
18299 tg3_timer_stop(tp);
18300
18301 netif_device_detach(netdev);
18302
18303 /* Clean up software state, even if MMIO is blocked */
18304 tg3_full_lock(tp, 0);
18305 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18306 tg3_full_unlock(tp);
18307
18308 done:
18309 if (state == pci_channel_io_perm_failure) {
18310 if (netdev) {
18311 netdev_lock(netdev);
18312 tg3_napi_enable(tp);
18313 netdev_unlock(netdev);
18314 dev_close(netdev);
18315 }
18316 err = PCI_ERS_RESULT_DISCONNECT;
18317 } else {
18318 pci_disable_device(pdev);
18319 }
18320
18321 rtnl_unlock();
18322
18323 return err;
18324 }
18325
18326 /**
18327 * tg3_io_slot_reset - called after the pci bus has been reset.
18328 * @pdev: Pointer to PCI device
18329 *
18330 * Restart the card from scratch, as if from a cold-boot.
18331 * At this point, the card has experienced a hard reset,
18332 * followed by fixups by BIOS, and has its config space
18333 * set up identically to what it was at cold boot.
18334 */
18335 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18336 {
18337 struct net_device *netdev = pci_get_drvdata(pdev);
18338 struct tg3 *tp = netdev_priv(netdev);
18339 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18340 int err;
18341
18342 rtnl_lock();
18343
18344 if (pci_enable_device(pdev)) {
18345 dev_err(&pdev->dev,
18346 "Cannot re-enable PCI device after reset.\n");
18347 goto done;
18348 }
18349
18350 pci_set_master(pdev);
18351 pci_restore_state(pdev);
18352 pci_save_state(pdev);
18353
18354 if (!netdev || !netif_running(netdev)) {
18355 rc = PCI_ERS_RESULT_RECOVERED;
18356 goto done;
18357 }
18358
18359 err = tg3_power_up(tp);
18360 if (err)
18361 goto done;
18362
18363 rc = PCI_ERS_RESULT_RECOVERED;
18364
18365 done:
18366 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18367 netdev_lock(netdev);
18368 tg3_napi_enable(tp);
18369 netdev_unlock(netdev);
18370 dev_close(netdev);
18371 }
18372 rtnl_unlock();
18373
18374 return rc;
18375 }
18376
18377 /**
18378 * tg3_io_resume - called when traffic can start flowing again.
18379 * @pdev: Pointer to PCI device
18380 *
18381 * This callback is called when the error recovery driver tells
18382 * us that it's OK to resume normal operation.
18383 */
18384 static void tg3_io_resume(struct pci_dev *pdev)
18385 {
18386 struct net_device *netdev = pci_get_drvdata(pdev);
18387 struct tg3 *tp = netdev_priv(netdev);
18388 int err;
18389
18390 rtnl_lock();
18391
18392 if (!netdev || !netif_running(netdev))
18393 goto done;
18394
18395 netdev_lock(netdev);
18396 tg3_full_lock(tp, 0);
18397 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18398 tg3_flag_set(tp, INIT_COMPLETE);
18399 err = tg3_restart_hw(tp, true);
18400 if (err) {
18401 tg3_full_unlock(tp);
18402 netdev_unlock(netdev);
18403 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18404 goto done;
18405 }
18406
18407 netif_device_attach(netdev);
18408
18409 tg3_timer_start(tp);
18410
18411 tg3_netif_start(tp);
18412
18413 tg3_full_unlock(tp);
18414 netdev_unlock(netdev);
18415
18416 tg3_phy_start(tp);
18417
18418 done:
18419 tp->pcierr_recovery = false;
18420 rtnl_unlock();
18421 }
18422
18423 static const struct pci_error_handlers tg3_err_handler = {
18424 .error_detected = tg3_io_error_detected,
18425 .slot_reset = tg3_io_slot_reset,
18426 .resume = tg3_io_resume
18427 };
18428
18429 static struct pci_driver tg3_driver = {
18430 .name = DRV_MODULE_NAME,
18431 .id_table = tg3_pci_tbl,
18432 .probe = tg3_init_one,
18433 .remove = tg3_remove_one,
18434 .err_handler = &tg3_err_handler,
18435 .driver.pm = &tg3_pm_ops,
18436 .shutdown = tg3_shutdown,
18437 };
18438
18439 module_pci_driver(tg3_driver);
18440